# NOTE(review): this file reads as a unified-diff fragment ('+'/'-' prefixed
# lines over unprefixed context) of Apple Libc's BSD-make build files; several
# .if/.endif pairs are split across missing hunks (e.g. the orphan .endif
# below), so it is not parseable as a standalone Makefile.
# Pull in the CoreOS macros: Commands.mk defines command paths (${MKDIR},
# ${PERL}, ${PWD}, ${ARCH}, ...), Variables.mk defines standard directories.
+.include <CoreOS/Standard/Commands.mk>
+.include <CoreOS/Standard/Variables.mk>
+
ALLARCHS = arm i386 ppc ppc64 x86_64 # installsrc doesn't set RC_ARCHS
# ${PWD} now expands to the pwd(1) command path from Commands.mk; TOP is the
# current directory it prints (replaces the old PWD shell-out).
-PWD != pwd
+TOP != ${PWD}
.ifdef DSTROOT
DESTDIR = $(DSTROOT)
.else
.endif
.endif
# Default the build roots relative to the source directory when B&I (XBS)
# hasn't supplied them.
.ifndef OBJROOT
-OBJROOT = $(PWD)/OBJROOT
+OBJROOT = $(TOP)/OBJROOT
.endif
.ifndef SRCROOT
-SRCROOT = $(PWD)
+SRCROOT = $(TOP)
.endif
.ifndef SYMROOT
-SYMROOT = $(PWD)/SYMROOT
+SYMROOT = $(TOP)/SYMROOT
.endif
# ${ARCH} is the arch(1) command; MYARCH is the host's native architecture,
# used as the fallback when RC_ARCHS is unset.
-ARCH != arch
+MYARCH != ${ARCH}
.ifndef RC_ARCHS
-RC_ARCHS = $(ARCH)
+RC_ARCHS = $(MYARCH)
RC_$(RC_ARCHS) = 1
.endif
# First word of RC_ARCHS; used later for the (arch-neutral) man-page build.
+FIRST_ARCH != ${PERL} -e 'print $$ARGV[0]' ${RC_ARCHS}
.ifndef RC_NONARCH_CFLAGS
RC_NONARCH_CFLAGS = -pipe
.endif
.else
LIBSYS = $(SDKROOT)/usr/local/lib/system
.endif
# Parallel job count: 1.5x the CPU count, with a floor of 2.
-NJOBS != perl -e '$$n = `/usr/sbin/sysctl -n hw.ncpu`; printf "%d\n", $$n < 2 ? 2 : ($$n * 1.5)'
-BSDMAKE = bsdmake -f Makefile
-#BSDMAKEJ = $(BSDMAKE) -j $(NJOBS) -P
-BSDMAKEJ = $(BSDMAKE) -j $(NJOBS)
+NJOBS != ${PERL} -e '$$n = `$(SYSCTL) -n hw.ncpu`; printf "%d\n", $$n < 2 ? 2 : ($$n * 1.5)'
# MYBSDMAKE wraps the Commands.mk ${BSDMAKE} (the old local BSDMAKE variable
# is gone); -P is added only for DEBUG builds.
+.ifdef DEBUG
+MYBSDMAKE = $(BSDMAKE) -f Makefile -P
+.else
+MYBSDMAKE = $(BSDMAKE) -f Makefile
+.endif
+MYBSDMAKEJ = $(MYBSDMAKE) -j $(NJOBS)
# Set the DONT-BUILD-arch-form variable to non-empty to turn off building
#DONT-BUILD-x86_64-static = 1
static = static
# Map RC_ARCHS to MACHINE_ARCH
# All armv* sub-architectures collapse to MACHINE_ARCH "arm".
-.for A in $(RC_ARCHS) $(ARCH) # {
+.for A in $(RC_ARCHS) $(MYARCH) # {
MACHINE_ARCH-$(A) = $(A:C/^armv.*/arm/)
.endfor # RC_ARCHS }
# Create each requested root directory on demand.
.for R in $(ROOTS) # {
roots: $($(R))
$($(R)):
-	mkdir -p '$($(R))'
+	${MKDIR} '$($(R))'
.endfor # ROOTS }
# These are the non B&I defaults
.endif
# Patch the System framework private headers into a local copy, then point
# the PrivateHeaders symlink at the patched version.
$(FRAMEWORKS):
	$(SRCROOT)/patchHeaders $(FRAMEWORKPATH)/$(PRIVATEHEADERPATH) $(FRAMEWORKS)/$(PRIVATEHEADERPATH:H)
-	ln -fs $(VERSIONSB)/PrivateHeaders $(FRAMEWORKS)/$(SYSTEMFRAMEWORK)/PrivateHeaders
+	${LN} -fs $(VERSIONSB)/PrivateHeaders $(FRAMEWORKS)/$(SYSTEMFRAMEWORK)/PrivateHeaders
AUTOPATCHED = $(SRCROOT)/.autopatched
PARTIAL = -partial
# Per-arch, per-form build: run the inner bsdmake in an arch-specific obj
# directory unless DONT-BUILD-<arch>-<form> suppresses that combination.
.if empty(DONT-BUILD-$(A)-$(F)) # {
ARCHS-$(F) += $(A)
build-$(A)-$(F):
-	mkdir -p $(OBJROOT)/obj.$(A) && \
+	${MKDIR} $(OBJROOT)/obj.$(A) && \
	MAKEOBJDIR="$(OBJROOT)/obj.$(A)" MACHINE_ARCH=$(MACHINE_ARCH-$(A)) CCARCH=$(A) \
	DSTROOT=$(DSTROOT) OBJROOT=$(OBJROOT) SYMROOT=$(SYMROOT) \
	RC_NONARCH_CFLAGS="$(RC_NONARCH_CFLAGS)" MAKEFLAGS="" \
	OBJSUFFIX="$(OBJSUFFIX-$(F))" \
-	$(BSDMAKEJ) libc$(SUFFIX-$(F)).a
+	$(MYBSDMAKEJ) libc$(SUFFIX-$(F)).a
.else # } {
build-$(A)-$(F):
	@echo Not building libc$(PSUFFIX-$(F)).a for $(A)
.endif # }
.endfor # RC_ARCHS }
-NARCHS-$(F) != echo $(ARCHS-$(F)) | wc -w
+NARCHS-$(F) != ${ECHO} $(ARCHS-$(F)) | ${WC} -w
build-$(F): $(FRAMEWORKS) $(AUTOPATCHED)
.for A in $(RC_ARCHS) # {
build-$(F):
	@echo No libc$(PSUFFIX-$(F)).a built
.else # } {
# Pre-compute the lipo -arch arguments for all built thin archives; the
# dynamic form additionally fattens the dyld-override archive libc-dyld.a.
-LIPOARGS-$(F) != perl -e 'printf "%s\n", join(" ", map(qq(-arch $$_ \"$(OBJROOT)/obj.$$_/libc$(SUFFIX-$(F)).a\"), qw($(ARCHS-$(F)))))'
+LIPOARGS-$(F) != ${PERL} -e 'printf "%s\n", join(" ", map(qq(-arch $$_ \"$(OBJROOT)/obj.$$_/libc$(SUFFIX-$(F)).a\"), qw($(ARCHS-$(F)))))'
+.if $(dynamic) == $(F) # {
+LIPODYLDARGS-$(F) != ${PERL} -e 'printf "%s\n", join(" ", map(qq(-arch $$_ \"$(OBJROOT)/obj.$$_/libc-dyld.a\"), qw($(ARCHS-$(F)))))'
+.endif # }
# Single arch: plain copy into SYMROOT; multiple archs: lipo a fat archive.
build-$(F):
.if $(NARCHS-$(F)) == 1 # {
-	cp -p "$(OBJROOT)/obj.$(RC_ARCHS)/libc$(SUFFIX-$(F)).a" "$(SYMROOT)/libc$(PSUFFIX-$(F)).a"
+	${CP} "$(OBJROOT)/obj.$(RC_ARCHS)/libc$(SUFFIX-$(F)).a" "$(SYMROOT)/libc$(PSUFFIX-$(F)).a"
+.if $(dynamic) == $(F) # {
+	${CP} "$(OBJROOT)/obj.$(RC_ARCHS)/libc-dyld.a" "$(SYMROOT)/libc-dyld.a"
+.endif # }
.else # } {
-	lipo -create $(LIPOARGS-$(F)) -output "$(SYMROOT)/libc$(PSUFFIX-$(F)).a"
+	${LIPO} -create $(LIPOARGS-$(F)) -output "$(SYMROOT)/libc$(PSUFFIX-$(F)).a"
+.if $(dynamic) == $(F) # {
+	${LIPO} -create $(LIPODYLDARGS-$(F)) -output "$(SYMROOT)/libc-dyld.a"
+.endif # }
.endif # }
.endif # }
.endfor # FORMS }
# Run the autopatch pass once per supported arch (installsrc doesn't set
# RC_ARCHS, hence ALLARCHS), then stamp $(AUTOPATCHED).
# NOTE(review): bare `touch` below bypasses the Commands.mk macros used for
# every other command in this hunk — confirm that's intentional.
$(AUTOPATCHED):
.for A in $(ALLARCHS) # {
	MACHINE_ARCH=$(A) SRCROOT="$(SRCROOT)" \
-	$(BSDMAKE) -C "$(SRCROOT)" autopatch
+	$(MYBSDMAKE) -C "$(SRCROOT)" autopatch
.endfor # ALLARCHS # }
	touch $(AUTOPATCHED)
# pax -p p preserves file modes while copying the tree into SRCROOT.
copysrc:
-	pax -rw -p p . "$(SRCROOT)"
+	${PAX} -rw -p p . "$(SRCROOT)"
installsrc: copysrc $(AUTOPATCHED)
# Machine-independent headers once, then machine-dependent headers per arch.
installhdrs-real:
	MAKEOBJDIR="$(OBJROOT)" DESTDIR="$(DSTROOT)" MAKEFLAGS="" \
	DSTROOT=$(DSTROOT) OBJROOT=$(OBJROOT) SYMROOT=$(SYMROOT) \
-	$(BSDMAKEJ) installhdrs
+	$(MYBSDMAKEJ) installhdrs
.for A in $(RC_ARCHS) # {
-	mkdir -p "$(OBJROOT)/obj.$(A)" && \
+	${MKDIR} "$(OBJROOT)/obj.$(A)" && \
	MAKEOBJDIR="$(OBJROOT)/obj.$(A)" MACHINE_ARCH=$(MACHINE_ARCH-$(A)) CCARCH=$(A) \
	DSTROOT=$(DSTROOT) OBJROOT=$(OBJROOT) SYMROOT=$(SYMROOT) \
	MAKEFLAGS="" RC_NONARCH_CFLAGS="$(RC_NONARCH_CFLAGS)" \
-	$(BSDMAKEJ) installhdrs-md
+	$(MYBSDMAKEJ) installhdrs-md
.endfor # RC_ARCHS # }
# Per-form install: copy the archive into /usr/local/lib/system and ranlib
# it; the dynamic form also installs the dyld-override archive libc-dyld.a
# and runs the per-arch copyfiles pass.
.for F in $(FORMS) # {
BI-install-$(F): build-$(F)
-	mkdir -p $(DSTROOT)/usr/local/lib/system
+	${MKDIR} $(DSTROOT)/usr/local/lib/system
	if [ -f "$(SYMROOT)/libc$(PSUFFIX-$(F)).a" ]; then \
-	echo "Installing libc$(PSUFFIX-$(F)).a" && \
-	install -c -m 444 "$(SYMROOT)/libc$(PSUFFIX-$(F)).a" \
+	${ECHO} "Installing libc$(PSUFFIX-$(F)).a" && \
+	${INSTALL} -m 444 "$(SYMROOT)/libc$(PSUFFIX-$(F)).a" \
	$(DSTROOT)/usr/local/lib/system && \
-	ranlib "$(DSTROOT)/usr/local/lib/system/libc$(PSUFFIX-$(F)).a" || exit 1; \
+	${RANLIB} "$(DSTROOT)/usr/local/lib/system/libc$(PSUFFIX-$(F)).a" || exit 1; \
	fi
.if $(dynamic) == $(F) # {
+	if [ -f "$(SYMROOT)/libc-dyld.a" ]; then \
+	${ECHO} "Installing libc-dyld.a" && \
+	${INSTALL} -m 444 "$(SYMROOT)/libc-dyld.a" \
+	$(DSTROOT)/usr/local/lib/system && \
+	${RANLIB} "$(DSTROOT)/usr/local/lib/system/libc-dyld.a" || exit 1; \
+	fi
# NOTE(review): in the recipe below, the DSTROOT/OBJROOT/SYMROOT assignment
# line appears twice in a row — looks like a merge artifact; one copy is
# harmless but should be dropped.
.for A in $(RC_ARCHS) # {
	MAKEOBJDIR="$(OBJROOT)/obj.$(A)" MACHINE_ARCH=$(MACHINE_ARCH-$(A)) CCARCH=$(A) \
	DSTROOT=$(DSTROOT) OBJROOT=$(OBJROOT) SYMROOT=$(SYMROOT) \
	DSTROOT=$(DSTROOT) OBJROOT=$(OBJROOT) SYMROOT=$(SYMROOT) \
	MAKEFLAGS="" RC_NONARCH_CFLAGS="$(RC_NONARCH_CFLAGS)" \
-	$(BSDMAKE) copyfiles
+	$(MYBSDMAKE) copyfiles
.endfor # RC_ARCHS # }
.endif # }
.endfor # FORMS }
# Don't use -j here; it may try to make links before the files are copied
# MANARGS: when not running as root, install man pages with plain install(1)
# (no ownership changes).
-MANARGS != test `id -u` -eq 0 || echo MINSTALL=/usr/bin/install
+MANARGS != ${TEST} `id -u` -eq 0 || ${ECHO} MINSTALL=/usr/bin/install
+# Variables.mk defines MANDIR=${SHAREDIR}/man, but bsd.man.mk expects that
+# MANDIR=${SHAREDIR}/man/man, so we override.
+MANARGS += MANDIR=${SHAREDIR}/man/man
# Man pages are built once, using the first requested arch (FIRST_ARCH)
# rather than the host arch.
install-man:
-	mkdir -p $(DSTROOT)/usr/share/man/man2
-	mkdir -p $(DSTROOT)/usr/share/man/man3
-	mkdir -p $(DSTROOT)/usr/share/man/man4
-	mkdir -p $(DSTROOT)/usr/share/man/man5
-	mkdir -p $(DSTROOT)/usr/share/man/man7
+	${MKDIR} $(DSTROOT)/usr/share/man/man2
+	${MKDIR} $(DSTROOT)/usr/share/man/man3
+	${MKDIR} $(DSTROOT)/usr/share/man/man4
+	${MKDIR} $(DSTROOT)/usr/share/man/man5
+	${MKDIR} $(DSTROOT)/usr/share/man/man7
	MAKEOBJDIR="$(OBJROOT)" DESTDIR="$(DSTROOT)" \
	DSTROOT='$(DSTROOT)' OBJROOT='$(OBJROOT)' SYMROOT='$(SYMROOT)' \
-	MACHINE_ARCH="$(MACHINE_ARCH-$(ARCH))" CCARCH=$(ARCH) MAKEFLAGS="" \
+	MACHINE_ARCH="$(MACHINE_ARCH-$(FIRST_ARCH))" CCARCH=$(FIRST_ARCH) MAKEFLAGS="" \
	RC_NONARCH_CFLAGS="$(RC_NONARCH_CFLAGS)" \
-	$(BSDMAKE) all-man maninstall $(MANARGS)
+	$(MYBSDMAKE) all-man maninstall $(MANARGS)
install-all: build install-man
.for F in $(FORMS) # {
clean:
.for F in $(FORMS) # {
-	rm -f $(SYMROOT)/libc$(PSUFFIX-$(F)).a
+	${RM} $(SYMROOT)/libc$(PSUFFIX-$(F)).a
.endfor # FORMS }
# NOTE(review): ${RMDIR} below replaces `rm -rf` — confirm Commands.mk
# defines RMDIR as a recursive forced remove, not rmdir(1), or the obj
# directories won't be cleaned.
.for A in $(RC_ARCHS) # {
-	rm -rf $(OBJROOT)/obj.$(A)
+	${RMDIR} $(OBJROOT)/obj.$(A)
.endfor # RC_ARCHS # }
+# Remove any NEXT_ROOT argument
+override MAKEOVERRIDES := $(filter-out NEXT_ROOT=%,$(MAKEOVERRIDES))
+override MAKEFILEPATH := $(subst $(NEXT_ROOT),,$(MAKEFILEPATH))
+unexport NEXT_ROOT
+
+include $(MAKEFILEPATH)/CoreOS/Standard/Commands.make
+
# NOTE(review): `override`, `$(filter-out ...)` and `unexport` above are GNU
# make syntax — this hunk belongs to the top-level GNU-make wrapper Makefile,
# not to the bsdmake files around it; verify it landed in the right file.
# The wrapper simply forwards the default goal and any named goal to bsdmake.
all:
-	@bsdmake
+	@$(BSDMAKE)
.DEFAULT:
-	@bsdmake $@
+	@$(BSDMAKE) $@
LIB=c
SHLIB_MAJOR= 1
SHLIB_MINOR= 0
# The local "MACHINE_ARCH == unknown" fallback is replaced by including the
# CoreOS command/variable macros.
-.if (${MACHINE_ARCH} == unknown)
-MACHINE_ARCH != /usr/bin/arch
-.endif
-.if !empty $(MACHINE_ARCH:M*64)
+
+.include <CoreOS/Standard/Commands.mk>
+.include <CoreOS/Standard/Variables.mk>
+
# LP64 is set for any 64-bit MACHINE_ARCH (matches *64: ppc64, x86_64).
+.if !empty(MACHINE_ARCH:M*64)
LP64 = 1
.endif
# RC_TARGET_CONFIG may not be set, so default to MacOSX (which is good enough
RC_TARGET_CONFIG = MacOSX
.endif
-#use default compiler
-#CC = gcc-4.0
-GCC_VERSION != cc -dumpversion | sed -e 's/^\([^.]*\.[^.]*\).*/\1/'
-GCC_42 != perl -e "print ($(GCC_VERSION) >= 4.2 ? 'YES' : 'NO')"
+# Use default compiler, so comment out OTHERCC
+#OTHERCC = gcc-4.0
+# HOSTCC is the compiler on the local host, so we need to unset any SDKROOT
+# before calling PATH_OF_COMMAND
# NOTE(review): `export -n` is a bash-ism — confirm the shell used for `!=`
# assignments is bash, otherwise SDKROOT leaks into the lookup.
+.ifdef OTHERCC
+MYCC != ${PATH_OF_COMMAND} ${OTHERCC}
+HOSTCC != export -n SDKROOT && ${PATH_OF_COMMAND} ${OTHERCC}
+.else
+MYCC = ${CC}
+HOSTCC != export -n SDKROOT && ${PATH_OF_COMMAND} cc
+.endif
# GCC_VERSION is truncated to major.minor; GCC_42 is YES for gcc >= 4.2.
+GCC_VERSION != ${MYCC} -dumpversion | ${SED} -e 's/^\([^.]*\.[^.]*\).*/\1/'
+GCC_42 != ${PERL} -e "print ($(GCC_VERSION) >= 4.2 ? 'YES' : 'NO')"
.ifdef ALTLIBCHEADERS
INCLUDEDIR = ${ALTLIBCHEADERS}
LIBCFLAGS += ${PRIVINC}
SYMROOTINC = ${SYMROOT}/include
# This hunk adds -fno-builtin and -D_FORTIFY_SOURCE=0 to the libc build —
# presumably to keep compiler builtins/fortify wrappers from interfering
# while libc itself is being compiled; TODO confirm the rationale.
-CFLAGS = -g -arch ${CCARCH} ${RC_NONARCH_CFLAGS} -std=gnu99 -fno-common -Wmost
-CFLAGS += -D__LIBC__ -D__DARWIN_UNIX03=1 -D__DARWIN_64_BIT_INO_T=1 -D__DARWIN_NON_CANCELABLE=1 -D__DARWIN_VERS_1050=1
+CFLAGS = -g -arch ${CCARCH} ${RC_NONARCH_CFLAGS} -std=gnu99 -fno-common -fno-builtin -Wmost
+CFLAGS += -D__LIBC__ -D__DARWIN_UNIX03=1 -D__DARWIN_64_BIT_INO_T=1 -D__DARWIN_NON_CANCELABLE=1 -D__DARWIN_VERS_1050=1 -D_FORTIFY_SOURCE=0
CFLAGS += -DNOID -DLIBC_MAJOR=${SHLIB_MAJOR}
CFLAGS += -I${.OBJDIR} -I${SYMROOTINC} -I${.CURDIR}/include
AINC = -g -arch ${CCARCH} ${RC_NONARCH_CFLAGS}
SRCROOT ?= ${.CURDIR}
.ifndef SYMROOT
SYMROOT = ${.CURDIR}/SYMROOT
# Parse-time side effect: make sure SYMROOT exists before any rule runs.
-_x_ != test -d ${SYMROOT} || mkdir -p ${SYMROOT}
+_x_ != ${TEST} -d ${SYMROOT} || ${MKDIR} ${SYMROOT}
.endif
DESTDIR ?= ${DSTROOT}
MAKEOBJDIR ?= ${OBJROOT}
.include "${.CURDIR}/Makefile.inc"
.include "Makefile.xbs"
# Man pages are filtered through unifdef so variant-specific text is stripped;
# the exists() guard around bsd.init.mk is dropped (it is now unconditional).
-MANFILTER = unifdef -t ${UNIFDEFARGS}
-.if exists(/usr/share/mk/bsd.init.mk)
+MANFILTER = ${UNIFDEF} -t ${UNIFDEFARGS}
.include <bsd.init.mk>
-.endif
.include <bsd.man.mk>
.include "Platforms/${RC_TARGET_CONFIG}/Makefile.inc"
+# Have to use || to avoid warning message if && is used
# Probe the compiler for blocks support: empty output means __BLOCKS__ was
# found, "NO" means it wasn't.
# NOTE(review): (1) bare `fgrep`/`echo` bypass the Commands.mk macros used
# everywhere else; (2) confirm `.if ${_BLOCKS} != NO` parses when _BLOCKS
# expands to the empty string (the success case).
+.ifdef CCARCH
+_BLOCKS != ${MYCC} -arch ${CCARCH} -E -dD -x c /dev/null | fgrep -q __BLOCKS__ || echo NO
+.if ${_BLOCKS} != NO
+FEATURE_BLOCKS = YES
+.endif # _BLOCKS
+.endif # CCARCH
+
# Generate the per-arch feature header (currently only include guards are
# emitted in this hunk; feature lines are presumably added by hunks not
# shown — verify).
${SYMROOTINC}/${MACHINE_ARCH}/libc-features.h:
-	mkdir -p ${.TARGET:H}
-	echo creating ${.TARGET}
+	${MKDIR} ${.TARGET:H}
+	${ECHO} creating ${.TARGET}
	@echo '#ifndef _LIBC_FEATURES_H_' > ${.TARGET}
	@echo '#define _LIBC_FEATURES_H_' >> ${.TARGET}
	@echo >> ${.TARGET}
	@echo >> ${.TARGET}
	@echo '#endif /* _LIBC_FEATURES_H_ */' >> ${.TARGET}
# Map detected features onto unifdef -D/-U switches used for header and man
# filtering.
+.ifdef FEATURE_BLOCKS
+UNIFDEFARGS += -DUNIFDEF_BLOCKS
+.else
+UNIFDEFARGS += -UUNIFDEF_BLOCKS
+.endif
.ifdef FEATURE_LEGACY_64_APIS
UNIFDEFARGS += -DUNIFDEF_LEGACY_64_APIS
.else
# errors if the included makefiles don't change these:
MDSRCS=
MISRCS=
# NOTE(review): this hunk removes the MDASM/MIASM/NOASM resets — confirm no
# included makefile still appends to those variables.
-MDASM=
-MIASM=
-NOASM=
# SUPPRESSSRCS is used to prevent machine-independent files from being
# built, when a machine-dependent file defines multiple symbols.
# Auto-patch (or symlink)
# .USE macro: if a .patch file exists next to the source, copy the source to
# the target and apply the patch; otherwise just symlink the source in place.
_AUTOPATCH: .USE
	@if [ -f ${.ALLSRC}.patch ]; then \
-	echo cp ${.ALLSRC} ${.TARGET}; \
-	cp ${.ALLSRC} ${.TARGET}; \
-	echo patch ${.TARGET} ${.ALLSRC}.patch; \
-	patch ${.TARGET} ${.ALLSRC}.patch; \
+	${ECHO} ${CP} ${.ALLSRC} ${.TARGET}; \
+	${CP} ${.ALLSRC} ${.TARGET}; \
+	${ECHO} ${PATCH} ${.TARGET} ${.ALLSRC}.patch; \
+	${PATCH} ${.TARGET} ${.ALLSRC}.patch; \
	else \
-	t=`basename ${.ALLSRC}` && x=`dirname ${.ALLSRC}` && d=`basename $$x`; \
-	echo ln -fs $$d/$$t ${.TARGET}; \
-	ln -fs $$d/$$t ${.TARGET}; \
+	t=`${BASENAME} ${.ALLSRC}` && x=`${DIRNAME} ${.ALLSRC}` && d=`${BASENAME} $$x`; \
+	${ECHO} ${LN} -fs $$d/$$t ${.TARGET}; \
+	${LN} -fs $$d/$$t ${.TARGET}; \
	fi
# Standard compilation for the various forms
# .USE macros: attach one to a target to compile it in the given style —
# static (.o), profiled (.po), dynamic (.So), debug (.do). The debug macro's
# final compile line falls outside this hunk.
_STANDARD_STATIC: .USE
-	${CC} -static \
+	${MYCC} -static \
	${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
_STANDARD_PROFILE: .USE
-	${CC} -pg -DPROFILE \
+	${MYCC} -pg -DPROFILE \
	${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
_STANDARD_DYNAMIC: .USE
-	${CC} \
+	${MYCC} \
	${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
_STANDARD_DEBUG: .USE
-	${CC} -g -DDEBUG \
+	${MYCC} -g -DDEBUG \
	${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${LIBCFLAGS} \
# should only be installed once as well. Both of these get done when
# we're invoked as "Libc".
-BSDMAKE = bsdmake -f Makefile
+MYBSDMAKE = ${BSDMAKE} -f Makefile
.PATH: .
.MAIN: all
# specify sources that will build with both the CFLAGS_XXX and CFLAGS_YYY
# flags set. The variants are always in alphabetic order.
#
-VARIANTS = DARWINEXTSN CANCELABLE
+# The DYLD variant is special; it builds a separate archive for use by dyld,
+# and contains customized binaries that are used to override the behavior of
+# the corresponding binaries in libc.a. The DYLD variant by itself does not
+# cause BUILDING_VARIANT to be defined, and object files are in DYLDOBJS.
+#
+VARIANTS = CANCELABLE DARWINEXTSN DYLD
.ifndef FEATURE_ONLY_64_BIT_INO_T
VARIANTS += INODE32
MDSRCS += ldbl64.s
.endif
# Per-variant extra CFLAGS; this hunk re-sorts the list alphabetically and
# adds CFLAGS_DYLD.
-CFLAGS_LEGACY= -U__DARWIN_UNIX03 -D__DARWIN_UNIX03=0 -U__DARWIN_64_BIT_INO_T -D__DARWIN_64_BIT_INO_T=0 -DVARIANT_LEGACY
-CFLAGS_LDBL= -mlong-double-64 -DLDBL_COMPAT
+CFLAGS_CANCELABLE= -DVARIANT_CANCELABLE
CFLAGS_DARWINEXTSN= -DVARIANT_DARWINEXTSN
+CFLAGS_DYLD= -DVARIANT_DYLD
CFLAGS_INODE32= -U__DARWIN_64_BIT_INO_T -D__DARWIN_64_BIT_INO_T=0 -DVARIANT_INODE32
-CFLAGS_CANCELABLE= -DVARIANT_CANCELABLE
+CFLAGS_LDBL= -mlong-double-64 -DLDBL_COMPAT
+CFLAGS_LEGACY= -U__DARWIN_UNIX03 -D__DARWIN_UNIX03=0 -U__DARWIN_64_BIT_INO_T -D__DARWIN_64_BIT_INO_T=0 -DVARIANT_LEGACY
CFLAGS_PRE1050= -U__DARWIN_VERS_1050 -D__DARWIN_VERS_1050=0 -DVARIANT_PRE1050
+# to insure a variable expansion of the left-hand side of a comparison
+DYLD = DYLD
+
.for _v in ${VARIANTS}
COMBOARGS+= ${_v}/${CFLAGS_${_v}}
.endfor
.endif
.endif
# For each of the variant combinations as target (and possibly suffixed with
# _D, _P, and _S for the four kinds on compile styles, we create a subdirectory
-# to do the compiling in, and then call $(BSDMAKE) with the new directory, and
+# to do the compiling in, and then call ${MYBSDMAKE} with the new directory, and
# setting VARIANTCFLAGS, which are the extra flags to be added to CFLAGS.
.for _v in ${VARIANTCOMBOCFLAGS}
VARIANTCOMBOS+= ${_v:H:S,/,,}
+.if ${DYLD} != ${_v:H:S,/,,}
+${_v:H:S,/,,}_BUILDING_VARIANT = -DBUILDING_VARIANT
+.endif
${_v:H:S,/,,}:
.ifdef ${_v:H:S,/,,}SRCS
- mkdir -p ${MAKEOBJDIR}/${_v:H:S,/,,}; \
- MAKEOBJDIR="$(MAKEOBJDIR)/${_v:H:S,/,,}" VARIANTCFLAGS="-DBUILDING_VARIANT ${_v:T:S/@/ /g}" $(BSDMAKE) -C "${.CURDIR}" build_${_v:H:S,/,,}
+ ${MKDIR} ${MAKEOBJDIR}/${_v:H:S,/,,}; \
+ MAKEOBJDIR="${MAKEOBJDIR}/${_v:H:S,/,,}" VARIANTCFLAGS="-DBUILDING_VARIANT ${_v:T:S/@/ /g}" ${MYBSDMAKE} -C "${.CURDIR}" build_${_v:H:S,/,,}
.endif
${_v:H:S,/,,}_D:
.ifdef ${_v:H:S,/,,}SRCS
- mkdir -p ${MAKEOBJDIR}/${_v:H:S,/,,}; \
- MAKEOBJDIR="$(MAKEOBJDIR)/${_v:H:S,/,,}" VARIANTCFLAGS="-DBUILDING_VARIANT ${_v:T:S/@/ /g}" $(BSDMAKE) -C "${.CURDIR}" build_${_v:H:S,/,,}_D
+ ${MKDIR} ${MAKEOBJDIR}/${_v:H:S,/,,}; \
+ MAKEOBJDIR="${MAKEOBJDIR}/${_v:H:S,/,,}" VARIANTCFLAGS="-DBUILDING_VARIANT ${_v:T:S/@/ /g}" ${MYBSDMAKE} -C "${.CURDIR}" build_${_v:H:S,/,,}_D
.endif
${_v:H:S,/,,}_P:
.ifdef ${_v:H:S,/,,}SRCS
- mkdir -p ${MAKEOBJDIR}/${_v:H:S,/,,}; \
- MAKEOBJDIR="$(MAKEOBJDIR)/${_v:H:S,/,,}" VARIANTCFLAGS="-DBUILDING_VARIANT ${_v:T:S/@/ /g}" $(BSDMAKE) -C "${.CURDIR}" build_${_v:H:S,/,,}_P
+ ${MKDIR} ${MAKEOBJDIR}/${_v:H:S,/,,}; \
+ MAKEOBJDIR="${MAKEOBJDIR}/${_v:H:S,/,,}" VARIANTCFLAGS="-DBUILDING_VARIANT ${_v:T:S/@/ /g}" ${MYBSDMAKE} -C "${.CURDIR}" build_${_v:H:S,/,,}_P
.endif
${_v:H:S,/,,}_S:
.ifdef ${_v:H:S,/,,}SRCS
- mkdir -p ${MAKEOBJDIR}/${_v:H:S,/,,}; \
- MAKEOBJDIR="$(MAKEOBJDIR)/${_v:H:S,/,,}" VARIANTCFLAGS="-DBUILDING_VARIANT ${_v:T:S/@/ /g}" $(BSDMAKE) -C "${.CURDIR}" build_${_v:H:S,/,,}_S
+ ${MKDIR} ${MAKEOBJDIR}/${_v:H:S,/,,}; \
+ MAKEOBJDIR="${MAKEOBJDIR}/${_v:H:S,/,,}" VARIANTCFLAGS="${${_v:H:S,/,,}_BUILDING_VARIANT} ${_v:T:S/@/ /g}" ${MYBSDMAKE} -C "${.CURDIR}" build_${_v:H:S,/,,}_S
.endif
.endfor
# Non-DYLD variants get all four suffix styles and feed VARIANTOBJS; the
# SRCS2 prerequisite lists are dropped by this hunk (confirm nothing still
# sets ${_v}SRCS2).
-.for _v in ${VARIANTCOMBOS}
+# The non-DYLD variants
+.for _v in ${VARIANTCOMBOS:N*DYLD*}
# Variant suffix rules so the filenames are unique and Libsystem won't complain
# because of ambiguities with -sectorder
.SUFFIXES: -${_v}.o -${_v}.do -${_v}.po -${_v}.So
.o-${_v}.o .do-${_v}.do .po-${_v}.po .So-${_v}.So:
-	mv ${.IMPSRC} ${.TARGET}
+	${MV} ${.IMPSRC} ${.TARGET}
-# These are the build targets of the above $(BSDMAKE) calls, which just builds
+# These are the build targets of the above ${MYBSDMAKE} calls, which just builds
# using the standard rules, but with the additional flags.
VARIANTOBJS+= ${${_v}SRCS:N*.h:R:S/$/-${_v}.o/g:S,^,${_v}/,g}
build_${_v}: ${${_v}SRCS:N*.h:R:S/$/-${_v}.o/g}
-build_${_v}_D: ${${_v}SRCS:N*.h:R:S/$/-${_v}.do/g} ${${_v}SRCS2:N*.h:R:S/$/-${_v}.do/g}
-build_${_v}_P: ${${_v}SRCS:N*.h:R:S/$/-${_v}.po/g} ${${_v}SRCS2:N*.h:R:S/$/-${_v}.po/g}
-build_${_v}_S: ${${_v}SRCS:N*.h:R:S/$/-${_v}.So/g} ${${_v}SRCS2:N*.h:R:S/$/-${_v}.So/g}
+build_${_v}_D: ${${_v}SRCS:N*.h:R:S/$/-${_v}.do/g}
+build_${_v}_P: ${${_v}SRCS:N*.h:R:S/$/-${_v}.po/g}
+build_${_v}_S: ${${_v}SRCS:N*.h:R:S/$/-${_v}.So/g}
.endfor
# These variables, plus VARIANTOBJS above, are the extra binaries that get
# added to the .a files. Note that these objects are in subdirectories
VARIANTDOBJS+= ${VARIANTOBJS:.o=.do}
VARIANTPOBJS+= ${VARIANTOBJS:.o=.po}
VARIANTSOBJS+= ${VARIANTOBJS:.o=.So}
+
+# Now for the DYLD variants
+.for _v in ${VARIANTCOMBOS:M*DYLD*}
+# Variant suffix rules so the filenames are unique and Libsystem won't complain
+# because of ambiguities with -sectorder
+.SUFFIXES: -${_v}.o -${_v}.do -${_v}.po -${_v}.So
+.o-${_v}.o .do-${_v}.do .po-${_v}.po .So-${_v}.So:
+	${MV} ${.IMPSRC} ${.TARGET}
+
+# These are the build targets of the above ${MYBSDMAKE} calls, which just builds
+# using the standard rules, but with the additional flags.
+# DYLD objects accumulate in DYLDOBJS (not VARIANTOBJS); only the _S
+# (dynamic) pass has prerequisites — plain/_D/_P are deliberately empty.
+DYLDOBJS+= ${${_v}SRCS:N*.h:R:S/$/-${_v}.So/g:S,^,${_v}/,g}
+build_${_v}:
+build_${_v}_D:
+build_${_v}_P:
+build_${_v}_S: ${${_v}SRCS:N*.h:R:S/$/-${_v}.So/g}
+.endfor
#### FreeBSD Rules ##################################################
PREFBSDFLAGS= -I${.CURDIR}/fbsdcompat
FBSDFLAGS= -include _fbsd_compat_.h
# Suffix rules for FreeBSD/NetBSD/OpenBSD/uuid sources; each family force-
# includes its compat header. Styles: .o static, .po profiled, .So dynamic,
# .do debug. Several .do hunks below are truncated mid-continuation by the
# diff (their final "-c ... -o ..." line is in a hunk not shown).
fbsd.c.o:
-	${CC} -static \
+	${MYCC} -static \
	${PREFBSDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${FBSDFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
fbsd.c.po:
-	${CC} -pg -DPROFILE \
+	${MYCC} -pg -DPROFILE \
	${PREFBSDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${FBSDFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
fbsd.c.So:
-	${CC} \
+	${MYCC} \
	${PREFBSDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${FBSDFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
fbsd.c.do:
-	${CC} -DDEBUG \
+	${MYCC} -DDEBUG \
	${PREFBSDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${FBSDFLAGS} ${LIBCFLAGS} \
NBSDFLAGS= -include _nbsd_compat_.h
nbsd.c.o:
-	${CC} -static \
+	${MYCC} -static \
	${PRENBSDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${NBSDFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
nbsd.c.po:
-	${CC} -pg -DPROFILE \
+	${MYCC} -pg -DPROFILE \
	${PRENBSDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${NBSDFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
nbsd.c.So:
-	${CC} \
+	${MYCC} \
	${PRENBSDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${NBSDFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
nbsd.c.do:
-	${CC} -DDEBUG \
+	${MYCC} -DDEBUG \
	${PRENBSDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${NBSDFLAGS} ${LIBCFLAGS} \
OBSDFLAGS=
obsd.c.o:
-	${CC} -static \
+	${MYCC} -static \
	${PREOBSDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${OBSDFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
obsd.c.po:
-	${CC} -pg -DPROFILE \
+	${MYCC} -pg -DPROFILE \
	${PREOBSDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${OBSDFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
obsd.c.So:
-	${CC} \
+	${MYCC} \
	${PREOBSDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${OBSDFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
obsd.c.do:
-	${CC} -DDEBUG \
+	${MYCC} -DDEBUG \
	${PREOBSDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${OBSDFLAGS} ${LIBCFLAGS} \
UUIDFLAGS= -include uuid-config.h
uuid.c.o:
-	${CC} -static \
+	${MYCC} -static \
	${PREUUIDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${UUIDFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
uuid.c.po:
-	${CC} -pg -DPROFILE \
+	${MYCC} -pg -DPROFILE \
	${PREUUIDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${UUIDFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
uuid.c.So:
-	${CC} \
+	${MYCC} \
	${PREUUIDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${UUIDFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
uuid.c.do:
-	${CC} -DDEBUG \
+	${MYCC} -DDEBUG \
	${PREUUIDFLAGS} ${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${UUIDFLAGS} ${LIBCFLAGS} \
	-c ${.IMPSRC} -o ${.TARGET}
# Rewrite the uuid man page template with the project's sed script.
.3-uuid.in.3:
-	sed -f ${SRCROOT}/uuid/uuidman.sed ${.IMPSRC} > ${.TARGET}
+	${SED} -f ${SRCROOT}/uuid/uuidman.sed ${.IMPSRC} > ${.TARGET}
#### Standard C Rules #################################################
# If you change any of these standard rule, make corresponding changes
# to the _STANDARD_* macros in Makefile.inc
#######################################################################
# Four compile styles: .o static, .po profiled, .So dynamic, .do debug; the
# mig-generated User/Server sources share the same rules.
.c.o User.cUser.o Server.cServer.o:
-	${CC} -static \
+	${MYCC} -static \
	${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
.c.po User.cUser.po Server.cServer.po:
-	${CC} -pg -DPROFILE \
+	${MYCC} -pg -DPROFILE \
	${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
.c.So User.cUser.So Server.cServer.So:
-	${CC} \
+	${MYCC} \
	${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${LIBCFLAGS} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
.c.do User.cUser.do Server.cServer.do:
-	${CC} -DDEBUG \
+	${MYCC} -DDEBUG \
	${PRECFLAGS} ${PRECFLAGS-${.IMPSRC:T}} \
	${CFLAGS} ${CFLAGS-${.IMPSRC:T}} \
	${VARIANTCFLAGS} ${LIBCFLAGS} \
#### Standard Assembler Rules #########################################
.s.o .S.o:
-	${CC} -static -x assembler-with-cpp \
+	${MYCC} -static -x assembler-with-cpp \
	${PRECFLAGS:M-[BIDFU]*} ${PRECFLAGS-${.IMPSRC:T}:M-[BIDFU]*} \
	${CFLAGS:M-[BIDFU]*} ${CFLAGS-${.IMPSRC:T}:M-[BIDFU]*} ${AINC} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
.s.po .S.po:
-	${CC} -pg -x assembler-with-cpp -DPROFILE \
+	${MYCC} -pg -x assembler-with-cpp -DPROFILE \
	${PRECFLAGS:M-[BIDFU]*} ${PRECFLAGS-${.IMPSRC:T}:M-[BIDFU]*} \
	${CFLAGS:M-[BIDFU]*} ${CFLAGS-${.IMPSRC:T}:M-[BIDFU]*} ${AINC} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
.s.So .S.So:
-	${CC} -x assembler-with-cpp \
+	${MYCC} -x assembler-with-cpp \
	${PRECFLAGS:M-[BIDFU]*} ${PRECFLAGS-${.IMPSRC:T}:M-[BIDFU]*} \
	${CFLAGS:M-[BIDFU]*} ${CFLAGS-${.IMPSRC:T}:M-[BIDFU]*} ${AINC} \
	-Os ${OPTIMIZE-${.IMPSRC:T}} -c ${.IMPSRC} -o ${.TARGET}
.s.do .S.do:
-	${CC} -x assembler-with-cpp -DDEBUG \
+	${MYCC} -x assembler-with-cpp -DDEBUG \
	${PRECFLAGS:M-[BIDFU]*} ${PRECFLAGS-${.IMPSRC:T}:M-[BIDFU]*} \
	${CFLAGS:M-[BIDFU]*} ${CFLAGS-${.IMPSRC:T}:M-[BIDFU]*} ${AINC} \
	-c ${.IMPSRC} -o ${.TARGET}
#### mig Rules ########################################################
# Generate user/server stubs and the header from a .defs file in one run.
.defs.h .defsUser.c .defsServer.c:
-	mig -arch ${CCARCH} -cc ${CC} -user ${.PREFIX}User.c -server ${.PREFIX}Server.c -header ${.PREFIX}.h ${.IMPSRC}
+	${MIG} -arch ${CCARCH} -cc ${MYCC} -user ${.PREFIX}User.c -server ${.PREFIX}Server.c -header ${.PREFIX}.h ${.IMPSRC}
gen_mig_defs: ${SRVMIGHDRS} ${MIGHDRS}
gen_md_mig_defs: ${MD_MIGHDRS}
#### Library Rules ####################################################
# The generated features header becomes a prerequisite of the objects and
# variant subdirs (instead of the archives), and DYLD variants are excluded
# from the static/profile/debug archives.
# NOTE(review): the rewritten ar invocations drop the trailing ${ARADD}, and
# the POBJS2/DOBJS2/SOBJS2 prerequisites disappear — confirm nothing still
# populates those variables.
-lib${LIB}_static.a:: ${SYMROOTINC}/${MACHINE_ARCH}/libc-features.h ${VARIANTCOMBOS} ${OBJS} ${STATICOBJS}
+${VARIANTCOMBOS:N*DYLD*} ${OBJS} ${STATICOBJS}: ${SYMROOTINC}/${MACHINE_ARCH}/libc-features.h
+lib${LIB}_static.a:: ${VARIANTCOMBOS:N*DYLD*} ${OBJS} ${STATICOBJS}
	@${ECHO} building static ${LIB} library
-	@rm -f lib${LIB}_static.a
-	@${AR} cq lib${LIB}_static.a `lorder ${OBJS} ${STATICOBJS} ${VARIANTOBJS} | tsort -q` ${ARADD}
+	@${RM} lib${LIB}_static.a
+	@${AR} cq lib${LIB}_static.a `${LORDER} ${OBJS} ${STATICOBJS} ${VARIANTOBJS} | ${TSORT} -q`
	${RANLIB} lib${LIB}_static.a
-lib${LIB}_profile.a:: ${SYMROOTINC}/${MACHINE_ARCH}/libc-features.h ${VARIANTCOMBOS:S/$/_P/g} ${POBJS} ${POBJS2}
+${VARIANTCOMBOS:N*DYLD*:S/$/_P/g} ${POBJS}: ${SYMROOTINC}/${MACHINE_ARCH}/libc-features.h
+lib${LIB}_profile.a:: ${VARIANTCOMBOS:N*DYLD*:S/$/_P/g} ${POBJS}
	@${ECHO} building profiled ${LIB} library
-	@rm -f lib${LIB}_profile.a
-	@${AR} cq lib${LIB}_profile.a `lorder ${POBJS} ${VARIANTPOBJS} | tsort -q` ${ARADD}
+	@${RM} lib${LIB}_profile.a
+	@${AR} cq lib${LIB}_profile.a `${LORDER} ${POBJS} ${VARIANTPOBJS} | ${TSORT} -q`
	${RANLIB} lib${LIB}_profile.a
-lib${LIB}_debug.a:: ${SYMROOTINC}/${MACHINE_ARCH}/libc-features.h ${VARIANTCOMBOS:S/$/_D/g} ${DOBJS} ${DOBJS2}
+${VARIANTCOMBOS:N*DYLD*:S/$/_D/g} ${DOBJS}: ${SYMROOTINC}/${MACHINE_ARCH}/libc-features.h
+lib${LIB}_debug.a:: ${VARIANTCOMBOS:N*DYLD*:S/$/_D/g} ${DOBJS}
	@${ECHO} building debug ${LIB} library
-	@rm -f lib${LIB}_debug.a
-	@${AR} cq lib${LIB}_debug.a `lorder ${DOBJS} ${VARIANTDOBJS} | tsort -q` ${ARADD}
+	@${RM} lib${LIB}_debug.a
+	@${AR} cq lib${LIB}_debug.a `${LORDER} ${DOBJS} ${VARIANTDOBJS} | ${TSORT} -q`
	${RANLIB} lib${LIB}_debug.a
# The standard archive additionally produces the dyld-override archive
# lib${LIB}-dyld.a from DYLDOBJS.
-lib${LIB}.a:: ${SYMROOTINC}/${MACHINE_ARCH}/libc-features.h ${VARIANTCOMBOS:S/$/_S/g} ${SOBJS} ${SOBJS2}
+${VARIANTCOMBOS:S/$/_S/g} ${SOBJS}: ${SYMROOTINC}/${MACHINE_ARCH}/libc-features.h
+lib${LIB}.a:: ${VARIANTCOMBOS:S/$/_S/g} ${SOBJS}
	@${ECHO} building standard ${LIB} library
-	@rm -f lib${LIB}.a
-	@${AR} cq lib${LIB}.a `lorder ${SOBJS} ${VARIANTSOBJS} | tsort -q` ${ARADD}
+	@${RM} lib${LIB}.a
+	@${AR} cq lib${LIB}.a `${LORDER} ${SOBJS} ${VARIANTSOBJS} | ${TSORT} -q`
	${RANLIB} lib${LIB}.a
+	@${ECHO} building custom ${LIB} library for dyld
+	@${RM} lib${LIB}-dyld.a
+	@${AR} cq lib${LIB}-dyld.a `${LORDER} ${DYLDOBJS} | ${TSORT} -q`
+	${RANLIB} lib${LIB}-dyld.a
CLEANFILES += ${DOBJS} libc_static.a libc_profile.a libc_debug.a
PRIVHDRS = ${SYSTEMFRAMEWORK}/Versions/B/PrivateHeaders
KERNELFRAMEWORK = ${DESTDIR}/System/Library/Frameworks/Kernel.framework
PRIVKERNELHDRS = ${KERNELFRAMEWORK}/Versions/A/PrivateHeaders
-INSTALLMODE != test `id -u` -eq 0 && echo 444 || echo 644
+INSTALLMODE != ${TEST} `id -u` -eq 0 && ${ECHO} 444 || ${ECHO} 644
installhdrs-md: gen_md_mig_defs
installhdrs: modifyhdrs
copyhdrs: gen_mig_defs
- mkdir -p ${INCDIR}/arpa
- mkdir -p ${INCDIR}/libkern
- mkdir -p ${INCDIR}/machine
- mkdir -p ${INCDIR}/malloc
+ ${MKDIR} ${INCDIR}/arpa
+ ${MKDIR} ${INCDIR}/libkern
+ ${MKDIR} ${INCDIR}/malloc
.ifdef OBJC_INSTHDRS
- mkdir -p ${INCDIR}/objc
+ ${MKDIR} ${INCDIR}/objc
.endif
- mkdir -p ${INCDIR}/protocols
- mkdir -p ${INCDIR}/secure
- mkdir -p ${INCDIR}/sys
- mkdir -p ${INCDIR}/xlocale
- ${INSTALL} -c -m ${INSTALLMODE} ${INSTHDRS} ${INCDIR}
- ${INSTALL} -c -m ${INSTALLMODE} ${ARPA_INSTHDRS} ${INCDIR}/arpa
- ${INSTALL} -c -m ${INSTALLMODE} ${LIBKERN_INSTHDRS} ${INCDIR}/libkern
- ${INSTALL} -c -m ${INSTALLMODE} ${MACHINE_INSTHDRS} ${INCDIR}/machine
- ${INSTALL} -c -m ${INSTALLMODE} ${MALLOC_INSTHDRS} ${INCDIR}/malloc
+ ${MKDIR} ${INCDIR}/protocols
+ ${MKDIR} ${INCDIR}/secure
+ ${MKDIR} ${INCDIR}/sys
+ ${MKDIR} ${INCDIR}/xlocale
+ ${INSTALL} -m ${INSTALLMODE} ${INSTHDRS} ${INCDIR}
+ ${INSTALL} -m ${INSTALLMODE} ${ARPA_INSTHDRS} ${INCDIR}/arpa
+ ${INSTALL} -m ${INSTALLMODE} ${LIBKERN_INSTHDRS} ${INCDIR}/libkern
+ ${INSTALL} -m ${INSTALLMODE} ${MALLOC_INSTHDRS} ${INCDIR}/malloc
.ifdef OBJC_INSTHDRS
- ${INSTALL} -c -m ${INSTALLMODE} ${OBJC_INSTHDRS} ${INCDIR}/objc
+ ${INSTALL} -m ${INSTALLMODE} ${OBJC_INSTHDRS} ${INCDIR}/objc
.endif
- ${INSTALL} -c -m ${INSTALLMODE} ${PROTO_INSTHDRS} ${INCDIR}/protocols
- ${INSTALL} -c -m ${INSTALLMODE} ${SECURE_INSTHDRS} ${INCDIR}/secure
- ${INSTALL} -c -m ${INSTALLMODE} ${SYS_INSTHDRS} ${INCDIR}/sys
- ${INSTALL} -c -m ${INSTALLMODE} ${XLOCALE_INSTHDRS} ${INCDIR}/xlocale
- mkdir -p ${LOCINCDIR}
- ${INSTALL} -c -m ${INSTALLMODE} ${LOCALHDRS} ${LOCINCDIR}
- mkdir -p ${PRIVHDRS}/btree
- mkdir -p ${PRIVHDRS}/machine
- mkdir -p ${PRIVHDRS}/objc
- mkdir -p ${PRIVHDRS}/uuid
- mkdir -p ${PRIVHDRS}/sys
- mkdir -p ${PRIVKERNELHDRS}/uuid
- ${INSTALL} -c -m ${INSTALLMODE} ${PRIV_INSTHDRS} ${PRIVHDRS}
- ${INSTALL} -c -m ${INSTALLMODE} ${INSTBTREEPRIVHDRS_AUTOPATCH} ${PRIVHDRS}/btree
- ${INSTALL} -c -m ${INSTALLMODE} ${SRCROOT}/internat/NXCType.h ${PRIVHDRS}/objc
- mv ${DESTDIR}/usr/include/asm.h ${PRIVHDRS}/machine
- ${INSTALL} -c -m ${INSTALLMODE} ${SYS_INSTHDRS} ${PRIVHDRS}/sys
- ${INSTALL} -c -m ${INSTALLMODE} ${PRIVUUID_INSTHDRS} ${PRIVHDRS}/uuid
- ${INSTALL} -c -m ${INSTALLMODE} ${PRIVUUID_INSTHDRS} ${PRIVKERNELHDRS}/uuid
+ ${INSTALL} -m ${INSTALLMODE} ${PROTO_INSTHDRS} ${INCDIR}/protocols
+ ${INSTALL} -m ${INSTALLMODE} ${SECURE_INSTHDRS} ${INCDIR}/secure
+ ${INSTALL} -m ${INSTALLMODE} ${SYS_INSTHDRS} ${INCDIR}/sys
+ ${INSTALL} -m ${INSTALLMODE} ${XLOCALE_INSTHDRS} ${INCDIR}/xlocale
+ ${MKDIR} ${LOCINCDIR}
+ ${INSTALL} -m ${INSTALLMODE} ${LOCALHDRS} ${LOCINCDIR}
+ ${MKDIR} ${PRIVHDRS}/btree
+ ${MKDIR} ${PRIVHDRS}/machine
+ ${MKDIR} ${PRIVHDRS}/objc
+ ${MKDIR} ${PRIVHDRS}/uuid
+ ${MKDIR} ${PRIVHDRS}/sys
+ ${MKDIR} ${PRIVKERNELHDRS}/uuid
+ ${INSTALL} -m ${INSTALLMODE} ${PRIV_INSTHDRS} ${PRIVHDRS}
+ ${INSTALL} -m ${INSTALLMODE} ${INSTBTREEPRIVHDRS_AUTOPATCH} ${PRIVHDRS}/btree
+ ${INSTALL} -m ${INSTALLMODE} ${SRCROOT}/internat/NXCType.h ${PRIVHDRS}/objc
+ ${MV} ${DESTDIR}/usr/include/asm.h ${PRIVHDRS}/machine
+ ${INSTALL} -m ${INSTALLMODE} ${SYS_INSTHDRS} ${PRIVHDRS}/sys
+ ${INSTALL} -m ${INSTALLMODE} ${PRIVUUID_INSTHDRS} ${PRIVHDRS}/uuid
+ ${INSTALL} -m ${INSTALLMODE} ${PRIVUUID_INSTHDRS} ${PRIVKERNELHDRS}/uuid
modifyhdrs: copyhdrs
- @for i in `find '${DESTDIR}' -name \*.h -print0 | xargs -0 grep -l '^//Begin-Libc'`; do \
- chmod u+w $$i && \
- echo ed - $$i \< ${.CURDIR}/strip-header.ed && \
- ed - $$i < ${.CURDIR}/strip-header.ed && \
- chmod u-w $$i || exit 1; \
+ @for i in `${FIND} '${DESTDIR}' -name \*.h -print0 | ${XARGS} -0 ${GREP} -l '^//Begin-Libc'`; do \
+ ${CHMOD} u+w $$i && \
+ ${ECHO} ${ED} - $$i \< ${.CURDIR}/strip-header.ed && \
+ ${ED} - $$i < ${.CURDIR}/strip-header.ed && \
+ ${CHMOD} u-w $$i || exit 1; \
done
- @for i in `find '${DESTDIR}' -name \*.h -print0 | xargs -0 fgrep -l UNIFDEF`; do \
- chmod u+w $$i && \
- cp $$i $$i.orig && \
- echo unifdef ${UNIFDEFARGS} $$i.orig \> $$i && \
- { unifdef ${UNIFDEFARGS} $$i.orig > $$i || [ $$? -ne 2 ]; } && \
- rm -f $$i.orig && \
- chmod u-w $$i || exit 1; \
+ @for i in `${FIND} '${DESTDIR}' -name \*.h -print0 | ${XARGS} -0 ${FGREP} -l UNIFDEF`; do \
+ ${CHMOD} u+w $$i && \
+ ${CP} $$i $$i.orig && \
+ ${ECHO} ${UNIFDEF} ${UNIFDEFARGS} $$i.orig \> $$i && \
+ { ${UNIFDEF} ${UNIFDEFARGS} $$i.orig > $$i || [ $$? -ne 2 ]; } && \
+ ${RM} $$i.orig && \
+ ${CHMOD} u-w $$i || exit 1; \
done
install_lib${LIB}_static.a:
- ${INSTALL} -c -m ${INSTALLMODE} lib${LIB}_static.a ${DESTDIR}/usr/local/lib/system/
+ ${INSTALL} -m ${INSTALLMODE} lib${LIB}_static.a ${DESTDIR}/usr/local/lib/system/
install_lib${LIB}_profile.a:
- ${INSTALL} -c -m ${INSTALLMODE} lib${LIB}_profile.a ${DESTDIR}/usr/local/lib/system
+ ${INSTALL} -m ${INSTALLMODE} lib${LIB}_profile.a ${DESTDIR}/usr/local/lib/system
install_lib${LIB}_debug.a:
- ${INSTALL} -c -m ${INSTALLMODE} lib${LIB}_debug.a ${DESTDIR}/usr/local/lib/system/
+ ${INSTALL} -m ${INSTALLMODE} lib${LIB}_debug.a ${DESTDIR}/usr/local/lib/system/
install_lib${LIB}.a:
- ${INSTALL} -c -m ${INSTALLMODE} lib${LIB}.a ${DESTDIR}/usr/local/lib/system/
+ ${INSTALL} -m ${INSTALLMODE} lib${LIB}.a ${DESTDIR}/usr/local/lib/system/
autopatch: ${AUTOPATCHHDRS} ${AUTOPATCHSRCS} ${AUTOPATCHMAN}
copyfiles:
-.if !empty $(COPYFILES)
- $(INSTALL) -c -m 444 ${COPYFILES} ${DESTDIR}/usr/local/lib/system
+.if !empty(COPYFILES)
+ ${INSTALL} -m ${INSTALLMODE} ${COPYFILES} ${DESTDIR}/usr/local/lib/system
.endif
-.if !empty $(MDCOPYFILES)
- $(INSTALL) -c -m 444 ${MDCOPYFILES} ${DESTDIR}/usr/local/lib/system
+.if !empty(MDCOPYFILES)
+ ${INSTALL} -m ${INSTALLMODE} ${MDCOPYFILES} ${DESTDIR}/usr/local/lib/system
.endif
clean:
- rm -f ${OBJS} ${POBJS} ${DOBJS} ${SOBJS} ${CLEANFILES}
- rm -f lib${LIB}.a lib${LIB}_static.a lib${LIB}_profile.a \
+ ${RM} ${OBJS} ${POBJS} ${DOBJS} ${SOBJS} ${CLEANFILES}
+ ${RM} lib${LIB}.a lib${LIB}_static.a lib${LIB}_profile.a \
lib${LIB}_debug.a
FEATURE_PATCH_3333969 = 1
# Patch 3375657
-FEATURE_PATCH_3375657 = 1
+#FEATURE_PATCH_3375657 = 1
# Patch 3417676
FEATURE_PATCH_3417676 = 1
_sem_wait ___sem_wait_nocancel
_sem_wait$NOCANCEL$UNIX2003 ___sem_wait_nocancel
_sem_wait$UNIX2003 ___sem_wait
+_semctl$UNIX2003 ___semctl
_sendmsg$NOCANCEL$UNIX2003 ___sendmsg_nocancel
_sendmsg$UNIX2003 ___sendmsg
_sendto$NOCANCEL$UNIX2003 ___sendto_nocancel
_sem_wait ___sem_wait_nocancel
_sem_wait$NOCANCEL$UNIX2003 ___sem_wait_nocancel
_sem_wait$UNIX2003 ___sem_wait
+_semctl$UNIX2003 ___semctl
_sendmsg$NOCANCEL$UNIX2003 ___sendmsg_nocancel
_sendmsg$UNIX2003 ___sendmsg
_sendto$NOCANCEL$UNIX2003 ___sendto_nocancel
_sem_wait ___sem_wait_nocancel
_sem_wait$NOCANCEL$UNIX2003 ___sem_wait_nocancel
_sem_wait$UNIX2003 ___sem_wait
+_semctl$UNIX2003 ___semctl
_sendmsg$NOCANCEL$UNIX2003 ___sendmsg_nocancel
_sendmsg$UNIX2003 ___sendmsg
_sendto$NOCANCEL$UNIX2003 ___sendto_nocancel
.if defined(CCARCH) && ${CCARCH} == armv6
CFLAGS += -mthumb
.endif
+
+_arm_arch_6 != ${MYCC} -E -dD ${CFLAGS:M-[BIDFU]*} ${AINC} -include arm/arch.h -x c /dev/null | grep -q -w _ARM_ARCH_6 || echo NO
+.if ${_arm_arch_6} != NO
+FEATURE_ARM_ARCH_6 = 1
+.endif
.text
.align 2
-#include "../sys/SYS.h"
+#include <mach/arm/syscall_sw.h>
/* void sys_icache_invalidate(addr_t start, int length) */
.globl _sys_icache_invalidate
+++ /dev/null
-.PATH: ${.CURDIR}/arm/mach
-MDSRCS += mach_absolute_time.s
+++ /dev/null
-/*
- * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-#include <stdint.h>
-
-int mach_absolute_time(void)
-{
- static uint32_t abs_time = 0;
- return abs_time++;
-}
+++ /dev/null
-/*
- * Copyright (c) 2002 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
- .text
- .align 2
- .globl _mach_absolute_time
-_mach_absolute_time:
- mov r12, #-3
- swi 0x80
- bx lr
* @APPLE_LICENSE_HEADER_END@
*/
#include "pthread_machdep.h"
+#include <arm/arch.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
.align 4
.globl _pthread_getspecific
_pthread_getspecific:
+#ifdef _ARM_ARCH_6
+ mrc p15, 0, r1, c13, c0, 3
+ add r0, r1, r0, lsl #2
+#else
add r0, r9, r0, lsl #2
+#endif
ldr r0, [r0, #_PTHREAD_TSD_OFFSET]
bx lr
*
* @APPLE_LICENSE_HEADER_END@
*/
+#include <arm/arch.h>
+
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
#undef __APPLE_API_PRIVATE
.align 2
.globl _pthread_self
_pthread_self:
+#ifdef _ARM_ARCH_6
+ mrc p15, 0, r0, c13, c0, 3
+#else
mov r0, r9
+#endif
bx lr
*
* @APPLE_LICENSE_HEADER_END@
*/
+
+#include <arm/arch.h>
+#include <mach/arm/syscall_sw.h>
+
.text
.align 2
.globl ___pthread_set_self
___pthread_set_self:
+#ifndef _ARM_ARCH_6
mov r9, r0
+#endif
+ /* fast trap for thread_set_cthread */
+ mov r3, #2
+ mov r12, #0x80000000
+ swi #SWI_SYSCALL
bx lr
strcmp.s \
strlen.s
+.if defined(FEATURE_ARM_ARCH_6)
+MDSRCS += memset_pattern.s
+.endif
+
SUPPRESSSRCS += memcpy.c memmove.c memset.c strlen.c
*/
#include <mach/machine/asm.h>
-
+#include <architecture/arm/asm_help.h>
+
/*
* A reasonably well-optimized bzero/memset. Should work equally well on arm11 and arm9 based
* cores.
bge L_64ormorealigned
b L_lessthan64aligned
+X_LEAF(___bzero, _bzero)
--- /dev/null
+/*
+ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <arm/arch.h>
+
+#if defined(_ARM_ARCH_6)
+
+#include <mach/machine/asm.h>
+
+/*
+ * This file contains the following functions:
+ *
+ * void memset_pattern4(void *b, const void *c4, size_t len)
+ * void memset_pattern8(void *b, const void *c8, size_t len)
+ * void memset_pattern16(void *b, const void *c16, size_t len)
+ *
+ * The memset() is implemented in the bzero.s file.
+ *
+ * This is a reasonably well optimized version of memset_pattern* routines
+ * implemented for the ARM9 and ARM11 processors using the ARMv6 instruction
+ * set. These routines use the ARM's core registers.
+ *
+ * The algorithm is to align the destination pointer on a 16 byte boundary
+ * and then blast data 64 bytes at a time, in two stores of 32 bytes per loop.
+ *
+ */
+ .text
+ .align 2
+ .syntax unified
+
+/*----------------------------------------------------------------------------*/
+/* void memset_pattern4(void *ptr, const void *pattern4, size_t len); */
+/* */
+/* r0 << destination pointer */
+/* r1 << pointer to 4-byte pattern */
+/* r2 << 'len' (length of destination buffer in bytes) */
+/*----------------------------------------------------------------------------*/
+ .globl _memset_pattern4
+_memset_pattern4:
+ cmp r2, #0 /* check if len is zero */
+ bxeq lr /* return if length is zero */
+
+ /* We need some registers, so save volatiles on stack */
+ /* Avoid r7 (frame pointer) and r9 (thread register) */
+ stmfd sp!, {r4-r7, lr}
+ add r7, sp, #12 /* establish frame */
+ stmfd sp!, {r8, r10-r11}
+
+ /* copy destination base pointer r0 to r12 and leave r0 alone */
+ /* so that we return original pointer back to the caller */
+ mov r12, r0
+
+ /* Check if 'len' is long enough to bother alignment of destination */
+ /* pointer */
+ cmp r2, #32 /* long enough to bother aligning? */
+ movlt r3, #4 /* move pattern length into r3 */
+ movlt r10, #4 /* pattern index */
+ movlt r11, r1 /* move pattern pointer into r11 */
+ blt L_Short /* no */
+
+ /* move 'len' into r1, get 4-byte pattern in r2 */
+ mov r6, r2 /* temporarily move 'len' in to r6 */
+ ldr r2, [r1]/* load 4-byte pattern into r2 */
+ mov r1, r6 /* move 'len' from r6 to r1 */
+
+ mov r3, r2 /* copy 4-byte pattern into r3, r4 and r5 registers */
+ mov r4, r2
+ mov r5, r2
+
+L_NotShort:
+
+ /* Check for 16 or 32 byte aligned destination pointer */
+ tst r12, #0x1F /* check for 32 byte aligned */
+ beq L_Aligned
+ tst r12, #0xF /* check for 16 byte aligned */
+ beq L_16ByteAligned
+ b L_Unaligned /* yes */
+
+L_Bytewise:
+ ldrb r4, [r11], #1
+ strb r4, [r12], #1
+ subs r10, #1
+ moveq r10, r3
+ moveq r11, r1
+ sub r2, #1
+
+L_Short:
+ cmp r2, #0 /* more bytes left? */
+ bne L_Bytewise
+ ldm sp!, {r8, r10-r11} /* restores registers from stack */
+ ldm sp!, {r4-r7, pc} /* restore & return from subroutine */
+
+/* 'len' is long enough to justify aligning the destination pointer */
+/* */
+/* By the time we reach here, data is stored in registers as follows: */
+/* r1 << 'len' (length of destination buffer in bytes) */
+/* r2-r5 << pattern; either 4x4byte OR 2x8byte OR 1x16-byte */
+/* r12 << destination pointer copy (scratch register) */
+/* r0 << destination pointer original */
+/* */
+/* Use r11 as scratch register to store the #bytes offset to 16-byte align */
+/* */
+/* Unaligned on 32-byte boundary, store 1-15 bytes until 16-byte aligned */
+/* As we store these bytes, we rotate the pattern stored in r2-r5 to reflect */
+/* the alignment. */
+
+L_Unaligned:
+ mov r11, r12, lsl #28
+ rsb r11, r11, #0
+ msr cpsr_f, r11 /* Bits[31:28] of cpsr now contain #bytes to align*/
+
+L_Store15BytesAndRotatePattern:
+ strbvs r2, [r12], #1 /* v is set, unaligned in the 1s column */
+ andvs r6, r2, #0xFF /* Rotate pattern right in r2-r5 by 1-byte */
+ andvs r8, r3, #0xFF /* Consider register r2-r5 and a contiguous */
+ andvs r10, r4, #0xFF /* 16-byte register with r2 containing LSB */
+ andvs r11, r5, #0xFF /* and r5 containing MSB */
+ lsrvs r2, r2, #8
+ lsrvs r3, r3, #8
+ lsrvs r4, r4, #8
+ lsrvs r5, r5, #8
+ orrvs r2, r2, r8, lsl #24
+ orrvs r3, r3, r10, lsl #24
+ orrvs r4, r4, r11, lsl #24
+ orrvs r5, r5, r6, lsl #24
+
+ strhcs r2, [r12], #2 /* c is set, unaligned in the 2s column */
+ movcs r6, r2, lsl #16 /* Rotate pattern right in r2-r5 by 2-bytes */
+ movcs r8, r3, lsl #16
+ movcs r10, r4, lsl #16
+ movcs r11, r5, lsl #16
+ lsrcs r2, r2, #16
+ lsrcs r3, r3, #16
+ lsrcs r4, r4, #16
+ lsrcs r5, r5, #16
+ orrcs r2, r2, r8
+ orrcs r3, r3, r10
+ orrcs r4, r4, r11
+ orrcs r5, r5, r6
+
+ streq r2, [r12], #4 /* z is set, unaligned in the 4s column */
+ moveq r6, r2 /* Rotate pattern right in r2-r5 by 4-bytes */
+ moveq r2, r3
+ moveq r3, r4
+ moveq r4, r5
+ moveq r5, r6
+
+ stmmi r12!, {r2-r3} /* n is set, unaligned in the 8s column */
+ movmi r6, r2 /* Rotate pattern right in r2-r5 by 8-bytes */
+ movmi r8, r3
+ movmi r2, r4
+ movmi r3, r5
+ movmi r4, r6
+ movmi r5, r8
+
+ mrs r11, cpsr /*copy cpsr in to r11 */
+ subs r1, r1, r11, lsr #28
+ ldmeq sp!, {r8, r10-r11} /* restores registers from stack */
+ ldmeq sp!, {r4-r7, pc} /* restore & return from subroutine */
+
+/* By the time we reach here, we are 16-byte aligned and r2-r5 contains */
+/* rotated pattern. Now lets make sure we are 32-byte aligned. */
+L_16ByteAligned:
+ tst r12, #(1 << 4)
+ stmne r12!, {r2-r5}
+ subsne r1, r1, #16
+
+/* By the time we reach here, data is stored in registers as follows: */
+/* r1 << 'len' (remaining length of destination buffer in bytes) */
+/* r2-r5 << rotated pattern; either 4x4byte OR 2x8byte OR 1x16-byte */
+/* r12 << aligned destination pointer copy (scratch register) */
+L_Aligned:
+ cmp r1, #64
+ blt L_AlignedLessThan64
+
+/* Copy pattern in four more registers so that we can do 64 byte transfers */
+ mov r6, r2
+ mov r8, r3
+ mov r10, r4
+ mov r11, r5
+
+/* At this point, we are 16-byte aligned and 'len' is greater than 64 bytes */
+/* Lets transfer 64 bytes at a time until len becomes less than 64 bytes */
+ sub r1, r1, #64 /* pre-subtract to avoid extra compare in loop */
+L_Loop64:
+ stm r12!, {r2-r6, r8, r10-r11}
+ subs r1, r1, #64
+ stm r12!, {r2-r6, r8, r10-r11}
+ bge L_Loop64
+
+ /* return if 'len' is zero */
+ adds r1, r1, #64 /* readjust length; previously subtracted extra 64*/
+ ldmeq sp!, {r8, r10-r11} /* restores registers from stack */
+ ldmeq sp!, {r4-r7, pc} /* restore & return from subroutine */
+
+L_AlignedLessThan64:
+ /* do we have 16 or more bytes left */
+ cmp r1, #16
+ stmge r12!, {r2-r5}
+ subsge r1, r1, #16
+ bgt L_AlignedLessThan64
+ ldmeq sp!, {r8, r10-r11} /* restores registers from stack */
+ ldmeq sp!, {r4-r7, pc} /* restore & return from subroutine */
+
+L_AlignedLessThan16:
+ /* store last up-to 15 bytes */
+ /* move the remaining len bits [3:0] to the flags area of cpsr */
+ mov r1, r1, lsl #28
+ msr cpsr_f, r1
+
+ stmmi r12!, {r2-r3} /* n is set, store 8 bytes */
+ movmi r2, r4 /* shift vector down 8 bytes */
+ movmi r3, r5
+
+ streq r2, [r12], #4 /* z is set, store 4 bytes */
+ moveq r2, r3 /* shift vector down 4 bytes */
+
+ strhcs r2, [r12], #2 /* c is set, store 2 bytes */
+ lsrcs r2, #16 /* shift register right 2 bytes */
+
+ strbvs r2, [r12], #1 /* v is set, store 1 byte */
+ ldm sp!, {r8, r10-r11} /* restores registers from stack */
+ ldm sp!, {r4-r7, pc} /* restore & return from subroutine */
+
+/*----------------------------------------------------------------------------*/
+/* void memset_pattern8(void *ptr, const void *pattern8, size_t len); */
+/* */
+/* r0 << destination pointer */
+/* r1 << pointer to 8-byte pattern */
+/* r2 << 'len' (length of destination buffer in bytes) */
+/*----------------------------------------------------------------------------*/
+ .globl _memset_pattern8
+_memset_pattern8:
+ cmp r2, #0 /* check if len is zero */
+ bxeq lr /* return if length is zero */
+
+ /* We need some registers, so save volatiles on stack */
+ /* Avoid r7 (frame pointer) and r9 (thread register) */
+ stmfd sp!, {r4-r7, lr}
+ add r7, sp, #12 /* establish frame */
+ stmfd sp!, {r8, r10-r11}
+
+ /* copy destination base pointer r0 to r12 and leave r0 alone */
+ /* so that we return original pointer back to the caller */
+ mov r12, r0
+
+ /* Check if 'len' is long enough to bother alignment of destination */
+ /* pointer */
+ cmp r2, #32 /* long enough to bother aligning? */
+ movlt r3, #8 /* move pattern length into r3 */
+ movlt r10, #8 /* pattern index */
+ movlt r11, r1 /* move pattern pointer into r11 */
+ blt L_Short /* no */
+
+ /* move 'len' into r1, get 8-byte pattern in r2-r3 */
+ mov r6, r2 /* temporarily move 'len' in to r6 */
+ ldr r2, [r1], #4 /* load 8-byte pattern into r2-r3 */
+ ldr r3, [r1], #4
+ mov r1, r6 /* move 'len' from r6 to r1 */
+
+ mov r4, r2 /* copy 8-byte pattern into r4-r5 registers */
+ mov r5, r3
+ b L_NotShort /* yes */
+
+
+/*----------------------------------------------------------------------------*/
+/* void memset_pattern16(void *ptr, const void *pattern16, size_t len); */
+/* */
+/* r0 << destination pointer */
+/* r1 << pointer to 16-byte pattern */
+/* r2 << 'len' (length of destination buffer in bytes) */
+/*----------------------------------------------------------------------------*/
+ .globl _memset_pattern16
+_memset_pattern16:
+ cmp r2, #0 /* check if len is zero */
+ bxeq lr /* return if length is zero */
+
+ /* We need some registers, so save volatiles on stack */
+ /* Avoid r7 (frame pointer) and r9 (thread register) */
+ stmfd sp!, {r4-r7, lr}
+ add r7, sp, #12 /* establish frame */
+ stmfd sp!, {r8, r10-r11}
+
+ /* copy destination base pointer r0 to r12 and leave r0 alone */
+ /* so that we return original pointer back to the caller */
+ mov r12, r0
+
+ /* Check if 'len' is long enough to bother alignment of destination */
+ /* pointer */
+ cmp r2, #32 /* long enough to bother aligning? */
+ movlt r3, #16 /* move pattern length into r3 */
+ movlt r10, #16 /* pattern index */
+ movlt r11, r1 /* move pattern pointer into r11 */
+ blt L_Short /* no */
+
+ /* move 'len' into r1, get 16-byte pattern in r2-r5 */
+ mov r6, r2 /* temporarily move 'len' in to r6 */
+ ldr r2, [r1], #4 /* load 16-byte pattern into r2-r5 */
+ ldr r3, [r1], #4
+ ldr r4, [r1], #4
+ ldr r5, [r1], #4
+ mov r1, r6 /* move 'len' from r6 to r1 */
+
+ b L_NotShort /* yes */
+
+
+#endif /* _ARM_ARCH_6 */
.PATH: ${.CURDIR}/arm/sys
MDSRCS+= \
- OSAtomic-v4.c \
OSAtomic.s \
+ gcc_atomic.c \
_longjmp.s \
_setjmp.s \
arm_commpage_gettimeofday.c \
longjmp.s \
setjmp.s
+.if !defined(FEATURE_ARM_ARCH_6)
+MDSRCS+= OSAtomic-v4.c
+.endif
+
MDCOPYFILES+= ${.CURDIR}/Platforms/${RC_TARGET_CONFIG}/arm/libc.syscall.arm
/*
- * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
static OSSpinLock _atomic_lock = OS_SPINLOCK_INIT;
-int32_t OSAtomicAdd32( int32_t theAmount, int32_t *theValue )
+int32_t OSAtomicAdd32( int32_t theAmount, volatile int32_t *theValue )
{
int32_t result;
return result;
}
-int32_t OSAtomicAdd32Barrier( int32_t theAmount, int32_t *theValue )
+int32_t OSAtomicAdd32Barrier( int32_t theAmount, volatile int32_t *theValue )
{
return OSAtomicAdd32(theAmount, theValue);
}
-int32_t OSAtomicOr32( uint32_t theMask, uint32_t *theValue )
+int64_t OSAtomicAdd64( int64_t theAmount, volatile int64_t *theValue )
+{
+ int64_t result;
+
+ OSSpinLockLock(&_atomic_lock);
+ result = (*theValue += theAmount);
+ OSSpinLockUnlock(&_atomic_lock);
+
+ return result;
+}
+
+int64_t OSAtomicAdd64Barrier( int64_t theAmount, volatile int64_t *theValue )
+{
+ return OSAtomicAdd64(theAmount, theValue);
+}
+
+int32_t OSAtomicOr32( uint32_t theMask, volatile uint32_t *theValue )
{
int32_t result;
return result;
}
-int32_t OSAtomicOr32Barrier( uint32_t theMask, uint32_t *theValue )
+int32_t OSAtomicOr32Barrier( uint32_t theMask, volatile uint32_t *theValue )
{
return OSAtomicOr32(theMask, theValue);
}
-int32_t OSAtomicAnd32( uint32_t theMask, uint32_t *theValue )
+int32_t OSAtomicOr32Orig( uint32_t theMask, volatile uint32_t *theValue )
+{
+ int32_t result;
+
+ OSSpinLockLock(&_atomic_lock);
+ result = *theValue;
+ *theValue |= theMask;
+ OSSpinLockUnlock(&_atomic_lock);
+
+ return result;
+}
+
+int32_t OSAtomicOr32OrigBarrier( uint32_t theMask, volatile uint32_t *theValue )
+{
+ return OSAtomicOr32Orig(theMask, theValue);
+}
+
+int32_t OSAtomicAnd32( uint32_t theMask, volatile uint32_t *theValue )
{
int32_t result;
return result;
}
-int32_t OSAtomicAnd32Barrier( uint32_t theMask, uint32_t *theValue )
+int32_t OSAtomicAnd32Barrier( uint32_t theMask, volatile uint32_t *theValue )
{
return OSAtomicAnd32(theMask, theValue);
}
-int32_t OSAtomicXor32( uint32_t theMask, uint32_t *theValue )
+int32_t OSAtomicAnd32Orig( uint32_t theMask, volatile uint32_t *theValue )
+{
+ int32_t result;
+
+ OSSpinLockLock(&_atomic_lock);
+ result = *theValue;
+ *theValue &= theMask;
+ OSSpinLockUnlock(&_atomic_lock);
+
+ return result;
+}
+
+int32_t OSAtomicAnd32OrigBarrier( uint32_t theMask, volatile uint32_t *theValue )
+{
+ return OSAtomicAnd32Orig(theMask, theValue);
+}
+
+int32_t OSAtomicXor32( uint32_t theMask, volatile uint32_t *theValue )
{
int32_t result;
return result;
}
-int32_t OSAtomicXor32Barrier( uint32_t theMask, uint32_t *theValue )
+int32_t OSAtomicXor32Barrier( uint32_t theMask, volatile uint32_t *theValue )
{
return OSAtomicXor32(theMask, theValue);
}
-bool OSAtomicCompareAndSwap32( int32_t oldValue, int32_t newValue, int32_t *theValue )
+int32_t OSAtomicXor32Orig( uint32_t theMask, volatile uint32_t *theValue )
+{
+ int32_t result;
+
+ OSSpinLockLock(&_atomic_lock);
+ result = *theValue;
+ *theValue ^= theMask;
+ OSSpinLockUnlock(&_atomic_lock);
+
+ return result;
+}
+
+int32_t OSAtomicXor32OrigBarrier( uint32_t theMask, volatile uint32_t *theValue )
+{
+ return OSAtomicXor32Orig(theMask, theValue);
+}
+
+bool OSAtomicCompareAndSwap32( int32_t oldValue, int32_t newValue, volatile int32_t *theValue )
{
bool result;
return result;
}
-bool OSAtomicCompareAndSwap32Barrier( int32_t oldValue, int32_t newValue, int32_t *theValue )
+bool OSAtomicCompareAndSwap32Barrier( int32_t oldValue, int32_t newValue, volatile int32_t *theValue )
{
return OSAtomicCompareAndSwap32(oldValue, newValue, theValue);
}
-bool OSAtomicTestAndSet( uint32_t n, void *theAddress )
+bool OSAtomicCompareAndSwap64( int64_t oldValue, int64_t newValue, volatile int64_t *theValue )
+{
+ bool result;
+
+ OSSpinLockLock(&_atomic_lock);
+ result = (*theValue == oldValue);
+ if (result) *theValue = newValue;
+ OSSpinLockUnlock(&_atomic_lock);
+
+ return result;
+}
+
+bool OSAtomicCompareAndSwap64Barrier( int64_t oldValue, int64_t newValue, volatile int64_t *theValue )
+{
+ return OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
+}
+
+bool OSAtomicTestAndSet( uint32_t n, volatile void *theAddress )
{
char *byteAddress = ((char*)theAddress + (n>>3));
uint32_t byteBit = (0x80>>(n&7));
return result;
}
-bool OSAtomicTestAndSetBarrier( uint32_t n, void *theAddress )
+bool OSAtomicTestAndSetBarrier( uint32_t n, volatile void *theAddress )
{
return OSAtomicTestAndSet(n, theAddress);
}
-bool OSAtomicTestAndClear( uint32_t n, void *theAddress )
+bool OSAtomicTestAndClear( uint32_t n, volatile void *theAddress )
{
char *byteAddress = ((char*)theAddress + (n>>3));
uint32_t byteBit = (0x80>>(n&7));
return result;
}
-bool OSAtomicTestAndClearBarrier( uint32_t n, void *theAddress )
+bool OSAtomicTestAndClearBarrier( uint32_t n, volatile void *theAddress )
{
return OSAtomicTestAndClear(n, theAddress);
}
return;
}
+
+bool OSAtomicCompareAndSwapPtrBarrier( void *__oldValue, void *__newValue, void * volatile *__theValue )
+{
+ return OSAtomicCompareAndSwapPtr(__oldValue, __newValue, __theValue);
+}
+
+bool OSAtomicCompareAndSwapPtr( void *__oldValue, void *__newValue, void * volatile *__theValue )
+{
+ bool result;
+
+ OSSpinLockLock(&_atomic_lock);
+ result = (*__theValue == __oldValue);
+ if (result) *__theValue = __newValue;
+ OSSpinLockUnlock(&_atomic_lock);
+
+ return result;
+}
+
#endif /* !defined(_ARM_ARCH_6) */
*/
#include <machine/cpu_capabilities.h>
-#include "SYS.h"
+#include <architecture/arm/asm_help.h>
#include <arm/arch.h>
.text
#define ATOMIC_ARITHMETIC(op) \
1: ldrex r2, [r1] /* load existing value and tag memory */ ;\
op r3, r2, r0 /* compute new value */ ;\
- strex r2, r3, [r1] /* store new value if memory is still tagged */ ;\
- cmp r2, #0 /* check if the store succeeded */ ;\
+ strex ip, r3, [r1] /* store new value if memory is still tagged */ ;\
+ cmp ip, #0 /* check if the store succeeded */ ;\
bne 1b /* if not, try again */ ;\
mov r0, r3 /* return new value */
-MI_ENTRY_POINT(_OSAtomicAdd32Barrier)
-MI_ENTRY_POINT(_OSAtomicAdd32)
+#define ATOMIC_ARITHMETIC_ORIG(op) \
+1: ldrex r2, [r1] /* load existing value and tag memory */ ;\
+ op r3, r2, r0 /* compute new value */ ;\
+ strex ip, r3, [r1] /* store new value if memory is still tagged */ ;\
+ cmp ip, #0 /* check if the store succeeded */ ;\
+ bne 1b /* if not, try again */ ;\
+ mov r0, r2 /* return orig value */
+
+ENTRY_POINT(_OSAtomicAdd32Barrier)
+ENTRY_POINT(_OSAtomicAdd32)
ATOMIC_ARITHMETIC(add)
bx lr
-MI_ENTRY_POINT(_OSAtomicOr32Barrier)
-MI_ENTRY_POINT(_OSAtomicOr32)
+ENTRY_POINT(_OSAtomicOr32Barrier)
+ENTRY_POINT(_OSAtomicOr32)
ATOMIC_ARITHMETIC(orr)
bx lr
-MI_ENTRY_POINT(_OSAtomicAnd32Barrier)
-MI_ENTRY_POINT(_OSAtomicAnd32)
+ENTRY_POINT(_OSAtomicOr32OrigBarrier)
+ENTRY_POINT(_OSAtomicOr32Orig)
+ ATOMIC_ARITHMETIC_ORIG(orr)
+ bx lr
+
+ENTRY_POINT(_OSAtomicAnd32Barrier)
+ENTRY_POINT(_OSAtomicAnd32)
ATOMIC_ARITHMETIC(and)
bx lr
-MI_ENTRY_POINT(_OSAtomicXor32Barrier)
-MI_ENTRY_POINT(_OSAtomicXor32)
+ENTRY_POINT(_OSAtomicAnd32OrigBarrier)
+ENTRY_POINT(_OSAtomicAnd32Orig)
+ ATOMIC_ARITHMETIC_ORIG(and)
+ bx lr
+
+ENTRY_POINT(_OSAtomicXor32Barrier)
+ENTRY_POINT(_OSAtomicXor32)
ATOMIC_ARITHMETIC(eor)
bx lr
-MI_ENTRY_POINT(_OSAtomicCompareAndSwap32Barrier)
-MI_ENTRY_POINT(_OSAtomicCompareAndSwap32)
-MI_ENTRY_POINT(_OSAtomicCompareAndSwapIntBarrier)
-MI_ENTRY_POINT(_OSAtomicCompareAndSwapInt)
-MI_ENTRY_POINT(_OSAtomicCompareAndSwapLongBarrier)
-MI_ENTRY_POINT(_OSAtomicCompareAndSwapLong)
-MI_ENTRY_POINT(_OSAtomicCompareAndSwapPtrBarrier)
-MI_ENTRY_POINT(_OSAtomicCompareAndSwapPtr)
+ENTRY_POINT(_OSAtomicXor32OrigBarrier)
+ENTRY_POINT(_OSAtomicXor32Orig)
+ ATOMIC_ARITHMETIC_ORIG(eor)
+ bx lr
+
+ENTRY_POINT(_OSAtomicCompareAndSwap32Barrier)
+ENTRY_POINT(_OSAtomicCompareAndSwap32)
+ENTRY_POINT(_OSAtomicCompareAndSwapIntBarrier)
+ENTRY_POINT(_OSAtomicCompareAndSwapInt)
+ENTRY_POINT(_OSAtomicCompareAndSwapLongBarrier)
+ENTRY_POINT(_OSAtomicCompareAndSwapLong)
+ENTRY_POINT(_OSAtomicCompareAndSwapPtrBarrier)
+ENTRY_POINT(_OSAtomicCompareAndSwapPtr)
1: ldrex r3, [r2] // load existing value and tag memory
teq r3, r0 // is it the same as oldValue?
movne r0, #0 // if not, return 0 immediately
ands r0, r2, r0 /* mask off the bit from the old value */ ;\
movne r0, #1 /* if non-zero, return exactly 1 */
-MI_ENTRY_POINT(_OSAtomicTestAndSetBarrier)
-MI_ENTRY_POINT(_OSAtomicTestAndSet)
+ENTRY_POINT(_OSAtomicTestAndSetBarrier)
+ENTRY_POINT(_OSAtomicTestAndSet)
ATOMIC_BITOP(orr)
bx lr
-MI_ENTRY_POINT(_OSAtomicTestAndClearBarrier)
-MI_ENTRY_POINT(_OSAtomicTestAndClear)
+ENTRY_POINT(_OSAtomicTestAndClearBarrier)
+ENTRY_POINT(_OSAtomicTestAndClear)
ATOMIC_BITOP(bic)
bx lr
-MI_ENTRY_POINT(_OSMemoryBarrier)
+ENTRY_POINT(_OSMemoryBarrier)
bx lr
#if defined(_ARM_ARCH_6K)
/* If we can use LDREXD/STREXD, then we can implement 64-bit atomic operations */
-MI_ENTRY_POINT(_OSAtomicAdd64)
+ENTRY_POINT(_OSAtomicAdd64Barrier)
+ENTRY_POINT(_OSAtomicAdd64)
// R0,R1 contain the amount to add
// R2 contains the pointer
- stmfd sp!, {r4, r5, r6, r8, lr}
+ stmfd sp!, {r4, r5, r8, r9, lr}
1:
- ldrexd r4, [r2] // load existing value to R4/R5 and tag memory
- adds r6, r4, r0 // add lower half of new value into R6 and set carry bit
- adc r8, r5, r1 // add upper half of new value into R8 with carry
- strexd r3, r6, [r2] // store new value if memory is still tagged
+ ldrexd r4, r5, [r2] // load existing value to R4/R5 and tag memory
+	adds	r8, r4, r0	// add lower half of new value into R8 and set carry bit
+	adc	r9, r5, r1	// add upper half of new value into R9 with carry
+ strexd r3, r8, r9, [r2] // store new value if memory is still tagged
cmp r3, #0 // check if store succeeded
bne 1b // if so, try again
- mov r0, r6 // return new value
- mov r1, r8
- ldmfd sp!, {r4, r5, r6, r8, pc}
+ mov r0, r8 // return new value
+ mov r1, r9
+ ldmfd sp!, {r4, r5, r8, r9, pc}
-MI_ENTRY_POINT(_OSAtomicCompareAndSwap64)
+ENTRY_POINT(_OSAtomicCompareAndSwap64Barrier)
+ENTRY_POINT(_OSAtomicCompareAndSwap64)
// R0,R1 contains the old value
// R2,R3 contains the new value
// the pointer is pushed onto the stack
* Lock the lock pointed to by p. Spin (possibly forever) until the next
* lock is available.
*/
-MI_ENTRY_POINT(_spin_lock)
-MI_ENTRY_POINT(__spin_lock)
-MI_ENTRY_POINT(_OSSpinLockLock)
+ENTRY_POINT(_spin_lock)
+ENTRY_POINT(__spin_lock)
+ENTRY_POINT(_OSSpinLockLock)
L_spin_lock_loop:
mov r1, #1
swp r2, r1, [r0]
ldmfd sp!, {r0, r8}
b L_spin_lock_loop
-MI_ENTRY_POINT(_spin_lock_try)
-MI_ENTRY_POINT(__spin_lock_try)
-MI_ENTRY_POINT(_OSSpinLockTry)
+ENTRY_POINT(_spin_lock_try)
+ENTRY_POINT(__spin_lock_try)
+ENTRY_POINT(_OSSpinLockTry)
mov r1, #1
swp r2, r1, [r0]
bic r0, r1, r2
*
* Unlock the lock pointed to by p.
*/
-MI_ENTRY_POINT(_spin_unlock)
-MI_ENTRY_POINT(__spin_unlock)
-MI_ENTRY_POINT(_OSSpinLockUnlock)
+ENTRY_POINT(_spin_unlock)
+ENTRY_POINT(__spin_unlock)
+ENTRY_POINT(_OSSpinLockUnlock)
mov r1, #0
str r1, [r0]
bx lr
+++ /dev/null
-/*
- * Copyright (c) 1999-2006 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-/*
- * Header files.
- */
-#import <sys/syscall.h>
-#define SWI_SYSCALL 0x80 // from <mach/vm_param.h>
-
-/*
- * ARM system call interface:
- *
- * swi 0x80
- * args: r0-r6
- * return code: r0
- * on error, carry bit is set in the psr, otherwise carry bit is cleared.
- */
-
-/*
- * Macros.
- */
-
-/*
- * until we update the architecture project, these live here
- */
-
-#if defined(__DYNAMIC__)
-#define MI_GET_ADDRESS(reg,var) \
- ldr reg, 4f ;\
-3: ldr reg, [pc, reg] ;\
- b 5f ;\
-4: .long 6f - (3b + 8) ;\
-5: ;\
- .non_lazy_symbol_pointer ;\
-6: ;\
- .indirect_symbol var ;\
- .long 0 ;\
- .text ;\
- .align 2
-#else
-#define MI_GET_ADDRESS(reg,var) \
- ldr reg, 3f ;\
- b 4f ;\
-3: .long var ;\
-4:
-#endif
-
-#if defined(__DYNAMIC__)
-#define MI_BRANCH_EXTERNAL(var) \
- .globl var ;\
- MI_GET_ADDRESS(ip, var) ;\
- bx ip
-#else
-#define MI_BRANCH_EXTERNAL(var) ;\
- .globl var ;\
- b var
-#endif
-
-#if defined(__DYNAMIC__)
-#define MI_CALL_EXTERNAL(var) \
- .globl var ;\
- MI_GET_ADDRESS(ip,var) ;\
- mov lr, pc ;\
- bx ip
-#else
-#define MI_CALL_EXTERNAL(var) \
- .globl var ;\
- bl var
-#endif
-
-#define MI_ENTRY_POINT(name) \
- .align 2 ;\
- .globl name ;\
- .text ;\
-name:
-
-/* load the syscall number into r12 and trap */
-#define DO_SYSCALL(num) \
- .if (((num) & 0xff) == (num)) ;\
- mov r12, #(num) ;\
- .elseif (((num) & 0x3fc) == (num)) ;\
- mov r12, #(num) ;\
- .else ;\
- mov r12, #((num) & 0xffffff00) /* top half of the syscall number */ ;\
- orr r12, r12, #((num) & 0xff) /* bottom half */ ;\
- .endif ;\
- swi #SWI_SYSCALL
-
-/* simple syscalls (0 to 4 args) */
-#define SYSCALL_0to4(name) \
- MI_ENTRY_POINT(_##name) ;\
- DO_SYSCALL(SYS_##name) ;\
- bxcc lr /* return if carry is clear (no error) */ ; \
-1: MI_BRANCH_EXTERNAL(cerror)
-
-/* syscalls with 5 args is different, because of the single arg register load */
-#define SYSCALL_5(name) \
- MI_ENTRY_POINT(_##name) ;\
- mov ip, sp /* save a pointer to the args */ ; \
- stmfd sp!, { r4-r5 } /* save r4-r5 */ ;\
- ldr r4, [ip] /* load 5th arg */ ; \
- DO_SYSCALL(SYS_##name) ;\
- ldmfd sp!, { r4-r5 } /* restore r4-r5 */ ; \
- bxcc lr /* return if carry is clear (no error) */ ; \
-1: MI_BRANCH_EXTERNAL(cerror)
-
-/* syscalls with 6 to 8 args */
-#define SYSCALL_6to8(name, save_regs, arg_regs) \
- MI_ENTRY_POINT(_##name) ;\
- mov ip, sp /* save a pointer to the args */ ; \
- stmfd sp!, { save_regs } /* callee saved regs */ ;\
- ldmia ip, { arg_regs } /* load arg regs */ ; \
- DO_SYSCALL(SYS_##name) ;\
- ldmfd sp!, { save_regs } /* restore callee saved regs */ ; \
- bxcc lr /* return if carry is clear (no error) */ ; \
-1: MI_BRANCH_EXTERNAL(cerror)
-
-#define COMMA ,
-
-#define SYSCALL_0(name) SYSCALL_0to4(name)
-#define SYSCALL_1(name) SYSCALL_0to4(name)
-#define SYSCALL_2(name) SYSCALL_0to4(name)
-#define SYSCALL_3(name) SYSCALL_0to4(name)
-#define SYSCALL_4(name) SYSCALL_0to4(name)
-/* SYSCALL_5 declared above */
-#define SYSCALL_6(name) SYSCALL_6to8(name, r4-r5, r4-r5)
-#define SYSCALL_7(name) SYSCALL_6to8(name, r4-r6 COMMA r8, r4-r6)
-/* there are no 8-argument syscalls currently defined */
-
-/* select the appropriate syscall code, based on the number of arguments */
-#define SYSCALL(name, nargs) SYSCALL_##nargs(name)
-
-#define SYSCALL_NONAME_0to4(name) \
- DO_SYSCALL(SYS_##name) ;\
- bcc 1f /* branch if carry bit is clear (no error) */ ; \
- MI_BRANCH_EXTERNAL(cerror) /* call cerror */ ; \
-1:
-
-#define SYSCALL_NONAME_5(name) \
- mov ip, sp /* save a pointer to the args */ ; \
- stmfd sp!, { r4-r5 } /* save r4-r5 */ ;\
- ldr r4, [ip] /* load 5th arg */ ; \
- DO_SYSCALL(SYS_##name) ;\
- ldmfd sp!, { r4-r5 } /* restore r4-r7 */ ; \
- bcc 1f /* branch if carry bit is clear (no error) */ ; \
- MI_BRANCH_EXTERNAL(cerror) /* call cerror */ ; \
-1:
-
-#define SYSCALL_NONAME_6to8(name, save_regs, arg_regs) \
- mov ip, sp /* save a pointer to the args */ ; \
- stmfd sp!, { save_regs } /* callee save regs */ ;\
- ldmia ip, { arg_regs } /* load arguments */ ; \
- DO_SYSCALL(SYS_##name) ;\
- ldmfd sp!, { save_regs } /* restore callee saved regs */ ; \
- bcc 1f /* branch if carry bit is clear (no error) */ ; \
- MI_BRANCH_EXTERNAL(cerror) /* call cerror */ ; \
-1:
-
-#define SYSCALL_NONAME_0(name) SYSCALL_NONAME_0to4(name)
-#define SYSCALL_NONAME_1(name) SYSCALL_NONAME_0to4(name)
-#define SYSCALL_NONAME_2(name) SYSCALL_NONAME_0to4(name)
-#define SYSCALL_NONAME_3(name) SYSCALL_NONAME_0to4(name)
-#define SYSCALL_NONAME_4(name) SYSCALL_NONAME_0to4(name)
-/* SYSCALL_NONAME_5 declared above */
-#define SYSCALL_NONAME_6(name) SYSCALL_NONAME_6to8(name, r4-r5, r4-r5)
-#define SYSCALL_NONAME_7(name) SYSCALL_NONAME_6to8(name, r4-r6 COMMA r8, r4-r6)
-/* there are no 8-argument syscalls currently defined */
-
-/* select the appropriate syscall code, based on the number of arguments */
-#define SYSCALL_NONAME(name, nargs) SYSCALL_NONAME_##nargs(name)
-
-#define PSEUDO(pseudo, name, nargs) \
- .globl _##pseudo ;\
- .text ;\
- .align 2 ;\
-_##pseudo: ;\
- SYSCALL_NONAME(name, nargs)
-
-#undef END
-#import <mach/arm/syscall_sw.h>
-
-#if !defined(SYS___pthread_canceled)
-#define SYS___pthread_markcancel 332
-#define SYS___pthread_canceled 333
-#define SYS___semwait_signal 334
-#endif
*/
/*
- * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2008 Apple Inc. All rights reserved.
*
* Implements _longjmp()
*
*/
-#include "SYS.h"
+#include <architecture/arm/asm_help.h>
#include "_setjmp.h"
#include <arm/arch.h>
/* int _longjmp(jmp_buf env, int val); */
-MI_ENTRY_POINT(__longjmp)
+ENTRY_POINT(__longjmp)
ldmia r0!, { r4-r8, r10-r11, sp, lr }
#ifdef _ARM_ARCH_6
fldmiax r0, { d8-d15 }
*/
/*
- * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2008 Apple Inc. All rights reserved.
*
* Implements _setjmp()
*
*/
-#include "SYS.h"
+#include <architecture/arm/asm_help.h>
#include "_setjmp.h"
#include <arm/arch.h>
-MI_ENTRY_POINT(__setjmp)
+ENTRY_POINT(__setjmp)
stmia r0!, { r4-r8, r10-r11, sp, lr }
#ifdef _ARM_ARCH_6
fstmiax r0, { d8-d15 }
+/*
+ * Copyright (c) 2008 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
#include <time.h>
#include <tzfile.h>
#include <sys/time.h>
+++ /dev/null
-/*
- * Copyright (c) 1999-2005 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-/* Copyright 1998 Apple Computer, Inc. */
-
-#include "SYS.h"
-
-/*
- * This syscall is special cased: the timeval is returned in r0/r1.
- */
-MI_ENTRY_POINT(___gettimeofday)
- mov r3, r0 // save ptr to timeval
- SYSCALL_NONAME(gettimeofday,2)
- stmia r3, { r0, r1 }
- mov r0, #0
- bx lr
--- /dev/null
+/*
+ * Copyright (c) 2008 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+#include <stdint.h>
+#include <libkern/OSAtomic.h>
+#include <arm/arch.h>
+#include <stdbool.h>
+
+/*
+ * Implement gcc atomic "builtins" in terms of OSAtomic routines,
+ * since the ARM GCC target does not currently support them
+ */
+
+int32_t __sync_fetch_and_add_4 (int32_t *ptr, int32_t value)
+{
+ return OSAtomicAdd32(value, ptr) - value;
+}
+
+int32_t __sync_fetch_and_sub_4 (int32_t *ptr, int32_t value)
+{
+ return OSAtomicAdd32(-value, ptr) + value;
+}
+
+uint32_t __sync_fetch_and_or_4(uint32_t *ptr, uint32_t value)
+{
+ return OSAtomicOr32Orig(value, ptr);
+}
+
+uint32_t __sync_fetch_and_and_4(uint32_t *ptr, uint32_t value)
+{
+ return OSAtomicAnd32Orig(value, ptr);
+}
+
+uint32_t __sync_fetch_and_xor_4(uint32_t *ptr, uint32_t value)
+{
+	return OSAtomicXor32Orig(value, ptr);
+}
+
+int32_t __sync_add_and_fetch_4 (int32_t *ptr, int32_t value)
+{
+ return OSAtomicAdd32(value, ptr);
+}
+
+int32_t __sync_sub_and_fetch_4 (int32_t *ptr, int32_t value)
+{
+ return OSAtomicAdd32(-value, ptr);
+}
+
+uint32_t __sync_or_and_fetch_4 (uint32_t *ptr, int32_t value)
+{
+ return OSAtomicOr32(value, ptr);
+}
+
+uint32_t __sync_and_and_fetch_4 (uint32_t *ptr, int32_t value)
+{
+ return OSAtomicAnd32(value, ptr);
+}
+
+uint32_t __sync_xor_and_fetch_4 (uint32_t *ptr, int32_t value)
+{
+ return OSAtomicXor32(value, ptr);
+}
+
+bool __sync_bool_compare_and_swap_4(int32_t *ptr, int32_t oldval, int32_t newval)
+{
+ return OSAtomicCompareAndSwap32(oldval, newval, ptr);
+}
+
+int32_t __sync_val_compare_and_swap_4(int32_t *ptr, int32_t oldval, int32_t newval)
+{
+	int32_t old;
+	do { old = *ptr; } while (old == oldval && !OSAtomicCompareAndSwap32(oldval, newval, ptr));
+ return old;
+}
+
+int32_t __sync_lock_test_and_set_4(int32_t *ptr, int32_t value)
+{
+ int32_t old;
+
+ do {
+ old = *ptr;
+ } while (!OSAtomicCompareAndSwap32(old, value, ptr));
+
+ return old;
+}
+
+void __sync_lock_release_4(int32_t *ptr)
+{
+ *ptr = 0;
+}
* @APPLE_LICENSE_HEADER_END@
*/
/*
- * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2008 Apple Inc. All rights reserved.
*
- * File: sys/ppc/longjmp.s
+ * File: sys/arm/longjmp.s
*
* Implements siglongjmp(), longjmp(), _longjmp()
*
*/
-#include "SYS.h"
+#include <architecture/arm/asm_help.h>
#include "_setjmp.h"
/*
/* void siglongjmp(sigjmp_buf env, int val); */
-MI_ENTRY_POINT(_siglongjmp)
+ENTRY_POINT(_siglongjmp)
ldr r2, [ r0, #JMP_SIGFLAG ] // load sigflag
tst r2, #0 // test if zero
beq L__exit // if zero do _longjmp()
/* void longjmp(jmp_buf env, int val); */
-MI_ENTRY_POINT(_longjmp)
+ENTRY_POINT(_longjmp)
mov r6, r0 // preserve args across _sigsetmask
mov r8, r1
ldr r0, [ r0, #JMP_sig ] // restore the signal mask
- MI_CALL_EXTERNAL(_sigsetmask) // make a (deprecated!) syscall to set the mask
+ CALL_EXTERNAL(_sigsetmask) // make a (deprecated!) syscall to set the mask
mov r1, r8
mov r0, r6
L__exit:
- MI_BRANCH_EXTERNAL(__longjmp)
+ BRANCH_EXTERNAL(__longjmp)
* @APPLE_LICENSE_HEADER_END@
*/
/*
- * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2008 Apple Inc. All rights reserved.
*
- * File: sys/ppc/setjmp.s
+ * File: sys/arm/setjmp.s
*
* Implements sigsetjmp(), setjmp(), _setjmp()
*
*/
-#include "SYS.h"
+#include <architecture/arm/asm_help.h>
#include "_setjmp.h"
/*
/* int sigsetjmp(sigjmp_buf env, int savemask); */
-MI_ENTRY_POINT(_sigsetjmp)
+ENTRY_POINT(_sigsetjmp)
str r1, [ r0, #JMP_SIGFLAG ] // save sigflag
tst r1, #0 // test if r1 is 0
beq L__exit // if r1 == 0 do _setjmp()
/* int setjmp(jmp_buf env); */
-MI_ENTRY_POINT(_setjmp)
+ENTRY_POINT(_setjmp)
str lr, [ r0, #JMP_lr ]
str r8, [ r0, #JMP_r8 ]
mov r8, r0
mov r0, #1 // get the previous signal mask
mov r1, #0 //
add r2, r8, #JMP_sig // get address where previous mask needs to be
- MI_CALL_EXTERNAL(_sigprocmask) // make a syscall to get mask
+ CALL_EXTERNAL(_sigprocmask) // make a syscall to get mask
mov r0, r8 // restore jmp_buf ptr
ldr r8, [ r0, #JMP_r8 ]
ldr lr, [ r0, #JMP_lr ]
L__exit:
- MI_BRANCH_EXTERNAL(__setjmp)
+ BRANCH_EXTERNAL(__setjmp)
return path;
}
+char *
+__user_local_mkdir_p(char *path)
+{
+ char *next;
+ int res;
+
+ next = path + strlen(VAR_FOLDERS_PATH);
+ while ((next = strchr(next, '/')) != NULL) {
+ *next = 0; // temporarily truncate
+ res = mkdir(path, 0755);
+ if (res != 0 && errno != EEXIST)
+ return NULL;
+ *next++ = '/'; // restore the slash and increment
+ }
+ return path;
+}
+
__private_extern__ char *
_dirhelper(dirhelper_which_t which, char *path, size_t pathlen)
{
if(!*userdir) {
MUTEX_LOCK(&lock);
- if(!*userdir) {
- mach_port_t mp;
-
- if(bootstrap_look_up(bootstrap_port, DIRHELPER_BOOTSTRAP_NAME, &mp) != KERN_SUCCESS) {
- errno = EPERM;
- MUTEX_UNLOCK(&lock);
- return NULL;
- }
+ if (!*userdir) {
+
if(__user_local_dirname(geteuid(), DIRHELPER_USER_LOCAL, userdir, sizeof(userdir)) == NULL) {
-server_error:
- mach_port_deallocate(mach_task_self(), mp);
MUTEX_UNLOCK(&lock);
return NULL;
}
/*
- * check if userdir exists, and if not, call
- * __dirhelper_create_user_local to create it
- * (we have to check again afterwards).
+ * check if userdir exists, and if not, either do the work
+ * ourself if we are root, or call
+ * __dirhelper_create_user_local to create it (we have to
+ * check again afterwards).
*/
if(stat(userdir, &sb) < 0) {
+ mach_port_t mp;
+
if(errno != ENOENT) { /* some unknown error */
*userdir = 0;
- goto server_error;
- }
- if(__dirhelper_create_user_local(mp) != KERN_SUCCESS) {
- errno = EPERM;
- *userdir = 0;
- goto server_error;
+ MUTEX_UNLOCK(&lock);
+ return NULL;
}
- /* double check that the directory really got created */
- if(stat(userdir, &sb) < 0) {
- *userdir = 0;
- goto server_error;
+ /*
+ * If we are root, lets do what dirhelper does for us.
+ */
+ if (geteuid() == 0) {
+ if (__user_local_mkdir_p(userdir) == NULL) {
+ *userdir = 0;
+ MUTEX_UNLOCK(&lock);
+ return NULL;
+ }
+ } else {
+ if(bootstrap_look_up(bootstrap_port, DIRHELPER_BOOTSTRAP_NAME, &mp) != KERN_SUCCESS) {
+ errno = EPERM;
+ server_error:
+ mach_port_deallocate(mach_task_self(), mp);
+ MUTEX_UNLOCK(&lock);
+ return NULL;
+ }
+ if(__dirhelper_create_user_local(mp) != KERN_SUCCESS) {
+ errno = EPERM;
+ goto server_error;
+ }
+ /* double check that the directory really got created */
+ if(stat(userdir, &sb) < 0) {
+ goto server_error;
+ }
+ mach_port_deallocate(mach_task_self(), mp);
}
}
- mach_port_deallocate(mach_task_self(), mp);
}
MUTEX_UNLOCK(&lock);
}
__BEGIN_DECLS
char *__user_local_dirname(uid_t uid, dirhelper_which_t which, char *path,
size_t pathlen);
+char *__user_local_mkdir_p(char *path);
__END_DECLS
#endif /* _DIRHELPER_PRIV_H_ */
struct proc_bsdinfo pbsd;
- if (buffersize < 2*MAXCOMLEN) {
+ if (buffersize < sizeof(pbsd.pbi_name)) {
errno = ENOMEM;
return(0);
}
retval = proc_pidinfo(pid, PROC_PIDTBSDINFO, (uint64_t)0, &pbsd, sizeof(struct proc_bsdinfo));
- if (retval != -1) {
- bcopy(&pbsd.pbi_name, buffer, 2* 2*MAXCOMLEN);
- len = strlen(&pbsd.pbi_name[0]);
+ if (retval != 0) {
+ if (pbsd.pbi_name[0]) {
+ bcopy(&pbsd.pbi_name, buffer, sizeof(pbsd.pbi_name));
+ } else {
+ bcopy(&pbsd.pbi_comm, buffer, sizeof(pbsd.pbi_comm));
+ }
+ len = strlen(buffer);
return(len);
}
return(0);
}
-int proc_libversion(int *major, int * minor)
+int
+proc_libversion(int *major, int * minor)
{
if (major != NULL)
return(0);
}
+int
+proc_setpcontrol(const int control)
+{
+ int retval ;
+
+ if (control < PROC_SETPC_NONE || control > PROC_SETPC_TERMINATE)
+ return(EINVAL);
+
+ if ((retval = __proc_info(5, getpid(), PROC_SELFSET_PCONTROL,(uint64_t)control, NULL, 0)) == -1)
+ return(errno);
+
+ return(0);
+}
+
+
int proc_kmsgbuf(void * buffer, uint32_t buffersize);
int proc_pidpath(int pid, void * buffer, uint32_t buffersize);
int proc_libversion(int *major, int * minor);
+/*
+ * A process can use the following api to set its own process control
+ * state on resource starvation. The argument can have one of the PROC_SETPC_XX values
+ */
+#define PROC_SETPC_NONE 0
+#define PROC_SETPC_THROTTLEMEM 1
+#define PROC_SETPC_SUSPEND 2
+#define PROC_SETPC_TERMINATE 3
+int proc_setpcontrol(const int control);
__END_DECLS
#endif /*_LIBPROC_H_ */
/*
- * Copyright (c) 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
int pids_count;
size_t pids_size;
+ // threads
+ uint64_t *threads;
+ int thr_count;
+ size_t thr_size;
+
// open file descriptors
struct proc_fdinfo *fds;
int fds_count;
info->pids_count = 0;
info->pids_size = 0;
+ info->threads = NULL;
+ info->thr_count = 0;
+ info->thr_size = 0;
+
info->fds = NULL;
info->fds_count = 0;
info->fds_size = 0;
free(info->pids);
}
+ if (info->threads != NULL) {
+ free(info->threads);
+ }
+
if (info->fds != NULL) {
free(info->fds);
}
return -1;
}
- //if (rwpi.prp_vip.vip_path[0])
status = check_file(info, &rwpi.prp_vip.vip_vi.vi_stat);
if (status != 0) {
// if error or match
if ((buf_used + sizeof(struct proc_fdinfo)) >= info->fds_size) {
// if not enough room in the buffer for an extra fd
- buf_used = info->fds_size;
+ buf_used = info->fds_size + sizeof(struct proc_fdinfo);
continue;
}
}
+/*
+ * check_process_threads
+ * check [process] thread working directories
+ *
+ * in : pid
+ * out : -1 if error
+ * 0 if no match
+ * 1 if match
+ */
+static int
+check_process_threads(fdOpenInfoRef info, int pid)
+{
+ int buf_used;
+ int status;
+ struct proc_taskallinfo tai;
+
+ buf_used = proc_pidinfo(pid, PROC_PIDTASKALLINFO, 0, &tai, sizeof(tai));
+ if (buf_used <= 0) {
+ if (errno == ESRCH) {
+ // if the process is gone
+ return 0;
+ }
+ return -1;
+ } else if (buf_used < sizeof(tai)) {
+ // if we didn't get enough information
+ return -1;
+ }
+
+ // check thread info
+ if (tai.pbsd.pbi_flags & PROC_FLAG_THCWD) {
+ int i;
+
+ // get list of threads
+ buf_used = tai.ptinfo.pti_threadnum * sizeof(uint64_t);
+
+ while (1) {
+ if (buf_used > info->thr_size) {
+ // if we need to allocate [more] space
+ while (buf_used > info->thr_size) {
+ info->thr_size += (sizeof(uint64_t) * 32);
+ }
+
+ if (info->threads == NULL) {
+ info->threads = malloc(info->thr_size);
+ } else {
+ info->threads = reallocf(info->threads, info->thr_size);
+ }
+ if (info->threads == NULL) {
+ return -1;
+ }
+ }
+
+ buf_used = proc_pidinfo(pid, PROC_PIDLISTTHREADS, 0, info->threads, info->thr_size);
+ if (buf_used <= 0) {
+ return -1;
+ }
+
+ if ((buf_used + sizeof(uint64_t)) >= info->thr_size) {
+ // if not enough room in the buffer for an extra thread
+ buf_used = info->thr_size + sizeof(uint64_t);
+ continue;
+ }
+
+ info->thr_count = buf_used / sizeof(uint64_t);
+ break;
+ }
+
+ // iterate through each thread
+ for (i = 0; i < info->thr_count; i++) {
+ uint64_t thr = info->threads[i];
+ struct proc_threadwithpathinfo tpi;
+
+ buf_used = proc_pidinfo(pid, PROC_PIDTHREADPATHINFO, thr, &tpi, sizeof(tpi));
+ if (buf_used <= 0) {
+			if ((errno == ESRCH) || (errno == EINVAL)) {
+				// if the process or thread is gone
+				continue;
+			}
+			return -1;	// any other error: tpi was not filled in
+		} else if (buf_used < sizeof(tpi)) {
+ // if we didn't get enough information
+ return -1;
+ }
+
+ status = check_file(info, &tpi.pvip.vip_vi.vi_stat);
+ if (status != 0) {
+ // if error or match
+ return status;
+ }
+ }
+ }
+
+ return 0;
+}
+
+
/*
* check_process
* check [process] current working and root directories
return status;
}
+ // check per-thread working directories
+ status = check_process_threads(info, pid);
+ if (status != 0) {
+ // if error or match
+ return status;
+ }
+
return 0;
}
}
buffersize -= (buffersize % sizeof(int)); // make whole number of ints
- if (buffersize < sizeof(int)) {
+ if (buffersize < sizeof(int)) {
// if we can't even return a single PID
errno = ENOMEM;
return -1;
- }
+ }
// init
info = check_init(path, pathflags);
if ((buf_used + sizeof(int)) >= info->pids_size) {
// if not enough room in the buffer for an extra pid
- buf_used = info->pids_size;
+ buf_used = info->pids_size + sizeof(int);
continue;
}
--- /dev/null
+--- bt_overflow.c.orig 2008-09-07 11:37:54.000000000 -0700
++++ bt_overflow.c 2008-09-07 12:00:45.000000000 -0700
+@@ -97,7 +97,7 @@ __ovfl_get(t, p, ssz, buf, bufsz)
+
+ #ifdef DEBUG
+ if (pg == P_INVALID || sz == 0)
+- abort();
++ LIBC_ABORT("%s", pg == P_INVALID ? "pg == P_INVALID" : "sz == 0");
+ #endif
+ /* Make the buffer bigger as necessary. */
+ if (*bufsz < sz) {
+@@ -206,7 +206,7 @@ __ovfl_delete(t, p)
+
+ #ifdef DEBUG
+ if (pg == P_INVALID || sz == 0)
+- abort();
++ LIBC_ABORT("%s", pg == P_INVALID ? "pg == P_INVALID" : "sz == 0");
+ #endif
+ if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
+ return (RET_ERROR);
--- /dev/null
+--- bt_seq.c.orig 2008-10-09 11:40:48.000000000 -0700
++++ bt_seq.c 2008-10-09 11:43:28.000000000 -0700
+@@ -387,18 +387,19 @@ __bt_first(t, key, erval, exactp)
+ * occurs.
+ */
+ if (ep->index == 0) {
++ PAGE *hprev;
+ if (h->prevpg == P_INVALID)
+ break;
+ if (h->pgno != save.page->pgno)
+ mpool_put(t->bt_mp, h, 0);
+- if ((h = mpool_get(t->bt_mp,
++ if ((hprev = mpool_get(t->bt_mp,
+ h->prevpg, 0)) == NULL) {
+ if (h->pgno == save.page->pgno)
+ mpool_put(t->bt_mp,
+ save.page, 0);
+ return (RET_ERROR);
+ }
+- ep->page = h;
++ ep->page = h = hprev;
+ ep->index = NEXTINDEX(h);
+ }
+ --ep->index;
--- /dev/null
+--- bt_split.c.orig 2008-09-07 11:37:54.000000000 -0700
++++ bt_split.c 2008-09-07 12:29:24.000000000 -0700
+@@ -210,7 +210,7 @@ __bt_split(t, sp, key, data, flags, ilen
+ nbytes = NRINTERNAL;
+ break;
+ default:
+- abort();
++ LIBC_ABORT("illegal rchild->flags & P_TYPE (0x%x)", rchild->flags & P_TYPE);
+ }
+
+ /* Split the parent page if necessary or shift the indices. */
+@@ -285,7 +285,7 @@ __bt_split(t, sp, key, data, flags, ilen
+ ((RINTERNAL *)dest)->pgno = rchild->pgno;
+ break;
+ default:
+- abort();
++ LIBC_ABORT("illegal rchild->flags & P_TYPE (0x%x)", rchild->flags & P_TYPE);
+ }
+
+ /* Unpin the held pages. */
+@@ -580,7 +580,7 @@ bt_broot(t, h, l, r)
+ ((BINTERNAL *)dest)->pgno = r->pgno;
+ break;
+ default:
+- abort();
++ LIBC_ABORT("illegal h->flags & P_TYPE (0x%x)", h->flags & P_TYPE);
+ }
+
+ /* There are two keys on the page. */
+@@ -663,7 +663,7 @@ bt_psplit(t, h, l, r, pskip, ilen)
+ isbigkey = 0;
+ break;
+ default:
+- abort();
++ LIBC_ABORT("illegal h->flags & P_TYPE (0x%x)", h->flags & P_TYPE);
+ }
+
+ /*
+@@ -756,7 +756,7 @@ bt_psplit(t, h, l, r, pskip, ilen)
+ nbytes = NRLEAF(rl);
+ break;
+ default:
+- abort();
++ LIBC_ABORT("illegal h->flags & P_TYPE (0x%x)", h->flags & P_TYPE);
+ }
+ ++nxt;
+ r->linp[off] = r->upper -= nbytes;
CWD := ${.CURDIR}/db/btree
.include "Makefile.fbsd_begin"
-FBSDMISRCS= bt_close.c bt_conv.c bt_debug.c bt_delete.c bt_get.c bt_open.c \
+FBSDMISRCS= bt_close.c bt_conv.c bt_delete.c bt_get.c bt_open.c \
bt_overflow.c bt_page.c bt_put.c bt_search.c bt_seq.c bt_split.c \
bt_utils.c
.for _src in ${FBSDMISRCS}
CFLAGS-${_src:R}-fbsd.${_src:E} += -D__DBINTERFACE_PRIVATE
.endfor
-.for _src in bt_debug.c bt_open.c bt_overflow.c
+.for _src in bt_open.c bt_overflow.c
CFLAGS-${_src:R}-fbsd.${_src:E} += -UDEBUG
.endfor
FBSDHDRS= btree.h
+++ /dev/null
-./bt_debug.c
\ No newline at end of file
+++ /dev/null
-./bt_overflow.c
\ No newline at end of file
--- /dev/null
+/*-
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)bt_overflow.c 8.5 (Berkeley) 7/16/94";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/db/btree/bt_overflow.c,v 1.3 2002/03/22 21:52:01 obrien Exp $");
+
+#include <sys/param.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <db.h>
+#include "btree.h"
+
+/*
+ * Big key/data code.
+ *
+ * Big key and data entries are stored on linked lists of pages. The initial
+ * reference is byte string stored with the key or data and is the page number
+ * and size. The actual record is stored in a chain of pages linked by the
+ * nextpg field of the PAGE header.
+ *
+ * The first page of the chain has a special property. If the record is used
+ * by an internal page, it cannot be deleted and the P_PRESERVE bit will be set
+ * in the header.
+ *
+ * XXX
+ * A single DBT is written to each chain, so a lot of space on the last page
+ * is wasted. This is a fairly major bug for some data sets.
+ */
+
+/*
+ * __OVFL_GET -- Get an overflow key/data item.
+ *
+ * Parameters:
+ * t: tree
+ * p: pointer to { pgno_t, u_int32_t }
+ * buf: storage address
+ * bufsz: storage size
+ *
+ * Returns:
+ * RET_ERROR, RET_SUCCESS
+ */
+int
+__ovfl_get(t, p, ssz, buf, bufsz)
+ BTREE *t;
+ void *p;
+ size_t *ssz;
+ void **buf;
+ size_t *bufsz;
+{
+ PAGE *h;
+ pgno_t pg;
+ size_t nb, plen;
+ u_int32_t sz;
+
+ memmove(&pg, p, sizeof(pgno_t));
+ memmove(&sz, (char *)p + sizeof(pgno_t), sizeof(u_int32_t));
+ *ssz = sz;
+
+#ifdef DEBUG
+ if (pg == P_INVALID || sz == 0)
+ LIBC_ABORT("%s", pg == P_INVALID ? "pg == P_INVALID" : "sz == 0");
+#endif
+ /* Make the buffer bigger as necessary. */
+ if (*bufsz < sz) {
+ *buf = (char *)(*buf == NULL ? malloc(sz) : reallocf(*buf, sz));
+ if (*buf == NULL)
+ return (RET_ERROR);
+ *bufsz = sz;
+ }
+
+ /*
+ * Step through the linked list of pages, copying the data on each one
+ * into the buffer. Never copy more than the data's length.
+ */
+ plen = t->bt_psize - BTDATAOFF;
+ for (p = *buf;; p = (char *)p + nb, pg = h->nextpg) {
+ if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
+ return (RET_ERROR);
+
+ nb = MIN(sz, plen);
+ memmove(p, (char *)h + BTDATAOFF, nb);
+ mpool_put(t->bt_mp, h, 0);
+
+ if ((sz -= nb) == 0)
+ break;
+ }
+ return (RET_SUCCESS);
+}
+
+/*
+ * __OVFL_PUT -- Store an overflow key/data item.
+ *
+ * Parameters:
+ * t: tree
+ * data: DBT to store
+ * pgno: storage page number
+ *
+ * Returns:
+ * RET_ERROR, RET_SUCCESS
+ */
+int
+__ovfl_put(t, dbt, pg)
+	BTREE *t;
+	const DBT *dbt;
+	pgno_t *pg;
+{
+	PAGE *h, *last;
+	void *p;
+	pgno_t npg;
+	size_t nb, plen;
+	u_int32_t sz;
+
+	/*
+	 * Allocate pages and copy the key/data record into them.  Store the
+	 * number of the first page in the chain.
+	 */
+	plen = t->bt_psize - BTDATAOFF;	/* usable bytes per overflow page */
+	for (last = NULL, p = dbt->data, sz = dbt->size;;
+	    p = (char *)p + plen, last = h) {
+		if ((h = __bt_new(t, &npg)) == NULL)
+			return (RET_ERROR);
+
+		/* Initialize the new page as an unlinked overflow page. */
+		h->pgno = npg;
+		h->nextpg = h->prevpg = P_INVALID;
+		h->flags = P_OVERFLOW;
+		h->lower = h->upper = 0;
+
+		nb = MIN(sz, plen);
+		memmove((char *)h + BTDATAOFF, p, nb);
+
+		/*
+		 * Link onto the chain; the first page's number is the
+		 * caller's return value.
+		 */
+		if (last) {
+			last->nextpg = h->pgno;
+			mpool_put(t->bt_mp, last, MPOOL_DIRTY);
+		} else
+			*pg = h->pgno;
+
+		if ((sz -= nb) == 0) {
+			mpool_put(t->bt_mp, h, MPOOL_DIRTY);
+			break;
+		}
+	}
+	return (RET_SUCCESS);
+}
+
+/*
+ * __OVFL_DELETE -- Delete an overflow chain.
+ *
+ * Parameters:
+ * t: tree
+ * p: pointer to { pgno_t, u_int32_t }
+ *
+ * Returns:
+ * RET_ERROR, RET_SUCCESS
+ */
+int
+__ovfl_delete(t, p)
+	BTREE *t;
+	void *p;
+{
+	PAGE *h;
+	pgno_t pg;
+	size_t plen;
+	u_int32_t sz;
+
+	/* Unpack the { page number, size } reference (may be unaligned). */
+	memmove(&pg, p, sizeof(pgno_t));
+	memmove(&sz, (char *)p + sizeof(pgno_t), sizeof(u_int32_t));
+
+#ifdef DEBUG
+	if (pg == P_INVALID || sz == 0)
+		LIBC_ABORT("%s", pg == P_INVALID ? "pg == P_INVALID" : "sz == 0");
+#endif
+	if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
+		return (RET_ERROR);
+
+	/* Don't delete chains used by internal pages. */
+	if (h->flags & P_PRESERVE) {
+		mpool_put(t->bt_mp, h, 0);
+		return (RET_SUCCESS);
+	}
+
+	/* Step through the chain, calling the free routine for each page. */
+	for (plen = t->bt_psize - BTDATAOFF;; sz -= plen) {
+		/* Save the link before the page is freed. */
+		pg = h->nextpg;
+		__bt_free(t, h);
+		if (sz <= plen)
+			break;
+		if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
+			return (RET_ERROR);
+	}
+	return (RET_SUCCESS);
+}
+++ /dev/null
-./bt_seq.c
\ No newline at end of file
--- /dev/null
+/*-
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)bt_seq.c 8.7 (Berkeley) 7/20/94";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/db/btree/bt_seq.c,v 1.3 2002/03/21 22:46:25 obrien Exp $");
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <db.h>
+#include "btree.h"
+
+static int __bt_first(BTREE *, const DBT *, EPG *, int *);
+static int __bt_seqadv(BTREE *, EPG *, int);
+static int __bt_seqset(BTREE *, EPG *, DBT *, int);
+
+/*
+ * Sequential scan support.
+ *
+ * The tree can be scanned sequentially, starting from either end of the
+ * tree or from any specific key. A scan request before any scanning is
+ * done is initialized as starting from the least node.
+ */
+
+/*
+ * __bt_seq --
+ * Btree sequential scan interface.
+ *
+ * Parameters:
+ * dbp: pointer to access method
+ * key: key for positioning and return value
+ * data: data return value
+ * flags: R_CURSOR, R_FIRST, R_LAST, R_NEXT, R_PREV.
+ *
+ * Returns:
+ * RET_ERROR, RET_SUCCESS or RET_SPECIAL if there's no next key.
+ */
+int
+__bt_seq(dbp, key, data, flags)
+	const DB *dbp;
+	DBT *key, *data;
+	u_int flags;
+{
+	BTREE *t;
+	EPG e;
+	int status;
+
+	t = dbp->internal;
+
+	/* Toss any page pinned across calls. */
+	if (t->bt_pinned != NULL) {
+		mpool_put(t->bt_mp, t->bt_pinned, 0);
+		t->bt_pinned = NULL;
+	}
+
+	/*
+	 * If scan uninitialized as yet, or starting at a specific record, set
+	 * the scan to a specific key.  Both __bt_seqset and __bt_seqadv pin
+	 * the page the cursor references if they're successful.
+	 */
+	switch (flags) {
+	case R_NEXT:
+	case R_PREV:
+		if (F_ISSET(&t->bt_cursor, CURS_INIT)) {
+			status = __bt_seqadv(t, &e, flags);
+			break;
+		}
+		/* FALLTHROUGH */
+	case R_FIRST:
+	case R_LAST:
+	case R_CURSOR:
+		status = __bt_seqset(t, &e, key, flags);
+		break;
+	default:
+		errno = EINVAL;
+		return (RET_ERROR);
+	}
+
+	if (status == RET_SUCCESS) {
+		/* Remember the new scan position. */
+		__bt_setcur(t, e.page->pgno, e.index);
+
+		status =
+		    __bt_ret(t, &e, key, &t->bt_rkey, data, &t->bt_rdata, 0);
+
+		/*
+		 * If the user is doing concurrent access, we copied the
+		 * key/data, toss the page.
+		 */
+		if (F_ISSET(t, B_DB_LOCK))
+			mpool_put(t->bt_mp, e.page, 0);
+		else
+			t->bt_pinned = e.page;
+	}
+	return (status);
+}
+
+/*
+ * __bt_seqset --
+ * Set the sequential scan to a specific key.
+ *
+ * Parameters:
+ * t: tree
+ * ep: storage for returned key
+ * key: key for initial scan position
+ * flags: R_CURSOR, R_FIRST, R_LAST, R_NEXT, R_PREV
+ *
+ * Side effects:
+ * Pins the page the cursor references.
+ *
+ * Returns:
+ * RET_ERROR, RET_SUCCESS or RET_SPECIAL if there's no next key.
+ */
+static int
+__bt_seqset(t, ep, key, flags)
+	BTREE *t;
+	EPG *ep;
+	DBT *key;
+	int flags;
+{
+	PAGE *h;
+	pgno_t pg;
+	int exact;
+
+	/*
+	 * Find the first, last or specific key in the tree and point the
+	 * cursor at it.  The cursor may not be moved until a new key has
+	 * been found.
+	 */
+	switch (flags) {
+	case R_CURSOR:				/* Keyed scan. */
+		/*
+		 * Find the first instance of the key or the smallest key
+		 * which is greater than or equal to the specified key.
+		 */
+		if (key->data == NULL || key->size == 0) {
+			errno = EINVAL;
+			return (RET_ERROR);
+		}
+		return (__bt_first(t, key, ep, &exact));
+	case R_FIRST:				/* First record. */
+	case R_NEXT:
+		/* Walk down the left-hand side of the tree. */
+		for (pg = P_ROOT;;) {
+			if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
+				return (RET_ERROR);
+
+			/* Check for an empty tree. */
+			if (NEXTINDEX(h) == 0) {
+				mpool_put(t->bt_mp, h, 0);
+				return (RET_SPECIAL);
+			}
+
+			if (h->flags & (P_BLEAF | P_RLEAF))
+				break;
+			/* Descend through the leftmost child. */
+			pg = GETBINTERNAL(h, 0)->pgno;
+			mpool_put(t->bt_mp, h, 0);
+		}
+		/* The leaf page stays pinned for the caller. */
+		ep->page = h;
+		ep->index = 0;
+		break;
+	case R_LAST:				/* Last record. */
+	case R_PREV:
+		/* Walk down the right-hand side of the tree. */
+		for (pg = P_ROOT;;) {
+			if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
+				return (RET_ERROR);
+
+			/* Check for an empty tree. */
+			if (NEXTINDEX(h) == 0) {
+				mpool_put(t->bt_mp, h, 0);
+				return (RET_SPECIAL);
+			}
+
+			if (h->flags & (P_BLEAF | P_RLEAF))
+				break;
+			/* Descend through the rightmost child. */
+			pg = GETBINTERNAL(h, NEXTINDEX(h) - 1)->pgno;
+			mpool_put(t->bt_mp, h, 0);
+		}
+
+		/* The leaf page stays pinned for the caller. */
+		ep->page = h;
+		ep->index = NEXTINDEX(h) - 1;
+		break;
+	}
+	return (RET_SUCCESS);
+}
+
+/*
+ * __bt_seqadv --
+ * Advance the sequential scan.
+ *
+ * Parameters:
+ * t: tree
+ * flags: R_NEXT, R_PREV
+ *
+ * Side effects:
+ * Pins the page the new key/data record is on.
+ *
+ * Returns:
+ * RET_ERROR, RET_SUCCESS or RET_SPECIAL if there's no next key.
+ */
+static int
+__bt_seqadv(t, ep, flags)
+	BTREE *t;
+	EPG *ep;
+	int flags;
+{
+	CURSOR *c;
+	PAGE *h;
+	indx_t index;
+	pgno_t pg;
+	int exact;
+
+	/*
+	 * There are a couple of states that we can be in.  The cursor has
+	 * been initialized by the time we get here, but that's all we know.
+	 */
+	c = &t->bt_cursor;
+
+	/*
+	 * The cursor was deleted where there weren't any duplicate records,
+	 * so the key was saved.  Find out where that key would go in the
+	 * current tree.  It doesn't matter if the returned key is an exact
+	 * match or not -- if it's an exact match, the record was added after
+	 * the delete so we can just return it.  If not, as long as there's
+	 * a record there, return it.
+	 */
+	if (F_ISSET(c, CURS_ACQUIRE))
+		return (__bt_first(t, &c->key, ep, &exact));
+
+	/* Get the page referenced by the cursor. */
+	if ((h = mpool_get(t->bt_mp, c->pg.pgno, 0)) == NULL)
+		return (RET_ERROR);
+
+	/*
+	 * Find the next/previous record in the tree and point the cursor at
+	 * it.  The cursor may not be moved until a new key has been found.
+	 */
+	switch (flags) {
+	case R_NEXT:			/* Next record. */
+		/*
+		 * The cursor was deleted in duplicate records, and moved
+		 * forward to a record that has yet to be returned.  Clear
+		 * that flag, and return the record.
+		 */
+		if (F_ISSET(c, CURS_AFTER))
+			goto usecurrent;
+		index = c->pg.index;
+		if (++index == NEXTINDEX(h)) {
+			/* Ran off the end of this page; step to the next. */
+			pg = h->nextpg;
+			mpool_put(t->bt_mp, h, 0);
+			if (pg == P_INVALID)
+				return (RET_SPECIAL);
+			if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
+				return (RET_ERROR);
+			index = 0;
+		}
+		break;
+	case R_PREV:			/* Previous record. */
+		/*
+		 * The cursor was deleted in duplicate records, and moved
+		 * backward to a record that has yet to be returned.  Clear
+		 * that flag, and return the record.
+		 */
+		if (F_ISSET(c, CURS_BEFORE)) {
+usecurrent:		F_CLR(c, CURS_AFTER | CURS_BEFORE);
+			ep->page = h;
+			ep->index = c->pg.index;
+			return (RET_SUCCESS);
+		}
+		index = c->pg.index;
+		if (index == 0) {
+			/* Ran off the front of this page; step back one. */
+			pg = h->prevpg;
+			mpool_put(t->bt_mp, h, 0);
+			if (pg == P_INVALID)
+				return (RET_SPECIAL);
+			if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
+				return (RET_ERROR);
+			index = NEXTINDEX(h) - 1;
+		} else
+			--index;
+		break;
+	}
+
+	/* The page holding the new position stays pinned for the caller. */
+	ep->page = h;
+	ep->index = index;
+	return (RET_SUCCESS);
+}
+
+/*
+ * __bt_first --
+ * Find the first entry.
+ *
+ * Parameters:
+ * t: the tree
+ * key: the key
+ * erval: return EPG
+ * exactp: pointer to exact match flag
+ *
+ * Returns:
+ * The first entry in the tree greater than or equal to key,
+ * or RET_SPECIAL if no such key exists.
+ */
+static int
+__bt_first(t, key, erval, exactp)
+	BTREE *t;
+	const DBT *key;
+	EPG *erval;
+	int *exactp;
+{
+	PAGE *h;
+	EPG *ep, save;
+	pgno_t pg;
+
+	/*
+	 * Find any matching record; __bt_search pins the page.
+	 *
+	 * If it's an exact match and duplicates are possible, walk backwards
+	 * in the tree until we find the first one.  Otherwise, make sure it's
+	 * a valid key (__bt_search may return an index just past the end of a
+	 * page) and return it.
+	 */
+	if ((ep = __bt_search(t, key, exactp)) == NULL)
+		/* NOTE(review): historical code returns 0, not RET_ERROR. */
+		return (0);
+	if (*exactp) {
+		if (F_ISSET(t, B_NODUPS)) {
+			*erval = *ep;
+			return (RET_SUCCESS);
+		}
+
+		/*
+		 * Walk backwards, as long as the entry matches and there are
+		 * keys left in the tree.  Save a copy of each match in case
+		 * we go too far.
+		 */
+		save = *ep;
+		h = ep->page;
+		do {
+			if (save.page->pgno != ep->page->pgno) {
+				/* New page is the match; drop the old pin. */
+				mpool_put(t->bt_mp, save.page, 0);
+				save = *ep;
+			} else
+				save.index = ep->index;
+
+			/*
+			 * Don't unpin the page the last (or original) match
+			 * was on, but make sure it's unpinned if an error
+			 * occurs.
+			 */
+			if (ep->index == 0) {
+				PAGE *hprev;
+				if (h->prevpg == P_INVALID)
+					break;
+				if (h->pgno != save.page->pgno)
+					mpool_put(t->bt_mp, h, 0);
+				if ((hprev = mpool_get(t->bt_mp,
+				    h->prevpg, 0)) == NULL) {
+					if (h->pgno == save.page->pgno)
+						mpool_put(t->bt_mp,
+						    save.page, 0);
+					return (RET_ERROR);
+				}
+				/* Continue the scan on the previous page. */
+				ep->page = h = hprev;
+				ep->index = NEXTINDEX(h);
+			}
+			--ep->index;
+		} while (__bt_cmp(t, key, ep) == 0);
+
+		/*
+		 * Reach here with the last page that was looked at pinned,
+		 * which may or may not be the same as the last (or original)
+		 * match page.  If it's not useful, release it.
+		 */
+		if (h->pgno != save.page->pgno)
+			mpool_put(t->bt_mp, h, 0);
+
+		*erval = save;
+		return (RET_SUCCESS);
+	}
+
+	/* If at the end of a page, find the next entry. */
+	if (ep->index == NEXTINDEX(ep->page)) {
+		h = ep->page;
+		pg = h->nextpg;
+		mpool_put(t->bt_mp, h, 0);
+		if (pg == P_INVALID)
+			return (RET_SPECIAL);
+		if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
+			return (RET_ERROR);
+		ep->index = 0;
+		ep->page = h;
+	}
+	*erval = *ep;
+	return (RET_SUCCESS);
+}
+
+/*
+ * __bt_setcur --
+ * Set the cursor to an entry in the tree.
+ *
+ * Parameters:
+ * t: the tree
+ * pgno: page number
+ * index: page index
+ */
+void
+__bt_setcur(t, pgno, index)
+	BTREE *t;
+	pgno_t pgno;
+	u_int index;
+{
+	/* Lose any already deleted key. */
+	if (t->bt_cursor.key.data != NULL) {
+		free(t->bt_cursor.key.data);
+		t->bt_cursor.key.size = 0;
+		t->bt_cursor.key.data = NULL;
+	}
+	/* Drop deleted-cursor state; the position below is now valid. */
+	F_CLR(&t->bt_cursor, CURS_ACQUIRE | CURS_AFTER | CURS_BEFORE);
+
+	/* Update the cursor. */
+	t->bt_cursor.pg.pgno = pgno;
+	t->bt_cursor.pg.index = index;
+	F_SET(&t->bt_cursor, CURS_INIT);
+}
+++ /dev/null
-./bt_split.c
\ No newline at end of file
--- /dev/null
+/*-
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)bt_split.c 8.9 (Berkeley) 7/26/94";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/db/btree/bt_split.c,v 1.7 2004/09/13 22:07:24 kuriyama Exp $");
+
+#include <sys/types.h>
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <db.h>
+#include "btree.h"
+
+static int bt_broot(BTREE *, PAGE *, PAGE *, PAGE *);
+static PAGE *bt_page (BTREE *, PAGE *, PAGE **, PAGE **, indx_t *, size_t);
+static int bt_preserve(BTREE *, pgno_t);
+static PAGE *bt_psplit (BTREE *, PAGE *, PAGE *, PAGE *, indx_t *, size_t);
+static PAGE *bt_root (BTREE *, PAGE *, PAGE **, PAGE **, indx_t *, size_t);
+static int bt_rroot(BTREE *, PAGE *, PAGE *, PAGE *);
+static recno_t rec_total(PAGE *);
+
+#ifdef STATISTICS
+u_long bt_rootsplit, bt_split, bt_sortsplit, bt_pfxsaved;
+#endif
+
+/*
+ * __BT_SPLIT -- Split the tree.
+ *
+ * Parameters:
+ * t: tree
+ * sp: page to split
+ * key: key to insert
+ * data: data to insert
+ * flags: BIGKEY/BIGDATA flags
+ * ilen: insert length
+ * skip: index to leave open
+ *
+ * Returns:
+ * RET_ERROR, RET_SUCCESS
+ */
+int
+__bt_split(t, sp, key, data, flags, ilen, argskip)
+	BTREE *t;
+	PAGE *sp;
+	const DBT *key, *data;
+	int flags;
+	size_t ilen;
+	u_int32_t argskip;
+{
+	BINTERNAL *bi;
+	BLEAF *bl, *tbl;
+	DBT a, b;
+	EPGNO *parent;
+	PAGE *h, *l, *r, *lchild, *rchild;
+	indx_t nxtindex;
+	u_int16_t skip;
+	u_int32_t n, nbytes, nksize;
+	int parentsplit;
+	char *dest;
+
+	/*
+	 * Split the page into two pages, l and r.  The split routines return
+	 * a pointer to the page into which the key should be inserted and with
+	 * skip set to the offset which should be used.  Additionally, l and r
+	 * are pinned.
+	 */
+	skip = argskip;
+	h = sp->pgno == P_ROOT ?
+	    bt_root(t, sp, &l, &r, &skip, ilen) :
+	    bt_page(t, sp, &l, &r, &skip, ilen);
+	if (h == NULL)
+		return (RET_ERROR);
+
+	/*
+	 * Insert the new key/data pair into the leaf page.  (Key inserts
+	 * always cause a leaf page to split first.)
+	 */
+	h->linp[skip] = h->upper -= ilen;
+	dest = (char *)h + h->upper;
+	if (F_ISSET(t, R_RECNO))
+		WR_RLEAF(dest, data, flags)
+	else
+		WR_BLEAF(dest, key, data, flags)
+
+	/* If the root page was split, make it look right. */
+	if (sp->pgno == P_ROOT &&
+	    (F_ISSET(t, R_RECNO) ?
+	    bt_rroot(t, sp, l, r) : bt_broot(t, sp, l, r)) == RET_ERROR)
+		goto err2;
+
+	/*
+	 * Now we walk the parent page stack -- a LIFO stack of the pages that
+	 * were traversed when we searched for the page that split.  Each stack
+	 * entry is a page number and a page index offset.  The offset is for
+	 * the page traversed on the search.  We've just split a page, so we
+	 * have to insert a new key into the parent page.
+	 *
+	 * If the insert into the parent page causes it to split, may have to
+	 * continue splitting all the way up the tree.  We stop if the root
+	 * splits or the page inserted into didn't have to split to hold the
+	 * new key.  Some algorithms replace the key for the old page as well
+	 * as the new page.  We don't, as there's no reason to believe that the
+	 * first key on the old page is any better than the key we have, and,
+	 * in the case of a key being placed at index 0 causing the split, the
+	 * key is unavailable.
+	 *
+	 * There are a maximum of 5 pages pinned at any time.  We keep the left
+	 * and right pages pinned while working on the parent.   The 5 are the
+	 * two children, left parent and right parent (when the parent splits)
+	 * and the root page or the overflow key page when calling bt_preserve.
+	 * This code must make sure that all pins are released other than the
+	 * root page or overflow page which is unlocked elsewhere.
+	 */
+	while ((parent = BT_POP(t)) != NULL) {
+		lchild = l;
+		rchild = r;
+
+		/* Get the parent page. */
+		if ((h = mpool_get(t->bt_mp, parent->pgno, 0)) == NULL)
+			goto err2;
+
+		/*
+		 * The new key goes ONE AFTER the index, because the split
+		 * was to the right.
+		 */
+		skip = parent->index + 1;
+
+		/*
+		 * Calculate the space needed on the parent page.
+		 *
+		 * Prefix trees: space hack when inserting into BINTERNAL
+		 * pages.  Retain only what's needed to distinguish between
+		 * the new entry and the LAST entry on the page to its left.
+		 * If the keys compare equal, retain the entire key.  Note,
+		 * we don't touch overflow keys, and the entire key must be
+		 * retained for the next-to-left most key on the leftmost
+		 * page of each level, or the search will fail.  Applicable
+		 * ONLY to internal pages that have leaf pages as children.
+		 * Further reduction of the key between pairs of internal
+		 * pages loses too much information.
+		 */
+		switch (rchild->flags & P_TYPE) {
+		case P_BINTERNAL:
+			bi = GETBINTERNAL(rchild, 0);
+			nbytes = NBINTERNAL(bi->ksize);
+			break;
+		case P_BLEAF:
+			bl = GETBLEAF(rchild, 0);
+			nbytes = NBINTERNAL(bl->ksize);
+			/* Try the user's prefix routine to shrink the key. */
+			if (t->bt_pfx && !(bl->flags & P_BIGKEY) &&
+			    (h->prevpg != P_INVALID || skip > 1)) {
+				tbl = GETBLEAF(lchild, NEXTINDEX(lchild) - 1);
+				a.size = tbl->ksize;
+				a.data = tbl->bytes;
+				b.size = bl->ksize;
+				b.data = bl->bytes;
+				nksize = t->bt_pfx(&a, &b);
+				n = NBINTERNAL(nksize);
+				if (n < nbytes) {
+#ifdef STATISTICS
+					bt_pfxsaved += nbytes - n;
+#endif
+					nbytes = n;
+				} else
+					nksize = 0;
+			} else
+				nksize = 0;	/* 0 means use the full key */
+			break;
+		case P_RINTERNAL:
+		case P_RLEAF:
+			nbytes = NRINTERNAL;
+			break;
+		default:
+			LIBC_ABORT("illegal rchild->flags & P_TYPE (0x%x)", rchild->flags & P_TYPE);
+		}
+
+		/* Split the parent page if necessary or shift the indices. */
+		if (h->upper - h->lower < nbytes + sizeof(indx_t)) {
+			sp = h;
+			h = h->pgno == P_ROOT ?
+			    bt_root(t, h, &l, &r, &skip, nbytes) :
+			    bt_page(t, h, &l, &r, &skip, nbytes);
+			if (h == NULL)
+				goto err1;
+			parentsplit = 1;
+		} else {
+			/* Open up a hole at skip for the new index entry. */
+			if (skip < (nxtindex = NEXTINDEX(h)))
+				memmove(h->linp + skip + 1, h->linp + skip,
+				    (nxtindex - skip) * sizeof(indx_t));
+			h->lower += sizeof(indx_t);
+			parentsplit = 0;
+		}
+
+		/* Insert the key into the parent page. */
+		switch (rchild->flags & P_TYPE) {
+		case P_BINTERNAL:
+			h->linp[skip] = h->upper -= nbytes;
+			dest = (char *)h + h->linp[skip];
+			memmove(dest, bi, nbytes);
+			((BINTERNAL *)dest)->pgno = rchild->pgno;
+			break;
+		case P_BLEAF:
+			h->linp[skip] = h->upper -= nbytes;
+			dest = (char *)h + h->linp[skip];
+			WR_BINTERNAL(dest, nksize ? nksize : bl->ksize,
+			    rchild->pgno, bl->flags & P_BIGKEY);
+			memmove(dest, bl->bytes, nksize ? nksize : bl->ksize);
+			/* Keep overflow keys referenced by internal pages. */
+			if (bl->flags & P_BIGKEY &&
+			    bt_preserve(t, *(pgno_t *)bl->bytes) == RET_ERROR)
+				goto err1;
+			break;
+		case P_RINTERNAL:
+			/*
+			 * Update the left page count.  If split
+			 * added at index 0, fix the correct page.
+			 */
+			if (skip > 0)
+				dest = (char *)h + h->linp[skip - 1];
+			else
+				dest = (char *)l + l->linp[NEXTINDEX(l) - 1];
+			((RINTERNAL *)dest)->nrecs = rec_total(lchild);
+			((RINTERNAL *)dest)->pgno = lchild->pgno;
+
+			/* Update the right page count. */
+			h->linp[skip] = h->upper -= nbytes;
+			dest = (char *)h + h->linp[skip];
+			((RINTERNAL *)dest)->nrecs = rec_total(rchild);
+			((RINTERNAL *)dest)->pgno = rchild->pgno;
+			break;
+		case P_RLEAF:
+			/*
+			 * Update the left page count.  If split
+			 * added at index 0, fix the correct page.
+			 */
+			if (skip > 0)
+				dest = (char *)h + h->linp[skip - 1];
+			else
+				dest = (char *)l + l->linp[NEXTINDEX(l) - 1];
+			((RINTERNAL *)dest)->nrecs = NEXTINDEX(lchild);
+			((RINTERNAL *)dest)->pgno = lchild->pgno;
+
+			/* Update the right page count. */
+			h->linp[skip] = h->upper -= nbytes;
+			dest = (char *)h + h->linp[skip];
+			((RINTERNAL *)dest)->nrecs = NEXTINDEX(rchild);
+			((RINTERNAL *)dest)->pgno = rchild->pgno;
+			break;
+		default:
+			LIBC_ABORT("illegal rchild->flags & P_TYPE (0x%x)", rchild->flags & P_TYPE);
+		}
+
+		/* Unpin the held pages. */
+		if (!parentsplit) {
+			mpool_put(t->bt_mp, h, MPOOL_DIRTY);
+			break;
+		}
+
+		/* If the root page was split, make it look right. */
+		if (sp->pgno == P_ROOT &&
+		    (F_ISSET(t, R_RECNO) ?
+		    bt_rroot(t, sp, l, r) : bt_broot(t, sp, l, r)) == RET_ERROR)
+			goto err1;
+
+		mpool_put(t->bt_mp, lchild, MPOOL_DIRTY);
+		mpool_put(t->bt_mp, rchild, MPOOL_DIRTY);
+	}
+
+	/* Unpin the held pages. */
+	mpool_put(t->bt_mp, l, MPOOL_DIRTY);
+	mpool_put(t->bt_mp, r, MPOOL_DIRTY);
+
+	/* Clear any pages left on the stack. */
+	return (RET_SUCCESS);
+
+	/*
+	 * If something fails in the above loop we were already walking back
+	 * up the tree and the tree is now inconsistent.  Nothing much we can
+	 * do about it but release any memory we're holding.
+	 */
+err1:	mpool_put(t->bt_mp, lchild, MPOOL_DIRTY);
+	mpool_put(t->bt_mp, rchild, MPOOL_DIRTY);
+
+err2:	mpool_put(t->bt_mp, l, 0);
+	mpool_put(t->bt_mp, r, 0);
+	__dbpanic(t->bt_dbp);
+	return (RET_ERROR);
+}
+
+/*
+ * BT_PAGE -- Split a non-root page of a btree.
+ *
+ * Parameters:
+ * t: tree
+ * h: root page
+ * lp: pointer to left page pointer
+ * rp: pointer to right page pointer
+ * skip: pointer to index to leave open
+ * ilen: insert length
+ *
+ * Returns:
+ * Pointer to page in which to insert or NULL on error.
+ */
+static PAGE *
+bt_page(t, h, lp, rp, skip, ilen)
+	BTREE *t;
+	PAGE *h, **lp, **rp;
+	indx_t *skip;
+	size_t ilen;
+{
+	PAGE *l, *r, *tp;
+	pgno_t npg;
+
+#ifdef STATISTICS
+	++bt_split;
+#endif
+	/* Put the new right page for the split into place. */
+	if ((r = __bt_new(t, &npg)) == NULL)
+		return (NULL);
+	r->pgno = npg;
+	r->lower = BTDATAOFF;
+	r->upper = t->bt_psize;
+	r->nextpg = h->nextpg;
+	r->prevpg = h->pgno;
+	r->flags = h->flags & P_TYPE;
+
+	/*
+	 * If we're splitting the last page on a level because we're appending
+	 * a key to it (skip is NEXTINDEX()), it's likely that the data is
+	 * sorted.  Adding an empty page on the side of the level is less work
+	 * and can push the fill factor much higher than normal.  If we're
+	 * wrong it's no big deal, we'll just do the split the right way next
+	 * time.  It may look like it's equally easy to do a similar hack for
+	 * reverse sorted data, that is, split the tree left, but it's not.
+	 * Don't even try.
+	 */
+	if (h->nextpg == P_INVALID && *skip == NEXTINDEX(h)) {
+#ifdef STATISTICS
+		++bt_sortsplit;
+#endif
+		h->nextpg = r->pgno;
+		/* Reserve one index slot for the pending insert. */
+		r->lower = BTDATAOFF + sizeof(indx_t);
+		*skip = 0;
+		*lp = h;
+		*rp = r;
+		return (r);
+	}
+
+	/* Put the new left page for the split into place. */
+	if ((l = (PAGE *)malloc(t->bt_psize)) == NULL) {
+		mpool_put(t->bt_mp, r, 0);
+		return (NULL);
+	}
+#ifdef PURIFY
+	memset(l, 0xff, t->bt_psize);
+#endif
+	l->pgno = h->pgno;
+	l->nextpg = r->pgno;
+	l->prevpg = h->prevpg;
+	l->lower = BTDATAOFF;
+	l->upper = t->bt_psize;
+	l->flags = h->flags & P_TYPE;
+
+	/* Fix up the previous pointer of the page after the split page. */
+	if (h->nextpg != P_INVALID) {
+		if ((tp = mpool_get(t->bt_mp, h->nextpg, 0)) == NULL) {
+			free(l);
+			/* XXX mpool_free(t->bt_mp, r->pgno); */
+			return (NULL);
+		}
+		tp->prevpg = r->pgno;
+		mpool_put(t->bt_mp, tp, MPOOL_DIRTY);
+	}
+
+	/*
+	 * Split right.  The key/data pairs aren't sorted in the btree page so
+	 * it's simpler to copy the data from the split page onto two new pages
+	 * instead of copying half the data to the right page and compacting
+	 * the left page in place.  Since the left page can't change, we have
+	 * to swap the original and the allocated left page after the split.
+	 */
+	tp = bt_psplit(t, h, l, r, skip, ilen);
+
+	/* Move the new left page onto the old left page. */
+	memmove(h, l, t->bt_psize);
+	if (tp == l)
+		tp = h;	/* insert target was the scratch copy; retarget */
+	free(l);
+
+	*lp = h;
+	*rp = r;
+	return (tp);
+}
+
+/*
+ * BT_ROOT -- Split the root page of a btree.
+ *
+ * Parameters:
+ * t: tree
+ * h: root page
+ * lp: pointer to left page pointer
+ * rp: pointer to right page pointer
+ * skip: pointer to index to leave open
+ * ilen: insert length
+ *
+ * Returns:
+ * Pointer to page in which to insert or NULL on error.
+ */
+static PAGE *
+bt_root(t, h, lp, rp, skip, ilen)
+	BTREE *t;
+	PAGE *h, **lp, **rp;
+	indx_t *skip;
+	size_t ilen;
+{
+	PAGE *l, *r, *tp;
+	pgno_t lnpg, rnpg;
+
+#ifdef STATISTICS
+	++bt_split;
+	++bt_rootsplit;
+#endif
+	/* Put the new left and right pages for the split into place. */
+	if ((l = __bt_new(t, &lnpg)) == NULL ||
+	    (r = __bt_new(t, &rnpg)) == NULL)
+		return (NULL);
+	/* Link the siblings; the root itself is fixed up by the caller. */
+	l->pgno = lnpg;
+	r->pgno = rnpg;
+	l->nextpg = r->pgno;
+	r->prevpg = l->pgno;
+	l->prevpg = r->nextpg = P_INVALID;
+	l->lower = r->lower = BTDATAOFF;
+	l->upper = r->upper = t->bt_psize;
+	l->flags = r->flags = h->flags & P_TYPE;
+
+	/* Split the root page. */
+	tp = bt_psplit(t, h, l, r, skip, ilen);
+
+	*lp = l;
+	*rp = r;
+	return (tp);
+}
+
+/*
+ * BT_RROOT -- Fix up the recno root page after it has been split.
+ *
+ * Parameters:
+ * t: tree
+ * h: root page
+ * l: left page
+ * r: right page
+ *
+ * Returns:
+ * RET_ERROR, RET_SUCCESS
+ */
+static int
+bt_rroot(t, h, l, r)
+	BTREE *t;
+	PAGE *h, *l, *r;
+{
+	char *dest;
+
+	/* Insert the left and right keys, set the header information. */
+	h->linp[0] = h->upper = t->bt_psize - NRINTERNAL;
+	dest = (char *)h + h->upper;
+	/* Leaves store a record count directly; internal pages need a sum. */
+	WR_RINTERNAL(dest,
+	    l->flags & P_RLEAF ? NEXTINDEX(l) : rec_total(l), l->pgno);
+
+	h->linp[1] = h->upper -= NRINTERNAL;
+	dest = (char *)h + h->upper;
+	WR_RINTERNAL(dest,
+	    r->flags & P_RLEAF ? NEXTINDEX(r) : rec_total(r), r->pgno);
+
+	/* There are two keys on the page. */
+	h->lower = BTDATAOFF + 2 * sizeof(indx_t);
+
+	/* Unpin the root page, set to recno internal page. */
+	h->flags &= ~P_TYPE;
+	h->flags |= P_RINTERNAL;
+	mpool_put(t->bt_mp, h, MPOOL_DIRTY);
+
+	return (RET_SUCCESS);
+}
+
+/*
+ * BT_BROOT -- Fix up the btree root page after it has been split.
+ *
+ * Parameters:
+ * t: tree
+ * h: root page
+ * l: left page
+ * r: right page
+ *
+ * Returns:
+ * RET_ERROR, RET_SUCCESS
+ */
+static int
+bt_broot(t, h, l, r)
+	BTREE *t;
+	PAGE *h, *l, *r;
+{
+	BINTERNAL *bi;
+	BLEAF *bl;
+	u_int32_t nbytes;
+	char *dest;
+
+	/*
+	 * If the root page was a leaf page, change it into an internal page.
+	 * We copy the key we split on (but not the key's data, in the case of
+	 * a leaf page) to the new root page.
+	 *
+	 * The btree comparison code guarantees that the left-most key on any
+	 * level of the tree is never used, so it doesn't need to be filled in.
+	 */
+	nbytes = NBINTERNAL(0);
+	h->linp[0] = h->upper = t->bt_psize - nbytes;
+	dest = (char *)h + h->upper;
+	/* Zero-length key: the leftmost entry's key is never compared. */
+	WR_BINTERNAL(dest, 0, l->pgno, 0);
+
+	switch (h->flags & P_TYPE) {
+	case P_BLEAF:
+		/* The right page's first key becomes the split key. */
+		bl = GETBLEAF(r, 0);
+		nbytes = NBINTERNAL(bl->ksize);
+		h->linp[1] = h->upper -= nbytes;
+		dest = (char *)h + h->upper;
+		WR_BINTERNAL(dest, bl->ksize, r->pgno, 0);
+		memmove(dest, bl->bytes, bl->ksize);
+
+		/*
+		 * If the key is on an overflow page, mark the overflow chain
+		 * so it isn't deleted when the leaf copy of the key is deleted.
+		 */
+		if (bl->flags & P_BIGKEY &&
+		    bt_preserve(t, *(pgno_t *)bl->bytes) == RET_ERROR)
+			return (RET_ERROR);
+		break;
+	case P_BINTERNAL:
+		/* Copy the right page's first internal entry up. */
+		bi = GETBINTERNAL(r, 0);
+		nbytes = NBINTERNAL(bi->ksize);
+		h->linp[1] = h->upper -= nbytes;
+		dest = (char *)h + h->upper;
+		memmove(dest, bi, nbytes);
+		((BINTERNAL *)dest)->pgno = r->pgno;
+		break;
+	default:
+		LIBC_ABORT("illegal h->flags & P_TYPE (0x%x)", h->flags & P_TYPE);
+	}
+
+	/* There are two keys on the page. */
+	h->lower = BTDATAOFF + 2 * sizeof(indx_t);
+
+	/* Unpin the root page, set to btree internal page. */
+	h->flags &= ~P_TYPE;
+	h->flags |= P_BINTERNAL;
+	mpool_put(t->bt_mp, h, MPOOL_DIRTY);
+
+	return (RET_SUCCESS);
+}
+
+/*
+ * BT_PSPLIT -- Do the real work of splitting the page.
+ *
+ * Parameters:
+ * t: tree
+ * h: page to be split
+ * l: page to put lower half of data
+ * r: page to put upper half of data
+ * pskip: pointer to index to leave open
+ * ilen: insert length
+ *
+ * Returns:
+ * Pointer to page in which to insert.
+ */
+static PAGE *
+bt_psplit(t, h, l, r, pskip, ilen)
+ BTREE *t;
+ PAGE *h, *l, *r;
+ indx_t *pskip;
+ size_t ilen;
+{
+ BINTERNAL *bi;
+ BLEAF *bl;
+ CURSOR *c;
+ RLEAF *rl;
+ PAGE *rval;
+ void *src;
+ indx_t full, half, nxt, off, skip, top, used;
+ u_int32_t nbytes;
+ int bigkeycnt, isbigkey;
+
+ /*
+ * Split the data to the left and right pages. Leave the skip index
+ * open. Additionally, make some effort not to split on an overflow
+ * key. This makes internal page processing faster and can save
+ * space as overflow keys used by internal pages are never deleted.
+ */
+ bigkeycnt = 0;
+ skip = *pskip;
+ full = t->bt_psize - BTDATAOFF;
+ half = full / 2;
+ used = 0;
+ for (nxt = off = 0, top = NEXTINDEX(h); nxt < top; ++off) {
+ if (skip == off) {
+ nbytes = ilen;
+ isbigkey = 0; /* XXX: not really known. */
+ } else
+ switch (h->flags & P_TYPE) {
+ case P_BINTERNAL:
+ src = bi = GETBINTERNAL(h, nxt);
+ nbytes = NBINTERNAL(bi->ksize);
+ isbigkey = bi->flags & P_BIGKEY;
+ break;
+ case P_BLEAF:
+ src = bl = GETBLEAF(h, nxt);
+ nbytes = NBLEAF(bl);
+ isbigkey = bl->flags & P_BIGKEY;
+ break;
+ case P_RINTERNAL:
+ src = GETRINTERNAL(h, nxt);
+ nbytes = NRINTERNAL;
+ isbigkey = 0;
+ break;
+ case P_RLEAF:
+ src = rl = GETRLEAF(h, nxt);
+ nbytes = NRLEAF(rl);
+ isbigkey = 0;
+ break;
+ default:
+ LIBC_ABORT("illegal h->flags & P_TYPE (0x%x)", h->flags & P_TYPE);
+ }
+
+ /*
+ * If the key/data pairs are substantial fractions of the max
+ * possible size for the page, it's possible to get situations
+ * where we decide to try and copy too much onto the left page.
+ * Make sure that doesn't happen.
+ */
+ if ((skip <= off && used + nbytes + sizeof(indx_t) >= full)
+ || nxt == top - 1) {
+ --off;
+ break;
+ }
+
+ /* Copy the key/data pair, if not the skipped index. */
+ if (skip != off) {
+ ++nxt;
+
+ l->linp[off] = l->upper -= nbytes;
+ memmove((char *)l + l->upper, src, nbytes);
+ }
+
+ used += nbytes + sizeof(indx_t);
+ if (used >= half) {
+ if (!isbigkey || bigkeycnt == 3)
+ break;
+ else
+ ++bigkeycnt;
+ }
+ }
+
+ /*
+ * Off is the last offset that's valid for the left page.
+ * Nxt is the first offset to be placed on the right page.
+ */
+ l->lower += (off + 1) * sizeof(indx_t);
+
+ /*
+ * If splitting the page that the cursor was on, the cursor has to be
+ * adjusted to point to the same record as before the split. If the
+ * cursor is at or past the skipped slot, the cursor is incremented by
+ * one. If the cursor is on the right page, it is decremented by the
+ * number of records split to the left page.
+ */
+ c = &t->bt_cursor;
+ if (F_ISSET(c, CURS_INIT) && c->pg.pgno == h->pgno) {
+ if (c->pg.index >= skip)
+ ++c->pg.index;
+ if (c->pg.index < nxt) /* Left page. */
+ c->pg.pgno = l->pgno;
+ else { /* Right page. */
+ c->pg.pgno = r->pgno;
+ c->pg.index -= nxt;
+ }
+ }
+
+ /*
+ * If the skipped index was on the left page, just return that page.
+ * Otherwise, adjust the skip index to reflect the new position on
+ * the right page.
+ */
+ if (skip <= off) {
+ skip = MAX_PAGE_OFFSET;
+ rval = l;
+ } else {
+ rval = r;
+ *pskip -= nxt;
+ }
+
+ for (off = 0; nxt < top; ++off) {
+ if (skip == nxt) {
+ ++off;
+ skip = MAX_PAGE_OFFSET;
+ }
+ switch (h->flags & P_TYPE) {
+ case P_BINTERNAL:
+ src = bi = GETBINTERNAL(h, nxt);
+ nbytes = NBINTERNAL(bi->ksize);
+ break;
+ case P_BLEAF:
+ src = bl = GETBLEAF(h, nxt);
+ nbytes = NBLEAF(bl);
+ break;
+ case P_RINTERNAL:
+ src = GETRINTERNAL(h, nxt);
+ nbytes = NRINTERNAL;
+ break;
+ case P_RLEAF:
+ src = rl = GETRLEAF(h, nxt);
+ nbytes = NRLEAF(rl);
+ break;
+ default:
+ LIBC_ABORT("illegal h->flags & P_TYPE (0x%x)", h->flags & P_TYPE);
+ }
+ ++nxt;
+ r->linp[off] = r->upper -= nbytes;
+ memmove((char *)r + r->upper, src, nbytes);
+ }
+ r->lower += off * sizeof(indx_t);
+
+ /* If the key is being appended to the page, adjust the index. */
+ if (skip == top)
+ r->lower += sizeof(indx_t);
+
+ return (rval);
+}
+
+/*
+ * BT_PRESERVE -- Mark a chain of pages as used by an internal node.
+ *
+ * Chains of indirect blocks pointed to by leaf nodes get reclaimed when the
+ * record that references them gets deleted. Chains pointed to by internal
+ * pages never get deleted. This routine marks a chain as pointed to by an
+ * internal page.
+ *
+ * Parameters:
+ * t: tree
+ * pg: page number of first page in the chain.
+ *
+ * Returns:
+ * RET_SUCCESS, RET_ERROR.
+ */
+static int
+bt_preserve(t, pg)
+ BTREE *t;
+ pgno_t pg;
+{
+ PAGE *h;
+
+ if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
+ return (RET_ERROR);
+ h->flags |= P_PRESERVE;
+ mpool_put(t->bt_mp, h, MPOOL_DIRTY);
+ return (RET_SUCCESS);
+}
+
+/*
+ * REC_TOTAL -- Return the number of recno entries below a page.
+ *
+ * Parameters:
+ * h: page
+ *
+ * Returns:
+ * The number of recno entries below a page.
+ *
+ * XXX
+ * These values could be set by the bt_psplit routine. The problem is that the
+ * entry has to be popped off of the stack etc. or the values have to be passed
+ * all the way back to bt_split/bt_rroot and it's not very clean.
+ */
+static recno_t
+rec_total(h)
+ PAGE *h;
+{
+ recno_t recs;
+ indx_t nxt, top;
+
+ for (recs = 0, nxt = 0, top = NEXTINDEX(h); nxt < top; ++nxt)
+ recs += GETRINTERNAL(h, nxt)->nrecs;
+ return (recs);
+}
-Index: hash.c
-===================================================================
-RCS file: /cvs/root/Libc/db/hash/FreeBSD/hash.c,v
-retrieving revision 1.3
-diff -u -d -b -w -p -u -r1.3 hash.c
---- hash.c 2004/11/25 19:37:57 1.3
-+++ hash.c 2004/12/10 20:34:43
+--- hash.c.orig 2008-09-07 11:37:54.000000000 -0700
++++ hash.c 2008-09-07 12:42:15.000000000 -0700
@@ -58,7 +58,7 @@ __FBSDID("$FreeBSD: src/lib/libc/db/hash
#include <db.h>
#include "hash.h"
}
if (!(hashp = (HTAB *)calloc(1, sizeof(HTAB))))
+@@ -722,7 +721,7 @@ found:
+ return (ERROR);
+ break;
+ default:
+- abort();
++ LIBC_ABORT("illegal action (%d)", action);
+ }
+ save_bufp->flags &= ~BUF_PIN;
+ return (SUCCESS);
return (ERROR);
break;
default:
- abort();
+ LIBC_ABORT("illegal action (%d)", action);
}
save_bufp->flags &= ~BUF_PIN;
return (SUCCESS);
---- mpool.c.orig 2006-12-13 22:19:43.000000000 -0800
-+++ mpool.c 2006-12-13 22:27:26.000000000 -0800
-@@ -294,10 +294,16 @@
+--- mpool.c.orig 2008-09-07 11:37:54.000000000 -0700
++++ mpool.c 2008-09-07 12:46:41.000000000 -0700
+@@ -128,7 +128,7 @@ mpool_new(mp, pgnoaddr)
+
+ if (mp->npages == MAX_PAGE_NUMBER) {
+ (void)fprintf(stderr, "mpool_new: page allocation overflow.\n");
+- abort();
++ LIBC_ABORT("page allocation overflow");
+ }
+ #ifdef STATISTICS
+ ++mp->pagenew;
+@@ -180,7 +180,7 @@ mpool_get(mp, pgno, flags)
+ if (bp->flags & MPOOL_PINNED) {
+ (void)fprintf(stderr,
+ "mpool_get: page %d already pinned\n", bp->pgno);
+- abort();
++ LIBC_ABORT("page %d already pinned", bp->pgno);
+ }
+ #endif
+ /*
+@@ -253,7 +253,7 @@ mpool_put(mp, page, flags)
+ if (!(bp->flags & MPOOL_PINNED)) {
+ (void)fprintf(stderr,
+ "mpool_put: page %d not pinned\n", bp->pgno);
+- abort();
++ LIBC_ABORT("page %d not pinned", bp->pgno);
+ }
+ #endif
+ bp->flags &= ~MPOOL_PINNED;
+@@ -294,10 +294,16 @@ mpool_sync(mp)
BKT *bp;
/* Walk the lru chain, flushing any dirty pages to disk. */
if (mp->npages == MAX_PAGE_NUMBER) {
(void)fprintf(stderr, "mpool_new: page allocation overflow.\n");
- abort();
+ LIBC_ABORT("page allocation overflow");
}
#ifdef STATISTICS
++mp->pagenew;
if (bp->flags & MPOOL_PINNED) {
(void)fprintf(stderr,
"mpool_get: page %d already pinned\n", bp->pgno);
- abort();
+ LIBC_ABORT("page %d already pinned", bp->pgno);
}
#endif
/*
if (!(bp->flags & MPOOL_PINNED)) {
(void)fprintf(stderr,
"mpool_put: page %d not pinned\n", bp->pgno);
- abort();
+ LIBC_ABORT("page %d not pinned", bp->pgno);
}
#endif
bp->flags &= ~MPOOL_PINNED;
.for _src in ${FBSDMISRCS}
CFLAGS-${_src:R}-fbsd.${_src:E} += -D__DBINTERFACE_PRIVATE
.endfor
+CFLAGS-rec_open-fbsd.c += -D_DARWIN_UNLIMITED_STREAMS
FBSDHDRS= recno.h
.include "Makefile.fbsd_end"
/*
* tcgetsid.c
*/
-#define COMPAT_43_TTY 1
#include <sys/ioctl.h>
#include <termios.h>
+#include <sys/ioctl_compat.h> /* ordering avoid termios.h redeclarations */
pid_t
tcgetsid(int fildes)
#define __waitpid waitpid
#define _GENERIC_DIRSIZ(dp) \
- ((sizeof (struct dirent) - (MAXNAMLEN+1)) + (((dp)->d_namlen+1 + 3) &~ 3))
+ (((unsigned long)&((struct dirent *)0)->d_name + (dp)->d_namlen+1 + 3) & ~3)
#endif /* __FBSD_COMPAT__H_ */
/*
- * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2003, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#define LDBL_HEAD_TAIL_PAIR
-__private_extern__ void _ldbl2array32dd(union IEEEl2bits, uint32_t *);
+__private_extern__ int _ldbl2array32dd(union IEEEl2bits, uint32_t *);
#define LDBL_TO_ARRAY32(u, a) _ldbl2array32dd(u, a)
---- _hdtoa.c.orig 2008-03-15 10:50:51.000000000 -0700
-+++ _hdtoa.c 2008-03-27 00:55:34.000000000 -0700
+--- _hdtoa.c.orig 2008-09-07 11:38:10.000000000 -0700
++++ _hdtoa.c 2008-09-07 12:49:47.000000000 -0700
@@ -55,7 +55,7 @@ roundup(char *s0, int ndigits)
*s = 1;
return (1);
}
++*s;
return (0);
+@@ -126,12 +126,12 @@ __hdtoa(double d, const char *xdigs, int
+ static const int sigfigs = (DBL_MANT_DIG + 3) / 4;
+ union IEEEd2bits u;
+ char *s, *s0;
+- int bufsize;
++ int bufsize, f;
+
+ u.d = d;
+ *sign = u.bits.sign;
+
+- switch (fpclassify(d)) {
++ switch (f = fpclassify(d)) {
+ case FP_NORMAL:
+ *decpt = u.bits.exp - DBL_ADJ;
+ break;
+@@ -149,7 +149,7 @@ __hdtoa(double d, const char *xdigs, int
+ *decpt = INT_MAX;
+ return (nrv_alloc(NANSTR, rve, sizeof(NANSTR) - 1));
+ default:
+- abort();
++ LIBC_ABORT("fpclassify returned %d", f);
+ }
+
+ /* FP_NORMAL or FP_SUBNORMAL */
@@ -210,6 +210,7 @@ __hdtoa(double d, const char *xdigs, int
return (s0);
}
#if (LDBL_MANT_DIG > DBL_MANT_DIG)
/*
-@@ -223,12 +224,17 @@ __hldtoa(long double e, const char *xdig
+@@ -222,13 +223,18 @@ __hldtoa(long double e, const char *xdig
+ static const int sigfigs = (LDBL_MANT_DIG + 3) / 4;
union IEEEl2bits u;
char *s, *s0;
- int bufsize;
+- int bufsize;
++ int bufsize, f;
+#ifdef LDBL_HEAD_TAIL_PAIR
+ uint32_t bits[4];
+ int i, pos;
u.e = e;
*sign = u.bits.sign;
- switch (fpclassify(e)) {
+- switch (fpclassify(e)) {
++ switch (f = fpclassify(e)) {
case FP_NORMAL:
+ case FP_SUPERNORMAL:
*decpt = u.bits.exp - LDBL_ADJ;
break;
case FP_ZERO:
+@@ -245,7 +251,7 @@ __hldtoa(long double e, const char *xdig
+ *decpt = INT_MAX;
+ return (nrv_alloc(NANSTR, rve, sizeof(NANSTR) - 1));
+ default:
+- abort();
++ LIBC_ABORT("fpclassify returned %d", f);
+ }
+
+ /* FP_NORMAL or FP_SUBNORMAL */
@@ -270,6 +276,19 @@ __hldtoa(long double e, const char *xdig
*/
for (s = s0 + bufsize - 1; s > s0 + sigfigs - 1; s--)
*s = 0;
+#ifdef LDBL_HEAD_TAIL_PAIR
-+ _ldbl2array32dd(u, bits);
++ *decpt -= _ldbl2array32dd(u, bits);
+ i = 0;
+ pos = 8;
+ for (; s > s0; s--) {
---- _ldtoa.c.orig 2004-06-03 15:17:18.000000000 -0700
-+++ _ldtoa.c 2005-10-09 00:09:11.000000000 -0700
-@@ -46,7 +46,7 @@
+--- _ldtoa.c.orig 2008-09-07 11:38:10.000000000 -0700
++++ _ldtoa.c 2008-09-07 12:55:35.000000000 -0700
+@@ -46,7 +46,7 @@ char *
__ldtoa(long double *ld, int mode, int ndigits, int *decpt, int *sign,
char **rve)
{
LDBL_MANT_DIG, /* nbits */
LDBL_MIN_EXP - LDBL_MANT_DIG, /* emin */
LDBL_MAX_EXP - LDBL_MANT_DIG, /* emax */
-@@ -61,28 +61,55 @@
+@@ -61,28 +61,57 @@ __ldtoa(long double *ld, int mode, int n
char *ret;
union IEEEl2bits u;
uint32_t bits[(LDBL_MANT_DIG + 31) / 32];
+#ifdef Honor_FLT_ROUNDS
+ int rounding = Flt_Rounds;
+#endif
-+#if defined(__ppc__) || defined(__ppc64__)
+ int type;
-+#endif /* defined(__ppc__) || defined(__ppc64__) */
u.e = *ld;
+#if defined(__ppc__) || defined(__ppc64__)
+ type = FP_SUBNORMAL;
+ if (type == FP_SUBNORMAL)
+ u.e *= 1.0e32L;
++#else /* !defined(__ppc__) && !defined(__ppc64__) */
++ type = fpclassify(u.e);
+#endif /* defined(__ppc__) || defined(__ppc64__) */
*sign = u.bits.sign;
be = u.bits.exp - (LDBL_MAX_EXP - 1) - (LDBL_MANT_DIG - 1);
++#if defined(__ppc__) || defined(__ppc64__)
++ be -= LDBL_TO_ARRAY32(u, bits);
++#else /* !defined(__ppc__) && !defined(__ppc64__) */
LDBL_TO_ARRAY32(u, bits);
++#endif /* defined(__ppc__) || defined(__ppc64__) */
-+#if defined(__ppc__) || defined(__ppc64__)
+- switch (fpclassify(u.e)) {
+ switch (type) {
++#if defined(__ppc__) || defined(__ppc64__)
+ case FP_SUBNORMAL:
-+#else /* !defined(__ppc__) && !defined(__ppc64__) */
- switch (fpclassify(u.e)) {
+#endif /* defined(__ppc__) || defined(__ppc64__) */
case FP_NORMAL:
+ case FP_SUPERNORMAL:
case FP_INFINITE:
kind = STRTOG_Infinite;
break;
-@@ -93,8 +120,19 @@
- abort();
+@@ -90,11 +119,22 @@ __ldtoa(long double *ld, int mode, int n
+ kind = STRTOG_NaN;
+ break;
+ default:
+- abort();
++ LIBC_ABORT("fpclassify returned %d", type);
}
- ret = gdtoa(&fpi, be, (ULong *)bits, &kind, mode, ndigits, decpt, rve);
*/
#ifdef Honor_FLT_ROUNDS
-#define Rounding rounding
#undef Check_FLT_ROUNDS
#define Check_FLT_ROUNDS
#else
Bigint *b, *b1, *delta, *mlo, *mhi, *S;
double d2, ds, eps;
char *s, *s0;
-#ifdef Honor_FLT_ROUNDS
- int rounding;
-#endif
#ifdef SET_INEXACT
int inexact, oldinexact;
#endif
+#ifdef Honor_FLT_ROUNDS /*{*/
+ int Rounding;
+#ifdef Trust_FLT_ROUNDS /*{{ only define this if FLT_ROUNDS really works! */
+ Rounding = Flt_Rounds;
+#else /*}{*/
+ Rounding = 1;
+ switch(fegetround()) {
+ case FE_TOWARDZERO: Rounding = 0; break;
+ case FE_UPWARD: Rounding = 2; break;
+ case FE_DOWNWARD: Rounding = 3;
+ }
+#endif /*}}*/
+#endif /*}*/
#ifndef MULTIPLE_THREADS
if (dtoa_result) {
inexact = 1;
#endif
#ifdef Honor_FLT_ROUNDS
- if ((rounding = Flt_Rounds) >= 2) {
+ if (Rounding >= 2) {
if (*sign)
- rounding = rounding == 2 ? 0 : 2;
+ Rounding = Rounding == 2 ? 0 : 2;
else
- if (rounding != 2)
- rounding = 0;
+ if (Rounding != 2)
+ Rounding = 0;
}
#endif
s = s0 = rv_alloc(i);
#ifdef Honor_FLT_ROUNDS
- if (mode > 1 && rounding != 1)
+ if (mode > 1 && Rounding != 1)
leftright = 0;
#endif
if (i == ilim) {
#ifdef Honor_FLT_ROUNDS
if (mode > 1)
- switch(rounding) {
+ switch(Rounding) {
case 0: goto ret1;
case 2: goto bump_up;
}
spec_case = 0;
if ((mode < 2 || leftright)
#ifdef Honor_FLT_ROUNDS
- && rounding == 1
+ && Rounding == 1
#endif
) {
if (!word1(d) && !(word0(d) & Bndry_mask)
#ifndef ROUND_BIASED
if (j1 == 0 && mode != 1 && !(word1(d) & 1)
#ifdef Honor_FLT_ROUNDS
- && rounding >= 1
+ && Rounding >= 1
#endif
) {
if (dig == '9')
}
#ifdef Honor_FLT_ROUNDS
if (mode > 1)
- switch(rounding) {
+ switch(Rounding) {
case 0: goto accept_dig;
case 2: goto keep_dig;
}
}
if (j1 > 0) {
#ifdef Honor_FLT_ROUNDS
- if (!rounding)
+ if (!Rounding)
goto accept_dig;
#endif
if (dig == '9') { /* possible if i == 1 */
/* Round off last digit */
#ifdef Honor_FLT_ROUNDS
- switch(rounding) {
+ switch(Rounding) {
case 0: goto trimzeros;
case 2: goto roundoff;
}
if (dval(d) > ds + dval(eps))
goto bump_up;
else if (dval(d) < ds - dval(eps)) {
- while(*--s == '0'){}
- s++;
if (dval(d))
inex = STRTOG_Inexlo;
- goto ret1;
+ goto clear_trailing0;
}
break;
}
}
++*s++;
}
- else
+ else {
inex = STRTOG_Inexlo;
+ clear_trailing0:
+ while(*--s == '0'){}
+ ++s;
+ }
break;
}
}
if (b->wds > 1 || b->x[0])
inex = STRTOG_Inexlo;
while(*--s == '0'){}
- s++;
+ ++s;
}
ret:
Bfree(S);
+++ /dev/null
---- gdtoa-gdtoa.c.orig 2007-03-26 22:44:56.000000000 -0700
-+++ gdtoa-gdtoa.c 2007-03-26 23:21:08.000000000 -0700
-@@ -479,8 +479,11 @@
- }
- ++*s++;
- }
-- else
-+ else {
- inex = STRTOG_Inexlo;
-+ while(*--s == '0'){}
-+ s++;
-+ }
- break;
- }
- }
{
Bigint *b;
CONST unsigned char *decpt, *s0, *s, *s1;
- int esign, havedig, irv, k, n, nbits, up, zret;
+ int big, esign, havedig, irv, j, k, n, n0, nbits, up, zret;
ULong L, lostbits, *x;
Long e, e1;
#ifdef USE_LOCALE
- unsigned char decimalpoint = *localeconv()->decimal_point;
+ int i;
+#ifdef NO_LOCALE_CACHE
+ const unsigned char *decimalpoint = (unsigned char*)localeconv()->decimal_point;
#else
-#define decimalpoint '.'
+ const unsigned char *decimalpoint;
+ static unsigned char *decimalpoint_cache;
+ if (!(s0 = decimalpoint_cache)) {
+ s0 = (unsigned char*)localeconv()->decimal_point;
+ if ((decimalpoint_cache = (char*)malloc(strlen(s0) + 1))) {
+ strcpy(decimalpoint_cache, s0);
+ s0 = decimalpoint_cache;
+ }
+ }
+ decimalpoint = s0;
+#endif
#endif
if (!hexdig['0'])
hexdig_init_D2A();
+ *bp = 0;
havedig = 0;
s0 = *(CONST unsigned char **)sp + 2;
while(s0[havedig] == '0')
decpt = 0;
zret = 0;
e = 0;
- if (!hexdig[*s]) {
+ if (hexdig[*s])
+ havedig++;
+ else {
zret = 1;
- if (*s != decimalpoint)
+#ifdef USE_LOCALE
+ for(i = 0; decimalpoint[i]; ++i) {
+ if (s[i] != decimalpoint[i])
+ goto pcheck;
+ }
+ decpt = s += i;
+#else
+ if (*s != '.')
goto pcheck;
decpt = ++s;
+#endif
if (!hexdig[*s])
goto pcheck;
while(*s == '0')
}
while(hexdig[*s])
s++;
- if (*s == decimalpoint && !decpt) {
+#ifdef USE_LOCALE
+ if (*s == *decimalpoint && !decpt) {
+ for(i = 1; decimalpoint[i]; ++i) {
+ if (s[i] != decimalpoint[i])
+ goto pcheck;
+ }
+ decpt = s += i;
+#else
+ if (*s == '.' && !decpt) {
decpt = ++s;
+#endif
while(hexdig[*s])
s++;
- }
+ }/*}*/
if (decpt)
e = -(((Long)(s-decpt)) << 2);
pcheck:
s1 = s;
+ big = esign = 0;
switch(*s) {
case 'p':
case 'P':
- esign = 0;
switch(*++s) {
case '-':
esign = 1;
break;
}
e1 = n - 0x10;
- while((n = hexdig[*++s]) !=0 && n <= 0x19)
+ while((n = hexdig[*++s]) !=0 && n <= 0x19) {
+ if (e1 & 0xf8000000)
+ big = 1;
e1 = 10*e1 + n - 0x10;
+ }
if (esign)
e1 = -e1;
e += e1;
}
*sp = (char*)s;
+ if (!havedig)
+ *sp = (char*)s0 - 1;
if (zret)
- return havedig ? STRTOG_Zero : STRTOG_NoNumber;
+ return STRTOG_Zero;
+ if (big) {
+ if (esign) {
+ switch(fpi->rounding) {
+ case FPI_Round_up:
+ if (sign)
+ break;
+ goto ret_tiny;
+ case FPI_Round_down:
+ if (!sign)
+ break;
+ goto ret_tiny;
+ }
+ goto retz;
+ ret_tiny:
+ b = Balloc(0);
+ b->wds = 1;
+ b->x[0] = 1;
+ goto dret;
+ }
+ switch(fpi->rounding) {
+ case FPI_Round_near:
+ goto ovfl1;
+ case FPI_Round_up:
+ if (!sign)
+ goto ovfl1;
+ goto ret_big;
+ case FPI_Round_down:
+ if (sign)
+ goto ovfl1;
+ goto ret_big;
+ }
+ ret_big:
+ nbits = fpi->nbits;
+ n0 = n = nbits >> kshift;
+ if (nbits & kmask)
+ ++n;
+ for(j = n, k = 0; j >>= 1; ++k);
+ *bp = b = Balloc(k);
+ b->wds = n;
+ for(j = 0; j < n0; ++j)
+ b->x[j] = ALL_ON;
+ if (n > n0)
+ b->x[j] = ULbits >> (ULbits - (nbits & kmask));
+ *exp = fpi->emin;
+ return STRTOG_Normal | STRTOG_Inexlo;
+ }
n = s1 - s0 - 1;
for(k = 0; n > 7; n >>= 1)
k++;
x = b->x;
n = 0;
L = 0;
+#ifdef USE_LOCALE
+ for(i = 0; decimalpoint[i+1]; ++i);
+#endif
while(s1 > s0) {
- if (*--s1 == decimalpoint)
+#ifdef USE_LOCALE
+ if (*--s1 == decimalpoint[i]) {
+ s1 -= i;
continue;
+ }
+#else
+ if (*--s1 == '.')
+ continue;
+#endif
if (n == 32) {
*x++ = L;
L = 0;
k = n - 1;
if (x[k>>kshift] & 1 << (k & kmask)) {
lostbits = 2;
- if (k > 1 && any_on(b,k-1))
+ if (k > 0 && any_on(b,k))
lostbits = 3;
}
}
if (e > fpi->emax) {
ovfl:
Bfree(b);
- *bp = 0;
+ ovfl1:
+#ifndef NO_ERRNO
+ errno = ERANGE;
+#endif
return STRTOG_Infinite | STRTOG_Overflow | STRTOG_Inexhi;
}
irv = STRTOG_Normal;
case FPI_Round_down:
if (sign) {
one_bit:
- *exp = fpi->emin;
x[0] = b->wds = 1;
+ dret:
*bp = b;
+ *exp = fpi->emin;
+#ifndef NO_ERRNO
+ errno = ERANGE;
+#endif
return STRTOG_Denormal | STRTOG_Inexhi
| STRTOG_Underflow;
}
}
Bfree(b);
- *bp = 0;
+ retz:
+#ifndef NO_ERRNO
+ errno = ERANGE;
+#endif
return STRTOG_Zero | STRTOG_Inexlo | STRTOG_Underflow;
}
k = n - 1;
---- gdtoa-gethex.c.orig 2005-01-20 20:12:36.000000000 -0800
-+++ gdtoa-gethex.c 2005-03-23 15:45:22.000000000 -0800
-@@ -29,6 +29,8 @@
+--- gdtoa-gethex.c.orig 2008-10-28 11:14:40.000000000 -0700
++++ gdtoa-gethex.c 2008-10-28 11:20:32.000000000 -0700
+@@ -29,6 +29,8 @@ THIS SOFTWARE.
/* Please send bug reports to David M. Gay (dmg at acm dot org,
* with " at " changed at "@" and " dot " changed to "."). */
#include "gdtoaimp.h"
#ifdef USE_LOCALE
-@@ -37,10 +39,10 @@
+@@ -37,10 +39,10 @@ THIS SOFTWARE.
int
#ifdef KR_headers
#endif
{
Bigint *b;
-@@ -49,7 +51,13 @@
- ULong L, lostbits, *x;
+@@ -50,13 +52,14 @@ gethex( CONST char **sp, FPI *fpi, Long
Long e, e1;
#ifdef USE_LOCALE
-- unsigned char decimalpoint = *localeconv()->decimal_point;
-+ char *decimalpoint;
-+ unsigned char *decimalpointend = NULL;
-+ int decimalpointlen;
-+
+ int i;
+ NORMALIZE_LOCALE(loc);
-+ decimalpoint = localeconv_l(loc)->decimal_point;
-+ decimalpointlen = strlen(decimalpoint);
+ #ifdef NO_LOCALE_CACHE
+- const unsigned char *decimalpoint = (unsigned char*)localeconv()->decimal_point;
++ const unsigned char *decimalpoint = (unsigned char*)localeconv_l(loc)->decimal_point;
#else
- #define decimalpoint '.'
- #endif
-@@ -67,9 +75,18 @@
- e = 0;
- if (!hexdig[*s]) {
- zret = 1;
-+#ifdef USE_LOCALE
-+ if (strncmp((char *)s, decimalpoint, decimalpointlen) != 0)
-+#else /* USE_LOCALE */
- if (*s != decimalpoint)
-+#endif /* USE_LOCALE */
- goto pcheck;
-+#ifdef USE_LOCALE
-+ decpt = (s += decimalpointlen);
-+ decimalpointend = s - 1;
-+#else /* USE_LOCALE */
- decpt = ++s;
-+#endif /* USE_LOCALE */
- if (!hexdig[*s])
- goto pcheck;
- while(*s == '0')
-@@ -81,8 +98,18 @@
- }
- while(hexdig[*s])
- s++;
-- if (*s == decimalpoint && !decpt) {
-+#ifdef USE_LOCALE
-+ if (strncmp((char *)s, decimalpoint, decimalpointlen) == 0 && !decpt)
-+#else /* USE_LOCALE */
-+ if (*s == decimalpoint && !decpt)
-+#endif /* USE_LOCALE */
-+ {
-+#ifdef USE_LOCALE
-+ decpt = (s += decimalpointlen);
-+ decimalpointend = s - 1;
-+#else /* USE_LOCALE */
- decpt = ++s;
-+#endif /* USE_LOCALE */
- while(hexdig[*s])
- s++;
- }
-@@ -123,8 +150,15 @@
- n = 0;
- L = 0;
- while(s1 > s0) {
-+#ifdef USE_LOCALE
-+ if (--s1 == decimalpointend) {
-+ s1 -= decimalpointlen - 1;
-+ continue;
-+ }
-+#else /* USE_LOCALE */
- if (*--s1 == decimalpoint)
- continue;
-+#endif /* USE_LOCALE */
- if (n == 32) {
- *x++ = L;
- L = 0;
+ const unsigned char *decimalpoint;
+ static unsigned char *decimalpoint_cache;
+ if (!(s0 = decimalpoint_cache)) {
+- s0 = (unsigned char*)localeconv()->decimal_point;
++ s0 = (unsigned char*)localeconv_l(loc)->decimal_point;
+ if ((decimalpoint_cache = (char*)malloc(strlen(s0) + 1))) {
+ strcpy(decimalpoint_cache, s0);
+ s0 = decimalpoint_cache;
x1 = xe = x;
havedig = hd0 = i = 0;
s = *sp;
+ /* allow optional initial 0x or 0X */
+ while((c = *(CONST unsigned char*)(s+1)) && c <= ' ')
+ ++s;
+ if (s[1] == '0' && (s[2] == 'x' || s[2] == 'X')
+ && *(CONST unsigned char*)(s+3) > ' ')
+ s += 2;
while(c = *(CONST unsigned char*)++s) {
if (!(h = hexdig[c])) {
if (c <= ' ') {
x1 = x;
i = 0;
}
+ while(*(CONST unsigned char*)(s+1) <= ' ')
+ ++s;
+ if (s[1] == '0' && (s[2] == 'x' || s[2] == 'X')
+ && *(CONST unsigned char*)(s+3) > ' ')
+ s += 2;
continue;
}
if (/*(*/ c == ')' && havedig) {
*sp = s + 1;
break;
}
+#ifndef GDTOA_NON_PEDANTIC_NANCHECK
+ do {
+ if (/*(*/ c == ')') {
+ *sp = s + 1;
+ break;
+ }
+ } while(c = *++s);
+#endif
return STRTOG_NaN;
}
havedig++;
---- gdtoa-hexnan.c.orig 2005-01-20 20:12:36.000000000 -0800
-+++ gdtoa-hexnan.c 2005-06-10 17:43:17.000000000 -0700
-@@ -30,6 +30,7 @@
+--- gdtoa-hexnan.c.orig 2008-03-15 10:08:33.000000000 -0700
++++ gdtoa-hexnan.c 2008-08-30 17:55:23.000000000 -0700
+@@ -30,6 +30,7 @@ THIS SOFTWARE.
* with " at " changed at "@" and " dot " changed to "."). */
#include "gdtoaimp.h"
static void
#ifdef KR_headers
-@@ -57,75 +58,53 @@
+@@ -57,94 +58,53 @@ hexnan(sp, fpi, x0)
hexnan( CONST char **sp, FPI *fpi, ULong *x0)
#endif
{
+ if (sp == NULL || *sp == NULL || **sp != '(')
+ return STRTOG_NaN;
s = *sp;
+- /* allow optional initial 0x or 0X */
+- while((c = *(CONST unsigned char*)(s+1)) && c <= ' ')
+- ++s;
+- if (s[1] == '0' && (s[2] == 'x' || s[2] == 'X')
+- && *(CONST unsigned char*)(s+3) > ' ')
+- s += 2;
- while(c = *(CONST unsigned char*)++s) {
- if (!(h = hexdig[c])) {
- if (c <= ' ') {
- x1 = x;
- i = 0;
- }
+- while(*(CONST unsigned char*)(s+1) <= ' ')
+- ++s;
+- if (s[1] == '0' && (s[2] == 'x' || s[2] == 'X')
+- && *(CONST unsigned char*)(s+3) > ' ')
+- s += 2;
- continue;
- }
- if (/*(*/ c == ')' && havedig) {
- *sp = s + 1;
- break;
- }
+-#ifndef GDTOA_NON_PEDANTIC_NANCHECK
+- do {
+- if (/*(*/ c == ')') {
+- *sp = s + 1;
+- break;
+- }
+- } while(c = *++s);
+-#endif
+ if ((cp = strchr(s + 1, ')')) == NULL) {
+ *sp += strlen(s);
+ cp = s + 1;
--- /dev/null
+--- gdtoa-misc.c.orig 2008-11-05 15:59:34.000000000 -0800
++++ gdtoa-misc.c 2008-11-05 16:05:28.000000000 -0800
+@@ -29,9 +29,20 @@ THIS SOFTWARE.
+ /* Please send bug reports to David M. Gay (dmg at acm dot org,
+ * with " at " changed at "@" and " dot " changed to "."). */
+
++#define GDTOA_TSD
++#define Omit_Private_Memory
++
++#ifdef GDTOA_TSD
++#include <pthread.h>
++#endif /* GDTOA_TSD */
+ #include "gdtoaimp.h"
+
++#ifdef GDTOA_TSD
++static pthread_key_t gdtoa_tsd_key = (pthread_key_t)-1;
++static pthread_mutex_t gdtoa_tsd_lock = PTHREAD_MUTEX_INITIALIZER;
++#else /* !GDTOA_TSD */
+ static Bigint *freelist[Kmax+1];
++#endif /* GDTOA_TSD */
+ #ifndef Omit_Private_Memory
+ #ifndef PRIVATE_MEM
+ #define PRIVATE_MEM 2304
+@@ -40,6 +51,26 @@ THIS SOFTWARE.
+ static double private_mem[PRIVATE_mem], *pmem_next = private_mem;
+ #endif
+
++#ifdef GDTOA_TSD
++static void
++gdtoa_freelist_free(void *x)
++{
++ int i;
++ Bigint *cur, *next;
++ Bigint **fl = (Bigint **)x;
++
++ if (!fl) return;
++ for(i = 0; i < Kmax+1; fl++, i++) {
++ if (!*fl) continue;
++ for(cur = *fl; cur; cur = next) {
++ next = cur->next;
++ free(cur);
++ }
++ }
++ free(x);
++ }
++#endif /* GDTOA_TSD */
++
+ Bigint *
+ Balloc
+ #ifdef KR_headers
+@@ -53,8 +84,25 @@ Balloc
+ #ifndef Omit_Private_Memory
+ unsigned int len;
+ #endif
++#ifdef GDTOA_TSD
++ Bigint **freelist;
+
++ if (gdtoa_tsd_key == (pthread_key_t)-1) {
++ pthread_mutex_lock(&gdtoa_tsd_lock);
++ if (gdtoa_tsd_key == (pthread_key_t)-1) {
++ gdtoa_tsd_key = __LIBC_PTHREAD_KEY_GDTOA_BIGINT;
++ pthread_key_init_np(gdtoa_tsd_key, gdtoa_freelist_free);
++ }
++ pthread_mutex_unlock(&gdtoa_tsd_lock);
++ }
++ if ((freelist = (Bigint **)pthread_getspecific(gdtoa_tsd_key)) == NULL) {
++ freelist = (Bigint **)MALLOC((Kmax+1) * sizeof(Bigint *));
++ bzero(freelist, (Kmax+1) * sizeof(Bigint *));
++ pthread_setspecific(gdtoa_tsd_key, freelist);
++ }
++#else /* !GDTOA_TSD */
+ ACQUIRE_DTOA_LOCK(0);
++#endif /* GDTOA_TSD */
+ if ( (rv = freelist[k]) !=0) {
+ freelist[k] = rv->next;
+ }
+@@ -75,7 +123,9 @@ Balloc
+ rv->k = k;
+ rv->maxwds = x;
+ }
++#ifndef GDTOA_TSD
+ FREE_DTOA_LOCK(0);
++#endif /* GDTOA_TSD */
+ rv->sign = rv->wds = 0;
+ return rv;
+ }
+@@ -89,10 +139,16 @@ Bfree
+ #endif
+ {
+ if (v) {
++#ifdef GDTOA_TSD
++ Bigint **freelist = (Bigint **)pthread_getspecific(gdtoa_tsd_key);
++#else /* !GDTOA_TSD */
+ ACQUIRE_DTOA_LOCK(0);
++#endif /* GDTOA_TSD */
+ v->next = freelist[v->k];
+ freelist[v->k] = v;
++#ifndef GDTOA_TSD
+ FREE_DTOA_LOCK(0);
++#endif /* GDTOA_TSD */
+ }
+ }
+
if (rv & STRTOG_Inexlo) {
swap = 0;
b1 = increment(b1);
- if (fpi->sudden_underflow
- && (rv & STRTOG_Retmask) == STRTOG_Zero) {
- b1->x[0] = 0;
- b1->x[nw1] = 1L << nb11;
- rv1 += STRTOG_Normal - STRTOG_Zero;
- rv1 &= ~STRTOG_Underflow;
+ if ((rv & STRTOG_Retmask) == STRTOG_Zero) {
+ if (fpi->sudden_underflow) {
+ b1->x[0] = 0;
+ b1->x[nw1] = 1L << nb11;
+ rv1 += STRTOG_Normal - STRTOG_Zero;
+ rv1 &= ~STRTOG_Underflow;
+ goto swapcheck;
+ }
+ rv1 &= STRTOG_Inexlo | STRTOG_Underflow | STRTOG_Zero;
+ rv1 |= STRTOG_Inexhi | STRTOG_Denormal;
goto swapcheck;
}
if (b1->wds > nw
#ifndef NO_IEEE_Scale
#define Avoid_Underflow
#undef tinytens
-/* The factor of 2^53 in tinytens[4] helps us avoid setting the underflow */
+/* The factor of 2^106 in tinytens[4] helps us avoid setting the underflow */
/* flag unnecessarily. It leads to a song and dance at the end of strtod. */
static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128,
- 9007199254740992.e-256
+ 9007199254740992.*9007199254740992.e-256
};
#endif
#endif
#ifdef Honor_FLT_ROUNDS
-#define Rounding rounding
#undef Check_FLT_ROUNDS
#define Check_FLT_ROUNDS
#else
#ifdef SET_INEXACT
int inexact, oldinexact;
#endif
-#ifdef Honor_FLT_ROUNDS
- int rounding;
-#endif
+#ifdef USE_LOCALE
+#ifdef NO_LOCALE_CACHE
+ char *decimalpoint = localeconv()->decimal_point;
+#else
+ char *decimalpoint;
+ static char *decimalpoint_cache;
+ if (!(s0 = decimalpoint_cache)) {
+ s0 = localeconv()->decimal_point;
+ if ((decimalpoint_cache = (char*)malloc(strlen(s0) + 1))) {
+ strcpy(decimalpoint_cache, s0);
+ s0 = decimalpoint_cache;
+ }
+ }
+ decimalpoint = (char*)s0;
+#endif
+#endif
+#ifdef Honor_FLT_ROUNDS /*{*/
+ int Rounding;
+#ifdef Trust_FLT_ROUNDS /*{{ only define this if FLT_ROUNDS really works! */
+ Rounding = Flt_Rounds;
+#else /*}{*/
+ Rounding = 1;
+ switch(fegetround()) {
+ case FE_TOWARDZERO: Rounding = 0; break;
+ case FE_UPWARD: Rounding = 2; break;
+ case FE_DOWNWARD: Rounding = 3;
+ }
+#endif /*}}*/
+#endif /*}*/
sign = nz0 = nz = decpt = 0;
dval(rv) = 0.;
}
break2:
if (*s == '0') {
-#ifndef NO_HEX_FP
+#ifndef NO_HEX_FP /*{*/
{
static FPI fpi = { 53, 1-1023-53+1, 2046-1023-53+1, 1, SI };
Long exp;
case 'x':
case 'X':
{
-#if defined(FE_DOWNWARD) && defined(FE_TONEAREST) && defined(FE_TOWARDZERO) && defined(FE_UPWARD)
+#if defined(FE_DOWNWARD) && defined(FE_TONEAREST) && defined(FE_TOWARDZERO) && defined(FE_UPWARD) /*{{*/
FPI fpi1 = fpi;
+#ifdef Honor_FLT_ROUNDS /*{{*/
+ fpi1.rounding = Rounding;
+#else /*}{*/
switch(fegetround()) {
case FE_TOWARDZERO: fpi1.rounding = 0; break;
case FE_UPWARD: fpi1.rounding = 2; break;
case FE_DOWNWARD: fpi1.rounding = 3;
}
-#else
+#endif /*}}*/
+#else /*}{*/
#define fpi1 fpi
-#endif
+#endif /*}}*/
switch((i = gethex(&s, &fpi1, &exp, &bb, sign)) & STRTOG_Retmask) {
case STRTOG_NoNumber:
s = s00;
goto ret;
}
}
-#endif
+#endif /*}*/
nz0 = 1;
while(*++s == '0') ;
if (!*s)
z = 10*z + c - '0';
nd0 = nd;
#ifdef USE_LOCALE
- if (c == *localeconv()->decimal_point)
+ if (c == *decimalpoint) {
+ for(i = 1; decimalpoint[i]; ++i)
+ if (s[i] != decimalpoint[i])
+ goto dig_done;
+ s += i;
+ c = *s;
#else
- if (c == '.')
+ if (c == '.') {
+ c = *++s;
#endif
- {
decpt = 1;
- c = *++s;
if (!nd) {
for(; c == '0'; c = *++s)
nz++;
nz = 0;
}
}
- }
+ }/*}*/
dig_done:
e = 0;
if (c == 'e' || c == 'E') {
scale = 0;
#endif
#ifdef Honor_FLT_ROUNDS
- if ((rounding = Flt_Rounds) >= 2) {
+ if (Rounding >= 2) {
if (sign)
- rounding = rounding == 2 ? 0 : 2;
+ Rounding = Rounding == 2 ? 0 : 2;
else
- if (rounding != 2)
- rounding = 0;
+ if (Rounding != 2)
+ Rounding = 0;
}
#endif
#endif /*IEEE_Arith*/
/* Can't trust HUGE_VAL */
#ifdef IEEE_Arith
#ifdef Honor_FLT_ROUNDS
- switch(rounding) {
+ switch(Rounding) {
case 0: /* toward 0 */
case 3: /* toward -infinity */
word0(rv) = Big0;
bd2 -= bbe;
bs2 = bb2;
#ifdef Honor_FLT_ROUNDS
- if (rounding != 1)
+ if (Rounding != 1)
bs2++;
#endif
#ifdef Avoid_Underflow
delta->sign = 0;
i = cmp(delta, bs);
#ifdef Honor_FLT_ROUNDS
- if (rounding != 1) {
+ if (Rounding != 1) {
if (i < 0) {
/* Error is less than an ulp */
if (!delta->x[0] && delta->wds <= 1) {
#endif
break;
}
- if (rounding) {
+ if (Rounding) {
if (dsign) {
adj = 1.;
goto apply_adj;
if (adj < 1.)
adj = 1.;
if (adj <= 0x7ffffffe) {
- /* adj = rounding ? ceil(adj) : floor(adj); */
+ /* adj = Rounding ? ceil(adj) : floor(adj); */
y = adj;
if (y != adj) {
- if (!((rounding>>1) ^ dsign))
+ if (!((Rounding>>1) ^ dsign))
y++;
adj = y;
}
#endif /*Sudden_Underflow*/
#endif /*Avoid_Underflow*/
adj *= ulp(dval(rv));
- if (dsign)
+ if (dsign) {
+ if (word0(rv) == Big0 && word1(rv) == Big1)
+ goto ovfl;
dval(rv) += adj;
+ }
else
dval(rv) -= adj;
goto cont;
}
#endif /*Avoid_Underflow*/
L = (word0(rv) & Exp_mask) - Exp_msk1;
-#endif /*Sudden_Underflow}*/
+#endif /*Sudden_Underflow}}*/
word0(rv) = L | Bndry_mask1;
word1(rv) = 0xffffffff;
#ifdef IBM
dval(rv) *= dval(rv0);
#ifndef NO_ERRNO
/* try to avoid the bug of testing an 8087 register value */
+#ifdef IEEE_Arith
+ if (!(word0(rv) & Exp_mask))
+#else
if (word0(rv) == 0 && word1(rv) == 0)
+#endif
errno = ERANGE;
#endif
}
---- gdtoa-strtod.c.orig 2007-10-04 15:00:21.000000000 -0700
-+++ gdtoa-strtod.c 2007-10-04 15:02:41.000000000 -0700
-@@ -29,6 +29,8 @@
+--- gdtoa-strtod.c.orig 2008-10-28 12:07:31.000000000 -0700
++++ gdtoa-strtod.c 2008-10-28 12:22:37.000000000 -0700
+@@ -29,6 +29,8 @@ THIS SOFTWARE.
/* Please send bug reports to David M. Gay (dmg at acm dot org,
* with " at " changed at "@" and " dot " changed to "."). */
#include "gdtoaimp.h"
#ifndef NO_FENV_H
#include <fenv.h>
-@@ -59,11 +61,11 @@
+@@ -58,11 +60,11 @@ static CONST double tinytens[] = { 1e-16
#endif
double
#endif
{
#ifdef Avoid_Underflow
-@@ -80,8 +82,12 @@
+@@ -79,13 +81,14 @@ strtod
int inexact, oldinexact;
#endif
- #ifdef Honor_FLT_ROUNDS
-- int rounding;
-+ int rounding = Flt_Rounds;
- #endif
-+#ifdef USE_LOCALE
-+ char *decimal_point;
-+ int decimal_point_len;
-+#endif /* USE_LOCALE */
-
- sign = nz0 = nz = decpt = 0;
- dval(rv) = 0.;
-@@ -126,7 +132,7 @@
+ #ifdef USE_LOCALE
++ NORMALIZE_LOCALE(loc);
+ #ifdef NO_LOCALE_CACHE
+- char *decimalpoint = localeconv()->decimal_point;
++ char *decimalpoint = localeconv_l(loc)->decimal_point;
#else
+ char *decimalpoint;
+ static char *decimalpoint_cache;
+ if (!(s0 = decimalpoint_cache)) {
+- s0 = localeconv()->decimal_point;
++ s0 = localeconv_l(loc)->decimal_point;
+ if ((decimalpoint_cache = (char*)malloc(strlen(s0) + 1))) {
+ strcpy(decimalpoint_cache, s0);
+ s0 = decimalpoint_cache;
+@@ -155,7 +158,7 @@ strtod
+ #else /*}{*/
#define fpi1 fpi
- #endif
+ #endif /*}}*/
- switch((i = gethex(&s, &fpi1, &exp, &bb, sign)) & STRTOG_Retmask) {
+ switch((i = gethex(&s, &fpi1, &exp, &bb, sign, loc)) & STRTOG_Retmask) {
case STRTOG_NoNumber:
s = s00;
sign = 0;
-@@ -156,14 +162,22 @@
- else if (nd < 16)
- z = 10*z + c - '0';
- nd0 = nd;
-+ NORMALIZE_LOCALE(loc);
- #ifdef USE_LOCALE
-- if (c == *localeconv()->decimal_point)
-+ decimal_point = localeconv_l(loc)->decimal_point;
-+ decimal_point_len = strlen(decimal_point);
-+ if (strncmp(s, decimal_point, decimal_point_len) == 0)
- #else
- if (c == '.')
- #endif
- {
- decpt = 1;
-+#ifdef USE_LOCALE
-+ s += decimal_point_len;
-+ c = *s;
-+#else
- c = *++s;
-+#endif
- if (!nd) {
- for(; c == '0'; c = *++s)
- nz++;
-@@ -379,7 +393,7 @@
- scale = 0;
- #endif
- #ifdef Honor_FLT_ROUNDS
-- if ((rounding = Flt_Rounds) >= 2) {
-+ if (rounding >= 2) {
- if (sign)
- rounding = rounding == 2 ? 0 : 2;
- else
-@@ -512,7 +526,11 @@
+@@ -545,7 +548,11 @@ strtod
/* Put digits into bd: true value = bd * 10^e */
- bd0 = s2b(s0, nd0, nd, y);
+#ifdef USE_LOCALE
-+ bd0 = s2b(s0, nd0, nd, y, decimal_point_len);
++ bd0 = s2b(s0, nd0, nd, y, strlen(decimalpoint));
+#else
+ bd0 = s2b(s0, nd0, nd, y, 1);
+#endif
for(;;) {
bd = Balloc(bd0->k);
-@@ -956,7 +974,11 @@
+@@ -992,7 +999,7 @@ strtod
dval(rv) *= dval(rv0);
#ifndef NO_ERRNO
/* try to avoid the bug of testing an 8087 register value */
-+#if __DARWIN_UNIX03
-+ if (word0(rv) == 0 && word1(rv) == 0 || dval(rv) < DBL_MIN)
-+#else /* !__DARWIN_UNIX03 */
+-#ifdef IEEE_Arith
++#if defined(IEEE_Arith) && __DARWIN_UNIX03
+ if (!(word0(rv) & Exp_mask))
+ #else
if (word0(rv) == 0 && word1(rv) == 0)
-+#endif /* __DARWIN_UNIX03 */
- errno = ERANGE;
- #endif
- }
-@@ -980,3 +1002,13 @@
+@@ -1020,3 +1027,13 @@ strtod
return sign ? -dval(rv) : dval(rv);
}
return b;
}
- int
+ void
#ifdef KR_headers
decrement(b) Bigint *b;
#else
*x++ = y & 0xffff;
} while(borrow && x < xe);
#endif
- return STRTOG_Inexlo;
}
static int
goto ret;
}
switch(rd) {
- case 1:
+ case 1: /* round down (toward -Infinity) */
goto trunc;
- case 2:
+ case 2: /* round up (toward +Infinity) */
break;
default: /* round near */
k = bdif - 1;
CONST char *s, *s0, *s1;
double adj, adj0, rv, tol;
Long L;
- ULong y, z;
+ ULong *b, *be, y, z;
Bigint *ab, *bb, *bb1, *bd, *bd0, *bs, *delta, *rvb, *rvb0;
+#ifdef USE_LOCALE
+#ifdef NO_LOCALE_CACHE
+ char *decimalpoint = localeconv()->decimal_point;
+#else
+ char *decimalpoint;
+ static char *decimalpoint_cache;
+ if (!(s0 = decimalpoint_cache)) {
+ s0 = localeconv()->decimal_point;
+ if ((decimalpoint_cache = (char*)malloc(strlen(s0) + 1))) {
+ strcpy(decimalpoint_cache, s0);
+ s0 = decimalpoint_cache;
+ }
+ }
+ decimalpoint = (char*)s0;
+#endif
+#endif
irv = STRTOG_Zero;
denorm = sign = nz0 = nz = 0;
z = 10*z + c - '0';
nd0 = nd;
#ifdef USE_LOCALE
- if (c == *localeconv()->decimal_point)
+ if (c == *decimalpoint) {
+ for(i = 1; decimalpoint[i]; ++i)
+ if (s[i] != decimalpoint[i])
+ goto dig_done;
+ s += i;
+ c = *s;
#else
- if (c == '.')
+ if (c == '.') {
+ c = *++s;
#endif
- {
decpt = 1;
- c = *++s;
if (!nd) {
for(; c == '0'; c = *++s)
nz++;
nz = 0;
}
}
- }
+ }/*}*/
dig_done:
e = 0;
if (c == 'e' || c == 'E') {
break;
if (dsign) {
rvb = increment(rvb);
- if ( (j = rvbits & kmask) !=0)
- j = ULbits - j;
- if (hi0bits(rvb->x[(rvb->wds - 1) >> kshift])
- != j)
+ j = kmask & (ULbits - (rvbits & kmask));
+ if (hi0bits(rvb->x[rvb->wds - 1]) != j)
rvbits++;
irv = STRTOG_Normal | STRTOG_Inexhi;
}
Bfree(bd0);
Bfree(delta);
if (rve > fpi->emax) {
+ switch(fpi->rounding & 3) {
+ case FPI_Round_near:
+ goto huge;
+ case FPI_Round_up:
+ if (!sign)
+ goto huge;
+ break;
+ case FPI_Round_down:
+ if (sign)
+ goto huge;
+ }
+ /* Round to largest representable magnitude */
+ Bfree(rvb);
+ rvb = 0;
+ irv = STRTOG_Normal | STRTOG_Inexlo;
+ *exp = fpi->emax;
+ b = bits;
+ be = b + ((fpi->nbits + 31) >> 5);
+ while(b < be)
+ *b++ = -1;
+ if ((j = fpi->nbits & 0x1f))
+ *--be >>= (32 - j);
+ goto ret;
huge:
rvb->wds = 0;
irv = STRTOG_Infinite | STRTOG_Overflow | STRTOG_Inexhi;
if (sudden_underflow) {
rvb->wds = 0;
irv = STRTOG_Underflow | STRTOG_Inexlo;
+#ifndef NO_ERRNO
+ errno = ERANGE;
+#endif
}
else {
irv = (irv & ~STRTOG_Retmask) |
(rvb->wds > 0 ? STRTOG_Denormal : STRTOG_Zero);
- if (irv & STRTOG_Inexact)
+ if (irv & STRTOG_Inexact) {
irv |= STRTOG_Underflow;
+#ifndef NO_ERRNO
+ errno = ERANGE;
+#endif
+ }
}
}
if (se)
---- gdtoa-strtodg.c.orig 2007-10-04 15:00:21.000000000 -0700
-+++ gdtoa-strtodg.c 2007-10-04 17:49:06.000000000 -0700
-@@ -29,13 +29,29 @@
+--- gdtoa-strtodg.c.orig 2008-10-28 12:23:36.000000000 -0700
++++ gdtoa-strtodg.c 2008-10-28 12:34:18.000000000 -0700
+@@ -29,13 +29,29 @@ THIS SOFTWARE.
/* Please send bug reports to David M. Gay (dmg at acm dot org,
* with " at " changed at "@" and " dot " changed to "."). */
fivesbits[] = { 0, 3, 5, 7, 10, 12, 14, 17, 19, 21,
24, 26, 28, 31, 33, 35, 38, 40, 42, 45,
47, 49, 52
-@@ -122,7 +138,7 @@
- return STRTOG_Inexlo;
+@@ -121,7 +137,7 @@ decrement(Bigint *b)
+ #endif
}
- static int
#ifdef KR_headers
all_on(b, n) Bigint *b; int n;
#else
-@@ -169,7 +185,7 @@
+@@ -168,7 +184,7 @@ set_ones(Bigint *b, int n)
return b;
}
rvOK
#ifdef KR_headers
(d, fpi, exp, bits, exact, rd, irv)
-@@ -290,7 +306,7 @@
+@@ -289,7 +305,7 @@ rvOK
return rv;
}
#ifdef KR_headers
mantbits(d) double d;
#else
-@@ -313,13 +329,15 @@
+@@ -312,13 +328,15 @@ mantbits(double d)
return P - 32 - lo0bits(&L);
}
#endif
{
int abe, abits, asub;
-@@ -332,6 +350,10 @@
- Long L;
- ULong y, z;
+@@ -332,13 +350,14 @@ strtodg
+ ULong *b, *be, y, z;
Bigint *ab, *bb, *bb1, *bd, *bd0, *bs, *delta, *rvb, *rvb0;
-+#ifdef USE_LOCALE
-+ char *decimal_point;
-+ int decimal_point_len;
-+#endif /* USE_LOCALE */
-
- irv = STRTOG_Zero;
- denorm = sign = nz0 = nz = 0;
-@@ -367,7 +389,7 @@
+ #ifdef USE_LOCALE
++ NORMALIZE_LOCALE(loc)
+ #ifdef NO_LOCALE_CACHE
+- char *decimalpoint = localeconv()->decimal_point;
++ char *decimalpoint = localeconv_l(loc)->decimal_point;
+ #else
+ char *decimalpoint;
+ static char *decimalpoint_cache;
+ if (!(s0 = decimalpoint_cache)) {
+- s0 = localeconv()->decimal_point;
++ s0 = localeconv_l(loc)->decimal_point;
+ if ((decimalpoint_cache = (char*)malloc(strlen(s0) + 1))) {
+ strcpy(decimalpoint_cache, s0);
+ s0 = decimalpoint_cache;
+@@ -382,7 +401,7 @@ strtodg
switch(s[1]) {
case 'x':
case 'X':
if (irv == STRTOG_NoNumber) {
s = s00;
sign = 0;
-@@ -389,14 +411,22 @@
- else if (nd < 16)
- z = 10*z + c - '0';
- nd0 = nd;
-+ NORMALIZE_LOCALE(loc);
- #ifdef USE_LOCALE
-- if (c == *localeconv()->decimal_point)
-+ decimal_point = localeconv_l(loc)->decimal_point;
-+ decimal_point_len = strlen(decimal_point);
-+ if (strncmp(s, decimal_point, decimal_point_len) == 0)
- #else
- if (c == '.')
- #endif
- {
- decpt = 1;
-+#ifdef USE_LOCALE
-+ s += decimal_point_len;
-+ c = *s;
-+#else
- c = *++s;
-+#endif
- if (!nd) {
- for(; c == '0'; c = *++s)
- nz++;
-@@ -668,6 +698,9 @@
+@@ -687,6 +706,10 @@ strtodg
rvb->x[0] = 0;
*exp = emin;
irv = STRTOG_Underflow | STRTOG_Inexlo;
-+#ifndef NO_ERRNO
++/* When __DARWIN_UNIX03 is set, we don't need this (errno is set later) */
++#if !defined(NO_ERRNO) && !__DARWIN_UNIX03
+ errno = ERANGE;
+#endif
goto ret;
}
rvb->x[0] = rvb->wds = rvbits = 1;
-@@ -684,7 +717,11 @@
+@@ -703,7 +726,11 @@ strtodg
/* Put digits into bd: true value = bd * 10^e */
- bd0 = s2b(s0, nd0, nd, y);
+#ifdef USE_LOCALE
-+ bd0 = s2b(s0, nd0, nd, y, decimal_point_len);
++ bd0 = s2b(s0, nd0, nd, y, strlen(decimalpoint));
+#else
+ bd0 = s2b(s0, nd0, nd, y, 1);
+#endif
for(;;) {
bd = Balloc(bd0->k);
-@@ -824,7 +861,7 @@
- rvb = increment(rvb);
- if ( (j = rvbits & kmask) !=0)
- j = ULbits - j;
-- if (hi0bits(rvb->x[(rvb->wds - 1) >> kshift])
-+ if (hi0bits(rvb->x[rvb->wds - 1])
- != j)
- rvbits++;
- irv = STRTOG_Normal | STRTOG_Inexhi;
-@@ -1008,5 +1045,9 @@
- copybits(bits, nbits, rvb);
- Bfree(rvb);
- }
+@@ -1032,7 +1059,7 @@ strtodg
+ if (sudden_underflow) {
+ rvb->wds = 0;
+ irv = STRTOG_Underflow | STRTOG_Inexlo;
+-#ifndef NO_ERRNO
+#if !defined(NO_ERRNO) && __DARWIN_UNIX03
-+ if (irv & STRTOG_Underflow)
-+ errno = ERANGE;
-+#endif
- return irv;
- }
+ errno = ERANGE;
+ #endif
+ }
+@@ -1041,7 +1068,7 @@ strtodg
+ (rvb->wds > 0 ? STRTOG_Denormal : STRTOG_Zero);
+ if (irv & STRTOG_Inexact) {
+ irv |= STRTOG_Underflow;
+-#ifndef NO_ERRNO
++#if !defined(NO_ERRNO) && __DARWIN_UNIX03
+ errno = ERANGE;
+ #endif
+ }
strtof(CONST char *s, char **sp)
#endif
{
- static FPI fpi = { 24, 1-127-24+1, 254-127-24+1, 1, SI };
+ static FPI fpi0 = { 24, 1-127-24+1, 254-127-24+1, 1, SI };
ULong bits[1];
Long exp;
int k;
union { ULong L[1]; float f; } u;
+#ifdef Honor_FLT_ROUNDS
+#include "gdtoa_fltrnds.h"
+#else
+#define fpi &fpi0
+#endif
- k = strtodg(s, sp, &fpi, &exp, bits);
+ k = strtodg(s, sp, fpi, &exp, bits);
switch(k & STRTOG_Retmask) {
case STRTOG_NoNumber:
case STRTOG_Zero:
---- gdtoa-strtof.c.orig 2007-04-03 12:19:28.000000000 -0700
-+++ gdtoa-strtof.c 2007-04-06 12:52:45.000000000 -0700
-@@ -29,24 +29,41 @@
+--- gdtoa-strtof.c.orig 2008-10-28 12:35:19.000000000 -0700
++++ gdtoa-strtof.c 2008-10-28 13:19:34.000000000 -0700
+@@ -29,13 +29,15 @@ THIS SOFTWARE.
/* Please send bug reports to David M. Gay (dmg at acm dot org,
* with " at " changed at "@" and " dot " changed to "."). */
+strtof_l(CONST char *s, char **sp, locale_t loc)
#endif
{
-- static FPI fpi = { 24, 1-127-24+1, 254-127-24+1, 1, SI };
-+ static FPI fpi0 = { 24, 1-127-24+1, 254-127-24+1, 1, SI };
- ULong bits[1];
- Long exp;
- int k;
- union { ULong L[1]; float f; } u;
-+ FPI *fpi = &fpi0, fpi1;
-+#ifdef Honor_FLT_ROUNDS
-+ int rounding = Flt_Rounds;
-+#endif
+ static FPI fpi0 = { 24, 1-127-24+1, 254-127-24+1, 1, SI };
+@@ -49,9 +51,13 @@ strtof(CONST char *s, char **sp)
+ #define fpi &fpi0
+ #endif
-- k = strtodg(s, sp, &fpi, &exp, bits);
+- k = strtodg(s, sp, fpi, &exp, bits);
+ NORMALIZE_LOCALE(loc);
-+#ifdef Honor_FLT_ROUNDS
-+ if (rounding != fpi0.rounding) {
-+ fpi1 = fpi0; /* for thread safety */
-+ fpi1.rounding = rounding;
-+ fpi = &fpi1;
-+ }
-+#endif /* Honor_FLT_ROUNDS */
+ k = strtodg(s, sp, fpi, &exp, bits, loc);
switch(k & STRTOG_Retmask) {
case STRTOG_NoNumber:
case STRTOG_Zero:
u.L[0] = 0;
break;
-@@ -71,3 +88,13 @@
+@@ -76,3 +82,13 @@ strtof(CONST char *s, char **sp)
u.L[0] |= 0x80000000L;
return u.f;
}
#endif
{
#ifdef Sudden_Underflow
- static FPI fpi = { 106, 1-1023, 2046-1023-106+1, 1, 1 };
+ static FPI fpi0 = { 106, 1-1023, 2046-1023-106+1, 1, 1 };
#else
- static FPI fpi = { 106, 1-1023-53+1, 2046-1023-106+1, 1, 0 };
+ static FPI fpi0 = { 106, 1-1023-53+1, 2046-1023-106+1, 1, 0 };
#endif
ULong bits[4];
Long exp;
ULong L[4];
} U;
U *u;
+#ifdef Honor_FLT_ROUNDS
+#include "gdtoa_fltrnds.h"
+#else
+#define fpi &fpi0
+#endif
- rv = strtodg(s, sp, &fpi, &exp, bits);
+ rv = strtodg(s, sp, fpi, &exp, bits);
u = (U*)dd;
switch(rv & STRTOG_Retmask) {
case STRTOG_NoNumber:
---- gdtoa-strtopdd.c.orig 2007-04-03 12:19:28.000000000 -0700
-+++ gdtoa-strtopdd.c 2007-04-06 12:53:25.000000000 -0700
-@@ -29,19 +29,31 @@
+--- gdtoa-strtopdd.c.orig 2008-10-28 12:43:22.000000000 -0700
++++ gdtoa-strtopdd.c 2008-10-28 12:51:49.000000000 -0700
+@@ -29,13 +29,25 @@ THIS SOFTWARE.
/* Please send bug reports to David M. Gay (dmg at acm dot org,
* with " at " changed at "@" and " dot " changed to "."). */
int
#ifdef KR_headers
-strtopdd(s, sp, dd) CONST char *s; char **sp; double *dd;
-+strtopdd(s, sp, dd) CONST char *s; char **sp; double *dd; locale_t loc;
++strtopdd(s, sp, dd, loc) CONST char *s; char **sp; double *dd; locale_t loc;
#else
-strtopdd(CONST char *s, char **sp, double *dd)
+strtopdd(CONST char *s, char **sp, double *dd, locale_t loc)
#endif
{
#ifdef Sudden_Underflow
-- static FPI fpi = { 106, 1-1023, 2046-1023-106+1, 1, 1 };
-+ static FPI fpi0 = { 106, 1-1023, 2046-1023-106+1, 1, 1 };
- #else
-- static FPI fpi = { 106, 1-1023-53+1, 2046-1023-106+1, 1, 0 };
-+ static FPI fpi0 = { 106, 1-1023-53+1, 2046-1023-106+1, 1, 0 };
- #endif
- ULong bits[4];
- Long exp;
-@@ -49,13 +61,30 @@
+@@ -49,6 +61,9 @@ strtopdd(CONST char *s, char **sp, doubl
typedef union {
double d[2];
ULong L[4];
+#endif /* __APPLE__ */
} U;
U *u;
-+ FPI *fpi = &fpi0, fpi1;
-+#ifdef Honor_FLT_ROUNDS
-+ int rounding = Flt_Rounds;
-+#endif
+ #ifdef Honor_FLT_ROUNDS
+@@ -57,10 +72,13 @@ strtopdd(CONST char *s, char **sp, doubl
+ #define fpi &fpi0
+ #endif
-- rv = strtodg(s, sp, &fpi, &exp, bits);
-+#ifdef Honor_FLT_ROUNDS
-+ if (rounding != fpi0.rounding) {
-+ fpi1 = fpi0; /* for thread safety */
-+ fpi1.rounding = rounding;
-+ fpi = &fpi1;
-+ }
-+#endif /* Honor_FLT_ROUNDS */
+- rv = strtodg(s, sp, fpi, &exp, bits);
+ rv = strtodg(s, sp, fpi, &exp, bits, loc);
u = (U*)dd;
switch(rv & STRTOG_Retmask) {
case STRTOG_Zero:
u->d[0] = u->d[1] = 0.;
break;
-@@ -101,6 +130,9 @@
+@@ -106,6 +124,9 @@ strtopdd(CONST char *s, char **sp, doubl
}
u->L[2+_1] = bits[0];
u->L[2+_0] = bits[1] & 0xfffff | exp << 20;
break;
case STRTOG_Denormal:
-@@ -124,6 +156,9 @@
+@@ -129,6 +150,9 @@ strtopdd(CONST char *s, char **sp, doubl
u->L[_1] = (bits[2] << i | bits[1] >> j) & 0xffffffffL;
u->L[2+_0] = bits[1] & (1L << j) - 1;
u->L[2+_1] = bits[0];
break;
partly_normal:
-@@ -135,6 +170,9 @@
+@@ -140,6 +164,9 @@ strtopdd(CONST char *s, char **sp, doubl
u->L[_1] = (bits[2] << i | bits[1] >> j) & 0xffffffffL;
u->L[2+_0] = bits[1] & (1L << j) - 1;
u->L[2+_1] = bits[0];
break;
}
if (i == 0) {
-@@ -142,6 +180,9 @@
+@@ -147,6 +174,9 @@ strtopdd(CONST char *s, char **sp, doubl
u->L[_1] = bits[1];
u->L[2+_0] = 0;
u->L[2+_1] = bits[0];
break;
}
j = 32 - i;
-@@ -150,6 +191,9 @@
+@@ -155,6 +185,9 @@ strtopdd(CONST char *s, char **sp, doubl
u->L[_1] = (bits[1] << i | bits[0] >> j) & 0xffffffffL;
u->L[2+_0] = 0;
u->L[2+_1] = bits[0] & (1L << j) - 1;
break;
hardly_normal:
-@@ -159,20 +203,44 @@
+@@ -164,20 +197,45 @@ strtopdd(CONST char *s, char **sp, doubl
u->L[_1] = (bits[1] << i | bits[0] >> j) & 0xffffffffL;
u->L[2+_0] = 0;
u->L[2+_1] = bits[0] & (1L << j) - 1;
u->L[0] = u->L[2] = d_QNAN0;
u->L[1] = u->L[3] = d_QNAN1;
+#endif /* __APPLE__ */
++ break;
+#ifdef __APPLE__
+ case STRTOG_NaNbits:
+ u->L[0] = d_QNAN0 | ((bits[2] >> 20 | bits[3] << 12) & 0xfffff);
--- /dev/null
+/****************************************************************
+
+The author of this software is David M. Gay.
+
+Copyright (C) 1998, 2000 by Lucent Technologies
+All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appear in all
+copies and that both that the copyright notice and this
+permission notice and warranty disclaimer appear in supporting
+documentation, and that the name of Lucent or any of its entities
+not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+LUCENT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
+IN NO EVENT SHALL LUCENT OR ANY OF ITS ENTITIES BE LIABLE FOR ANY
+SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
+IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
+ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
+THIS SOFTWARE.
+
+****************************************************************/
+
+/* Please send bug reports to David M. Gay (dmg at acm dot org,
+ * with " at " changed at "@" and " dot " changed to "."). */
+
+#include "gdtoaimp.h"
+
+#undef _0
+#undef _1
+
+/* one or the other of IEEE_MC68k or IEEE_8087 should be #defined */
+
+#ifdef IEEE_MC68k
+#define _0 0
+#define _1 1
+#define _2 2
+#define _3 3
+#define _4 4
+#endif
+#ifdef IEEE_8087
+#define _0 4
+#define _1 3
+#define _2 2
+#define _3 1
+#define _4 0
+#endif
+
+ int
+#ifdef KR_headers
+strtopx(s, sp, V) CONST char *s; char **sp; void *V;
+#else
+strtopx(CONST char *s, char **sp, void *V)
+#endif
+{
+ static FPI fpi0 = { 64, 1-16383-64+1, 32766 - 16383 - 64 + 1, 1, SI };
+ ULong bits[2];
+ Long exp;
+ int k;
+ UShort *L = (UShort*)V;
+#ifdef Honor_FLT_ROUNDS
+#include "gdtoa_fltrnds.h"
+#else
+#define fpi &fpi0
+#endif
+
+ k = strtodg(s, sp, fpi, &exp, bits);
+ switch(k & STRTOG_Retmask) {
+ case STRTOG_NoNumber:
+ case STRTOG_Zero:
+ L[0] = L[1] = L[2] = L[3] = L[4] = 0;
+ break;
+
+ case STRTOG_Denormal:
+ L[_0] = 0;
+ goto normal_bits;
+
+ case STRTOG_Normal:
+ case STRTOG_NaNbits:
+ L[_0] = exp + 0x3fff + 63;
+ normal_bits:
+ L[_4] = (UShort)bits[0];
+ L[_3] = (UShort)(bits[0] >> 16);
+ L[_2] = (UShort)bits[1];
+ L[_1] = (UShort)(bits[1] >> 16);
+ break;
+
+ case STRTOG_Infinite:
+ L[_0] = 0x7fff;
+ L[_1] = L[_2] = L[_3] = L[_4] = 0;
+ break;
+
+ case STRTOG_NaN:
+ L[0] = ldus_QNAN0;
+ L[1] = ldus_QNAN1;
+ L[2] = ldus_QNAN2;
+ L[3] = ldus_QNAN3;
+ L[4] = ldus_QNAN4;
+ }
+ if (k & STRTOG_Neg)
+ L[_0] |= 0x8000;
+ return k;
+ }
--- /dev/null
+--- gdtoa-strtopx.c.orig 2008-10-28 12:54:18.000000000 -0700
++++ gdtoa-strtopx.c 2008-10-28 12:57:26.000000000 -0700
+@@ -29,6 +29,8 @@ THIS SOFTWARE.
+ /* Please send bug reports to David M. Gay (dmg at acm dot org,
+ * with " at " changed at "@" and " dot " changed to "."). */
+
++#include "xlocale_private.h"
++
+ #include "gdtoaimp.h"
+
+ #undef _0
+@@ -53,9 +55,9 @@ THIS SOFTWARE.
+
+ int
+ #ifdef KR_headers
+-strtopx(s, sp, V) CONST char *s; char **sp; void *V;
++strtopx(s, sp, V, loc) CONST char *s; char **sp; void *V; locale_t loc;
+ #else
+-strtopx(CONST char *s, char **sp, void *V)
++strtopx(CONST char *s, char **sp, void *V, locale_t loc)
+ #endif
+ {
+ static FPI fpi0 = { 64, 1-16383-64+1, 32766 - 16383 - 64 + 1, 1, SI };
+@@ -69,9 +71,12 @@ strtopx(CONST char *s, char **sp, void *
+ #define fpi &fpi0
+ #endif
+
+- k = strtodg(s, sp, fpi, &exp, bits);
++ k = strtodg(s, sp, fpi, &exp, bits, loc);
+ switch(k & STRTOG_Retmask) {
+ case STRTOG_NoNumber:
++ L[0] = L[1] = L[2] = L[3] = L[4] = 0;
++ return k; // avoid setting sign
++
+ case STRTOG_Zero:
+ L[0] = L[1] = L[2] = L[3] = L[4] = 0;
+ break;
+@@ -92,7 +97,8 @@ strtopx(CONST char *s, char **sp, void *
+
+ case STRTOG_Infinite:
+ L[_0] = 0x7fff;
+- L[_1] = L[_2] = L[_3] = L[_4] = 0;
++ L[_1] = 0x8000; /* 4306392: to match gcc */
++ L[_2] = L[_3] = L[_4] = 0;
+ break;
+
+ case STRTOG_NaN:
#define GDTOA_H_INCLUDED
#include "arith.h"
+#include <stddef.h> /* for size_t */
#ifndef Long
#define Long long
/* The following may be or-ed into one of the above values. */
- STRTOG_Neg = 0x08,
- STRTOG_Inexlo = 0x10,
- STRTOG_Inexhi = 0x20,
+ STRTOG_Neg = 0x08, /* does not affect STRTOG_Inexlo or STRTOG_Inexhi */
+ STRTOG_Inexlo = 0x10, /* returned result rounded toward zero */
+ STRTOG_Inexhi = 0x20, /* returned result rounded away from zero */
STRTOG_Inexact = 0x30,
STRTOG_Underflow= 0x40,
STRTOG_Overflow = 0x80
extern double strtod ANSI((CONST char *, char **));
extern int strtodg ANSI((CONST char*, char**, FPI*, Long*, ULong*));
-extern char* g_ddfmt ANSI((char*, double*, int, unsigned));
-extern char* g_dfmt ANSI((char*, double*, int, unsigned));
-extern char* g_ffmt ANSI((char*, float*, int, unsigned));
-extern char* g_Qfmt ANSI((char*, void*, int, unsigned));
-extern char* g_xfmt ANSI((char*, void*, int, unsigned));
-extern char* g_xLfmt ANSI((char*, void*, int, unsigned));
+extern char* g_ddfmt ANSI((char*, double*, int, size_t));
+extern char* g_dfmt ANSI((char*, double*, int, size_t));
+extern char* g_ffmt ANSI((char*, float*, int, size_t));
+extern char* g_Qfmt ANSI((char*, void*, int, size_t));
+extern char* g_xfmt ANSI((char*, void*, int, size_t));
+extern char* g_xLfmt ANSI((char*, void*, int, size_t));
extern int strtoId ANSI((CONST char*, char**, double*, double*));
extern int strtoIdd ANSI((CONST char*, char**, double*, double*));
--- /dev/null
+--- gdtoa.h.orig 2008-10-28 11:36:35.000000000 -0700
++++ gdtoa.h 2008-10-28 12:00:08.000000000 -0700
+@@ -110,7 +110,7 @@
+ extern void freedtoa ANSI((char*));
+ extern float strtof ANSI((CONST char *, char **));
+ extern double strtod ANSI((CONST char *, char **));
+-extern int strtodg ANSI((CONST char*, char**, FPI*, Long*, ULong*));
++extern int strtodg ANSI((CONST char*, char**, FPI*, Long*, ULong*, locale_t)) __DARWIN_ALIAS(strtodg);
+
+ extern char* g_ddfmt ANSI((char*, double*, int, size_t));
+ extern char* g_dfmt ANSI((char*, double*, int, size_t));
+@@ -134,10 +134,10 @@
+ #if 1
+ extern int strtodI ANSI((CONST char*, char**, double*));
+ extern int strtopd ANSI((CONST char*, char**, double*));
+-extern int strtopdd ANSI((CONST char*, char**, double*));
++extern int strtopdd ANSI((CONST char*, char**, double*, locale_t));
+ extern int strtopf ANSI((CONST char*, char**, float*));
+ extern int strtopQ ANSI((CONST char*, char**, void*));
+-extern int strtopx ANSI((CONST char*, char**, void*));
++extern int strtopx ANSI((CONST char*, char**, void*, locale_t));
+ extern int strtopxL ANSI((CONST char*, char**, void*));
+ #else
+ #define strtopd(s,se,x) strtord(s,se,1,x)
--- /dev/null
+ FPI *fpi, fpi1;
+ int Rounding;
+#ifdef Trust_FLT_ROUNDS /*{{ only define this if FLT_ROUNDS really works! */
+ Rounding = Flt_Rounds;
+#else /*}{*/
+ Rounding = 1;
+ switch(fegetround()) {
+ case FE_TOWARDZERO: Rounding = 0; break;
+ case FE_UPWARD: Rounding = 2; break;
+ case FE_DOWNWARD: Rounding = 3;
+ }
+#endif /*}}*/
+ fpi = &fpi0;
+ if (Rounding != 1) {
+ fpi1 = fpi0;
+ fpi = &fpi1;
+ fpi1.rounding = Rounding;
+ }
+++ /dev/null
-/****************************************************************
-
-The author of this software is David M. Gay.
-
-Copyright (C) 1998, 2000 by Lucent Technologies
-All Rights Reserved
-
-Permission to use, copy, modify, and distribute this software and
-its documentation for any purpose and without fee is hereby
-granted, provided that the above copyright notice appear in all
-copies and that both that the copyright notice and this
-permission notice and warranty disclaimer appear in supporting
-documentation, and that the name of Lucent or any of its entities
-not be used in advertising or publicity pertaining to
-distribution of the software without specific, written prior
-permission.
-
-LUCENT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
-IN NO EVENT SHALL LUCENT OR ANY OF ITS ENTITIES BE LIABLE FOR ANY
-SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
-IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
-ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
-THIS SOFTWARE.
-
-****************************************************************/
-
-/* Please send bug reports to David M. Gay (dmg at acm dot org,
- * with " at " changed at "@" and " dot " changed to "."). */
-
-#include "gdtoaimp.h"
-
-#undef _0
-#undef _1
-
-/* one or the other of IEEE_MC68k or IEEE_8087 should be #defined */
-
-#ifdef IEEE_MC68k
-#define _0 0
-#define _1 1
-#define _2 2
-#define _3 3
-#define _4 4
-#endif
-#ifdef IEEE_8087
-#define _0 4
-#define _1 3
-#define _2 2
-#define _3 1
-#define _4 0
-#endif
-
- int
-#ifdef KR_headers
-strtopx(s, sp, V) CONST char *s; char **sp; void *V;
-#else
-strtopx(CONST char *s, char **sp, void *V)
-#endif
-{
- static FPI fpi = { 64, 1-16383-64+1, 32766 - 16383 - 64 + 1, 1, SI };
- ULong bits[2];
- Long exp;
- int k;
- UShort *L = (UShort*)V;
-
- k = strtodg(s, sp, &fpi, &exp, bits);
- switch(k & STRTOG_Retmask) {
- case STRTOG_NoNumber:
- case STRTOG_Zero:
- L[0] = L[1] = L[2] = L[3] = L[4] = 0;
- break;
-
- case STRTOG_Denormal:
- L[_0] = 0;
- goto normal_bits;
-
- case STRTOG_Normal:
- case STRTOG_NaNbits:
- L[_0] = exp + 0x3fff + 63;
- normal_bits:
- L[_4] = (UShort)bits[0];
- L[_3] = (UShort)(bits[0] >> 16);
- L[_2] = (UShort)bits[1];
- L[_1] = (UShort)(bits[1] >> 16);
- break;
-
- case STRTOG_Infinite:
- L[_0] = 0x7fff;
- L[_1] = L[_2] = L[_3] = L[_4] = 0;
- break;
-
- case STRTOG_NaN:
- L[0] = ldus_QNAN0;
- L[1] = ldus_QNAN1;
- L[2] = ldus_QNAN2;
- L[3] = ldus_QNAN3;
- L[4] = ldus_QNAN4;
- }
- if (k & STRTOG_Neg)
- L[_0] |= 0x8000;
- return k;
- }
+++ /dev/null
---- gdtoa_strtopx.c.orig 2007-04-03 12:19:28.000000000 -0700
-+++ gdtoa_strtopx.c 2007-04-06 12:52:09.000000000 -0700
-@@ -29,6 +29,8 @@
- /* Please send bug reports to David M. Gay (dmg at acm dot org,
- * with " at " changed at "@" and " dot " changed to "."). */
-
-+#include "xlocale_private.h"
-+
- #include "gdtoaimp.h"
-
- #undef _0
-@@ -53,20 +55,34 @@
-
- int
- #ifdef KR_headers
--strtopx(s, sp, V) CONST char *s; char **sp; void *V;
-+strtopx(s, sp, V, loc) CONST char *s; char **sp; void *V; locale_t loc;
- #else
--strtopx(CONST char *s, char **sp, void *V)
-+strtopx(CONST char *s, char **sp, void *V, locale_t loc)
- #endif
- {
-- static FPI fpi = { 64, 1-16383-64+1, 32766 - 16383 - 64 + 1, 1, SI };
-+ static FPI fpi0 = { 64, 1-16383-64+1, 32766 - 16383 - 64 + 1, 1, SI };
- ULong bits[2];
- Long exp;
- int k;
- UShort *L = (UShort*)V;
-+ FPI *fpi = &fpi0, fpi1;
-+#ifdef Honor_FLT_ROUNDS
-+ int rounding = Flt_Rounds;
-+#endif
-
-- k = strtodg(s, sp, &fpi, &exp, bits);
-+#ifdef Honor_FLT_ROUNDS
-+ if (rounding != fpi0.rounding) {
-+ fpi1 = fpi0; /* for thread safety */
-+ fpi1.rounding = rounding;
-+ fpi = &fpi1;
-+ }
-+#endif /* Honor_FLT_ROUNDS */
-+ k = strtodg(s, sp, fpi, &exp, bits, loc);
- switch(k & STRTOG_Retmask) {
- case STRTOG_NoNumber:
-+ L[0] = L[1] = L[2] = L[3] = L[4] = 0;
-+ return k; // avoid setting sign
-+
- case STRTOG_Zero:
- L[0] = L[1] = L[2] = L[3] = L[4] = 0;
- break;
-@@ -87,7 +103,8 @@
-
- case STRTOG_Infinite:
- L[_0] = 0x7fff;
-- L[_1] = L[_2] = L[_3] = L[_4] = 0;
-+ L[_1] = 0x8000; /* 4306392: to match gcc */
-+ L[_2] = L[_3] = L[_4] = 0;
- break;
-
- case STRTOG_NaN:
* Infinity and NaN (case insensitively).
* When INFNAN_CHECK is #defined and No_Hex_NaN is not #defined,
* strtodg also accepts (case insensitively) strings of the form
- * NaN(x), where x is a string of hexadecimal digits and spaces;
- * if there is only one string of hexadecimal digits, it is taken
- * for the fraction bits of the resulting NaN; if there are two or
- * more strings of hexadecimal digits, each string is assigned
- * to the next available sequence of 32-bit words of fractions
- * bits (starting with the most significant), right-aligned in
- * each sequence.
+ * NaN(x), where x is a string of hexadecimal digits (optionally
+ * preceded by 0x or 0X) and spaces; if there is only one string
+ * of hexadecimal digits, it is taken for the fraction bits of the
+ * resulting NaN; if there are two or more strings of hexadecimal
+ * digits, each string is assigned to the next available sequence
+ * of 32-bit words of fractions bits (starting with the most
+ * significant), right-aligned in each sequence.
+ * Unless GDTOA_NON_PEDANTIC_NANCHECK is #defined, input "NaN(...)"
+ * is consumed even when ... has the wrong form (in which case the
+ * "(...)" is consumed but ignored).
* #define MULTIPLE_THREADS if the system offers preemptively scheduled
* multiple threads. In this case, you must provide (or suitably
* #define) two locks, acquired by ACQUIRE_DTOA_LOCK(n) and freed
* dtoa. You may do so whether or not MULTIPLE_THREADS is #defined.
* #define IMPRECISE_INEXACT if you do not care about the setting of
* the STRTOG_Inexact bits in the special case of doing IEEE double
- * precision conversions (which could also be done by the strtog in
+ * precision conversions (which could also be done by the strtod in
* dtoa.c).
* #define NO_HEX_FP to disable recognition of C9x's hexadecimal
* floating-point constants.
#define GDTOAIMP_H_INCLUDED
#include "gdtoa.h"
#include "gd_qnan.h"
+#ifdef Honor_FLT_ROUNDS
+#include <fenv.h>
+#endif
#ifdef DEBUG
#include "stdio.h"
extern int cmp ANSI((Bigint*, Bigint*));
extern void copybits ANSI((ULong*, int, Bigint*));
extern Bigint *d2b ANSI((double, int*, int*));
- extern int decrement ANSI((Bigint*));
+ extern void decrement ANSI((Bigint*));
extern Bigint *diff ANSI((Bigint*, Bigint*));
extern char *dtoa ANSI((double d, int mode, int ndigits,
int *decpt, int *sign, char **rve));
- extern char *g__fmt ANSI((char*, char*, char*, int, ULong));
+ extern char *g__fmt ANSI((char*, char*, char*, int, ULong, size_t));
extern int gethex ANSI((CONST char**, FPI*, Long*, Bigint**, int));
extern void hexdig_init_D2A(Void);
extern int hexnan ANSI((CONST char**, FPI*, ULong*));
---- gdtoaimp.h.orig 2008-07-30 13:29:37.000000000 -0700
-+++ gdtoaimp.h 2008-07-30 13:49:16.000000000 -0700
-@@ -167,6 +167,7 @@
+--- gdtoaimp.h.orig 2008-10-28 11:36:44.000000000 -0700
++++ gdtoaimp.h 2008-10-28 12:01:07.000000000 -0700
+@@ -170,6 +170,91 @@
#ifndef GDTOAIMP_H_INCLUDED
#define GDTOAIMP_H_INCLUDED
++/*
++ * Paranoia: Protect exported symbols, including ones in files we don't
++ * compile right now. The standard strtof and strtod survive.
++ */
++#define dtoa __dtoa
++#define gdtoa __gdtoa
++#define freedtoa __freedtoa
++#define strtodg __strtodg
++#define g_ddfmt __g_ddfmt
++#define g_dfmt __g_dfmt
++#define g_ffmt __g_ffmt
++#define g_Qfmt __g_Qfmt
++#define g_xfmt __g_xfmt
++#define g_xLfmt __g_xLfmt
++#define strtoId __strtoId
++#define strtoIdd __strtoIdd
++#define strtoIf __strtoIf
++#define strtoIQ __strtoIQ
++#define strtoIx __strtoIx
++#define strtoIxL __strtoIxL
++#define strtord __strtord
++#define strtordd __strtordd
++#define strtorf __strtorf
++#define strtorQ __strtorQ
++#define strtorx __strtorx
++#define strtorxL __strtorxL
++#define strtodI __strtodI
++#define strtopd __strtopd
++#define strtopdd __strtopdd
++#define strtopf __strtopf
++#define strtopQ __strtopQ
++#define strtopx __strtopx
++#define strtopxL __strtopxL
++
++/* Protect gdtoa-internal symbols */
++#define Balloc __Balloc_D2A
++#define Bfree __Bfree_D2A
++#define ULtoQ __ULtoQ_D2A
++#define ULtof __ULtof_D2A
++#define ULtod __ULtod_D2A
++#define ULtodd __ULtodd_D2A
++#define ULtox __ULtox_D2A
++#define ULtoxL __ULtoxL_D2A
++#define any_on __any_on_D2A
++#define b2d __b2d_D2A
++#define bigtens __bigtens_D2A
++#define cmp __cmp_D2A
++#define copybits __copybits_D2A
++#define d2b __d2b_D2A
++#define decrement __decrement_D2A
++#define diff __diff_D2A
++#define dtoa_result __dtoa_result_D2A
++#define g__fmt __g__fmt_D2A
++#define gethex __gethex_D2A
++#define hexdig __hexdig_D2A
++#define hexdig_init_D2A __hexdig_init_D2A
++#define hexnan __hexnan_D2A
++#define hi0bits __hi0bits_D2A
++#define hi0bits_D2A __hi0bits_D2A
++#define i2b __i2b_D2A
++#define increment __increment_D2A
++#define lo0bits __lo0bits_D2A
++#define lshift __lshift_D2A
++#define match __match_D2A
++#define mult __mult_D2A
++#define multadd __multadd_D2A
++#define nrv_alloc __nrv_alloc_D2A
++#define pow5mult __pow5mult_D2A
++#define quorem __quorem_D2A
++#define ratio __ratio_D2A
++#define rshift __rshift_D2A
++#define rv_alloc __rv_alloc_D2A
++#define s2b __s2b_D2A
++#define set_ones __set_ones_D2A
++#define strcp __strcp_D2A
++#define strcp_D2A __strcp_D2A
++#define strtoIg __strtoIg_D2A
++#define sum __sum_D2A
++#define tens __tens_D2A
++#define tinytens __tinytens_D2A
++#define tinytens __tinytens_D2A
++#define trailz __trailz_D2A
++#define ulp __ulp_D2A
++
+#include <xlocale.h>
#include "gdtoa.h"
#include "gd_qnan.h"
-
-@@ -175,8 +176,11 @@
+ #ifdef Honor_FLT_ROUNDS
+@@ -181,8 +266,11 @@
#define Bug(x) {fprintf(stderr, "%s\n", x); exit(1);}
#endif
#ifdef KR_headers
#define Char char
-@@ -190,6 +194,9 @@
+@@ -196,6 +284,10 @@
#define MALLOC malloc
#endif
+#define INFNAN_CHECK
+#define USE_LOCALE
++#define NO_LOCALE_CACHE
+
#undef IEEE_Arith
#undef Avoid_Underflow
#ifdef IEEE_MC68k
-@@ -449,10 +456,16 @@
+@@ -455,10 +547,14 @@
#define ALL_ON 0xffff
#endif
-#endif
+#define MULTIPLE_THREADS
+extern spinlock_t __gdtoa_locks[2];
-+#define ACQUIRE_DTOA_LOCK(n) do { \
-+ if (__isthreaded) \
-+ _SPINLOCK(&__gdtoa_locks[n]); \
++#define ACQUIRE_DTOA_LOCK(n) do { \
++ if (__isthreaded) _SPINLOCK(&__gdtoa_locks[n]); \
+} while(0)
-+#define FREE_DTOA_LOCK(n) do { \
-+ if (__isthreaded) \
-+ _SPINUNLOCK(&__gdtoa_locks[n]); \
++#define FREE_DTOA_LOCK(n) do { \
++ if (__isthreaded) _SPINUNLOCK(&__gdtoa_locks[n]); \
+} while(0)
#define Kmax 15
-@@ -475,51 +488,89 @@
+@@ -481,52 +577,6 @@
#define Bcopy(x,y) memcpy(&x->sign,&y->sign,y->wds*sizeof(ULong) + 2*sizeof(int))
#endif /* NO_STRING_H */
-#define tinytens tinytens_D2A
-#define trailz trailz_D2A
-#define ulp ulp_D2A
-+/*
-+ * Paranoia: Protect exported symbols, including ones in files we don't
-+ * compile right now. The standard strtof and strtod survive.
-+ */
-+#define dtoa __dtoa
-+#define gdtoa __gdtoa
-+#define freedtoa __freedtoa
-+#define strtodg __strtodg
-+#define g_ddfmt __g_ddfmt
-+#define g_dfmt __g_dfmt
-+#define g_ffmt __g_ffmt
-+#define g_Qfmt __g_Qfmt
-+#define g_xfmt __g_xfmt
-+#define g_xLfmt __g_xLfmt
-+#define strtoId __strtoId
-+#define strtoIdd __strtoIdd
-+#define strtoIf __strtoIf
-+#define strtoIQ __strtoIQ
-+#define strtoIx __strtoIx
-+#define strtoIxL __strtoIxL
-+#define strtord __strtord
-+#define strtordd __strtordd
-+#define strtorf __strtorf
-+#define strtorQ __strtorQ
-+#define strtorx __strtorx
-+#define strtorxL __strtorxL
-+#define strtodI __strtodI
-+#define strtopd __strtopd
-+#define strtopdd __strtopdd
-+#define strtopf __strtopf
-+#define strtopQ __strtopQ
-+#define strtopx __strtopx
-+#define strtopxL __strtopxL
-+
-+/* Protect gdtoa-internal symbols */
-+#define Balloc __Balloc_D2A
-+#define Bfree __Bfree_D2A
-+#define ULtoQ __ULtoQ_D2A
-+#define ULtof __ULtof_D2A
-+#define ULtod __ULtod_D2A
-+#define ULtodd __ULtodd_D2A
-+#define ULtox __ULtox_D2A
-+#define ULtoxL __ULtoxL_D2A
-+#define any_on __any_on_D2A
-+#define b2d __b2d_D2A
-+#define bigtens __bigtens_D2A
-+#define cmp __cmp_D2A
-+#define copybits __copybits_D2A
-+#define d2b __d2b_D2A
-+#define decrement __decrement_D2A
-+#define diff __diff_D2A
-+#define dtoa_result __dtoa_result_D2A
-+#define g__fmt __g__fmt_D2A
-+#define gethex __gethex_D2A
-+#define hexdig __hexdig_D2A
-+#define hexdig_init_D2A __hexdig_init_D2A
-+#define hexnan __hexnan_D2A
-+#define hi0bits __hi0bits_D2A
-+#define hi0bits_D2A __hi0bits_D2A
-+#define i2b __i2b_D2A
-+#define increment __increment_D2A
-+#define lo0bits __lo0bits_D2A
-+#define lshift __lshift_D2A
-+#define match __match_D2A
-+#define mult __mult_D2A
-+#define multadd __multadd_D2A
-+#define nrv_alloc __nrv_alloc_D2A
-+#define pow5mult __pow5mult_D2A
-+#define quorem __quorem_D2A
-+#define ratio __ratio_D2A
-+#define rshift __rshift_D2A
-+#define rv_alloc __rv_alloc_D2A
-+#define s2b __s2b_D2A
-+#define set_ones __set_ones_D2A
-+#define strcp __strcp_D2A
-+#define strcp_D2A __strcp_D2A
-+#define strtoIg __strtoIg_D2A
-+#define sum __sum_D2A
-+#define tens __tens_D2A
-+#define tinytens __tinytens_D2A
-+#define tinytens __tinytens_D2A
-+#define trailz __trailz_D2A
-+#define ulp __ulp_D2A
-
+-
extern char *dtoa_result;
extern CONST double bigtens[], tens[], tinytens[];
-@@ -542,8 +593,11 @@
- extern Bigint *diff ANSI((Bigint*, Bigint*));
+ extern unsigned char hexdig[];
+@@ -549,7 +599,7 @@
extern char *dtoa ANSI((double d, int mode, int ndigits,
int *decpt, int *sign, char **rve));
-+ extern void freedtoa ANSI((char*));
-+ extern char *gdtoa ANSI((FPI *fpi, int be, ULong *bits, int *kindp,
-+ int mode, int ndigits, int *decpt, char **rve));
- extern char *g__fmt ANSI((char*, char*, char*, int, ULong));
+ extern char *g__fmt ANSI((char*, char*, char*, int, ULong, size_t));
- extern int gethex ANSI((CONST char**, FPI*, Long*, Bigint**, int));
+ extern int gethex ANSI((CONST char**, FPI*, Long*, Bigint**, int, locale_t));
extern void hexdig_init_D2A(Void);
extern int hexnan ANSI((CONST char**, FPI*, ULong*));
extern int hi0bits_D2A ANSI((ULong));
-@@ -560,11 +614,32 @@
+@@ -566,11 +616,12 @@
extern double ratio ANSI((Bigint*, Bigint*));
extern void rshift ANSI((Bigint*, int));
extern char *rv_alloc ANSI((int));
+ extern Bigint *s2b ANSI((CONST char*, int, int, ULong, int));
extern Bigint *set_ones ANSI((Bigint*, int));
extern char *strcp ANSI((char*, const char*));
-+ extern int strtodg ANSI((CONST char*, char**, FPI*, Long*, ULong*, locale_t)) __DARWIN_ALIAS(strtodg);
-+
-+ extern int strtoId ANSI((CONST char *, char **, double *, double *));
-+ extern int strtoIdd ANSI((CONST char *, char **, double *, double *));
-+ extern int strtoIf ANSI((CONST char *, char **, float *, float *));
extern int strtoIg ANSI((CONST char*, char**, FPI*, Long*, Bigint**, int*));
-+ extern int strtoIQ ANSI((CONST char *, char **, void *, void *));
-+ extern int strtoIx ANSI((CONST char *, char **, void *, void *));
-+ extern int strtoIxL ANSI((CONST char *, char **, void *, void *));
extern double strtod ANSI((const char *s00, char **se));
+ extern double strtod_l ANSI((const char *s00, char **se, locale_t));
-+ extern int strtopQ ANSI((CONST char *, char **, Void *));
-+ extern int strtopf ANSI((CONST char *, char **, float *));
-+ extern int strtopd ANSI((CONST char *, char **, double *));
-+ extern int strtopdd ANSI((CONST char *, char **, double *, locale_t));
-+ extern int strtopx ANSI((CONST char *, char **, Void *, locale_t));
-+ extern int strtopxL ANSI((CONST char *, char **, Void *));
-+ extern int strtord ANSI((CONST char *, char **, int, double *));
-+ extern int strtordd ANSI((CONST char *, char **, int, double *));
-+ extern int strtorf ANSI((CONST char *, char **, int, float *));
-+ extern int strtorQ ANSI((CONST char *, char **, int, void *));
-+ extern int strtorx ANSI((CONST char *, char **, int, void *));
-+ extern int strtorxL ANSI((CONST char *, char **, int, void *));
extern Bigint *sum ANSI((Bigint*, Bigint*));
extern int trailz ANSI((Bigint*));
extern double ulp ANSI((double));
.sinclude "${.CURDIR}/${MACHINE_ARCH}/stdlib/gdtoa.mk"
-GDTOA_UNIQUE_SRCS != perl -e '@z = split(" ", "$(GDTOA_FBSDSRCS)"); $$, = "\n"; print @z' | sort -u
+GDTOA_UNIQUE_SRCS != ${PERL} -e '@z = split(" ", "$(GDTOA_FBSDSRCS)"); $$, = "\n"; print @z' | sort -u
.include "Makefile.fbsd_begin"
FBSDMISRCS= $(GDTOA_UNIQUE_SRCS) _hdtoa.c glue.c \
FBSDMISRCS+= _ldtoa.c
.endif
-FBSDHDRS= gdtoa.h gdtoaimp.h
+FBSDHDRS= gdtoa.h gdtoaimp.h gdtoa_fltrnds.h
.include "Makefile.fbsd_end"
static const int sigfigs = (DBL_MANT_DIG + 3) / 4;
union IEEEd2bits u;
char *s, *s0;
- int bufsize;
+ int bufsize, f;
u.d = d;
*sign = u.bits.sign;
- switch (fpclassify(d)) {
+ switch (f = fpclassify(d)) {
case FP_NORMAL:
*decpt = u.bits.exp - DBL_ADJ;
break;
*decpt = INT_MAX;
return (nrv_alloc(NANSTR, rve, sizeof(NANSTR) - 1));
default:
- abort();
+ LIBC_ABORT("fpclassify returned %d", f);
}
/* FP_NORMAL or FP_SUBNORMAL */
static const int sigfigs = (LDBL_MANT_DIG + 3) / 4;
union IEEEl2bits u;
char *s, *s0;
- int bufsize;
+ int bufsize, f;
#ifdef LDBL_HEAD_TAIL_PAIR
uint32_t bits[4];
int i, pos;
u.e = e;
*sign = u.bits.sign;
- switch (fpclassify(e)) {
+ switch (f = fpclassify(e)) {
case FP_NORMAL:
case FP_SUPERNORMAL:
*decpt = u.bits.exp - LDBL_ADJ;
*decpt = INT_MAX;
return (nrv_alloc(NANSTR, rve, sizeof(NANSTR) - 1));
default:
- abort();
+ LIBC_ABORT("fpclassify returned %d", f);
}
/* FP_NORMAL or FP_SUBNORMAL */
for (s = s0 + bufsize - 1; s > s0 + sigfigs - 1; s--)
*s = 0;
#ifdef LDBL_HEAD_TAIL_PAIR
- _ldbl2array32dd(u, bits);
+ *decpt -= _ldbl2array32dd(u, bits);
i = 0;
pos = 8;
for (; s > s0; s--) {
/*
- * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <strings.h>
#include <float.h>
#include <math.h>
-#include <alloca.h>
#include "fpmath.h"
#define LL_BITS (8 * sizeof(int64_t))
#define LL_HIGHBIT (1LL << 63)
-__private_extern__ void
+__private_extern__ int
_ldbl2array32dd(union IEEEl2bits u, uint32_t *a)
{
int bit, shift, highbit, dexp;
uint64_t a64[2];
int64_t t64;
+ int extrabit = 0;
+
+ if(u.d[0] == 0.0) {
+ a[0] = a[1] = a[2] = a[3] = 0;
+ return 0;
+ }
bzero(a64, sizeof(a64));
switch (__fpclassifyd(u.d[0])) {
case FP_NORMAL:
- a64[1] = (1LL << (LDBL_MANT_DIG - BITS64 - 1));
- /* drop through */
+ /*
+ * special case: if the head double only has the high (hidden)
+ * bit set, and the tail double is non-zero and is opposite
+ * in sign, then we increment extrabit to keep 106 bit
+ * precision in the results.
+ */
+ if(u.bits.manh == 0 && u.d[1] != 0 && u.bits.sign != u.bits.sign2)
+ extrabit++;
+ a64[1] = (1LL << (LDBL_MANT_DIG - BITS64 - 1 + extrabit));
+ a64[1] |= ((uint64_t)u.bits.manh >> (BITS64 - LDBL_MANL_SIZE - extrabit));
+ a64[0] = ((uint64_t)u.bits.manh << (LDBL_MANL_SIZE + extrabit));
+ break;
case FP_SUBNORMAL:
a64[1] |= ((uint64_t)u.bits.manh >> (BITS64 - LDBL_MANL_SIZE));
a64[0] = ((uint64_t)u.bits.manh << LDBL_MANL_SIZE);
- break;
+ /* the tail double will be zero, so we are done */
+ goto done;
default:
goto done;
}
* if the tail double is so small to not fit in LDBL_MANT_DIG bits,
* then just skip it.
*/
- if (dexp >= LDBL_MANT_DIG)
+ if (dexp >= LDBL_MANT_DIG + extrabit) {
+ reshift:
+ if (extrabit) {
+ bit = a64[1] & 1;
+ a64[1] >>= 1;
+ a64[0] >>= 1;
+ a64[0] |= ((uint64_t)bit) << (BITS64 - 1);
+ extrabit = 0;
+ }
goto done;
+ }
switch (__fpclassifyd(u.d[1])) {
case FP_NORMAL:
- bit = LDBL_MANT_DIG - dexp - 1;
+ bit = LDBL_MANT_DIG - dexp - 1 + extrabit;
t64 = (1LL << bit);
break;
case FP_SUBNORMAL:
- bit = LDBL_MANT_DIG - (int)u.bits.exp;
+ bit = LDBL_MANT_DIG - (int)u.bits.exp + extrabit;
t64 = 0;
break;
default:
- goto done;
+ /* should never get here */
+ goto reshift;
}
shift = LDBL_MANL_SIZE - bit - 1;
if (shift >= 0)
a[1] = (uint32_t)(a64[0] >> 32);
a[2] = (uint32_t)a64[1];
a[3] = (uint32_t)(a64[1] >> 32);
+ return extrabit;
}
#ifdef Honor_FLT_ROUNDS
int rounding = Flt_Rounds;
#endif
-#if defined(__ppc__) || defined(__ppc64__)
int type;
-#endif /* defined(__ppc__) || defined(__ppc64__) */
u.e = *ld;
#if defined(__ppc__) || defined(__ppc64__)
type = FP_SUBNORMAL;
if (type == FP_SUBNORMAL)
u.e *= 1.0e32L;
+#else /* !defined(__ppc__) && !defined(__ppc64__) */
+ type = fpclassify(u.e);
#endif /* defined(__ppc__) || defined(__ppc64__) */
*sign = u.bits.sign;
be = u.bits.exp - (LDBL_MAX_EXP - 1) - (LDBL_MANT_DIG - 1);
+#if defined(__ppc__) || defined(__ppc64__)
+ be -= LDBL_TO_ARRAY32(u, bits);
+#else /* !defined(__ppc__) && !defined(__ppc64__) */
LDBL_TO_ARRAY32(u, bits);
+#endif /* defined(__ppc__) || defined(__ppc64__) */
-#if defined(__ppc__) || defined(__ppc64__)
switch (type) {
+#if defined(__ppc__) || defined(__ppc64__)
case FP_SUBNORMAL:
-#else /* !defined(__ppc__) && !defined(__ppc64__) */
- switch (fpclassify(u.e)) {
#endif /* defined(__ppc__) || defined(__ppc64__) */
case FP_NORMAL:
case FP_SUPERNORMAL:
kind = STRTOG_NaN;
break;
default:
- abort();
+ LIBC_ABORT("fpclassify returned %d", type);
}
#ifdef Honor_FLT_ROUNDS
#endif
#define Honor_FLT_ROUNDS
+#define Trust_FLT_ROUNDS
#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__)
-#define f_QNAN 0xffc00000
+#define f_QNAN 0x7fc00000
#define d_QNAN0 0x0
-#define d_QNAN1 0xfff80000
+#define d_QNAN1 0x7ff80000
#define ld_QNAN0 0x0
#define ld_QNAN1 0xc0000000
-#define ld_QNAN2 0xffff
+#define ld_QNAN2 0x7fff
#define ld_QNAN3 0x0
#define ldus_QNAN0 0x0
#define ldus_QNAN1 0x0
#define ldus_QNAN2 0x0
#define ldus_QNAN3 0xc000
-#define ldus_QNAN4 0xffff
+#define ldus_QNAN4 0x7fff
#else
#error unknown architecture
+++ /dev/null
-/****************************************************************
-
-The author of this software is David M. Gay.
-
-Copyright (C) 1998, 1999 by Lucent Technologies
-All Rights Reserved
-
-Permission to use, copy, modify, and distribute this software and
-its documentation for any purpose and without fee is hereby
-granted, provided that the above copyright notice appear in all
-copies and that both that the copyright notice and this
-permission notice and warranty disclaimer appear in supporting
-documentation, and that the name of Lucent or any of its entities
-not be used in advertising or publicity pertaining to
-distribution of the software without specific, written prior
-permission.
-
-LUCENT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
-IN NO EVENT SHALL LUCENT OR ANY OF ITS ENTITIES BE LIABLE FOR ANY
-SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
-IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
-ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
-THIS SOFTWARE.
-
-****************************************************************/
-
-/* Please send bug reports to David M. Gay (dmg at acm dot org,
- * with " at " changed at "@" and " dot " changed to "."). */
-
-#include "gdtoaimp.h"
-
- static Bigint *
-#ifdef KR_headers
-bitstob(bits, nbits, bbits) ULong *bits; int nbits; int *bbits;
-#else
-bitstob(ULong *bits, int nbits, int *bbits)
-#endif
-{
- int i, k;
- Bigint *b;
- ULong *be, *x, *x0;
-
- i = ULbits;
- k = 0;
- while(i < nbits) {
- i <<= 1;
- k++;
- }
-#ifndef Pack_32
- if (!k)
- k = 1;
-#endif
- b = Balloc(k);
- be = bits + ((nbits - 1) >> kshift);
- x = x0 = b->x;
- do {
- *x++ = *bits & ALL_ON;
-#ifdef Pack_16
- *x++ = (*bits >> 16) & ALL_ON;
-#endif
- } while(++bits <= be);
- i = x - x0;
- while(!x0[--i])
- if (!i) {
- b->wds = 0;
- *bbits = 0;
- goto ret;
- }
- b->wds = i + 1;
- *bbits = i*ULbits + 32 - hi0bits(b->x[i]);
- ret:
- return b;
- }
-
-/* dtoa for IEEE arithmetic (dmg): convert double to ASCII string.
- *
- * Inspired by "How to Print Floating-Point Numbers Accurately" by
- * Guy L. Steele, Jr. and Jon L. White [Proc. ACM SIGPLAN '90, pp. 112-126].
- *
- * Modifications:
- * 1. Rather than iterating, we use a simple numeric overestimate
- * to determine k = floor(log10(d)). We scale relevant
- * quantities using O(log2(k)) rather than O(k) multiplications.
- * 2. For some modes > 2 (corresponding to ecvt and fcvt), we don't
- * try to generate digits strictly left to right. Instead, we
- * compute with fewer bits and propagate the carry if necessary
- * when rounding the final digit up. This is often faster.
- * 3. Under the assumption that input will be rounded nearest,
- * mode 0 renders 1e23 as 1e23 rather than 9.999999999999999e22.
- * That is, we allow equality in stopping tests when the
- * round-nearest rule will give the same floating-point value
- * as would satisfaction of the stopping test with strict
- * inequality.
- * 4. We remove common factors of powers of 2 from relevant
- * quantities.
- * 5. When converting floating-point integers less than 1e16,
- * we use floating-point arithmetic rather than resorting
- * to multiple-precision integers.
- * 6. When asked to produce fewer than 15 digits, we first try
- * to get by with floating-point arithmetic; we resort to
- * multiple-precision integer arithmetic only if we cannot
- * guarantee that the floating-point calculation has given
- * the correctly rounded result. For k requested digits and
- * "uniformly" distributed input, the probability is
- * something like 10^(k-15) that we must resort to the Long
- * calculation.
- */
-
- char *
-gdtoa
-#ifdef KR_headers
- (fpi, be, bits, kindp, mode, ndigits, decpt, rve)
- FPI *fpi; int be; ULong *bits;
- int *kindp, mode, ndigits, *decpt; char **rve;
-#else
- (FPI *fpi, int be, ULong *bits, int *kindp, int mode, int ndigits, int *decpt, char **rve)
-#endif
-{
- /* Arguments ndigits and decpt are similar to the second and third
- arguments of ecvt and fcvt; trailing zeros are suppressed from
- the returned string. If not null, *rve is set to point
- to the end of the return value. If d is +-Infinity or NaN,
- then *decpt is set to 9999.
-
- mode:
- 0 ==> shortest string that yields d when read in
- and rounded to nearest.
- 1 ==> like 0, but with Steele & White stopping rule;
- e.g. with IEEE P754 arithmetic , mode 0 gives
- 1e23 whereas mode 1 gives 9.999999999999999e22.
- 2 ==> max(1,ndigits) significant digits. This gives a
- return value similar to that of ecvt, except
- that trailing zeros are suppressed.
- 3 ==> through ndigits past the decimal point. This
- gives a return value similar to that from fcvt,
- except that trailing zeros are suppressed, and
- ndigits can be negative.
- 4-9 should give the same return values as 2-3, i.e.,
- 4 <= mode <= 9 ==> same return as mode
- 2 + (mode & 1). These modes are mainly for
- debugging; often they run slower but sometimes
- faster than modes 2-3.
- 4,5,8,9 ==> left-to-right digit generation.
- 6-9 ==> don't try fast floating-point estimate
- (if applicable).
-
- Values of mode other than 0-9 are treated as mode 0.
-
- Sufficient space is allocated to the return value
- to hold the suppressed trailing zeros.
- */
-
- int bbits, b2, b5, be0, dig, i, ieps, ilim, ilim0, ilim1, inex;
- int j, j1, k, k0, k_check, kind, leftright, m2, m5, nbits;
- int rdir, s2, s5, spec_case, try_quick;
- Long L;
- Bigint *b, *b1, *delta, *mlo, *mhi, *mhi1, *S;
- double d, d2, ds, eps;
- char *s, *s0;
-
-#ifndef MULTIPLE_THREADS
- if (dtoa_result) {
- freedtoa(dtoa_result);
- dtoa_result = 0;
- }
-#endif
- inex = 0;
- kind = *kindp &= ~STRTOG_Inexact;
- switch(kind & STRTOG_Retmask) {
- case STRTOG_Zero:
- goto ret_zero;
- case STRTOG_Normal:
- case STRTOG_Denormal:
- break;
- case STRTOG_Infinite:
- *decpt = -32768;
- return nrv_alloc("Infinity", rve, 8);
- case STRTOG_NaN:
- *decpt = -32768;
- return nrv_alloc("NaN", rve, 3);
- default:
- return 0;
- }
- b = bitstob(bits, nbits = fpi->nbits, &bbits);
- be0 = be;
- if ( (i = trailz(b)) !=0) {
- rshift(b, i);
- be += i;
- bbits -= i;
- }
- if (!b->wds) {
- Bfree(b);
- ret_zero:
- *decpt = 1;
- return nrv_alloc("0", rve, 1);
- }
-
- dval(d) = b2d(b, &i);
- i = be + bbits - 1;
- word0(d) &= Frac_mask1;
- word0(d) |= Exp_11;
-#ifdef IBM
- if ( (j = 11 - hi0bits(word0(d) & Frac_mask)) !=0)
- dval(d) /= 1 << j;
-#endif
-
- /* log(x) ~=~ log(1.5) + (x-1.5)/1.5
- * log10(x) = log(x) / log(10)
- * ~=~ log(1.5)/log(10) + (x-1.5)/(1.5*log(10))
- * log10(d) = (i-Bias)*log(2)/log(10) + log10(d2)
- *
- * This suggests computing an approximation k to log10(d) by
- *
- * k = (i - Bias)*0.301029995663981
- * + ( (d2-1.5)*0.289529654602168 + 0.176091259055681 );
- *
- * We want k to be too large rather than too small.
- * The error in the first-order Taylor series approximation
- * is in our favor, so we just round up the constant enough
- * to compensate for any error in the multiplication of
- * (i - Bias) by 0.301029995663981; since |i - Bias| <= 1077,
- * and 1077 * 0.30103 * 2^-52 ~=~ 7.2e-14,
- * adding 1e-13 to the constant term more than suffices.
- * Hence we adjust the constant term to 0.1760912590558.
- * (We could get a more accurate k by invoking log10,
- * but this is probably not worthwhile.)
- */
-#ifdef IBM
- i <<= 2;
- i += j;
-#endif
- ds = (dval(d)-1.5)*0.289529654602168 + 0.1760912590558 + i*0.301029995663981;
-
- /* correct assumption about exponent range */
- if ((j = i) < 0)
- j = -j;
- if ((j -= 1077) > 0)
- ds += j * 7e-17;
-
- k = (int)ds;
- if (ds < 0. && ds != k)
- k--; /* want k = floor(ds) */
- k_check = 1;
-#ifdef IBM
- j = be + bbits - 1;
- if ( (j1 = j & 3) !=0)
- dval(d) *= 1 << j1;
- word0(d) += j << Exp_shift - 2 & Exp_mask;
-#else
- word0(d) += (be + bbits - 1) << Exp_shift;
-#endif
- if (k >= 0 && k <= Ten_pmax) {
- if (dval(d) < tens[k])
- k--;
- k_check = 0;
- }
- j = bbits - i - 1;
- if (j >= 0) {
- b2 = 0;
- s2 = j;
- }
- else {
- b2 = -j;
- s2 = 0;
- }
- if (k >= 0) {
- b5 = 0;
- s5 = k;
- s2 += k;
- }
- else {
- b2 -= k;
- b5 = -k;
- s5 = 0;
- }
- if (mode < 0 || mode > 9)
- mode = 0;
- try_quick = 1;
- if (mode > 5) {
- mode -= 4;
- try_quick = 0;
- }
- leftright = 1;
- switch(mode) {
- case 0:
- case 1:
- ilim = ilim1 = -1;
- i = (int)(nbits * .30103) + 3;
- ndigits = 0;
- break;
- case 2:
- leftright = 0;
- /* no break */
- case 4:
- if (ndigits <= 0)
- ndigits = 1;
- ilim = ilim1 = i = ndigits;
- break;
- case 3:
- leftright = 0;
- /* no break */
- case 5:
- i = ndigits + k + 1;
- ilim = i;
- ilim1 = i - 1;
- if (i <= 0)
- i = 1;
- }
- s = s0 = rv_alloc(i);
-
- if ( (rdir = fpi->rounding - 1) !=0) {
- if (rdir < 0)
- rdir = 2;
- if (kind & STRTOG_Neg)
- rdir = 3 - rdir;
- }
-
- /* Now rdir = 0 ==> round near, 1 ==> round up, 2 ==> round down. */
-
- if (ilim >= 0 && ilim <= Quick_max && try_quick && !rdir
-#ifndef IMPRECISE_INEXACT
- && k == 0
-#endif
- ) {
-
- /* Try to get by with floating-point arithmetic. */
-
- i = 0;
- d2 = dval(d);
-#ifdef IBM
- if ( (j = 11 - hi0bits(word0(d) & Frac_mask)) !=0)
- dval(d) /= 1 << j;
-#endif
- k0 = k;
- ilim0 = ilim;
- ieps = 2; /* conservative */
- if (k > 0) {
- ds = tens[k&0xf];
- j = k >> 4;
- if (j & Bletch) {
- /* prevent overflows */
- j &= Bletch - 1;
- dval(d) /= bigtens[n_bigtens-1];
- ieps++;
- }
- for(; j; j >>= 1, i++)
- if (j & 1) {
- ieps++;
- ds *= bigtens[i];
- }
- }
- else {
- ds = 1.;
- if ( (j1 = -k) !=0) {
- dval(d) *= tens[j1 & 0xf];
- for(j = j1 >> 4; j; j >>= 1, i++)
- if (j & 1) {
- ieps++;
- dval(d) *= bigtens[i];
- }
- }
- }
- if (k_check && dval(d) < 1. && ilim > 0) {
- if (ilim1 <= 0)
- goto fast_failed;
- ilim = ilim1;
- k--;
- dval(d) *= 10.;
- ieps++;
- }
- dval(eps) = ieps*dval(d) + 7.;
- word0(eps) -= (P-1)*Exp_msk1;
- if (ilim == 0) {
- S = mhi = 0;
- dval(d) -= 5.;
- if (dval(d) > dval(eps))
- goto one_digit;
- if (dval(d) < -dval(eps))
- goto no_digits;
- goto fast_failed;
- }
-#ifndef No_leftright
- if (leftright) {
- /* Use Steele & White method of only
- * generating digits needed.
- */
- dval(eps) = ds*0.5/tens[ilim-1] - dval(eps);
- for(i = 0;;) {
- L = (Long)(dval(d)/ds);
- dval(d) -= L*ds;
- *s++ = '0' + (int)L;
- if (dval(d) < dval(eps)) {
- if (dval(d))
- inex = STRTOG_Inexlo;
- goto ret1;
- }
- if (ds - dval(d) < dval(eps))
- goto bump_up;
- if (++i >= ilim)
- break;
- dval(eps) *= 10.;
- dval(d) *= 10.;
- }
- }
- else {
-#endif
- /* Generate ilim digits, then fix them up. */
- dval(eps) *= tens[ilim-1];
- for(i = 1;; i++, dval(d) *= 10.) {
- if ( (L = (Long)(dval(d)/ds)) !=0)
- dval(d) -= L*ds;
- *s++ = '0' + (int)L;
- if (i == ilim) {
- ds *= 0.5;
- if (dval(d) > ds + dval(eps))
- goto bump_up;
- else if (dval(d) < ds - dval(eps)) {
- while(*--s == '0'){}
- s++;
- if (dval(d))
- inex = STRTOG_Inexlo;
- goto ret1;
- }
- break;
- }
- }
-#ifndef No_leftright
- }
-#endif
- fast_failed:
- s = s0;
- dval(d) = d2;
- k = k0;
- ilim = ilim0;
- }
-
- /* Do we have a "small" integer? */
-
- if (be >= 0 && k <= Int_max) {
- /* Yes. */
- ds = tens[k];
- if (ndigits < 0 && ilim <= 0) {
- S = mhi = 0;
- if (ilim < 0 || dval(d) <= 5*ds)
- goto no_digits;
- goto one_digit;
- }
- for(i = 1;; i++, dval(d) *= 10.) {
- L = dval(d) / ds;
- dval(d) -= L*ds;
-#ifdef Check_FLT_ROUNDS
- /* If FLT_ROUNDS == 2, L will usually be high by 1 */
- if (dval(d) < 0) {
- L--;
- dval(d) += ds;
- }
-#endif
- *s++ = '0' + (int)L;
- if (dval(d) == 0.)
- break;
- if (i == ilim) {
- if (rdir) {
- if (rdir == 1)
- goto bump_up;
- inex = STRTOG_Inexlo;
- goto ret1;
- }
- dval(d) += dval(d);
- if (dval(d) > ds || dval(d) == ds && L & 1) {
- bump_up:
- inex = STRTOG_Inexhi;
- while(*--s == '9')
- if (s == s0) {
- k++;
- *s = '0';
- break;
- }
- ++*s++;
- }
- else {
- inex = STRTOG_Inexlo;
- while(*--s == '0'){}
- s++;
- }
- break;
- }
- }
- goto ret1;
- }
-
- m2 = b2;
- m5 = b5;
- mhi = mlo = 0;
- if (leftright) {
- if (mode < 2) {
- i = nbits - bbits;
- if (be - i++ < fpi->emin)
- /* denormal */
- i = be - fpi->emin + 1;
- }
- else {
- j = ilim - 1;
- if (m5 >= j)
- m5 -= j;
- else {
- s5 += j -= m5;
- b5 += j;
- m5 = 0;
- }
- if ((i = ilim) < 0) {
- m2 -= i;
- i = 0;
- }
- }
- b2 += i;
- s2 += i;
- mhi = i2b(1);
- }
- if (m2 > 0 && s2 > 0) {
- i = m2 < s2 ? m2 : s2;
- b2 -= i;
- m2 -= i;
- s2 -= i;
- }
- if (b5 > 0) {
- if (leftright) {
- if (m5 > 0) {
- mhi = pow5mult(mhi, m5);
- b1 = mult(mhi, b);
- Bfree(b);
- b = b1;
- }
- if ( (j = b5 - m5) !=0)
- b = pow5mult(b, j);
- }
- else
- b = pow5mult(b, b5);
- }
- S = i2b(1);
- if (s5 > 0)
- S = pow5mult(S, s5);
-
- /* Check for special case that d is a normalized power of 2. */
-
- spec_case = 0;
- if (mode < 2) {
- if (bbits == 1 && be0 > fpi->emin + 1) {
- /* The special case */
- b2++;
- s2++;
- spec_case = 1;
- }
- }
-
- /* Arrange for convenient computation of quotients:
- * shift left if necessary so divisor has 4 leading 0 bits.
- *
- * Perhaps we should just compute leading 28 bits of S once
- * and for all and pass them and a shift to quorem, so it
- * can do shifts and ors to compute the numerator for q.
- */
-#ifdef Pack_32
- if ( (i = ((s5 ? 32 - hi0bits(S->x[S->wds-1]) : 1) + s2) & 0x1f) !=0)
- i = 32 - i;
-#else
- if ( (i = ((s5 ? 32 - hi0bits(S->x[S->wds-1]) : 1) + s2) & 0xf) !=0)
- i = 16 - i;
-#endif
- if (i > 4) {
- i -= 4;
- b2 += i;
- m2 += i;
- s2 += i;
- }
- else if (i < 4) {
- i += 28;
- b2 += i;
- m2 += i;
- s2 += i;
- }
- if (b2 > 0)
- b = lshift(b, b2);
- if (s2 > 0)
- S = lshift(S, s2);
- if (k_check) {
- if (cmp(b,S) < 0) {
- k--;
- b = multadd(b, 10, 0); /* we botched the k estimate */
- if (leftright)
- mhi = multadd(mhi, 10, 0);
- ilim = ilim1;
- }
- }
- if (ilim <= 0 && mode > 2) {
- if (ilim < 0 || cmp(b,S = multadd(S,5,0)) <= 0) {
- /* no digits, fcvt style */
- no_digits:
- k = -1 - ndigits;
- inex = STRTOG_Inexlo;
- goto ret;
- }
- one_digit:
- inex = STRTOG_Inexhi;
- *s++ = '1';
- k++;
- goto ret;
- }
- if (leftright) {
- if (m2 > 0)
- mhi = lshift(mhi, m2);
-
- /* Compute mlo -- check for special case
- * that d is a normalized power of 2.
- */
-
- mlo = mhi;
- if (spec_case) {
- mhi = Balloc(mhi->k);
- Bcopy(mhi, mlo);
- mhi = lshift(mhi, 1);
- }
-
- for(i = 1;;i++) {
- dig = quorem(b,S) + '0';
- /* Do we yet have the shortest decimal string
- * that will round to d?
- */
- j = cmp(b, mlo);
- delta = diff(S, mhi);
- j1 = delta->sign ? 1 : cmp(b, delta);
- Bfree(delta);
-#ifndef ROUND_BIASED
- if (j1 == 0 && !mode && !(bits[0] & 1) && !rdir) {
- if (dig == '9')
- goto round_9_up;
- if (j <= 0) {
- if (b->wds > 1 || b->x[0])
- inex = STRTOG_Inexlo;
- }
- else {
- dig++;
- inex = STRTOG_Inexhi;
- }
- *s++ = dig;
- goto ret;
- }
-#endif
- if (j < 0 || j == 0 && !mode
-#ifndef ROUND_BIASED
- && !(bits[0] & 1)
-#endif
- ) {
- if (rdir && (b->wds > 1 || b->x[0])) {
- if (rdir == 2) {
- inex = STRTOG_Inexlo;
- goto accept;
- }
- while (cmp(S,mhi) > 0) {
- *s++ = dig;
- mhi1 = multadd(mhi, 10, 0);
- if (mlo == mhi)
- mlo = mhi1;
- mhi = mhi1;
- b = multadd(b, 10, 0);
- dig = quorem(b,S) + '0';
- }
- if (dig++ == '9')
- goto round_9_up;
- inex = STRTOG_Inexhi;
- goto accept;
- }
- if (j1 > 0) {
- b = lshift(b, 1);
- j1 = cmp(b, S);
- if ((j1 > 0 || j1 == 0 && dig & 1)
- && dig++ == '9')
- goto round_9_up;
- inex = STRTOG_Inexhi;
- }
- if (b->wds > 1 || b->x[0])
- inex = STRTOG_Inexlo;
- accept:
- *s++ = dig;
- goto ret;
- }
- if (j1 > 0 && rdir != 2) {
- if (dig == '9') { /* possible if i == 1 */
- round_9_up:
- *s++ = '9';
- inex = STRTOG_Inexhi;
- goto roundoff;
- }
- inex = STRTOG_Inexhi;
- *s++ = dig + 1;
- goto ret;
- }
- *s++ = dig;
- if (i == ilim)
- break;
- b = multadd(b, 10, 0);
- if (mlo == mhi)
- mlo = mhi = multadd(mhi, 10, 0);
- else {
- mlo = multadd(mlo, 10, 0);
- mhi = multadd(mhi, 10, 0);
- }
- }
- }
- else
- for(i = 1;; i++) {
- *s++ = dig = quorem(b,S) + '0';
- if (i >= ilim)
- break;
- b = multadd(b, 10, 0);
- }
-
- /* Round off last digit */
-
- if (rdir) {
- if (rdir == 2 || b->wds <= 1 && !b->x[0])
- goto chopzeros;
- goto roundoff;
- }
- b = lshift(b, 1);
- j = cmp(b, S);
- if (j > 0 || j == 0 && dig & 1) {
- roundoff:
- inex = STRTOG_Inexhi;
- while(*--s == '9')
- if (s == s0) {
- k++;
- *s++ = '1';
- goto ret;
- }
- ++*s++;
- }
- else {
- chopzeros:
- if (b->wds > 1 || b->x[0])
- inex = STRTOG_Inexlo;
- while(*--s == '0'){}
- s++;
- }
- ret:
- Bfree(S);
- if (mhi) {
- if (mlo && mlo != mhi)
- Bfree(mlo);
- Bfree(mhi);
- }
- ret1:
- Bfree(b);
- *s = 0;
- *decpt = k + 1;
- if (rve)
- *rve = s;
- *kindp |= inex;
- return s0;
- }
--- /dev/null
+./gdtoa-gdtoa.c
\ No newline at end of file
{
Bigint *b;
CONST unsigned char *decpt, *s0, *s, *s1;
- int esign, havedig, irv, k, n, nbits, up, zret;
+ int big, esign, havedig, irv, j, k, n, n0, nbits, up, zret;
ULong L, lostbits, *x;
Long e, e1;
#ifdef USE_LOCALE
- char *decimalpoint;
- unsigned char *decimalpointend = NULL;
- int decimalpointlen;
-
+ int i;
NORMALIZE_LOCALE(loc);
- decimalpoint = localeconv_l(loc)->decimal_point;
- decimalpointlen = strlen(decimalpoint);
+#ifdef NO_LOCALE_CACHE
+ const unsigned char *decimalpoint = (unsigned char*)localeconv_l(loc)->decimal_point;
#else
-#define decimalpoint '.'
+ const unsigned char *decimalpoint;
+ static unsigned char *decimalpoint_cache;
+ if (!(s0 = decimalpoint_cache)) {
+ s0 = (unsigned char*)localeconv_l(loc)->decimal_point;
+ if ((decimalpoint_cache = (char*)malloc(strlen(s0) + 1))) {
+ strcpy(decimalpoint_cache, s0);
+ s0 = decimalpoint_cache;
+ }
+ }
+ decimalpoint = s0;
+#endif
#endif
if (!hexdig['0'])
hexdig_init_D2A();
+ *bp = 0;
havedig = 0;
s0 = *(CONST unsigned char **)sp + 2;
while(s0[havedig] == '0')
decpt = 0;
zret = 0;
e = 0;
- if (!hexdig[*s]) {
+ if (hexdig[*s])
+ havedig++;
+ else {
zret = 1;
#ifdef USE_LOCALE
- if (strncmp((char *)s, decimalpoint, decimalpointlen) != 0)
-#else /* USE_LOCALE */
- if (*s != decimalpoint)
-#endif /* USE_LOCALE */
+ for(i = 0; decimalpoint[i]; ++i) {
+ if (s[i] != decimalpoint[i])
+ goto pcheck;
+ }
+ decpt = s += i;
+#else
+ if (*s != '.')
goto pcheck;
-#ifdef USE_LOCALE
- decpt = (s += decimalpointlen);
- decimalpointend = s - 1;
-#else /* USE_LOCALE */
decpt = ++s;
-#endif /* USE_LOCALE */
+#endif
if (!hexdig[*s])
goto pcheck;
while(*s == '0')
while(hexdig[*s])
s++;
#ifdef USE_LOCALE
- if (strncmp((char *)s, decimalpoint, decimalpointlen) == 0 && !decpt)
-#else /* USE_LOCALE */
- if (*s == decimalpoint && !decpt)
-#endif /* USE_LOCALE */
- {
-#ifdef USE_LOCALE
- decpt = (s += decimalpointlen);
- decimalpointend = s - 1;
-#else /* USE_LOCALE */
+ if (*s == *decimalpoint && !decpt) {
+ for(i = 1; decimalpoint[i]; ++i) {
+ if (s[i] != decimalpoint[i])
+ goto pcheck;
+ }
+ decpt = s += i;
+#else
+ if (*s == '.' && !decpt) {
decpt = ++s;
-#endif /* USE_LOCALE */
+#endif
while(hexdig[*s])
s++;
- }
+ }/*}*/
if (decpt)
e = -(((Long)(s-decpt)) << 2);
pcheck:
s1 = s;
+ big = esign = 0;
switch(*s) {
case 'p':
case 'P':
- esign = 0;
switch(*++s) {
case '-':
esign = 1;
break;
}
e1 = n - 0x10;
- while((n = hexdig[*++s]) !=0 && n <= 0x19)
+ while((n = hexdig[*++s]) !=0 && n <= 0x19) {
+ if (e1 & 0xf8000000)
+ big = 1;
e1 = 10*e1 + n - 0x10;
+ }
if (esign)
e1 = -e1;
e += e1;
}
*sp = (char*)s;
+ if (!havedig)
+ *sp = (char*)s0 - 1;
if (zret)
- return havedig ? STRTOG_Zero : STRTOG_NoNumber;
+ return STRTOG_Zero;
+ if (big) {
+ if (esign) {
+ switch(fpi->rounding) {
+ case FPI_Round_up:
+ if (sign)
+ break;
+ goto ret_tiny;
+ case FPI_Round_down:
+ if (!sign)
+ break;
+ goto ret_tiny;
+ }
+ goto retz;
+ ret_tiny:
+ b = Balloc(0);
+ b->wds = 1;
+ b->x[0] = 1;
+ goto dret;
+ }
+ switch(fpi->rounding) {
+ case FPI_Round_near:
+ goto ovfl1;
+ case FPI_Round_up:
+ if (!sign)
+ goto ovfl1;
+ goto ret_big;
+ case FPI_Round_down:
+ if (sign)
+ goto ovfl1;
+ goto ret_big;
+ }
+ ret_big:
+ nbits = fpi->nbits;
+ n0 = n = nbits >> kshift;
+ if (nbits & kmask)
+ ++n;
+ for(j = n, k = 0; j >>= 1; ++k);
+ *bp = b = Balloc(k);
+ b->wds = n;
+ for(j = 0; j < n0; ++j)
+ b->x[j] = ALL_ON;
+ if (n > n0)
+ b->x[j] = ULbits >> (ULbits - (nbits & kmask));
+ *exp = fpi->emin;
+ return STRTOG_Normal | STRTOG_Inexlo;
+ }
n = s1 - s0 - 1;
for(k = 0; n > 7; n >>= 1)
k++;
x = b->x;
n = 0;
L = 0;
+#ifdef USE_LOCALE
+ for(i = 0; decimalpoint[i+1]; ++i);
+#endif
while(s1 > s0) {
#ifdef USE_LOCALE
- if (--s1 == decimalpointend) {
- s1 -= decimalpointlen - 1;
+ if (*--s1 == decimalpoint[i]) {
+ s1 -= i;
continue;
- }
-#else /* USE_LOCALE */
- if (*--s1 == decimalpoint)
+ }
+#else
+ if (*--s1 == '.')
continue;
-#endif /* USE_LOCALE */
+#endif
if (n == 32) {
*x++ = L;
L = 0;
k = n - 1;
if (x[k>>kshift] & 1 << (k & kmask)) {
lostbits = 2;
- if (k > 1 && any_on(b,k-1))
+ if (k > 0 && any_on(b,k))
lostbits = 3;
}
}
if (e > fpi->emax) {
ovfl:
Bfree(b);
- *bp = 0;
+ ovfl1:
+#ifndef NO_ERRNO
+ errno = ERANGE;
+#endif
return STRTOG_Infinite | STRTOG_Overflow | STRTOG_Inexhi;
}
irv = STRTOG_Normal;
case FPI_Round_down:
if (sign) {
one_bit:
- *exp = fpi->emin;
x[0] = b->wds = 1;
+ dret:
*bp = b;
+ *exp = fpi->emin;
+#ifndef NO_ERRNO
+ errno = ERANGE;
+#endif
return STRTOG_Denormal | STRTOG_Inexhi
| STRTOG_Underflow;
}
}
Bfree(b);
- *bp = 0;
+ retz:
+#ifndef NO_ERRNO
+ errno = ERANGE;
+#endif
return STRTOG_Zero | STRTOG_Inexlo | STRTOG_Underflow;
}
k = n - 1;
+++ /dev/null
-./gdtoa-misc.c
\ No newline at end of file
--- /dev/null
+/****************************************************************
+
+The author of this software is David M. Gay.
+
+Copyright (C) 1998, 1999 by Lucent Technologies
+All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appear in all
+copies and that both that the copyright notice and this
+permission notice and warranty disclaimer appear in supporting
+documentation, and that the name of Lucent or any of its entities
+not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+LUCENT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
+IN NO EVENT SHALL LUCENT OR ANY OF ITS ENTITIES BE LIABLE FOR ANY
+SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
+IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
+ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
+THIS SOFTWARE.
+
+****************************************************************/
+
+/* Please send bug reports to David M. Gay (dmg at acm dot org,
+ * with " at " changed at "@" and " dot " changed to "."). */
+
+#define GDTOA_TSD
+#define Omit_Private_Memory
+
+#ifdef GDTOA_TSD
+#include <pthread.h>
+#endif /* GDTOA_TSD */
+#include "gdtoaimp.h"
+
+#ifdef GDTOA_TSD
+static pthread_key_t gdtoa_tsd_key = (pthread_key_t)-1;
+static pthread_mutex_t gdtoa_tsd_lock = PTHREAD_MUTEX_INITIALIZER;
+#else /* !GDTOA_TSD */
+ static Bigint *freelist[Kmax+1];
+#endif /* GDTOA_TSD */
+#ifndef Omit_Private_Memory
+#ifndef PRIVATE_MEM
+#define PRIVATE_MEM 2304
+#endif
+#define PRIVATE_mem ((PRIVATE_MEM+sizeof(double)-1)/sizeof(double))
+static double private_mem[PRIVATE_mem], *pmem_next = private_mem;
+#endif
+
+#ifdef GDTOA_TSD
+static void
+gdtoa_freelist_free(void *x)
+{
+ int i;
+ Bigint *cur, *next;
+ Bigint **fl = (Bigint **)x;
+
+ if (!fl) return;
+ for(i = 0; i < Kmax+1; fl++, i++) {
+ if (!*fl) continue;
+ for(cur = *fl; cur; cur = next) {
+ next = cur->next;
+ free(cur);
+ }
+ }
+ free(x);
+ }
+#endif /* GDTOA_TSD */
+
+ Bigint *
+Balloc
+#ifdef KR_headers
+ (k) int k;
+#else
+ (int k)
+#endif
+{
+ int x;
+ Bigint *rv;
+#ifndef Omit_Private_Memory
+ unsigned int len;
+#endif
+#ifdef GDTOA_TSD
+ Bigint **freelist;
+
+ if (gdtoa_tsd_key == (pthread_key_t)-1) {
+ pthread_mutex_lock(&gdtoa_tsd_lock);
+ if (gdtoa_tsd_key == (pthread_key_t)-1) {
+ gdtoa_tsd_key = __LIBC_PTHREAD_KEY_GDTOA_BIGINT;
+ pthread_key_init_np(gdtoa_tsd_key, gdtoa_freelist_free);
+ }
+ pthread_mutex_unlock(&gdtoa_tsd_lock);
+ }
+ if ((freelist = (Bigint **)pthread_getspecific(gdtoa_tsd_key)) == NULL) {
+ freelist = (Bigint **)MALLOC((Kmax+1) * sizeof(Bigint *));
+ bzero(freelist, (Kmax+1) * sizeof(Bigint *));
+ pthread_setspecific(gdtoa_tsd_key, freelist);
+ }
+#else /* !GDTOA_TSD */
+ ACQUIRE_DTOA_LOCK(0);
+#endif /* GDTOA_TSD */
+ if ( (rv = freelist[k]) !=0) {
+ freelist[k] = rv->next;
+ }
+ else {
+ x = 1 << k;
+#ifdef Omit_Private_Memory
+ rv = (Bigint *)MALLOC(sizeof(Bigint) + (x-1)*sizeof(ULong));
+#else
+ len = (sizeof(Bigint) + (x-1)*sizeof(ULong) + sizeof(double) - 1)
+ /sizeof(double);
+ if (pmem_next - private_mem + len <= PRIVATE_mem) {
+ rv = (Bigint*)pmem_next;
+ pmem_next += len;
+ }
+ else
+ rv = (Bigint*)MALLOC(len*sizeof(double));
+#endif
+ rv->k = k;
+ rv->maxwds = x;
+ }
+#ifndef GDTOA_TSD
+ FREE_DTOA_LOCK(0);
+#endif /* GDTOA_TSD */
+ rv->sign = rv->wds = 0;
+ return rv;
+ }
+
+ void
+Bfree
+#ifdef KR_headers
+ (v) Bigint *v;
+#else
+ (Bigint *v)
+#endif
+{
+ if (v) {
+#ifdef GDTOA_TSD
+ Bigint **freelist = (Bigint **)pthread_getspecific(gdtoa_tsd_key);
+#else /* !GDTOA_TSD */
+ ACQUIRE_DTOA_LOCK(0);
+#endif /* GDTOA_TSD */
+ v->next = freelist[v->k];
+ freelist[v->k] = v;
+#ifndef GDTOA_TSD
+ FREE_DTOA_LOCK(0);
+#endif /* GDTOA_TSD */
+ }
+ }
+
+ int
+lo0bits
+#ifdef KR_headers
+ (y) ULong *y;
+#else
+ (ULong *y)
+#endif
+{
+ register int k;
+ register ULong x = *y;
+
+ if (x & 7) {
+ if (x & 1)
+ return 0;
+ if (x & 2) {
+ *y = x >> 1;
+ return 1;
+ }
+ *y = x >> 2;
+ return 2;
+ }
+ k = 0;
+ if (!(x & 0xffff)) {
+ k = 16;
+ x >>= 16;
+ }
+ if (!(x & 0xff)) {
+ k += 8;
+ x >>= 8;
+ }
+ if (!(x & 0xf)) {
+ k += 4;
+ x >>= 4;
+ }
+ if (!(x & 0x3)) {
+ k += 2;
+ x >>= 2;
+ }
+ if (!(x & 1)) {
+ k++;
+ x >>= 1;
+ if (!x)
+ return 32;
+ }
+ *y = x;
+ return k;
+ }
+
+ Bigint *
+multadd
+#ifdef KR_headers
+ (b, m, a) Bigint *b; int m, a;
+#else
+ (Bigint *b, int m, int a) /* multiply by m and add a */
+#endif
+{
+ int i, wds;
+#ifdef ULLong
+ ULong *x;
+ ULLong carry, y;
+#else
+ ULong carry, *x, y;
+#ifdef Pack_32
+ ULong xi, z;
+#endif
+#endif
+ Bigint *b1;
+
+ wds = b->wds;
+ x = b->x;
+ i = 0;
+ carry = a;
+ do {
+#ifdef ULLong
+ y = *x * (ULLong)m + carry;
+ carry = y >> 32;
+ *x++ = y & 0xffffffffUL;
+#else
+#ifdef Pack_32
+ xi = *x;
+ y = (xi & 0xffff) * m + carry;
+ z = (xi >> 16) * m + (y >> 16);
+ carry = z >> 16;
+ *x++ = (z << 16) + (y & 0xffff);
+#else
+ y = *x * m + carry;
+ carry = y >> 16;
+ *x++ = y & 0xffff;
+#endif
+#endif
+ }
+ while(++i < wds);
+ if (carry) {
+ if (wds >= b->maxwds) {
+ b1 = Balloc(b->k+1);
+ Bcopy(b1, b);
+ Bfree(b);
+ b = b1;
+ }
+ b->x[wds++] = carry;
+ b->wds = wds;
+ }
+ return b;
+ }
+
+ int
+hi0bits_D2A
+#ifdef KR_headers
+ (x) register ULong x;
+#else
+ (register ULong x)
+#endif
+{
+ register int k = 0;
+
+ if (!(x & 0xffff0000)) {
+ k = 16;
+ x <<= 16;
+ }
+ if (!(x & 0xff000000)) {
+ k += 8;
+ x <<= 8;
+ }
+ if (!(x & 0xf0000000)) {
+ k += 4;
+ x <<= 4;
+ }
+ if (!(x & 0xc0000000)) {
+ k += 2;
+ x <<= 2;
+ }
+ if (!(x & 0x80000000)) {
+ k++;
+ if (!(x & 0x40000000))
+ return 32;
+ }
+ return k;
+ }
+
+ Bigint *
+i2b
+#ifdef KR_headers
+ (i) int i;
+#else
+ (int i)
+#endif
+{
+ Bigint *b;
+
+ b = Balloc(1);
+ b->x[0] = i;
+ b->wds = 1;
+ return b;
+ }
+
+ Bigint *
+mult
+#ifdef KR_headers
+ (a, b) Bigint *a, *b;
+#else
+ (Bigint *a, Bigint *b)
+#endif
+{
+ Bigint *c;
+ int k, wa, wb, wc;
+ ULong *x, *xa, *xae, *xb, *xbe, *xc, *xc0;
+ ULong y;
+#ifdef ULLong
+ ULLong carry, z;
+#else
+ ULong carry, z;
+#ifdef Pack_32
+ ULong z2;
+#endif
+#endif
+
+ if (a->wds < b->wds) {
+ c = a;
+ a = b;
+ b = c;
+ }
+ k = a->k;
+ wa = a->wds;
+ wb = b->wds;
+ wc = wa + wb;
+ if (wc > a->maxwds)
+ k++;
+ c = Balloc(k);
+ for(x = c->x, xa = x + wc; x < xa; x++)
+ *x = 0;
+ xa = a->x;
+ xae = xa + wa;
+ xb = b->x;
+ xbe = xb + wb;
+ xc0 = c->x;
+#ifdef ULLong
+ for(; xb < xbe; xc0++) {
+ if ( (y = *xb++) !=0) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ do {
+ z = *x++ * (ULLong)y + *xc + carry;
+ carry = z >> 32;
+ *xc++ = z & 0xffffffffUL;
+ }
+ while(x < xae);
+ *xc = carry;
+ }
+ }
+#else
+#ifdef Pack_32
+ for(; xb < xbe; xb++, xc0++) {
+ if ( (y = *xb & 0xffff) !=0) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ do {
+ z = (*x & 0xffff) * y + (*xc & 0xffff) + carry;
+ carry = z >> 16;
+ z2 = (*x++ >> 16) * y + (*xc >> 16) + carry;
+ carry = z2 >> 16;
+ Storeinc(xc, z2, z);
+ }
+ while(x < xae);
+ *xc = carry;
+ }
+ if ( (y = *xb >> 16) !=0) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ z2 = *xc;
+ do {
+ z = (*x & 0xffff) * y + (*xc >> 16) + carry;
+ carry = z >> 16;
+ Storeinc(xc, z, z2);
+ z2 = (*x++ >> 16) * y + (*xc & 0xffff) + carry;
+ carry = z2 >> 16;
+ }
+ while(x < xae);
+ *xc = z2;
+ }
+ }
+#else
+ for(; xb < xbe; xc0++) {
+ if ( (y = *xb++) !=0) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ do {
+ z = *x++ * y + *xc + carry;
+ carry = z >> 16;
+ *xc++ = z & 0xffff;
+ }
+ while(x < xae);
+ *xc = carry;
+ }
+ }
+#endif
+#endif
+ for(xc0 = c->x, xc = xc0 + wc; wc > 0 && !*--xc; --wc) ;
+ c->wds = wc;
+ return c;
+ }
+
+ static Bigint *p5s;
+
+ Bigint *
+pow5mult
+#ifdef KR_headers
+ (b, k) Bigint *b; int k;
+#else
+ (Bigint *b, int k)
+#endif
+{
+ Bigint *b1, *p5, *p51;
+ int i;
+ static int p05[3] = { 5, 25, 125 };
+
+ if ( (i = k & 3) !=0)
+ b = multadd(b, p05[i-1], 0);
+
+ if (!(k >>= 2))
+ return b;
+ if ((p5 = p5s) == 0) {
+ /* first time */
+#ifdef MULTIPLE_THREADS
+ ACQUIRE_DTOA_LOCK(1);
+ if (!(p5 = p5s)) {
+ p5 = p5s = i2b(625);
+ p5->next = 0;
+ }
+ FREE_DTOA_LOCK(1);
+#else
+ p5 = p5s = i2b(625);
+ p5->next = 0;
+#endif
+ }
+ for(;;) {
+ if (k & 1) {
+ b1 = mult(b, p5);
+ Bfree(b);
+ b = b1;
+ }
+ if (!(k >>= 1))
+ break;
+ if ((p51 = p5->next) == 0) {
+#ifdef MULTIPLE_THREADS
+ ACQUIRE_DTOA_LOCK(1);
+ if (!(p51 = p5->next)) {
+ p51 = p5->next = mult(p5,p5);
+ p51->next = 0;
+ }
+ FREE_DTOA_LOCK(1);
+#else
+ p51 = p5->next = mult(p5,p5);
+ p51->next = 0;
+#endif
+ }
+ p5 = p51;
+ }
+ return b;
+ }
+
+ Bigint *
+lshift
+#ifdef KR_headers
+ (b, k) Bigint *b; int k;
+#else
+ (Bigint *b, int k)
+#endif
+{
+ int i, k1, n, n1;
+ Bigint *b1;
+ ULong *x, *x1, *xe, z;
+
+ n = k >> kshift;
+ k1 = b->k;
+ n1 = n + b->wds + 1;
+ for(i = b->maxwds; n1 > i; i <<= 1)
+ k1++;
+ b1 = Balloc(k1);
+ x1 = b1->x;
+ for(i = 0; i < n; i++)
+ *x1++ = 0;
+ x = b->x;
+ xe = x + b->wds;
+ if (k &= kmask) {
+#ifdef Pack_32
+ k1 = 32 - k;
+ z = 0;
+ do {
+ *x1++ = *x << k | z;
+ z = *x++ >> k1;
+ }
+ while(x < xe);
+ if ((*x1 = z) !=0)
+ ++n1;
+#else
+ k1 = 16 - k;
+ z = 0;
+ do {
+ *x1++ = *x << k & 0xffff | z;
+ z = *x++ >> k1;
+ }
+ while(x < xe);
+ if (*x1 = z)
+ ++n1;
+#endif
+ }
+ else do
+ *x1++ = *x++;
+ while(x < xe);
+ b1->wds = n1 - 1;
+ Bfree(b);
+ return b1;
+ }
+
+ int
+cmp
+#ifdef KR_headers
+ (a, b) Bigint *a, *b;
+#else
+ (Bigint *a, Bigint *b)
+#endif
+{
+ ULong *xa, *xa0, *xb, *xb0;
+ int i, j;
+
+ i = a->wds;
+ j = b->wds;
+#ifdef DEBUG
+ if (i > 1 && !a->x[i-1])
+ Bug("cmp called with a->x[a->wds-1] == 0");
+ if (j > 1 && !b->x[j-1])
+ Bug("cmp called with b->x[b->wds-1] == 0");
+#endif
+ if (i -= j)
+ return i;
+ xa0 = a->x;
+ xa = xa0 + j;
+ xb0 = b->x;
+ xb = xb0 + j;
+ for(;;) {
+ if (*--xa != *--xb)
+ return *xa < *xb ? -1 : 1;
+ if (xa <= xa0)
+ break;
+ }
+ return 0;
+ }
+
+ Bigint *
+diff
+#ifdef KR_headers
+ (a, b) Bigint *a, *b;
+#else
+ (Bigint *a, Bigint *b)
+#endif
+{
+ Bigint *c;
+ int i, wa, wb;
+ ULong *xa, *xae, *xb, *xbe, *xc;
+#ifdef ULLong
+ ULLong borrow, y;
+#else
+ ULong borrow, y;
+#ifdef Pack_32
+ ULong z;
+#endif
+#endif
+
+ i = cmp(a,b);
+ if (!i) {
+ c = Balloc(0);
+ c->wds = 1;
+ c->x[0] = 0;
+ return c;
+ }
+ if (i < 0) {
+ c = a;
+ a = b;
+ b = c;
+ i = 1;
+ }
+ else
+ i = 0;
+ c = Balloc(a->k);
+ c->sign = i;
+ wa = a->wds;
+ xa = a->x;
+ xae = xa + wa;
+ wb = b->wds;
+ xb = b->x;
+ xbe = xb + wb;
+ xc = c->x;
+ borrow = 0;
+#ifdef ULLong
+ do {
+ y = (ULLong)*xa++ - *xb++ - borrow;
+ borrow = y >> 32 & 1UL;
+ *xc++ = y & 0xffffffffUL;
+ }
+ while(xb < xbe);
+ while(xa < xae) {
+ y = *xa++ - borrow;
+ borrow = y >> 32 & 1UL;
+ *xc++ = y & 0xffffffffUL;
+ }
+#else
+#ifdef Pack_32
+ do {
+ y = (*xa & 0xffff) - (*xb & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*xa++ >> 16) - (*xb++ >> 16) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(xc, z, y);
+ }
+ while(xb < xbe);
+ while(xa < xae) {
+ y = (*xa & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*xa++ >> 16) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(xc, z, y);
+ }
+#else
+ do {
+ y = *xa++ - *xb++ - borrow;
+ borrow = (y & 0x10000) >> 16;
+ *xc++ = y & 0xffff;
+ }
+ while(xb < xbe);
+ while(xa < xae) {
+ y = *xa++ - borrow;
+ borrow = (y & 0x10000) >> 16;
+ *xc++ = y & 0xffff;
+ }
+#endif
+#endif
+ while(!*--xc)
+ wa--;
+ c->wds = wa;
+ return c;
+ }
+
+ double
+b2d
+#ifdef KR_headers
+ (a, e) Bigint *a; int *e;
+#else
+ (Bigint *a, int *e)
+#endif
+{
+ ULong *xa, *xa0, w, y, z;
+ int k;
+ double d;
+#ifdef VAX
+ ULong d0, d1;
+#else
+#define d0 word0(d)
+#define d1 word1(d)
+#endif
+
+ xa0 = a->x;
+ xa = xa0 + a->wds;
+ y = *--xa;
+#ifdef DEBUG
+ if (!y) Bug("zero y in b2d");
+#endif
+ k = hi0bits(y);
+ *e = 32 - k;
+#ifdef Pack_32
+ if (k < Ebits) {
+ d0 = Exp_1 | y >> Ebits - k;
+ w = xa > xa0 ? *--xa : 0;
+ d1 = y << (32-Ebits) + k | w >> Ebits - k;
+ goto ret_d;
+ }
+ z = xa > xa0 ? *--xa : 0;
+ if (k -= Ebits) {
+ d0 = Exp_1 | y << k | z >> 32 - k;
+ y = xa > xa0 ? *--xa : 0;
+ d1 = z << k | y >> 32 - k;
+ }
+ else {
+ d0 = Exp_1 | y;
+ d1 = z;
+ }
+#else
+ if (k < Ebits + 16) {
+ z = xa > xa0 ? *--xa : 0;
+ d0 = Exp_1 | y << k - Ebits | z >> Ebits + 16 - k;
+ w = xa > xa0 ? *--xa : 0;
+ y = xa > xa0 ? *--xa : 0;
+ d1 = z << k + 16 - Ebits | w << k - Ebits | y >> 16 + Ebits - k;
+ goto ret_d;
+ }
+ z = xa > xa0 ? *--xa : 0;
+ w = xa > xa0 ? *--xa : 0;
+ k -= Ebits + 16;
+ d0 = Exp_1 | y << k + 16 | z << k | w >> 16 - k;
+ y = xa > xa0 ? *--xa : 0;
+ d1 = w << k + 16 | y << k;
+#endif
+ ret_d:
+#ifdef VAX
+ word0(d) = d0 >> 16 | d0 << 16;
+ word1(d) = d1 >> 16 | d1 << 16;
+#endif
+ return dval(d);
+ }
+#undef d0
+#undef d1
+
+ Bigint *
+d2b
+#ifdef KR_headers
+ (d, e, bits) double d; int *e, *bits;
+#else
+ (double d, int *e, int *bits)
+#endif
+{
+ Bigint *b;
+#ifndef Sudden_Underflow
+ int i;
+#endif
+ int de, k;
+ ULong *x, y, z;
+#ifdef VAX
+ ULong d0, d1;
+ d0 = word0(d) >> 16 | word0(d) << 16;
+ d1 = word1(d) >> 16 | word1(d) << 16;
+#else
+#define d0 word0(d)
+#define d1 word1(d)
+#endif
+
+#ifdef Pack_32
+ b = Balloc(1);
+#else
+ b = Balloc(2);
+#endif
+ x = b->x;
+
+ z = d0 & Frac_mask;
+ d0 &= 0x7fffffff; /* clear sign bit, which we ignore */
+#ifdef Sudden_Underflow
+ de = (int)(d0 >> Exp_shift);
+#ifndef IBM
+ z |= Exp_msk11;
+#endif
+#else
+ if ( (de = (int)(d0 >> Exp_shift)) !=0)
+ z |= Exp_msk1;
+#endif
+#ifdef Pack_32
+ if ( (y = d1) !=0) {
+ if ( (k = lo0bits(&y)) !=0) {
+ x[0] = y | z << 32 - k;
+ z >>= k;
+ }
+ else
+ x[0] = y;
+#ifndef Sudden_Underflow
+ i =
+#endif
+ b->wds = (x[1] = z) !=0 ? 2 : 1;
+ }
+ else {
+#ifdef DEBUG
+ if (!z)
+ Bug("Zero passed to d2b");
+#endif
+ k = lo0bits(&z);
+ x[0] = z;
+#ifndef Sudden_Underflow
+ i =
+#endif
+ b->wds = 1;
+ k += 32;
+ }
+#else
+ if ( (y = d1) !=0) {
+ if ( (k = lo0bits(&y)) !=0)
+ if (k >= 16) {
+ x[0] = y | z << 32 - k & 0xffff;
+ x[1] = z >> k - 16 & 0xffff;
+ x[2] = z >> k;
+ i = 2;
+ }
+ else {
+ x[0] = y & 0xffff;
+ x[1] = y >> 16 | z << 16 - k & 0xffff;
+ x[2] = z >> k & 0xffff;
+ x[3] = z >> k+16;
+ i = 3;
+ }
+ else {
+ x[0] = y & 0xffff;
+ x[1] = y >> 16;
+ x[2] = z & 0xffff;
+ x[3] = z >> 16;
+ i = 3;
+ }
+ }
+ else {
+#ifdef DEBUG
+ if (!z)
+ Bug("Zero passed to d2b");
+#endif
+ k = lo0bits(&z);
+ if (k >= 16) {
+ x[0] = z;
+ i = 0;
+ }
+ else {
+ x[0] = z & 0xffff;
+ x[1] = z >> 16;
+ i = 1;
+ }
+ k += 32;
+ }
+ while(!x[i])
+ --i;
+ b->wds = i + 1;
+#endif
+#ifndef Sudden_Underflow
+ if (de) {
+#endif
+#ifdef IBM
+ *e = (de - Bias - (P-1) << 2) + k;
+ *bits = 4*P + 8 - k - hi0bits(word0(d) & Frac_mask);
+#else
+ *e = de - Bias - (P-1) + k;
+ *bits = P - k;
+#endif
+#ifndef Sudden_Underflow
+ }
+ else {
+ *e = de - Bias - (P-1) + 1 + k;
+#ifdef Pack_32
+ *bits = 32*i - hi0bits(x[i-1]);
+#else
+ *bits = (i+2)*16 - hi0bits(x[i]);
+#endif
+ }
+#endif
+ return b;
+ }
+#undef d0
+#undef d1
+
+ CONST double
+#ifdef IEEE_Arith
+bigtens[] = { 1e16, 1e32, 1e64, 1e128, 1e256 };
+CONST double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128, 1e-256
+ };
+#else
+#ifdef IBM
+bigtens[] = { 1e16, 1e32, 1e64 };
+CONST double tinytens[] = { 1e-16, 1e-32, 1e-64 };
+#else
+bigtens[] = { 1e16, 1e32 };
+CONST double tinytens[] = { 1e-16, 1e-32 };
+#endif
+#endif
+
+ CONST double
+tens[] = {
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ 1e20, 1e21, 1e22
+#ifdef VAX
+ , 1e23, 1e24
+#endif
+ };
+
+ char *
+#ifdef KR_headers
+strcp_D2A(a, b) char *a; char *b;
+#else
+strcp_D2A(char *a, CONST char *b)
+#endif
+{
+ while(*a = *b++)
+ a++;
+ return a;
+ }
+
+#ifdef NO_STRING_H
+
+ Char *
+#ifdef KR_headers
+memcpy_D2A(a, b, len) Char *a; Char *b; size_t len;
+#else
+memcpy_D2A(void *a1, void *b1, size_t len)
+#endif
+{
+ register char *a = (char*)a1, *ae = a + len;
+ register char *b = (char*)b1, *a0 = a;
+ while(a < ae)
+ *a++ = *b++;
+ return a0;
+ }
+
+#endif /* NO_STRING_H */
#ifndef NO_IEEE_Scale
#define Avoid_Underflow
#undef tinytens
-/* The factor of 2^53 in tinytens[4] helps us avoid setting the underflow */
+/* The factor of 2^106 in tinytens[4] helps us avoid setting the underflow */
/* flag unnecessarily. It leads to a song and dance at the end of strtod. */
static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128,
- 9007199254740992.e-256
+ 9007199254740992.*9007199254740992.e-256
};
#endif
#endif
#ifdef Honor_FLT_ROUNDS
-#define Rounding rounding
#undef Check_FLT_ROUNDS
#define Check_FLT_ROUNDS
#else
#ifdef SET_INEXACT
int inexact, oldinexact;
#endif
-#ifdef Honor_FLT_ROUNDS
- int rounding = Flt_Rounds;
-#endif
#ifdef USE_LOCALE
- char *decimal_point;
- int decimal_point_len;
-#endif /* USE_LOCALE */
+ NORMALIZE_LOCALE(loc);
+#ifdef NO_LOCALE_CACHE
+ char *decimalpoint = localeconv_l(loc)->decimal_point;
+#else
+ char *decimalpoint;
+ static char *decimalpoint_cache;
+ if (!(s0 = decimalpoint_cache)) {
+ s0 = localeconv_l(loc)->decimal_point;
+ if ((decimalpoint_cache = (char*)malloc(strlen(s0) + 1))) {
+ strcpy(decimalpoint_cache, s0);
+ s0 = decimalpoint_cache;
+ }
+ }
+ decimalpoint = (char*)s0;
+#endif
+#endif
+#ifdef Honor_FLT_ROUNDS /*{*/
+ int Rounding;
+#ifdef Trust_FLT_ROUNDS /*{{ only define this if FLT_ROUNDS really works! */
+ Rounding = Flt_Rounds;
+#else /*}{*/
+ Rounding = 1;
+ switch(fegetround()) {
+ case FE_TOWARDZERO: Rounding = 0; break;
+ case FE_UPWARD: Rounding = 2; break;
+ case FE_DOWNWARD: Rounding = 3;
+ }
+#endif /*}}*/
+#endif /*}*/
sign = nz0 = nz = decpt = 0;
dval(rv) = 0.;
}
break2:
if (*s == '0') {
-#ifndef NO_HEX_FP
+#ifndef NO_HEX_FP /*{*/
{
static FPI fpi = { 53, 1-1023-53+1, 2046-1023-53+1, 1, SI };
Long exp;
case 'x':
case 'X':
{
-#if defined(FE_DOWNWARD) && defined(FE_TONEAREST) && defined(FE_TOWARDZERO) && defined(FE_UPWARD)
+#if defined(FE_DOWNWARD) && defined(FE_TONEAREST) && defined(FE_TOWARDZERO) && defined(FE_UPWARD) /*{{*/
FPI fpi1 = fpi;
+#ifdef Honor_FLT_ROUNDS /*{{*/
+ fpi1.rounding = Rounding;
+#else /*}{*/
switch(fegetround()) {
case FE_TOWARDZERO: fpi1.rounding = 0; break;
case FE_UPWARD: fpi1.rounding = 2; break;
case FE_DOWNWARD: fpi1.rounding = 3;
}
-#else
+#endif /*}}*/
+#else /*}{*/
#define fpi1 fpi
-#endif
+#endif /*}}*/
switch((i = gethex(&s, &fpi1, &exp, &bb, sign, loc)) & STRTOG_Retmask) {
case STRTOG_NoNumber:
s = s00;
goto ret;
}
}
-#endif
+#endif /*}*/
nz0 = 1;
while(*++s == '0') ;
if (!*s)
else if (nd < 16)
z = 10*z + c - '0';
nd0 = nd;
- NORMALIZE_LOCALE(loc);
#ifdef USE_LOCALE
- decimal_point = localeconv_l(loc)->decimal_point;
- decimal_point_len = strlen(decimal_point);
- if (strncmp(s, decimal_point, decimal_point_len) == 0)
-#else
- if (c == '.')
-#endif
- {
- decpt = 1;
-#ifdef USE_LOCALE
- s += decimal_point_len;
+ if (c == *decimalpoint) {
+ for(i = 1; decimalpoint[i]; ++i)
+ if (s[i] != decimalpoint[i])
+ goto dig_done;
+ s += i;
c = *s;
#else
+ if (c == '.') {
c = *++s;
#endif
+ decpt = 1;
if (!nd) {
for(; c == '0'; c = *++s)
nz++;
nz = 0;
}
}
- }
+ }/*}*/
dig_done:
e = 0;
if (c == 'e' || c == 'E') {
scale = 0;
#endif
#ifdef Honor_FLT_ROUNDS
- if (rounding >= 2) {
+ if (Rounding >= 2) {
if (sign)
- rounding = rounding == 2 ? 0 : 2;
+ Rounding = Rounding == 2 ? 0 : 2;
else
- if (rounding != 2)
- rounding = 0;
+ if (Rounding != 2)
+ Rounding = 0;
}
#endif
#endif /*IEEE_Arith*/
/* Can't trust HUGE_VAL */
#ifdef IEEE_Arith
#ifdef Honor_FLT_ROUNDS
- switch(rounding) {
+ switch(Rounding) {
case 0: /* toward 0 */
case 3: /* toward -infinity */
word0(rv) = Big0;
/* Put digits into bd: true value = bd * 10^e */
#ifdef USE_LOCALE
- bd0 = s2b(s0, nd0, nd, y, decimal_point_len);
+ bd0 = s2b(s0, nd0, nd, y, strlen(decimalpoint));
#else
bd0 = s2b(s0, nd0, nd, y, 1);
#endif
bd2 -= bbe;
bs2 = bb2;
#ifdef Honor_FLT_ROUNDS
- if (rounding != 1)
+ if (Rounding != 1)
bs2++;
#endif
#ifdef Avoid_Underflow
delta->sign = 0;
i = cmp(delta, bs);
#ifdef Honor_FLT_ROUNDS
- if (rounding != 1) {
+ if (Rounding != 1) {
if (i < 0) {
/* Error is less than an ulp */
if (!delta->x[0] && delta->wds <= 1) {
#endif
break;
}
- if (rounding) {
+ if (Rounding) {
if (dsign) {
adj = 1.;
goto apply_adj;
if (adj < 1.)
adj = 1.;
if (adj <= 0x7ffffffe) {
- /* adj = rounding ? ceil(adj) : floor(adj); */
+ /* adj = Rounding ? ceil(adj) : floor(adj); */
y = adj;
if (y != adj) {
- if (!((rounding>>1) ^ dsign))
+ if (!((Rounding>>1) ^ dsign))
y++;
adj = y;
}
#endif /*Sudden_Underflow*/
#endif /*Avoid_Underflow*/
adj *= ulp(dval(rv));
- if (dsign)
+ if (dsign) {
+ if (word0(rv) == Big0 && word1(rv) == Big1)
+ goto ovfl;
dval(rv) += adj;
+ }
else
dval(rv) -= adj;
goto cont;
}
#endif /*Avoid_Underflow*/
L = (word0(rv) & Exp_mask) - Exp_msk1;
-#endif /*Sudden_Underflow}*/
+#endif /*Sudden_Underflow}}*/
word0(rv) = L | Bndry_mask1;
word1(rv) = 0xffffffff;
#ifdef IBM
dval(rv) *= dval(rv0);
#ifndef NO_ERRNO
/* try to avoid the bug of testing an 8087 register value */
-#if __DARWIN_UNIX03
- if (word0(rv) == 0 && word1(rv) == 0 || dval(rv) < DBL_MIN)
-#else /* !__DARWIN_UNIX03 */
+#if defined(IEEE_Arith) && __DARWIN_UNIX03
+ if (!(word0(rv) & Exp_mask))
+#else
if (word0(rv) == 0 && word1(rv) == 0)
-#endif /* __DARWIN_UNIX03 */
+#endif
errno = ERANGE;
#endif
}
return b;
}
- int
+ void
#ifdef KR_headers
decrement(b) Bigint *b;
#else
*x++ = y & 0xffff;
} while(borrow && x < xe);
#endif
- return STRTOG_Inexlo;
}
__private_extern__ int
goto ret;
}
switch(rd) {
- case 1:
+ case 1: /* round down (toward -Infinity) */
goto trunc;
- case 2:
+ case 2: /* round up (toward +Infinity) */
break;
default: /* round near */
k = bdif - 1;
CONST char *s, *s0, *s1;
double adj, adj0, rv, tol;
Long L;
- ULong y, z;
+ ULong *b, *be, y, z;
Bigint *ab, *bb, *bb1, *bd, *bd0, *bs, *delta, *rvb, *rvb0;
#ifdef USE_LOCALE
- char *decimal_point;
- int decimal_point_len;
-#endif /* USE_LOCALE */
+ NORMALIZE_LOCALE(loc)
+#ifdef NO_LOCALE_CACHE
+ char *decimalpoint = localeconv_l(loc)->decimal_point;
+#else
+ char *decimalpoint;
+ static char *decimalpoint_cache;
+ if (!(s0 = decimalpoint_cache)) {
+ s0 = localeconv_l(loc)->decimal_point;
+ if ((decimalpoint_cache = (char*)malloc(strlen(s0) + 1))) {
+ strcpy(decimalpoint_cache, s0);
+ s0 = decimalpoint_cache;
+ }
+ }
+ decimalpoint = (char*)s0;
+#endif
+#endif
irv = STRTOG_Zero;
denorm = sign = nz0 = nz = 0;
else if (nd < 16)
z = 10*z + c - '0';
nd0 = nd;
- NORMALIZE_LOCALE(loc);
#ifdef USE_LOCALE
- decimal_point = localeconv_l(loc)->decimal_point;
- decimal_point_len = strlen(decimal_point);
- if (strncmp(s, decimal_point, decimal_point_len) == 0)
-#else
- if (c == '.')
-#endif
- {
- decpt = 1;
-#ifdef USE_LOCALE
- s += decimal_point_len;
+ if (c == *decimalpoint) {
+ for(i = 1; decimalpoint[i]; ++i)
+ if (s[i] != decimalpoint[i])
+ goto dig_done;
+ s += i;
c = *s;
#else
+ if (c == '.') {
c = *++s;
#endif
+ decpt = 1;
if (!nd) {
for(; c == '0'; c = *++s)
nz++;
nz = 0;
}
}
- }
+ }/*}*/
dig_done:
e = 0;
if (c == 'e' || c == 'E') {
rvb->x[0] = 0;
*exp = emin;
irv = STRTOG_Underflow | STRTOG_Inexlo;
-#ifndef NO_ERRNO
+/* When __DARWIN_UNIX03 is set, we don't need this (errno is set later) */
+#if !defined(NO_ERRNO) && !__DARWIN_UNIX03
errno = ERANGE;
#endif
goto ret;
/* Put digits into bd: true value = bd * 10^e */
#ifdef USE_LOCALE
- bd0 = s2b(s0, nd0, nd, y, decimal_point_len);
+ bd0 = s2b(s0, nd0, nd, y, strlen(decimalpoint));
#else
bd0 = s2b(s0, nd0, nd, y, 1);
#endif
break;
if (dsign) {
rvb = increment(rvb);
- if ( (j = rvbits & kmask) !=0)
- j = ULbits - j;
- if (hi0bits(rvb->x[rvb->wds - 1])
- != j)
+ j = kmask & (ULbits - (rvbits & kmask));
+ if (hi0bits(rvb->x[rvb->wds - 1]) != j)
rvbits++;
irv = STRTOG_Normal | STRTOG_Inexhi;
}
Bfree(bd0);
Bfree(delta);
if (rve > fpi->emax) {
+ switch(fpi->rounding & 3) {
+ case FPI_Round_near:
+ goto huge;
+ case FPI_Round_up:
+ if (!sign)
+ goto huge;
+ break;
+ case FPI_Round_down:
+ if (sign)
+ goto huge;
+ }
+ /* Round to largest representable magnitude */
+ Bfree(rvb);
+ rvb = 0;
+ irv = STRTOG_Normal | STRTOG_Inexlo;
+ *exp = fpi->emax;
+ b = bits;
+ be = b + ((fpi->nbits + 31) >> 5);
+ while(b < be)
+ *b++ = -1;
+ if ((j = fpi->nbits & 0x1f))
+ *--be >>= (32 - j);
+ goto ret;
huge:
rvb->wds = 0;
irv = STRTOG_Infinite | STRTOG_Overflow | STRTOG_Inexhi;
if (sudden_underflow) {
rvb->wds = 0;
irv = STRTOG_Underflow | STRTOG_Inexlo;
+#if !defined(NO_ERRNO) && __DARWIN_UNIX03
+ errno = ERANGE;
+#endif
}
else {
irv = (irv & ~STRTOG_Retmask) |
(rvb->wds > 0 ? STRTOG_Denormal : STRTOG_Zero);
- if (irv & STRTOG_Inexact)
+ if (irv & STRTOG_Inexact) {
irv |= STRTOG_Underflow;
+#if !defined(NO_ERRNO) && __DARWIN_UNIX03
+ errno = ERANGE;
+#endif
+ }
}
}
if (se)
copybits(bits, nbits, rvb);
Bfree(rvb);
}
-#if !defined(NO_ERRNO) && __DARWIN_UNIX03
- if (irv & STRTOG_Underflow)
- errno = ERANGE;
-#endif
return irv;
}
Long exp;
int k;
union { ULong L[1]; float f; } u;
- FPI *fpi = &fpi0, fpi1;
#ifdef Honor_FLT_ROUNDS
- int rounding = Flt_Rounds;
+#include "gdtoa_fltrnds.h"
+#else
+#define fpi &fpi0
#endif
NORMALIZE_LOCALE(loc);
-#ifdef Honor_FLT_ROUNDS
- if (rounding != fpi0.rounding) {
- fpi1 = fpi0; /* for thread safety */
- fpi1.rounding = rounding;
- fpi = &fpi1;
- }
-#endif /* Honor_FLT_ROUNDS */
k = strtodg(s, sp, fpi, &exp, bits, loc);
switch(k & STRTOG_Retmask) {
case STRTOG_NoNumber:
int
#ifdef KR_headers
-strtopdd(s, sp, dd) CONST char *s; char **sp; double *dd; locale_t loc;
+strtopdd(s, sp, dd, loc) CONST char *s; char **sp; double *dd; locale_t loc;
#else
strtopdd(CONST char *s, char **sp, double *dd, locale_t loc)
#endif
#endif /* __APPLE__ */
} U;
U *u;
- FPI *fpi = &fpi0, fpi1;
#ifdef Honor_FLT_ROUNDS
- int rounding = Flt_Rounds;
+#include "gdtoa_fltrnds.h"
+#else
+#define fpi &fpi0
#endif
-#ifdef Honor_FLT_ROUNDS
- if (rounding != fpi0.rounding) {
- fpi1 = fpi0; /* for thread safety */
- fpi1.rounding = rounding;
- fpi = &fpi1;
- }
-#endif /* Honor_FLT_ROUNDS */
rv = strtodg(s, sp, fpi, &exp, bits, loc);
u = (U*)dd;
switch(rv & STRTOG_Retmask) {
u->L[0] = u->L[2] = d_QNAN0;
u->L[1] = u->L[3] = d_QNAN1;
#endif /* __APPLE__ */
+ break;
#ifdef __APPLE__
case STRTOG_NaNbits:
u->L[0] = d_QNAN0 | ((bits[2] >> 20 | bits[3] << 12) & 0xfffff);
--- /dev/null
+/****************************************************************
+
+The author of this software is David M. Gay.
+
+Copyright (C) 1998, 2000 by Lucent Technologies
+All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appear in all
+copies and that both that the copyright notice and this
+permission notice and warranty disclaimer appear in supporting
+documentation, and that the name of Lucent or any of its entities
+not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+LUCENT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
+IN NO EVENT SHALL LUCENT OR ANY OF ITS ENTITIES BE LIABLE FOR ANY
+SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
+IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
+ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
+THIS SOFTWARE.
+
+****************************************************************/
+
+/* Please send bug reports to David M. Gay (dmg at acm dot org,
+ * with " at " changed at "@" and " dot " changed to "."). */
+
+#include "xlocale_private.h"
+
+#include "gdtoaimp.h"
+
+#undef _0
+#undef _1
+
+/* one or the other of IEEE_MC68k or IEEE_8087 should be #defined */
+
+#ifdef IEEE_MC68k
+#define _0 0
+#define _1 1
+#define _2 2
+#define _3 3
+#define _4 4
+#endif
+#ifdef IEEE_8087
+#define _0 4
+#define _1 3
+#define _2 2
+#define _3 1
+#define _4 0
+#endif
+
+ int
+#ifdef KR_headers
+strtopx(s, sp, V, loc) CONST char *s; char **sp; void *V; locale_t loc;
+#else
+strtopx(CONST char *s, char **sp, void *V, locale_t loc)
+#endif
+{
+ static FPI fpi0 = { 64, 1-16383-64+1, 32766 - 16383 - 64 + 1, 1, SI };
+ ULong bits[2];
+ Long exp;
+ int k;
+ UShort *L = (UShort*)V;
+#ifdef Honor_FLT_ROUNDS
+#include "gdtoa_fltrnds.h"
+#else
+#define fpi &fpi0
+#endif
+
+ k = strtodg(s, sp, fpi, &exp, bits, loc);
+ switch(k & STRTOG_Retmask) {
+ case STRTOG_NoNumber:
+ L[0] = L[1] = L[2] = L[3] = L[4] = 0;
+ return k; // avoid setting sign
+
+ case STRTOG_Zero:
+ L[0] = L[1] = L[2] = L[3] = L[4] = 0;
+ break;
+
+ case STRTOG_Denormal:
+ L[_0] = 0;
+ goto normal_bits;
+
+ case STRTOG_Normal:
+ case STRTOG_NaNbits:
+ L[_0] = exp + 0x3fff + 63;
+ normal_bits:
+ L[_4] = (UShort)bits[0];
+ L[_3] = (UShort)(bits[0] >> 16);
+ L[_2] = (UShort)bits[1];
+ L[_1] = (UShort)(bits[1] >> 16);
+ break;
+
+ case STRTOG_Infinite:
+ L[_0] = 0x7fff;
+ L[_1] = 0x8000; /* 4306392: to match gcc */
+ L[_2] = L[_3] = L[_4] = 0;
+ break;
+
+ case STRTOG_NaN:
+ L[0] = ldus_QNAN0;
+ L[1] = ldus_QNAN1;
+ L[2] = ldus_QNAN2;
+ L[3] = ldus_QNAN3;
+ L[4] = ldus_QNAN4;
+ }
+ if (k & STRTOG_Neg)
+ L[_0] |= 0x8000;
+ return k;
+ }
+++ /dev/null
-./gdtoa.h
\ No newline at end of file
--- /dev/null
+/****************************************************************
+
+The author of this software is David M. Gay.
+
+Copyright (C) 1998 by Lucent Technologies
+All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appear in all
+copies and that both that the copyright notice and this
+permission notice and warranty disclaimer appear in supporting
+documentation, and that the name of Lucent or any of its entities
+not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+LUCENT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
+IN NO EVENT SHALL LUCENT OR ANY OF ITS ENTITIES BE LIABLE FOR ANY
+SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
+IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
+ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
+THIS SOFTWARE.
+
+****************************************************************/
+
+/* Please send bug reports to David M. Gay (dmg at acm dot org,
+ * with " at " changed at "@" and " dot " changed to "."). */
+
+#ifndef GDTOA_H_INCLUDED
+#define GDTOA_H_INCLUDED
+
+#include "arith.h"
+#include <stddef.h> /* for size_t */
+
+#ifndef Long
+#define Long long
+#endif
+#ifndef ULong
+typedef unsigned Long ULong;
+#endif
+#ifndef UShort
+typedef unsigned short UShort;
+#endif
+
+#ifndef ANSI
+#ifdef KR_headers
+#define ANSI(x) ()
+#define Void /*nothing*/
+#else
+#define ANSI(x) x
+#define Void void
+#endif
+#endif /* ANSI */
+
+#ifndef CONST
+#ifdef KR_headers
+#define CONST /* blank */
+#else
+#define CONST const
+#endif
+#endif /* CONST */
+
+ enum { /* return values from strtodg */
+ STRTOG_Zero = 0,
+ STRTOG_Normal = 1,
+ STRTOG_Denormal = 2,
+ STRTOG_Infinite = 3,
+ STRTOG_NaN = 4,
+ STRTOG_NaNbits = 5,
+ STRTOG_NoNumber = 6,
+ STRTOG_Retmask = 7,
+
+ /* The following may be or-ed into one of the above values. */
+
+ STRTOG_Neg = 0x08, /* does not affect STRTOG_Inexlo or STRTOG_Inexhi */
+ STRTOG_Inexlo = 0x10, /* returned result rounded toward zero */
+ STRTOG_Inexhi = 0x20, /* returned result rounded away from zero */
+ STRTOG_Inexact = 0x30,
+ STRTOG_Underflow= 0x40,
+ STRTOG_Overflow = 0x80
+ };
+
+ typedef struct
+FPI {
+ int nbits;
+ int emin;
+ int emax;
+ int rounding;
+ int sudden_underflow;
+ } FPI;
+
+enum { /* FPI.rounding values: same as FLT_ROUNDS */
+ FPI_Round_zero = 0,
+ FPI_Round_near = 1,
+ FPI_Round_up = 2,
+ FPI_Round_down = 3
+ };
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern char* dtoa ANSI((double d, int mode, int ndigits, int *decpt,
+ int *sign, char **rve));
+extern char* gdtoa ANSI((FPI *fpi, int be, ULong *bits, int *kindp,
+ int mode, int ndigits, int *decpt, char **rve));
+extern void freedtoa ANSI((char*));
+extern float strtof ANSI((CONST char *, char **));
+extern double strtod ANSI((CONST char *, char **));
+extern int strtodg ANSI((CONST char*, char**, FPI*, Long*, ULong*, locale_t)) __DARWIN_ALIAS(strtodg);
+
+extern char* g_ddfmt ANSI((char*, double*, int, size_t));
+extern char* g_dfmt ANSI((char*, double*, int, size_t));
+extern char* g_ffmt ANSI((char*, float*, int, size_t));
+extern char* g_Qfmt ANSI((char*, void*, int, size_t));
+extern char* g_xfmt ANSI((char*, void*, int, size_t));
+extern char* g_xLfmt ANSI((char*, void*, int, size_t));
+
+extern int strtoId ANSI((CONST char*, char**, double*, double*));
+extern int strtoIdd ANSI((CONST char*, char**, double*, double*));
+extern int strtoIf ANSI((CONST char*, char**, float*, float*));
+extern int strtoIQ ANSI((CONST char*, char**, void*, void*));
+extern int strtoIx ANSI((CONST char*, char**, void*, void*));
+extern int strtoIxL ANSI((CONST char*, char**, void*, void*));
+extern int strtord ANSI((CONST char*, char**, int, double*));
+extern int strtordd ANSI((CONST char*, char**, int, double*));
+extern int strtorf ANSI((CONST char*, char**, int, float*));
+extern int strtorQ ANSI((CONST char*, char**, int, void*));
+extern int strtorx ANSI((CONST char*, char**, int, void*));
+extern int strtorxL ANSI((CONST char*, char**, int, void*));
+#if 1
+extern int strtodI ANSI((CONST char*, char**, double*));
+extern int strtopd ANSI((CONST char*, char**, double*));
+extern int strtopdd ANSI((CONST char*, char**, double*, locale_t));
+extern int strtopf ANSI((CONST char*, char**, float*));
+extern int strtopQ ANSI((CONST char*, char**, void*));
+extern int strtopx ANSI((CONST char*, char**, void*, locale_t));
+extern int strtopxL ANSI((CONST char*, char**, void*));
+#else
+#define strtopd(s,se,x) strtord(s,se,1,x)
+#define strtopdd(s,se,x) strtordd(s,se,1,x)
+#define strtopf(s,se,x) strtorf(s,se,1,x)
+#define strtopQ(s,se,x) strtorQ(s,se,1,x)
+#define strtopx(s,se,x) strtorx(s,se,1,x)
+#define strtopxL(s,se,x) strtorxL(s,se,1,x)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* GDTOA_H_INCLUDED */
--- /dev/null
+./gdtoa_fltrnds.h
\ No newline at end of file
+++ /dev/null
-/****************************************************************
-
-The author of this software is David M. Gay.
-
-Copyright (C) 1998, 2000 by Lucent Technologies
-All Rights Reserved
-
-Permission to use, copy, modify, and distribute this software and
-its documentation for any purpose and without fee is hereby
-granted, provided that the above copyright notice appear in all
-copies and that both that the copyright notice and this
-permission notice and warranty disclaimer appear in supporting
-documentation, and that the name of Lucent or any of its entities
-not be used in advertising or publicity pertaining to
-distribution of the software without specific, written prior
-permission.
-
-LUCENT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
-IN NO EVENT SHALL LUCENT OR ANY OF ITS ENTITIES BE LIABLE FOR ANY
-SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
-IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
-ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
-THIS SOFTWARE.
-
-****************************************************************/
-
-/* Please send bug reports to David M. Gay (dmg at acm dot org,
- * with " at " changed at "@" and " dot " changed to "."). */
-
-#include "xlocale_private.h"
-
-#include "gdtoaimp.h"
-
-#undef _0
-#undef _1
-
-/* one or the other of IEEE_MC68k or IEEE_8087 should be #defined */
-
-#ifdef IEEE_MC68k
-#define _0 0
-#define _1 1
-#define _2 2
-#define _3 3
-#define _4 4
-#endif
-#ifdef IEEE_8087
-#define _0 4
-#define _1 3
-#define _2 2
-#define _3 1
-#define _4 0
-#endif
-
- int
-#ifdef KR_headers
-strtopx(s, sp, V, loc) CONST char *s; char **sp; void *V; locale_t loc;
-#else
-strtopx(CONST char *s, char **sp, void *V, locale_t loc)
-#endif
-{
- static FPI fpi0 = { 64, 1-16383-64+1, 32766 - 16383 - 64 + 1, 1, SI };
- ULong bits[2];
- Long exp;
- int k;
- UShort *L = (UShort*)V;
- FPI *fpi = &fpi0, fpi1;
-#ifdef Honor_FLT_ROUNDS
- int rounding = Flt_Rounds;
-#endif
-
-#ifdef Honor_FLT_ROUNDS
- if (rounding != fpi0.rounding) {
- fpi1 = fpi0; /* for thread safety */
- fpi1.rounding = rounding;
- fpi = &fpi1;
- }
-#endif /* Honor_FLT_ROUNDS */
- k = strtodg(s, sp, fpi, &exp, bits, loc);
- switch(k & STRTOG_Retmask) {
- case STRTOG_NoNumber:
- L[0] = L[1] = L[2] = L[3] = L[4] = 0;
- return k; // avoid setting sign
-
- case STRTOG_Zero:
- L[0] = L[1] = L[2] = L[3] = L[4] = 0;
- break;
-
- case STRTOG_Denormal:
- L[_0] = 0;
- goto normal_bits;
-
- case STRTOG_Normal:
- case STRTOG_NaNbits:
- L[_0] = exp + 0x3fff + 63;
- normal_bits:
- L[_4] = (UShort)bits[0];
- L[_3] = (UShort)(bits[0] >> 16);
- L[_2] = (UShort)bits[1];
- L[_1] = (UShort)(bits[1] >> 16);
- break;
-
- case STRTOG_Infinite:
- L[_0] = 0x7fff;
- L[_1] = 0x8000; /* 4306392: to match gcc */
- L[_2] = L[_3] = L[_4] = 0;
- break;
-
- case STRTOG_NaN:
- L[0] = ldus_QNAN0;
- L[1] = ldus_QNAN1;
- L[2] = ldus_QNAN2;
- L[3] = ldus_QNAN3;
- L[4] = ldus_QNAN4;
- }
- if (k & STRTOG_Neg)
- L[_0] |= 0x8000;
- return k;
- }
* Infinity and NaN (case insensitively).
* When INFNAN_CHECK is #defined and No_Hex_NaN is not #defined,
* strtodg also accepts (case insensitively) strings of the form
- * NaN(x), where x is a string of hexadecimal digits and spaces;
- * if there is only one string of hexadecimal digits, it is taken
- * for the fraction bits of the resulting NaN; if there are two or
- * more strings of hexadecimal digits, each string is assigned
- * to the next available sequence of 32-bit words of fractions
- * bits (starting with the most significant), right-aligned in
- * each sequence.
+ * NaN(x), where x is a string of hexadecimal digits (optionally
+ * preceded by 0x or 0X) and spaces; if there is only one string
+ * of hexadecimal digits, it is taken for the fraction bits of the
+ * resulting NaN; if there are two or more strings of hexadecimal
+ * digits, each string is assigned to the next available sequence
+ * of 32-bit words of fractions bits (starting with the most
+ * significant), right-aligned in each sequence.
+ * Unless GDTOA_NON_PEDANTIC_NANCHECK is #defined, input "NaN(...)"
+ * is consumed even when ... has the wrong form (in which case the
+ * "(...)" is consumed but ignored).
* #define MULTIPLE_THREADS if the system offers preemptively scheduled
* multiple threads. In this case, you must provide (or suitably
* #define) two locks, acquired by ACQUIRE_DTOA_LOCK(n) and freed
* dtoa. You may do so whether or not MULTIPLE_THREADS is #defined.
* #define IMPRECISE_INEXACT if you do not care about the setting of
* the STRTOG_Inexact bits in the special case of doing IEEE double
- * precision conversions (which could also be done by the strtog in
+ * precision conversions (which could also be done by the strtod in
* dtoa.c).
* #define NO_HEX_FP to disable recognition of C9x's hexadecimal
* floating-point constants.
#ifndef GDTOAIMP_H_INCLUDED
#define GDTOAIMP_H_INCLUDED
+/*
+ * Paranoia: Protect exported symbols, including ones in files we don't
+ * compile right now. The standard strtof and strtod survive.
+ */
+#define dtoa __dtoa
+#define gdtoa __gdtoa
+#define freedtoa __freedtoa
+#define strtodg __strtodg
+#define g_ddfmt __g_ddfmt
+#define g_dfmt __g_dfmt
+#define g_ffmt __g_ffmt
+#define g_Qfmt __g_Qfmt
+#define g_xfmt __g_xfmt
+#define g_xLfmt __g_xLfmt
+#define strtoId __strtoId
+#define strtoIdd __strtoIdd
+#define strtoIf __strtoIf
+#define strtoIQ __strtoIQ
+#define strtoIx __strtoIx
+#define strtoIxL __strtoIxL
+#define strtord __strtord
+#define strtordd __strtordd
+#define strtorf __strtorf
+#define strtorQ __strtorQ
+#define strtorx __strtorx
+#define strtorxL __strtorxL
+#define strtodI __strtodI
+#define strtopd __strtopd
+#define strtopdd __strtopdd
+#define strtopf __strtopf
+#define strtopQ __strtopQ
+#define strtopx __strtopx
+#define strtopxL __strtopxL
+
+/* Protect gdtoa-internal symbols */
+#define Balloc __Balloc_D2A
+#define Bfree __Bfree_D2A
+#define ULtoQ __ULtoQ_D2A
+#define ULtof __ULtof_D2A
+#define ULtod __ULtod_D2A
+#define ULtodd __ULtodd_D2A
+#define ULtox __ULtox_D2A
+#define ULtoxL __ULtoxL_D2A
+#define any_on __any_on_D2A
+#define b2d __b2d_D2A
+#define bigtens __bigtens_D2A
+#define cmp __cmp_D2A
+#define copybits __copybits_D2A
+#define d2b __d2b_D2A
+#define decrement __decrement_D2A
+#define diff __diff_D2A
+#define dtoa_result __dtoa_result_D2A
+#define g__fmt __g__fmt_D2A
+#define gethex __gethex_D2A
+#define hexdig __hexdig_D2A
+#define hexdig_init_D2A __hexdig_init_D2A
+#define hexnan __hexnan_D2A
+#define hi0bits __hi0bits_D2A
+#define hi0bits_D2A __hi0bits_D2A
+#define i2b __i2b_D2A
+#define increment __increment_D2A
+#define lo0bits __lo0bits_D2A
+#define lshift __lshift_D2A
+#define match __match_D2A
+#define mult __mult_D2A
+#define multadd __multadd_D2A
+#define nrv_alloc __nrv_alloc_D2A
+#define pow5mult __pow5mult_D2A
+#define quorem __quorem_D2A
+#define ratio __ratio_D2A
+#define rshift __rshift_D2A
+#define rv_alloc __rv_alloc_D2A
+#define s2b __s2b_D2A
+#define set_ones __set_ones_D2A
+#define strcp __strcp_D2A
+#define strcp_D2A __strcp_D2A
+#define strtoIg __strtoIg_D2A
+#define sum __sum_D2A
+#define tens __tens_D2A
+#define tinytens __tinytens_D2A
+#define tinytens __tinytens_D2A
+#define trailz __trailz_D2A
+#define ulp __ulp_D2A
+
#include <xlocale.h>
#include "gdtoa.h"
#include "gd_qnan.h"
+#ifdef Honor_FLT_ROUNDS
+#include <fenv.h>
+#endif
#ifdef DEBUG
#include "stdio.h"
#define INFNAN_CHECK
#define USE_LOCALE
+#define NO_LOCALE_CACHE
#undef IEEE_Arith
#undef Avoid_Underflow
#define MULTIPLE_THREADS
extern spinlock_t __gdtoa_locks[2];
-#define ACQUIRE_DTOA_LOCK(n) do { \
- if (__isthreaded) \
- _SPINLOCK(&__gdtoa_locks[n]); \
+#define ACQUIRE_DTOA_LOCK(n) do { \
+ if (__isthreaded) _SPINLOCK(&__gdtoa_locks[n]); \
} while(0)
-#define FREE_DTOA_LOCK(n) do { \
- if (__isthreaded) \
- _SPINUNLOCK(&__gdtoa_locks[n]); \
+#define FREE_DTOA_LOCK(n) do { \
+ if (__isthreaded) _SPINUNLOCK(&__gdtoa_locks[n]); \
} while(0)
#define Kmax 15
#define Bcopy(x,y) memcpy(&x->sign,&y->sign,y->wds*sizeof(ULong) + 2*sizeof(int))
#endif /* NO_STRING_H */
-/*
- * Paranoia: Protect exported symbols, including ones in files we don't
- * compile right now. The standard strtof and strtod survive.
- */
-#define dtoa __dtoa
-#define gdtoa __gdtoa
-#define freedtoa __freedtoa
-#define strtodg __strtodg
-#define g_ddfmt __g_ddfmt
-#define g_dfmt __g_dfmt
-#define g_ffmt __g_ffmt
-#define g_Qfmt __g_Qfmt
-#define g_xfmt __g_xfmt
-#define g_xLfmt __g_xLfmt
-#define strtoId __strtoId
-#define strtoIdd __strtoIdd
-#define strtoIf __strtoIf
-#define strtoIQ __strtoIQ
-#define strtoIx __strtoIx
-#define strtoIxL __strtoIxL
-#define strtord __strtord
-#define strtordd __strtordd
-#define strtorf __strtorf
-#define strtorQ __strtorQ
-#define strtorx __strtorx
-#define strtorxL __strtorxL
-#define strtodI __strtodI
-#define strtopd __strtopd
-#define strtopdd __strtopdd
-#define strtopf __strtopf
-#define strtopQ __strtopQ
-#define strtopx __strtopx
-#define strtopxL __strtopxL
-
-/* Protect gdtoa-internal symbols */
-#define Balloc __Balloc_D2A
-#define Bfree __Bfree_D2A
-#define ULtoQ __ULtoQ_D2A
-#define ULtof __ULtof_D2A
-#define ULtod __ULtod_D2A
-#define ULtodd __ULtodd_D2A
-#define ULtox __ULtox_D2A
-#define ULtoxL __ULtoxL_D2A
-#define any_on __any_on_D2A
-#define b2d __b2d_D2A
-#define bigtens __bigtens_D2A
-#define cmp __cmp_D2A
-#define copybits __copybits_D2A
-#define d2b __d2b_D2A
-#define decrement __decrement_D2A
-#define diff __diff_D2A
-#define dtoa_result __dtoa_result_D2A
-#define g__fmt __g__fmt_D2A
-#define gethex __gethex_D2A
-#define hexdig __hexdig_D2A
-#define hexdig_init_D2A __hexdig_init_D2A
-#define hexnan __hexnan_D2A
-#define hi0bits __hi0bits_D2A
-#define hi0bits_D2A __hi0bits_D2A
-#define i2b __i2b_D2A
-#define increment __increment_D2A
-#define lo0bits __lo0bits_D2A
-#define lshift __lshift_D2A
-#define match __match_D2A
-#define mult __mult_D2A
-#define multadd __multadd_D2A
-#define nrv_alloc __nrv_alloc_D2A
-#define pow5mult __pow5mult_D2A
-#define quorem __quorem_D2A
-#define ratio __ratio_D2A
-#define rshift __rshift_D2A
-#define rv_alloc __rv_alloc_D2A
-#define s2b __s2b_D2A
-#define set_ones __set_ones_D2A
-#define strcp __strcp_D2A
-#define strcp_D2A __strcp_D2A
-#define strtoIg __strtoIg_D2A
-#define sum __sum_D2A
-#define tens __tens_D2A
-#define tinytens __tinytens_D2A
-#define tinytens __tinytens_D2A
-#define trailz __trailz_D2A
-#define ulp __ulp_D2A
-
extern char *dtoa_result;
extern CONST double bigtens[], tens[], tinytens[];
extern unsigned char hexdig[];
extern int cmp ANSI((Bigint*, Bigint*));
extern void copybits ANSI((ULong*, int, Bigint*));
extern Bigint *d2b ANSI((double, int*, int*));
- extern int decrement ANSI((Bigint*));
+ extern void decrement ANSI((Bigint*));
extern Bigint *diff ANSI((Bigint*, Bigint*));
extern char *dtoa ANSI((double d, int mode, int ndigits,
int *decpt, int *sign, char **rve));
- extern void freedtoa ANSI((char*));
- extern char *gdtoa ANSI((FPI *fpi, int be, ULong *bits, int *kindp,
- int mode, int ndigits, int *decpt, char **rve));
- extern char *g__fmt ANSI((char*, char*, char*, int, ULong));
+ extern char *g__fmt ANSI((char*, char*, char*, int, ULong, size_t));
extern int gethex ANSI((CONST char**, FPI*, Long*, Bigint**, int, locale_t));
extern void hexdig_init_D2A(Void);
extern int hexnan ANSI((CONST char**, FPI*, ULong*));
extern Bigint *s2b ANSI((CONST char*, int, int, ULong, int));
extern Bigint *set_ones ANSI((Bigint*, int));
extern char *strcp ANSI((char*, const char*));
- extern int strtodg ANSI((CONST char*, char**, FPI*, Long*, ULong*, locale_t)) __DARWIN_ALIAS(strtodg);
-
- extern int strtoId ANSI((CONST char *, char **, double *, double *));
- extern int strtoIdd ANSI((CONST char *, char **, double *, double *));
- extern int strtoIf ANSI((CONST char *, char **, float *, float *));
extern int strtoIg ANSI((CONST char*, char**, FPI*, Long*, Bigint**, int*));
- extern int strtoIQ ANSI((CONST char *, char **, void *, void *));
- extern int strtoIx ANSI((CONST char *, char **, void *, void *));
- extern int strtoIxL ANSI((CONST char *, char **, void *, void *));
extern double strtod ANSI((const char *s00, char **se));
extern double strtod_l ANSI((const char *s00, char **se, locale_t));
- extern int strtopQ ANSI((CONST char *, char **, Void *));
- extern int strtopf ANSI((CONST char *, char **, float *));
- extern int strtopd ANSI((CONST char *, char **, double *));
- extern int strtopdd ANSI((CONST char *, char **, double *, locale_t));
- extern int strtopx ANSI((CONST char *, char **, Void *, locale_t));
- extern int strtopxL ANSI((CONST char *, char **, Void *));
- extern int strtord ANSI((CONST char *, char **, int, double *));
- extern int strtordd ANSI((CONST char *, char **, int, double *));
- extern int strtorf ANSI((CONST char *, char **, int, float *));
- extern int strtorQ ANSI((CONST char *, char **, int, void *));
- extern int strtorx ANSI((CONST char *, char **, int, void *));
- extern int strtorxL ANSI((CONST char *, char **, int, void *));
extern Bigint *sum ANSI((Bigint*, Bigint*));
extern int trailz ANSI((Bigint*));
extern double ulp ANSI((double));
---- assert.c.orig 2004-09-20 17:32:51.000000000 -0700
-+++ assert.c 2004-11-17 15:56:24.000000000 -0800
-@@ -42,7 +42,7 @@
+--- assert.c.orig 2008-09-06 16:27:37.000000000 -0700
++++ assert.c 2008-09-07 01:35:02.000000000 -0700
+@@ -41,20 +41,39 @@ __FBSDID("$FreeBSD: src/lib/libc/gen/ass
+ #include <stdio.h>
#include <stdlib.h>
++extern const char *__crashreporter_info__;
++static const char badasprintf[] =
++ "Assertion failed and asprintf also failed to create full error string";
++
void
-__assert(func, file, line, failedexpr)
+__assert_rtn(func, file, line, failedexpr)
const char *func, *file;
int line;
const char *failedexpr;
+ {
+- if (func == NULL)
++ char *str = NULL;
++
++ if (func == NULL) {
+ (void)fprintf(stderr,
+ "Assertion failed: (%s), file %s, line %d.\n", failedexpr,
+ file, line);
+- else
++ if (!__crashreporter_info__) {
++ asprintf(&str,
++ "Assertion failed: (%s), file %s, line %d.\n",
++ failedexpr, file, line);
++ __crashreporter_info__ = str ? str : badasprintf;
++ }
++ } else {
+ (void)fprintf(stderr,
+ "Assertion failed: (%s), function %s, file %s, line %d.\n",
+ failedexpr, func, file, line);
++ if (!__crashreporter_info__) {
++ asprintf(&str,
++ "Assertion failed: (%s), function %s, file %s, line %d.\n",
++ failedexpr, func, file, line);
++ __crashreporter_info__ = str ? str : badasprintf;
++ }
++ }
+ abort();
+ /* NOTREACHED */
+ }
--- /dev/null
+--- err.3.orig 2009-05-12 11:21:55.000000000 -0700
++++ err.3 2009-05-20 16:48:17.000000000 -0700
+@@ -32,7 +32,7 @@
+ .\" From: @(#)err.3 8.1 (Berkeley) 6/9/93
+ .\" $FreeBSD: src/lib/libc/gen/err.3,v 1.20 2004/10/04 14:04:37 jkoshy Exp $
+ .\"
+-.Dd March 6, 1999
++.Dd May 20, 2008
+ .Dt ERR 3
+ .Os
+ .Sh NAME
+@@ -49,6 +49,9 @@
+ .Nm warnx ,
+ .Nm vwarnx ,
+ .Nm err_set_exit ,
++#ifdef UNIFDEF_BLOCKS
++.Nm err_set_exit_b ,
++#endif
+ .Nm err_set_file
+ .Nd formatted error messages
+ .Sh LIBRARY
+@@ -59,6 +62,10 @@
+ .Fn err "int eval" "const char *fmt" "..."
+ .Ft void
+ .Fn err_set_exit "void (*exitf)(int)"
++#ifdef UNIFDEF_BLOCKS
++.Ft void
++.Fn err_set_exit_b "void (^exitb)(int)"
++#endif
+ .Ft void
+ .Fn err_set_file "void *vfp"
+ .Ft void
+@@ -169,6 +176,24 @@
+ to perform any necessary cleanup; passing a null function pointer for
+ .Va exitf
+ resets the hook to do nothing.
++#ifdef UNIFDEF_BLOCKS
++The
++.Fn err_set_exit_b
++function is like
++.Fn err_set_exit
++except it takes a block pointer instead of a function pointer.
++.Bd -ragged -offset indent
++Note: The
++.Fn Block_copy
++function (defined in
++.In Blocks.h )
++is used by
++.Fn err_set_exit_b
++to make a copy of the block, especially for the case when a stack-based
++block might go out of scope when the subroutine returns.
++.Ed
++.Pp
++#endif
+ The
+ .Fn err_set_file
+ function sets the output stream used by the other functions.
+@@ -234,3 +259,8 @@
+ .Fn warnc
+ functions first appeared in
+ .Fx 3.0 .
++#ifdef UNIFDEF_BLOCKS
++The
++.Fn err_set_exit_b
++function first appeared in Mac OS X 10.6.
++#endif
---- err.c.orig 2006-12-15 11:18:17.000000000 -0800
-+++ err.c 2006-12-15 11:46:52.000000000 -0800
-@@ -44,12 +44,85 @@
+--- err.c.orig 2009-05-12 11:21:55.000000000 -0700
++++ err.c 2009-05-23 13:27:52.000000000 -0700
+@@ -44,12 +44,105 @@ __FBSDID("$FreeBSD: src/lib/libc/gen/err
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <vis.h>
#include "un-namespace.h"
++#ifdef __BLOCKS__
++#include <Block.h>
++#endif /* __BLOCKS__ */
#include "libc_private.h"
-static FILE *err_file; /* file to use for error output */
-static void (*err_exit)(int);
++#define ERR_EXIT_UNDEF 0
++#ifdef __BLOCKS__
++#define ERR_EXIT_BLOCK 1
++#endif /* __BLOCKS__ */
++#define ERR_EXIT_FUNC 2
++struct _e_err_exit {
++ unsigned int type;
++#ifdef __BLOCKS__
++ union {
++#endif /* __BLOCKS__ */
++ void (*func)(int);
++#ifdef __BLOCKS__
++ void (^block)(int);
++ };
++#endif /* __BLOCKS__ */
++};
++
+#ifdef BUILDING_VARIANT
+
+__private_extern__ FILE *_e_err_file; /* file to use for error output */
-+__private_extern__ void (*_e_err_exit)(int);
++__private_extern__ struct _e_err_exit _e_err_exit;
+__private_extern__ void _e_visprintf(FILE * __restrict, const char * __restrict, va_list);
+
+#else /* !BUILDING_VARIANT */
+
+__private_extern__ FILE *_e_err_file = NULL; /* file to use for error output */
-+__private_extern__ void (*_e_err_exit)(int) = NULL;
++__private_extern__ struct _e_err_exit _e_err_exit = {ERR_EXIT_UNDEF};
+
+/*
+ * zero means pass as is
/*
* This is declared to take a `void *' so that the caller is not required
-@@ -60,16 +133,17 @@
+@@ -60,16 +153,27 @@ void
err_set_file(void *fp)
{
if (fp)
err_set_exit(void (*ef)(int))
{
- err_exit = ef;
-+ _e_err_exit = ef;
++ _e_err_exit.type = ERR_EXIT_FUNC;
++ _e_err_exit.func = ef;
++}
++
++#ifdef __BLOCKS__
++void
++err_set_exit_b(void (^ef)(int))
++{
++ _e_err_exit.type = ERR_EXIT_BLOCK;
++ _e_err_exit.block = Block_copy(ef);
}
++#endif /* __BLOCKS__ */
+#endif /* !BUILDING_VARIANT */
__weak_reference(_err, err);
-@@ -107,16 +181,16 @@
+@@ -107,16 +211,21 @@ verrc(eval, code, fmt, ap)
const char *fmt;
va_list ap;
{
- if (err_exit)
- err_exit(eval);
+ fprintf(_e_err_file, "%s\n", strerror(code));
-+ if (_e_err_exit)
-+ _e_err_exit(eval);
++ if (_e_err_exit.type)
++#ifdef __BLOCKS__
++ if (_e_err_exit.type == ERR_EXIT_BLOCK)
++ _e_err_exit.block(eval);
++ else
++#endif /* __BLOCKS__ */
++ _e_err_exit.func(eval);
exit(eval);
}
-@@ -135,14 +209,14 @@
+@@ -135,14 +244,19 @@ verrx(eval, fmt, ap)
const char *fmt;
va_list ap;
{
- err_exit(eval);
+ _e_visprintf(_e_err_file, fmt, ap);
+ fprintf(_e_err_file, "\n");
-+ if (_e_err_exit)
-+ _e_err_exit(eval);
++ if (_e_err_exit.type)
++#ifdef __BLOCKS__
++ if (_e_err_exit.type == ERR_EXIT_BLOCK)
++ _e_err_exit.block(eval);
++ else
++#endif /* __BLOCKS__ */
++ _e_err_exit.func(eval);
exit(eval);
}
-@@ -180,14 +254,14 @@
+@@ -180,14 +294,14 @@ vwarnc(code, fmt, ap)
const char *fmt;
va_list ap;
{
}
void
-@@ -204,10 +278,10 @@
+@@ -204,10 +318,10 @@ vwarnx(fmt, ap)
const char *fmt;
va_list ap;
{
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
static char sccsid[] = "@(#)getbsize.c 8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/gen/getbsize.c,v 1.7 2002/12/30 19:04:06 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/gen/getbsize.c,v 1.9 2008/08/04 06:53:13 cperciva Exp $");
#include <err.h>
#include <stdio.h>
default:
fmterr: warnx("%s: unknown blocksize", p);
n = 512;
+ max = MAXB;
mul = 1;
break;
}
---- gethostname.c.orig 2004-11-25 11:38:01.000000000 -0800
-+++ gethostname.c 2005-09-15 09:46:13.000000000 -0700
-@@ -39,6 +39,7 @@ __FBSDID("$FreeBSD: src/lib/libc/gen/get
+--- gethostname.c.orig 2008-04-05 00:47:41.000000000 -0700
++++ gethostname.c 2008-04-05 01:04:59.000000000 -0700
+@@ -37,8 +37,10 @@ static char sccsid[] = "@(#)gethostname.
+ #include <sys/cdefs.h>
+ __FBSDID("$FreeBSD: src/lib/libc/gen/gethostname.c,v 1.5 2003/08/19 23:01:46 wollman Exp $");
++#include <string.h>
#include <sys/param.h>
#include <sys/sysctl.h>
+#include <limits.h>
#include <errno.h>
-@@ -54,10 +55,22 @@ gethostname(name, namelen)
+@@ -54,10 +56,22 @@ gethostname(name, namelen)
mib[0] = CTL_KERN;
mib[1] = KERN_HOSTNAME;
---- getmntinfo.3.orig 2008-04-28 16:25:33.000000000 -0700
-+++ getmntinfo.3 2008-04-30 04:01:45.000000000 -0700
-@@ -46,14 +46,26 @@
+--- getmntinfo.3.orig 2008-05-13 13:37:51.000000000 -0700
++++ getmntinfo.3 2008-05-15 19:58:50.000000000 -0700
+@@ -32,26 +32,30 @@
+ .\" @(#)getmntinfo.3 8.1 (Berkeley) 6/9/93
+ .\" $FreeBSD: src/lib/libc/gen/getmntinfo.3,v 1.12 2002/12/19 09:40:21 ru Exp $
+ .\"
+-.Dd June 9, 1993
++.Dd May 15, 2008
+ .Dt GETMNTINFO 3
+ .Os
+ .Sh NAME
+ .Nm getmntinfo
+ .Nd get information about mounted file systems
+-.Sh LIBRARY
+-.Lb libc
+ .Sh SYNOPSIS
+ .In sys/param.h
+ .In sys/ucred.h
.In sys/mount.h
.Ft int
.Fn getmntinfo "struct statfs **mntbufp" "int flags"
+#ifdef UNIFDEF_LEGACY_64_APIS
++.Sh TRANSITIONAL SYNOPSIS (NOW DEPRECATED)
+.Ft int
-+.Fn getmntinfo64 "struct statfs64 **mntbufp" "int flags"
++.br
++.Fn getmntinfo64 "struct statfs64 **mntbufp" "int flags" ;
+#endif /* UNIFDEF_LEGACY_64_APIS */
.Sh DESCRIPTION
The
+.Ft statfs
structures describing each currently mounted file system (see
.Xr statfs 2 ) .
-+#ifdef UNIFDEF_LEGACY_64_APIS
-+Likewise, the
-+.Fn getmntinfo64
-+function
-+returns an array of
-+.Ft statfs64
-+structures describing each currently mounted file system.
-+#endif /* UNIFDEF_LEGACY_64_APIS */
.Pp
- The
- .Fn getmntinfo
-@@ -61,11 +73,28 @@
- passes its
+@@ -62,6 +66,33 @@
.Fa flags
argument transparently to
+ .Xr getfsstat 2 .
+#ifdef UNIFDEF_LEGACY_64_APIS
++.Pp
++Like
+.Xr getfsstat 2 ,
-+while the
-+.Fn getmntinfo64
-+function
-+passes its
-+.Fa flags
-+argument transparently to
-+.Fn getfsstat64 .
-+#else /* !UNIFDEF_LEGACY_64_APIS */
- .Xr getfsstat 2 .
++when the macro
++.Dv _DARWIN_FEATURE_64_BIT_INODE
++is defined, the
++.Ft ino_t
++type will be 64-bits (force 64-bit inode mode by defining the
++.Dv _DARWIN_USE_64_BIT_INODE
++macro before including header files).
++This will cause the symbol variant of
++.Fn getmntinfo ,
++with the
++.Fa $INODE64
++suffixes, to be automatically linked in.
++In addition, the
++.Ft statfs
++structure will be the 64-bit inode version.
++If
++.Dv _DARWIN_USE_64_BIT_INODE
++is not defined, both
++.Fn getmntinfo
++and the
++.Ft statfs
++structure will refer to the 32-bit inode versions.
+#endif /* UNIFDEF_LEGACY_64_APIS */
.Sh RETURN VALUES
On successful completion,
.Fn getmntinfo
+@@ -86,6 +117,24 @@
+ .Xr getfsstat 2
+ or
+ .Xr malloc 3 .
+#ifdef UNIFDEF_LEGACY_64_APIS
-+and
-+.Fn getmntinfo64
-+return a count of the number of elements in the array.
-+#else /* !UNIFDEF_LEGACY_64_APIS */
- returns a count of the number of elements in the array.
-+#endif /* UNIFDEF_LEGACY_64_APIS */
- The pointer to the array is stored into
- .Fa mntbufp .
- .Pp
-@@ -76,11 +105,21 @@
- .Fa mntbufp
- will be unmodified, any information previously returned by
- .Fn getmntinfo
-+#ifdef UNIFDEF_LEGACY_64_APIS
-+or
-+.Fn getmntinfo64
-+#endif /* UNIFDEF_LEGACY_64_APIS */
- will be lost.
- .Sh ERRORS
- The
- .Fn getmntinfo
-+#ifdef UNIFDEF_LEGACY_64_APIS
-+and
++.Sh TRANSITIONAL DESCRIPTION (NOW DEPRECATED)
++The
+.Fn getmntinfo64
-+functions
-+#else /* !UNIFDEF_LEGACY_64_APIS */
- function
++routine is equivalent to its corresponding non-64-suffixed routine,
++when 64-bit inodes are in effect.
++It was added before there was support for the symbol variants, and so is
++now deprecated.
++Instead of using it, set the
++.Dv _DARWIN_USE_64_BIT_INODE
++macro before including header files to force 64-bit inode support.
++.Pp
++The
++.Ft statfs64
++structure used by this deprecated routine is the same as the
++.Ft statfs
++structure when 64-bit inodes are in effect.
+#endif /* UNIFDEF_LEGACY_64_APIS */
- may fail and set errno for any of the errors specified for the library
- routines
- .Xr getfsstat 2
-@@ -99,15 +138,29 @@
+ .Sh SEE ALSO
+ .Xr getfsstat 2 ,
+ .Xr mount 2 ,
+@@ -99,15 +148,29 @@
.Sh BUGS
The
.Fn getmntinfo
--function writes the array of structures to an internal static object
+#ifdef UNIFDEF_LEGACY_64_APIS
+and
+.Fn getmntinfo64
+functions write the array of structures to an internal static object
+#else /* !UNIFDEF_LEGACY_64_APIS */
-+function write the array of structures to an internal static object
+ function writes the array of structures to an internal static object
+#endif /* UNIFDEF_LEGACY_64_APIS */
and returns
a pointer to that object.
---- glob.3 2004-11-25 11:38:01.000000000 -0800
-+++ glob.3.edit 2006-09-05 14:47:53.000000000 -0700
-@@ -46,9 +46,16 @@
+--- glob.3.orig 2009-05-12 11:21:55.000000000 -0700
++++ glob.3 2009-05-20 15:39:07.000000000 -0700
+@@ -34,21 +34,38 @@
+ .\" @(#)glob.3 8.3 (Berkeley) 4/16/94
+ .\" $FreeBSD: src/lib/libc/gen/glob.3,v 1.30 2004/09/01 23:28:27 tjr Exp $
+ .\"
+-.Dd September 1, 2004
++.Dd May 20, 2008
+ .Dt GLOB 3
+ .Os
+ .Sh NAME
+ .Nm glob ,
++#ifdef UNIFDEF_BLOCKS
++.Nm glob_b ,
++#endif
+ .Nm globfree
+ .Nd generate pathnames matching a pattern
+-.Sh LIBRARY
+-.Lb libc
.Sh SYNOPSIS
.In glob.h
.Ft int
+.Fo glob
+.Fa "const char *restrict pattern"
+.Fa "int flags"
-+.Fa "int (*errfunc)(const char *epath, int eerno)"
++.Fa "int (*errfunc)(const char *epath, int errno)"
+.Fa "glob_t *restrict pglob"
+.Fc
++#ifdef UNIFDEF_BLOCKS
++.Ft int
++.Fo glob_b
++.Fa "const char *restrict pattern"
++.Fa "int flags"
++.Fa "int (^errblk)(const char *epath, int errno)"
++.Fa "glob_t *restrict pglob"
++.Fc
++#endif
.Ft void
-.Fn globfree "glob_t *pglob"
+.Fo globfree
.Sh DESCRIPTION
The
.Fn glob
-@@ -337,7 +344,7 @@
+@@ -326,18 +343,39 @@
+ or
+ .Fa errfunc
+ returns zero, the error is ignored.
++#ifdef UNIFDEF_BLOCKS
++.Pp
++The
++.Fn glob_b
++function is like
++.Fn glob
++except that the error callback is a block pointer instead of a function
++pointer.
++#endif
+ .Pp
+ The
+ .Fn globfree
+ function frees any space associated with
+ .Fa pglob
+ from a previous call(s) to
++#ifdef UNIFDEF_BLOCKS
++.Fn glob
++or
++.Fn glob_b .
++#else
+ .Fn glob .
++#endif
+ .Sh RETURN VALUES
On successful completion,
.Fn glob
++#ifdef UNIFDEF_BLOCKS
++and
++.Fn glob_b
++return zero.
++#else
returns zero.
-In addition the fields of
++#endif
+In addition, the fields of
.Fa pglob
contain the values described below:
.Bl -tag -width GLOB_NOCHECK
-@@ -418,6 +425,11 @@
+@@ -345,12 +383,22 @@
+ contains the total number of matched pathnames so far.
+ This includes other matches from previous invocations of
+ .Fn glob
++#ifdef UNIFDEF_BLOCKS
++or
++.Fn glob_b
++#endif
+ if
+ .Dv GLOB_APPEND
+ was specified.
+ .It Fa gl_matchc
+ contains the number of matched pathnames in the current invocation of
++#ifdef UNIFDEF_BLOCKS
++.Fn glob
++or
++.Fn glob_b .
++#else
+ .Fn glob .
++#endif
+ .It Fa gl_flags
+ contains a copy of the
+ .Fa flags
+@@ -373,6 +421,10 @@
+ .Pp
+ If
+ .Fn glob
++#ifdef UNIFDEF_BLOCKS
++or
++.Fn glob_b
++#endif
+ terminates due to an error, it sets errno and returns one of the
+ following non-zero constants, which are defined in the include
+ file
+@@ -418,6 +470,18 @@
g.gl_pathv[1] = "-l";
execvp("ls", g.gl_pathv);
.Ed
+.Sh CAVEATS
+The
+.Fn glob
-+function will not match filenames that begin with a period
++#ifdef UNIFDEF_BLOCKS
++and
++.Fn glob_b
++functions
++#else
++function
++#endif
++will not match filenames that begin with a period
+unless this is specifically requested (e.g., by ".*").
.Sh SEE ALSO
.Xr sh 1 ,
.Xr fnmatch 3 ,
+@@ -456,6 +520,11 @@
+ .Fn globfree
+ functions first appeared in
+ .Bx 4.4 .
++#ifdef UNIFDEF_BLOCKS
++The
++.Fn glob_b
++function first appeared in Mac OS X 10.6.
++#endif
+ .Sh BUGS
+ Patterns longer than
+ .Dv MAXPATHLEN
+@@ -463,7 +532,13 @@
+ .Pp
+ The
+ .Fn glob
+-argument
++#ifdef UNIFDEF_BLOCKS
++and
++.Fn glob_b
++functions
++#else
++function
++#endif
+ may fail and set errno for any of the errors specified for the
+ library routines
+ .Xr stat 2 ,
---- glob.c.orig 2008-03-15 10:50:43.000000000 -0700
-+++ glob.c 2008-03-27 03:28:31.000000000 -0700
+--- glob.c.orig 2009-05-12 11:21:55.000000000 -0700
++++ glob.c 2009-05-20 16:26:02.000000000 -0700
@@ -40,6 +40,8 @@ static char sccsid[] = "@(#)glob.c 8.3 (
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/gen/glob.c,v 1.22 2004/07/29 03:48:52 tjr Exp $");
/*
* glob(3) -- a superset of the one defined in POSIX 1003.2.
*
-@@ -143,25 +145,33 @@ typedef char Char;
+@@ -143,33 +145,40 @@ typedef char Char;
#define ismeta(c) (((c)&M_QUOTE) != 0)
#ifdef DEBUG
static void qprintf(const char *, Char *);
#endif
-@@ -178,6 +188,8 @@ glob(pattern, flags, errfunc, pglob)
+
+-int
+-glob(pattern, flags, errfunc, pglob)
++static int
++__glob(pattern, pglob)
+ const char *pattern;
+- int flags, (*errfunc)(const char *, int);
+ glob_t *pglob;
+ {
+ const u_char *patnext;
+@@ -178,30 +187,30 @@ glob(pattern, flags, errfunc, pglob)
mbstate_t mbs;
wchar_t wc;
size_t clen;
+ int mb_cur_max = MB_CUR_MAX_L(loc);
patnext = (u_char *) pattern;
- if (!(flags & GLOB_APPEND)) {
-@@ -200,8 +212,8 @@ glob(pattern, flags, errfunc, pglob)
+- if (!(flags & GLOB_APPEND)) {
++ if (!(pglob->gl_flags & GLOB_APPEND)) {
+ pglob->gl_pathc = 0;
+ pglob->gl_pathv = NULL;
+- if (!(flags & GLOB_DOOFFS))
++ if (!(pglob->gl_flags & GLOB_DOOFFS))
+ pglob->gl_offs = 0;
+ }
+- if (flags & GLOB_LIMIT) {
++ if (pglob->gl_flags & GLOB_LIMIT) {
+ limit = pglob->gl_matchc;
+ if (limit == 0)
+ limit = ARG_MAX;
+ } else
+ limit = 0;
+- pglob->gl_flags = flags & ~GLOB_MAGCHAR;
+- pglob->gl_errfunc = errfunc;
+ pglob->gl_matchc = 0;
+
+ bufnext = patbuf;
bufend = bufnext + MAXPATHLEN - 1;
- if (flags & GLOB_NOESCAPE) {
+- if (flags & GLOB_NOESCAPE) {
++ if (pglob->gl_flags & GLOB_NOESCAPE) {
memset(&mbs, 0, sizeof(mbs));
- while (bufend - bufnext >= MB_CUR_MAX) {
- clen = mbrtowc(&wc, patnext, MB_LEN_MAX, &mbs);
if (clen == (size_t)-1 || clen == (size_t)-2)
return (GLOB_NOMATCH);
else if (clen == 0)
-@@ -212,7 +224,7 @@ glob(pattern, flags, errfunc, pglob)
+@@ -212,7 +221,7 @@ glob(pattern, flags, errfunc, pglob)
} else {
/* Protect the quoted characters. */
memset(&mbs, 0, sizeof(mbs));
if (*patnext == QUOTE) {
if (*++patnext == EOS) {
*bufnext++ = QUOTE | M_PROTECT;
-@@ -221,7 +233,7 @@ glob(pattern, flags, errfunc, pglob)
+@@ -221,7 +230,7 @@ glob(pattern, flags, errfunc, pglob)
prot = M_PROTECT;
} else
prot = 0;
if (clen == (size_t)-1 || clen == (size_t)-2)
return (GLOB_NOMATCH);
else if (clen == 0)
-@@ -233,9 +245,9 @@ glob(pattern, flags, errfunc, pglob)
+@@ -232,11 +241,40 @@ glob(pattern, flags, errfunc, pglob)
+ }
*bufnext = EOS;
- if (flags & GLOB_BRACE)
+- if (flags & GLOB_BRACE)
- return globexp1(patbuf, pglob, &limit);
++ if (pglob->gl_flags & GLOB_BRACE)
+ return globexp1(patbuf, pglob, &limit, loc);
else
- return glob0(patbuf, pglob, &limit);
+ return glob0(patbuf, pglob, &limit, loc);
++}
++
++int
++glob(pattern, flags, errfunc, pglob)
++ const char *pattern;
++ int flags, (*errfunc)(const char *, int);
++ glob_t *pglob;
++{
++#ifdef __BLOCKS__
++ pglob->gl_flags = flags & ~(GLOB_MAGCHAR | _GLOB_ERR_BLOCK);
++#else /* !__BLOCKS__ */
++ pglob->gl_flags = flags & ~GLOB_MAGCHAR;
++#endif /* __BLOCKS__ */
++ pglob->gl_errfunc = errfunc;
++ return __glob(pattern, pglob);
++}
++
++#ifdef __BLOCKS__
++int
++glob_b(pattern, flags, errblk, pglob)
++ const char *pattern;
++ int flags, (^errblk)(const char *, int);
++ glob_t *pglob;
++{
++ pglob->gl_flags = flags & ~GLOB_MAGCHAR;
++ pglob->gl_flags |= _GLOB_ERR_BLOCK;
++ pglob->gl_errblk = errblk;
++ return __glob(pattern, pglob);
}
++#endif /* __BLOCKS__ */
/*
-@@ -244,23 +256,24 @@ glob(pattern, flags, errfunc, pglob)
+ * Expand recursively a glob {} pattern. When there is no more expansion
+@@ -244,23 +282,24 @@ glob(pattern, flags, errfunc, pglob)
* characters
*/
static int
}
-@@ -270,10 +283,11 @@ globexp1(pattern, pglob, limit)
+@@ -270,10 +309,11 @@ globexp1(pattern, pglob, limit)
* If it fails then it tries to glob the rest of the pattern and returns.
*/
static int
{
int i;
Char *lm, *ls;
-@@ -310,7 +324,7 @@ globexp2(ptr, pattern, pglob, rv, limit)
+@@ -310,7 +350,7 @@ globexp2(ptr, pattern, pglob, rv, limit)
/* Non matching braces; just glob the pattern */
if (i != 0 || *pe == EOS) {
return 0;
}
-@@ -357,7 +371,7 @@ globexp2(ptr, pattern, pglob, rv, limit)
+@@ -357,7 +397,7 @@ globexp2(ptr, pattern, pglob, rv, limit)
#ifdef DEBUG
qprintf("globexp2:", patbuf);
#endif
/* move after the comma, to the next string */
pl = pm + 1;
-@@ -373,10 +387,11 @@ globexp2(ptr, pattern, pglob, rv, limit)
+@@ -373,10 +413,11 @@ globexp2(ptr, pattern, pglob, rv, limit)
globtilde(pattern, patbuf, patbuf_len, pglob)
const Char *pattern;
Char *patbuf;
-@@ -438,6 +453,7 @@ globtilde(pattern, patbuf, patbuf_len, p
+@@ -438,6 +479,7 @@ globtilde(pattern, patbuf, patbuf_len, p
return patbuf;
}
/*
-@@ -447,13 +463,15 @@ globtilde(pattern, patbuf, patbuf_len, p
+@@ -447,13 +489,15 @@ globtilde(pattern, patbuf, patbuf_len, p
* if things went well, nonzero if errors occurred.
*/
static int
Char *bufnext, patbuf[MAXPATHLEN];
qpatnext = globtilde(pattern, patbuf, MAXPATHLEN, pglob);
-@@ -462,6 +480,10 @@ glob0(pattern, pglob, limit)
+@@ -462,6 +506,10 @@ glob0(pattern, pglob, limit)
/* We don't need to check for buffer overflow any more. */
while ((c = *qpatnext++) != EOS) {
switch (c) {
case LBRACKET:
c = *qpatnext;
-@@ -512,7 +534,7 @@ glob0(pattern, pglob, limit)
+@@ -512,7 +560,7 @@ glob0(pattern, pglob, limit)
qprintf("glob0:", patbuf);
#endif
return(err);
/*
-@@ -525,7 +547,7 @@ glob0(pattern, pglob, limit)
+@@ -525,7 +573,7 @@ glob0(pattern, pglob, limit)
if (((pglob->gl_flags & GLOB_NOCHECK) ||
((pglob->gl_flags & GLOB_NOMAGIC) &&
!(pglob->gl_flags & GLOB_MAGCHAR))))
else
return(GLOB_NOMATCH);
}
-@@ -535,18 +557,21 @@ glob0(pattern, pglob, limit)
+@@ -535,18 +583,21 @@ glob0(pattern, pglob, limit)
return(0);
}
{
Char pathbuf[MAXPATHLEN];
-@@ -554,7 +579,7 @@ glob1(pattern, pglob, limit)
+@@ -554,7 +605,7 @@ glob1(pattern, pglob, limit)
if (*pattern == EOS)
return(0);
return(glob2(pathbuf, pathbuf, pathbuf + MAXPATHLEN - 1,
}
/*
-@@ -563,10 +588,11 @@ glob1(pattern, pglob, limit)
+@@ -563,10 +614,11 @@ glob1(pattern, pglob, limit)
* meta characters.
*/
static int
{
struct stat sb;
Char *p, *q;
-@@ -579,13 +605,13 @@ glob2(pathbuf, pathend, pathend_last, pa
+@@ -579,13 +631,13 @@ glob2(pathbuf, pathend, pathend_last, pa
for (anymeta = 0;;) {
if (*pattern == EOS) { /* End of pattern? */
*pathend = EOS;
S_ISDIR(sb.st_mode)))) {
if (pathend + 1 > pathend_last)
return (GLOB_ABORTED);
-@@ -593,7 +619,7 @@ glob2(pathbuf, pathend, pathend_last, pa
+@@ -593,7 +645,7 @@ glob2(pathbuf, pathend, pathend_last, pa
*pathend = EOS;
}
++pglob->gl_matchc;
}
/* Find end of next segment, copy tentatively to pathend. */
-@@ -617,16 +643,17 @@ glob2(pathbuf, pathend, pathend_last, pa
+@@ -617,16 +669,17 @@ glob2(pathbuf, pathend, pathend_last, pa
}
} else /* Need expansion, recurse. */
return(glob3(pathbuf, pathend, pathend_last, pattern, p,
{
struct dirent *dp;
DIR *dirp;
-@@ -646,15 +673,16 @@ glob3(pathbuf, pathend, pathend_last, pa
+@@ -646,15 +699,22 @@ glob3(pathbuf, pathend, pathend_last, pa
*pathend = EOS;
errno = 0;
return (GLOB_ABORTED);
- if (pglob->gl_errfunc(buf, errno) ||
- pglob->gl_flags & GLOB_ERR)
++#ifdef __BLOCKS__
++ if (pglob->gl_flags & _GLOB_ERR_BLOCK) {
++ if (pglob->gl_errblk(buf, errno))
++ return (GLOB_ABORTED);
++ } else
++#endif /* __BLOCKS__ */
+ if (pglob->gl_errfunc(buf, errno))
return (GLOB_ABORTED);
}
return(0);
}
-@@ -679,7 +707,7 @@ glob3(pathbuf, pathend, pathend_last, pa
+@@ -679,7 +739,7 @@ glob3(pathbuf, pathend, pathend_last, pa
dc = pathend;
sc = (u_char *) dp->d_name;
while (dc < pathend_last) {
if (clen == (size_t)-1 || clen == (size_t)-2) {
wc = *sc;
clen = 1;
-@@ -689,12 +717,12 @@ glob3(pathbuf, pathend, pathend_last, pa
+@@ -689,12 +749,12 @@ glob3(pathbuf, pathend, pathend_last, pa
break;
sc += clen;
}
if (err)
break;
}
-@@ -707,6 +735,7 @@ glob3(pathbuf, pathend, pathend_last, pa
+@@ -707,6 +767,7 @@ glob3(pathbuf, pathend, pathend_last, pa
}
/*
* Extend the gl_pathv member of a glob_t structure to accomodate a new item,
* add the new item, and update gl_pathc.
-@@ -721,11 +750,12 @@ glob3(pathbuf, pathend, pathend_last, pa
+@@ -721,11 +782,12 @@ glob3(pathbuf, pathend, pathend_last, pa
* Either gl_pathc is zero and gl_pathv is NULL; or gl_pathc > 0 and
* gl_pathv points to (gl_offs + gl_pathc + 1) items.
*/
{
char **pathv;
int i;
-@@ -760,9 +790,9 @@ globextend(path, pglob, limit)
+@@ -760,9 +822,9 @@ globextend(path, pglob, limit)
for (p = path; *p++;)
continue;
free(copy);
return (GLOB_NOSPACE);
}
-@@ -776,9 +806,10 @@ globextend(path, pglob, limit)
+@@ -776,9 +838,10 @@ globextend(path, pglob, limit)
* pattern matching function for filenames. Each occurrence of the *
* pattern causes a recursion level.
*/
{
int ok, negate_range;
Char c, k;
-@@ -790,7 +821,7 @@ match(name, pat, patend)
+@@ -790,7 +853,7 @@ match(name, pat, patend)
if (pat == patend)
return(1);
do
return(1);
while (*name++ != EOS);
return(0);
-@@ -806,10 +837,10 @@ match(name, pat, patend)
+@@ -806,10 +869,10 @@ match(name, pat, patend)
++pat;
while (((c = *pat++) & M_MASK) != M_END)
if ((*pat & M_MASK) == M_RNG) {
)
ok = 1;
pat += 2;
-@@ -844,18 +875,20 @@ globfree(pglob)
+@@ -844,18 +907,20 @@ globfree(pglob)
pglob->gl_pathv = NULL;
}
}
return (NULL);
}
-@@ -866,14 +899,15 @@ g_opendir(str, pglob)
+@@ -866,14 +931,15 @@ g_opendir(str, pglob)
}
static int
errno = ENAMETOOLONG;
return (-1);
}
-@@ -883,14 +917,15 @@ g_lstat(fn, sb, pglob)
+@@ -883,14 +949,15 @@ g_lstat(fn, sb, pglob)
}
static int
errno = ENAMETOOLONG;
return (-1);
}
-@@ -899,7 +934,8 @@ g_stat(fn, sb, pglob)
+@@ -899,7 +966,8 @@ g_stat(fn, sb, pglob)
return(stat(buf, sb));
}
g_strchr(str, ch)
Char *str;
wchar_t ch;
-@@ -911,18 +947,20 @@ g_strchr(str, ch)
+@@ -911,18 +979,20 @@ g_strchr(str, ch)
return (NULL);
}
if (clen == (size_t)-1)
return (1);
if (*str == L'\0')
-@@ -954,3 +992,4 @@ qprintf(str, s)
+@@ -954,3 +1024,4 @@ qprintf(str, s)
(void)printf("\n");
}
#endif
---- isatty.c.orig 2003-05-20 15:21:02.000000000 -0700
-+++ isatty.c 2005-06-02 13:46:32.000000000 -0700
-@@ -39,14 +39,21 @@
+--- isatty.c.orig 2008-04-05 00:47:41.000000000 -0700
++++ isatty.c 2008-04-05 01:05:40.000000000 -0700
+@@ -39,14 +39,22 @@ __FBSDID("$FreeBSD: src/lib/libc/gen/isa
#include <termios.h>
#include <unistd.h>
+#include <sys/filio.h>
+#include <sys/conf.h>
++#include <sys/ioctl.h>
+#include <errno.h>
int
+++ /dev/null
-.\" Copyright (c) 2003 David Schultz <dschultz@uclink.Berkeley.EDU>
-.\" All rights reserved.
-.\"
-.\" Redistribution and use in source and binary forms, with or without
-.\" modification, are permitted provided that the following conditions
-.\" are met:
-.\" 1. Redistributions of source code must retain the above copyright
-.\" notice, this list of conditions and the following disclaimer.
-.\" 2. Redistributions in binary form must reproduce the above copyright
-.\" notice, this list of conditions and the following disclaimer in the
-.\" documentation and/or other materials provided with the distribution.
-.\"
-.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-.\" SUCH DAMAGE.
-.\"
-.\" $FreeBSD: src/lib/libc/gen/isgreater.3,v 1.2 2003/06/01 19:19:59 ru Exp $
-.\"
-.Dd February 12, 2003
-.Dt ISGREATER 3
-.Os
-.Sh NAME
-.Nm isgreater , isgreaterequal , isless , islessequal ,
-.Nm islessgreater , isunordered
-.Nd "compare two floating-point numbers"
-.Sh LIBRARY
-.Lb libc
-.Sh SYNOPSIS
-.In math.h
-.Ft int
-.Fn isgreater "real-floating x" "real-floating y"
-.Ft int
-.Fn isgreaterequal "real-floating x" "real-floating y"
-.Ft int
-.Fn isless "real-floating x" "real-floating y"
-.Ft int
-.Fn islessequal "real-floating x" "real-floating y"
-.Ft int
-.Fn islessgreater "real-floating x" "real-floating y"
-.Ft int
-.Fn isunordered "real-floating x" "real-floating y"
-.Sh DESCRIPTION
-Each of the macros
-.Fn isgreater ,
-.Fn isgreaterequal ,
-.Fn isless ,
-.Fn islessequal ,
-and
-.Fn islessgreater
-take arguments
-.Fa x
-and
-.Fa y
-and return a non-zero value if and only if its nominal
-relation on
-.Fa x
-and
-.Fa y
-is true.
-These macros always return zero if either
-argument is not a number (NaN), but unlike the corresponding C
-operators, they never raise a floating point exception.
-.Pp
-The
-.Fn isunordered
-macro takes arguments
-.Fa x
-and
-.Fa y
-and returns non-zero if and only if neither
-.Fa x
-nor
-.Fa y
-are NaNs.
-For any pair of floating-point values, one
-of the relationships (less, greater, equal, unordered) holds.
-.Sh SEE ALSO
-.Xr fpclassify 3 ,
-.Xr math 3 ,
-.Xr signbit 3
-.Sh STANDARDS
-The
-.Fn isgreater ,
-.Fn isgreaterequal ,
-.Fn isless ,
-.Fn islessequal ,
-.Fn islessgreater ,
-and
-.Fn isunordered
-macros conform to
-.St -isoC-99 .
-.Sh HISTORY
-The relational macros described above first appeared in
-.Fx 5.1 .
+++ /dev/null
---- _SB/Libc/gen/FreeBSD/isgreater.3 2003-11-12 00:44:46.000000000 -0800
-+++ _SB/Libc/gen/FreeBSD/isgreater.3.edit 2006-06-28 16:55:51.000000000 -0700
-@@ -55,11 +55,11 @@
- .Fn islessequal ,
- and
- .Fn islessgreater
--take arguments
-+takes arguments
- .Fa x
- and
- .Fa y
--and return a non-zero value if and only if its nominal
-+and returns a non-zero value if and only if its nominal
- relation on
- .Fa x
- and
-@@ -74,8 +74,8 @@
- macro takes arguments
- .Fa x
- and
--.Fa y
--and returns non-zero if and only if neither
-+.Fa y ,
-+returning non-zero if and only if neither
- .Fa x
- nor
- .Fa y
-@@ -97,6 +97,3 @@
- .Fn isunordered
- macros conform to
- .St -isoC-99 .
--.Sh HISTORY
--The relational macros described above first appeared in
--.Fx 5.1 .
--- /dev/null
+--- makecontext.3.orig 2009-03-13 03:05:02.000000000 -0700
++++ makecontext.3 2009-03-13 03:12:49.000000000 -0700
+@@ -48,7 +48,7 @@
+ .Ft void
+ .Fo makecontext
+ .Fa "ucontext_t *ucp"
+-.Fa "void \*[lp]*func\*[rp]\*[lp]void\*[rp]"
++.Fa "void \*[lp]*func\*[rp]\*[lp]\*[rp]"
+ .Fa "int argc" ...
+ .Fc
+ .Ft int
+@@ -64,7 +64,9 @@
+ and had a stack allocated for it.
+ The context is modified so that it will continue execution by invoking
+ .Fn func
+-with the arguments provided.
++with the arguments (of type
++.Ft int )
++provided.
+ The
+ .Fa argc
+ argument
---- popen.c.orig 2003-05-20 15:21:02.000000000 -0700
-+++ popen.c 2005-09-14 15:53:35.000000000 -0700
-@@ -43,7 +43,8 @@ __FBSDID("$FreeBSD: src/lib/libc/gen/pop
+--- popen.c.orig 2009-03-03 02:04:57.000000000 -0800
++++ popen.c 2009-03-03 15:28:31.000000000 -0800
+@@ -34,6 +34,10 @@
+ * SUCH DAMAGE.
+ */
+
++#ifdef VARIANT_DARWINEXTSN
++#define _DARWIN_UNLIMITED_STREAMS
++#endif /* VARIANT_DARWINEXTSN */
++
+ #if defined(LIBC_SCCS) && !defined(lint)
+ static char sccsid[] = "@(#)popen.c 8.3 (Berkeley) 5/3/95";
+ #endif /* LIBC_SCCS and not lint */
+@@ -43,7 +47,8 @@ __FBSDID("$FreeBSD: src/lib/libc/gen/pop
#include "namespace.h"
#include <sys/param.h>
#include <sys/wait.h>
#include <signal.h>
#include <errno.h>
#include <unistd.h>
-@@ -55,11 +56,14 @@ __FBSDID("$FreeBSD: src/lib/libc/gen/pop
+@@ -52,17 +57,29 @@ __FBSDID("$FreeBSD: src/lib/libc/gen/pop
+ #include <string.h>
+ #include <paths.h>
+ #include <pthread.h>
++#include <spawn.h>
#include "un-namespace.h"
#include "libc_private.h"
+#include <crt_externs.h>
+#define environ (*_NSGetEnviron())
+-static struct pid {
+/* 3516149 - store file descriptor and use that to close to prevent blocking */
- static struct pid {
++struct pid {
struct pid *next;
FILE *fp;
+ int fd;
pid_t pid;
- } *pidlist;
- static pthread_mutex_t pidlist_mutex = PTHREAD_MUTEX_INITIALIZER;
-@@ -77,20 +81,24 @@ popen(command, type)
+-} *pidlist;
+-static pthread_mutex_t pidlist_mutex = PTHREAD_MUTEX_INITIALIZER;
++};
++#define pidlist __popen_pidlist
++#define pidlist_mutex __popen_pidlist_mutex
++#ifndef BUILDING_VARIANT
++__private_extern__ struct pid *pidlist = NULL;
++__private_extern__ pthread_mutex_t pidlist_mutex = PTHREAD_MUTEX_INITIALIZER;
++#else /* BUILDING_VARIANT */
++extern struct pid *pidlist;
++extern pthread_mutex_t pidlist_mutex;
++#endif /* !BUILDING_VARIANT */
+
+ #define THREAD_LOCK() if (__isthreaded) _pthread_mutex_lock(&pidlist_mutex)
+ #define THREAD_UNLOCK() if (__isthreaded) _pthread_mutex_unlock(&pidlist_mutex)
+@@ -73,85 +90,109 @@ popen(command, type)
+ {
+ struct pid *cur;
+ FILE *iop;
+- int pdes[2], pid, twoway;
++ int pdes[2], pid, twoway, other;
char *argv[4];
struct pid *p;
++ posix_spawn_file_actions_t file_actions;
++ int err;
- /*
- * Lite2 introduced two-way popen() pipes using _socketpair().
- if ((*type != 'r' && *type != 'w') || type[1])
+ if ((*type != 'r' && *type != 'w') || type[1]) {
+ errno = EINVAL;
++ return (NULL);
++ }
++ if (pipe(pdes) < 0)
return (NULL);
}
- if (pipe(pdes) < 0)
- return (NULL);
-+ }
+- if (pipe(pdes) < 0)
+- return (NULL);
- if ((cur = malloc(sizeof(struct pid))) == NULL) {
+- if ((cur = malloc(sizeof(struct pid))) == NULL) {
++ /* fdopen can now fail */
++ if (*type == 'r') {
++ iop = fdopen(pdes[0], type);
++ other = pdes[1];
++ } else {
++ iop = fdopen(pdes[1], type);
++ other = pdes[0];
++ }
++ if (iop == NULL) {
(void)_close(pdes[0]);
-@@ -104,7 +112,7 @@ popen(command, type)
+ (void)_close(pdes[1]);
+ return (NULL);
+ }
+
++ if ((cur = malloc(sizeof(struct pid))) == NULL) {
++ (void)fclose(iop);
++ (void)_close(other);
++ return (NULL);
++ }
++
++ if ((err = posix_spawn_file_actions_init(&file_actions)) != 0) {
++ (void)fclose(iop);
++ (void)_close(other);
++ free(cur);
++ errno = err;
++ return (NULL);
++ }
++ if (*type == 'r') {
++ /*
++ * The dup2() to STDIN_FILENO is repeated to avoid
++ * writing to pdes[1], which might corrupt the
++ * parent's copy. This isn't good enough in
++ * general, since the _exit() is no return, so
++ * the compiler is free to corrupt all the local
++ * variables.
++ */
++ (void)posix_spawn_file_actions_addclose(&file_actions, pdes[0]);
++ if (pdes[1] != STDOUT_FILENO) {
++ (void)posix_spawn_file_actions_adddup2(&file_actions, pdes[1], STDOUT_FILENO);
++ (void)posix_spawn_file_actions_addclose(&file_actions, pdes[1]);
++ if (twoway)
++ (void)posix_spawn_file_actions_adddup2(&file_actions, STDOUT_FILENO, STDIN_FILENO);
++ } else if (twoway && (pdes[1] != STDIN_FILENO))
++ (void)posix_spawn_file_actions_adddup2(&file_actions, pdes[1], STDIN_FILENO);
++ } else {
++ if (pdes[0] != STDIN_FILENO) {
++ (void)posix_spawn_file_actions_adddup2(&file_actions, pdes[0], STDIN_FILENO);
++ (void)posix_spawn_file_actions_addclose(&file_actions, pdes[0]);
++ }
++ (void)posix_spawn_file_actions_addclose(&file_actions, pdes[1]);
++ }
++ for (p = pidlist; p; p = p->next) {
++ (void)posix_spawn_file_actions_addclose(&file_actions, p->fd);
++ }
++
+ argv[0] = "sh";
+ argv[1] = "-c";
+ argv[2] = (char *)command;
argv[3] = NULL;
- THREAD_LOCK();
+- THREAD_LOCK();
- switch (pid = vfork()) {
-+ switch (pid = fork()) {
- case -1: /* Error. */
- THREAD_UNLOCK();
- (void)_close(pdes[0]);
-@@ -138,7 +146,7 @@ popen(command, type)
- (void)_close(pdes[1]);
- }
- for (p = pidlist; p; p = p->next) {
+- case -1: /* Error. */
+- THREAD_UNLOCK();
+- (void)_close(pdes[0]);
+- (void)_close(pdes[1]);
++ err = posix_spawn(&pid, _PATH_BSHELL, &file_actions, NULL, argv, environ);
++ posix_spawn_file_actions_destroy(&file_actions);
++
++ if (err == ENOMEM || err == EAGAIN) { /* as if fork failed */
++ (void)fclose(iop);
++ (void)_close(other);
+ free(cur);
++ errno = err;
+ return (NULL);
+- /* NOTREACHED */
+- case 0: /* Child. */
+- if (*type == 'r') {
+- /*
+- * The _dup2() to STDIN_FILENO is repeated to avoid
+- * writing to pdes[1], which might corrupt the
+- * parent's copy. This isn't good enough in
+- * general, since the _exit() is no return, so
+- * the compiler is free to corrupt all the local
+- * variables.
+- */
+- (void)_close(pdes[0]);
+- if (pdes[1] != STDOUT_FILENO) {
+- (void)_dup2(pdes[1], STDOUT_FILENO);
+- (void)_close(pdes[1]);
+- if (twoway)
+- (void)_dup2(STDOUT_FILENO, STDIN_FILENO);
+- } else if (twoway && (pdes[1] != STDIN_FILENO))
+- (void)_dup2(pdes[1], STDIN_FILENO);
+- } else {
+- if (pdes[0] != STDIN_FILENO) {
+- (void)_dup2(pdes[0], STDIN_FILENO);
+- (void)_close(pdes[0]);
+- }
+- (void)_close(pdes[1]);
+- }
+- for (p = pidlist; p; p = p->next) {
- (void)_close(fileno(p->fp));
-+ (void)_close(p->fd);
- }
- _execve(_PATH_BSHELL, argv, environ);
- _exit(127);
-@@ -149,9 +157,11 @@ popen(command, type)
- /* Parent; assume fdopen can't fail. */
+- }
+- _execve(_PATH_BSHELL, argv, environ);
+- _exit(127);
+- /* NOTREACHED */
++ } else if (err != 0) { /* couldn't exec the shell */
++ pid = -1;
+ }
+- THREAD_UNLOCK();
+
+- /* Parent; assume fdopen can't fail. */
if (*type == 'r') {
- iop = fdopen(pdes[0], type);
+- iop = fdopen(pdes[0], type);
+ cur->fd = pdes[0];
(void)_close(pdes[1]);
} else {
- iop = fdopen(pdes[1], type);
+- iop = fdopen(pdes[1], type);
+ cur->fd = pdes[1];
(void)_close(pdes[0]);
}
-@@ -162,7 +172,7 @@ popen(command, type)
+@@ -162,10 +203,11 @@ popen(command, type)
cur->next = pidlist;
pidlist = cur;
THREAD_UNLOCK();
return (iop);
}
++#ifndef BUILDING_VARIANT
+ /*
+ * pclose --
+ * Pclose returns -1 if stream is not associated with a `popened' command,
+@@ -198,6 +240,10 @@ pclose(iop)
+
+ (void)fclose(iop);
+
++ if (cur->pid < 0) {
++ free(cur);
++ return W_EXITCODE(127, 0);
++ }
+ do {
+ pid = _wait4(cur->pid, &pstat, 0, (struct rusage *)0);
+ } while (pid == -1 && errno == EINTR);
+@@ -206,3 +252,4 @@ pclose(iop)
+
+ return (pid == -1 ? -1 : pstat);
+ }
++#endif /* !BUILDING_VARIANT */
--- /dev/null
+--- scandir.3.orig 2009-05-12 11:21:55.000000000 -0700
++++ scandir.3 2009-05-20 15:41:07.000000000 -0700
+@@ -32,15 +32,16 @@
+ .\" @(#)scandir.3 8.1 (Berkeley) 6/4/93
+ .\" $FreeBSD: src/lib/libc/gen/scandir.3,v 1.8 2002/12/19 09:40:21 ru Exp $
+ .\"
+-.Dd June 4, 1993
++.Dd May 20, 2008
+ .Dt SCANDIR 3
+ .Os
+ .Sh NAME
+ .Nm scandir ,
++#ifdef UNIFDEF_BLOCKS
++.Nm scandir_b ,
++#endif
+ .Nm alphasort
+ .Nd scan a directory
+-.Sh LIBRARY
+-.Lb libc
+ .Sh SYNOPSIS
+ .In sys/types.h
+ .In dirent.h
+@@ -48,6 +49,10 @@
+ .Fn scandir "const char *dirname" "struct dirent ***namelist" "int \\*(lp*select\\*(rp\\*(lpstruct dirent *\\*(rp" "int \\*(lp*compar\\*(rp\\*(lpconst void *, const void *\\*(rp"
+ .Ft int
+ .Fn alphasort "const void *d1" "const void *d2"
++#ifdef UNIFDEF_BLOCKS
++.Ft int
++.Fn scandir_b "const char *dirname" "struct dirent ***namelist" "int \\*(lp^select\\*(rp\\*(lpstruct dirent *\\*(rp" "int \\*(lp^compar\\*(rp\\*(lpconst void *, const void *\\*(rp"
++#endif
+ .Sh DESCRIPTION
+ The
+ .Fn scandir
+@@ -80,6 +85,13 @@
+ .Xr qsort 3
+ to sort the completed array.
+ If this pointer is null, the array is not sorted.
++Note that from within the
++.Fa compar
++subroutine, the two arguments are of type
++.Ft const struct dirent ** ,
++so that a double-dereference is needed to access the fields in the
++.Ft dirent
++structure.
+ .Pp
+ The
+ .Fn alphasort
+@@ -91,6 +103,18 @@
+ The memory allocated for the array can be deallocated with
+ .Xr free 3 ,
+ by freeing each pointer in the array and then the array itself.
++#ifdef UNIFDEF_BLOCKS
++.Pp
++The
++.Fn scandir_b
++function works the same way as the
++.Fn scandir
++function, except that
++.Fa select
++and
++.Fa compar
++are blocks instead of subroutines.
++#endif
+ .Sh DIAGNOSTICS
+ Returns \-1 if the directory cannot be opened for reading or if
+ .Xr malloc 3
+@@ -107,3 +131,8 @@
+ .Fn alphasort
+ functions appeared in
+ .Bx 4.2 .
++#ifdef UNIFDEF_BLOCKS
++The
++.Fn scandir_b
++function appeared in Mac OS X 10.6.
++#endif
--- /dev/null
+--- scandir.c.orig 2008-07-28 02:49:02.000000000 -0700
++++ scandir.c 2008-07-29 12:02:49.000000000 -0700
+@@ -53,15 +53,11 @@ __FBSDID("$FreeBSD: src/lib/libc/gen/sca
+ #include "un-namespace.h"
+
+ /*
+- * The DIRSIZ macro is the minimum record length which will hold the directory
++ * The _GENERIC_DIRSIZ macro is the minimum record length which will hold the directory
+ * entry. This requires the amount of space in struct dirent without the
+ * d_name field, plus enough space for the name and a terminating nul byte
+ * (dp->d_namlen + 1), rounded up to a 4 byte boundary.
+ */
+-#undef DIRSIZ
+-#define DIRSIZ(dp) \
+- ((sizeof(struct dirent) - sizeof(dp)->d_name) + \
+- (((dp)->d_namlen + 1 + 3) &~ 3))
+
+ int
+ scandir(dirname, namelist, select, dcomp)
+@@ -96,7 +92,7 @@ scandir(dirname, namelist, select, dcomp
+ /*
+ * Make a minimum size copy of the data
+ */
+- p = (struct dirent *)malloc(DIRSIZ(d));
++ p = (struct dirent *)malloc(_GENERIC_DIRSIZ(d));
+ if (p == NULL)
+ goto fail;
+ p->d_fileno = d->d_fileno;
--- /dev/null
+--- scandir_b.c.orig 2008-07-29 12:03:05.000000000 -0700
++++ scandir_b.c 2008-07-29 12:03:31.000000000 -0700
+@@ -53,22 +53,18 @@ __FBSDID("$FreeBSD: src/lib/libc/gen/sca
+ #include "un-namespace.h"
+
+ /*
+- * The DIRSIZ macro is the minimum record length which will hold the directory
++ * The _GENERIC_DIRSIZ macro is the minimum record length which will hold the directory
+ * entry. This requires the amount of space in struct dirent without the
+ * d_name field, plus enough space for the name and a terminating nul byte
+ * (dp->d_namlen + 1), rounded up to a 4 byte boundary.
+ */
+-#undef DIRSIZ
+-#define DIRSIZ(dp) \
+- ((sizeof(struct dirent) - sizeof(dp)->d_name) + \
+- (((dp)->d_namlen + 1 + 3) &~ 3))
+
+ int
+-scandir(dirname, namelist, select, dcomp)
++scandir_b(dirname, namelist, select, dcomp)
+ const char *dirname;
+ struct dirent ***namelist;
+- int (*select)(struct dirent *);
+- int (*dcomp)(const void *, const void *);
++ int (^select)(struct dirent *);
++ int (^dcomp)(const void *, const void *);
+ {
+ struct dirent *d, *p, **names = NULL;
+ size_t nitems = 0;
+@@ -91,12 +87,12 @@ scandir(dirname, namelist, select, dcomp
+ goto fail;
+
+ while ((d = readdir(dirp)) != NULL) {
+- if (select != NULL && !(*select)(d))
++ if (select != NULL && !select(d))
+ continue; /* just selected names */
+ /*
+ * Make a minimum size copy of the data
+ */
+- p = (struct dirent *)malloc(DIRSIZ(d));
++ p = (struct dirent *)malloc(_GENERIC_DIRSIZ(d));
+ if (p == NULL)
+ goto fail;
+ p->d_fileno = d->d_fileno;
+@@ -125,7 +121,7 @@ scandir(dirname, namelist, select, dcomp
+ }
+ closedir(dirp);
+ if (nitems && dcomp != NULL)
+- qsort(names, nitems, sizeof(struct dirent *), dcomp);
++ qsort_b(names, nitems, sizeof(struct dirent *), dcomp);
+ *namelist = names;
+ return(nitems);
+
+@@ -136,15 +132,3 @@ fail:
+ closedir(dirp);
+ return -1;
+ }
+-
+-/*
+- * Alphabetic order comparison routine for those who want it.
+- */
+-int
+-alphasort(d1, d2)
+- const void *d1;
+- const void *d2;
+-{
+- return(strcmp((*(struct dirent **)d1)->d_name,
+- (*(struct dirent **)d2)->d_name));
+-}
--- /dev/null
+--- setmode.c.orig 2008-02-08 00:45:35.000000000 -0800
++++ setmode.c 2008-02-17 19:36:02.000000000 -0800
+@@ -70,12 +70,15 @@ typedef struct bitcmd {
+ #define CMD2_OBITS 0x08
+ #define CMD2_UBITS 0x10
+
++#define compress_mode _sm_compress_mode
++
+ static BITCMD *addcmd(BITCMD *, int, int, int, u_int);
+-static void compress_mode(BITCMD *);
++__private_extern__ void compress_mode(BITCMD *);
+ #ifdef SETMODE_DEBUG
+ static void dumpmode(BITCMD *);
+ #endif
+
++#ifndef BUILDING_VARIANT
+ /*
+ * Given the old mode and an array of bitcmd structures, apply the operations
+ * described in the bitcmd structures to the old mode, and return the new mode.
+@@ -151,6 +154,7 @@ common: if (set->cmd2 & CMD2_CLR) {
+ return (newmode);
+ }
+ }
++#endif /* BUILDING_VARIANT */
+
+ #define ADDCMD(a, b, c, d) \
+ if (set >= endset) { \
+@@ -169,7 +173,11 @@ common: if (set->cmd2 & CMD2_CLR) {
+ } \
+ set = addcmd(set, (a), (b), (c), (d))
+
++#ifndef VARIANT_LEGACY
++#define STANDARD_BITS (S_ISUID|S_ISGID|S_IRWXU|S_IRWXG|S_IRWXO|S_ISTXT)
++#else /* VARIANT_LEGACY */
+ #define STANDARD_BITS (S_ISUID|S_ISGID|S_IRWXU|S_IRWXG|S_IRWXO)
++#endif /* !VARIANT_LEGACY */
+
+ void *
+ setmode(p)
+@@ -211,12 +219,21 @@ setmode(p)
+ */
+ if (isdigit((unsigned char)*p)) {
+ perml = strtol(p, &ep, 8);
+- if (*ep || perml < 0 || perml & ~(STANDARD_BITS|S_ISTXT)) {
++#ifndef VARIANT_LEGACY
++ if (*ep || perml < 0 || perml & ~STANDARD_BITS)
++#else /* VARIANT_LEGACY */
++ if (*ep || perml < 0 || perml & ~(STANDARD_BITS|S_ISTXT))
++#endif /* !VARIANT_LEGACY */
++ {
+ free(saveset);
+ return (NULL);
+ }
+ perm = (mode_t)perml;
++#ifndef VARIANT_LEGACY
++ ADDCMD('=', STANDARD_BITS, perm, mask);
++#else /* VARIANT_LEGACY */
+ ADDCMD('=', (STANDARD_BITS|S_ISTXT), perm, mask);
++#endif /* !VARIANT_LEGACY */
+ set->cmd = 0;
+ return (saveset);
+ }
+@@ -253,7 +270,9 @@ getop: if ((op = *p++) != '+' && op !=
+ if (op == '=')
+ equalopdone = 0;
+
++#ifdef VARIANT_LEGACY
+ who &= ~S_ISTXT;
++#endif /* VARIANT_LEGACY */
+ for (perm = 0, permXbits = 0;; ++p) {
+ switch (*p) {
+ case 'r':
+@@ -267,7 +286,9 @@ getop: if ((op = *p++) != '+' && op !=
+ case 't':
+ /* If only "other" bits ignore sticky. */
+ if (!who || who & ~S_IRWXO) {
++#ifdef VARIANT_LEGACY
+ who |= S_ISTXT;
++#endif /* VARIANT_LEGACY */
+ perm |= S_ISTXT;
+ }
+ break;
+@@ -402,13 +423,14 @@ dumpmode(set)
+ }
+ #endif
+
++#ifndef BUILDING_VARIANT
+ /*
+ * Given an array of bitcmd structures, compress by compacting consecutive
+ * '+', '-' and 'X' commands into at most 3 commands, one of each. The 'u',
+ * 'g' and 'o' commands continue to be separate. They could probably be
+ * compacted, but it's not worth the effort.
+ */
+-static void
++__private_extern__ void
+ compress_mode(set)
+ BITCMD *set;
+ {
+@@ -457,3 +479,4 @@ compress_mode(set)
+ }
+ }
+ }
++#endif /* BUILDING_VARIANT */
---- setprogname.c.orig 2005-10-19 15:16:49.000000000 -0700
-+++ setprogname.c 2005-10-19 15:17:10.000000000 -0700
+--- setprogname.c.orig 2008-01-31 02:47:06.000000000 -0800
++++ setprogname.c 2008-01-31 03:07:50.000000000 -0800
@@ -3,6 +3,10 @@
#include <stdlib.h>
+ mib[1] = KERN_PROCNAME;
+
+ /* ignore errors as this is not a hard error */
-+ sysctl(mib, 2, NULL, NULL, &buf[0], 2*MAXCOMLEN);
++ sysctl(mib, 2, NULL, NULL, &buf[0], strlen(buf));
}
.It "HW_NCPU integer no"
.It "HW_BYTEORDER integer no"
.It "HW_PHYSMEM integer no"
+.It "HW_MEMSIZE integer no"
.It "HW_USERMEM integer no"
.It "HW_PAGESIZE integer no"
.It "HW_FLOATINGPOINT integer no"
.It Li HW_BYTEORDER
The byteorder (4,321, or 1,234).
.It Li HW_PHYSMEM
-The bytes of physical memory.
+The bytes of physical memory represented by a 32-bit integer (for backward compatibility). Use HW_MEMSIZE instead.
+.It Li HW_MEMSIZE
+The bytes of physical memory represented by a 64-bit integer.
.It Li HW_USERMEM
The bytes of non-kernel memory.
.It Li HW_PAGESIZE
---- sysctl.3.orig 2006-08-17 22:44:01.000000000 -0700
-+++ sysctl.3 2006-08-17 22:45:34.000000000 -0700
+--- sysctl.3.orig 2008-10-21 16:45:53.000000000 -0700
++++ sysctl.3 2008-10-22 09:33:13.000000000 -0700
+@@ -32,7 +32,7 @@
+ .\" @(#)sysctl.3 8.4 (Berkeley) 5/9/95
+ .\" $FreeBSD: src/lib/libc/gen/sysctl.3,v 1.63 2004/07/02 23:52:10 ru Exp $
+ .\"
+-.Dd January 23, 2001
++.Dd October 21, 2008
+ .Dt SYSCTL 3
+ .Os
+ .Sh NAME
@@ -182,13 +182,21 @@
}
.Ed
.El
.Pp
For example, the following retrieves the maximum number of processes allowed
-@@ -453,16 +462,6 @@
+@@ -456,16 +465,6 @@
.It "KERN_PROC_UID A user ID"
.It "KERN_PROC_RUID A real user ID"
.El
.It Li KERN_PROF
Return profiling information about the kernel.
If the kernel is not compiled for profiling,
-@@ -845,7 +844,7 @@
+@@ -731,7 +730,6 @@
+ .Bl -column "Second level nameXXXXXX" "struct loadavgXXX" -offset indent
+ .It Sy "Second level name Type Changeable"
+ .It "VM_LOADAVG struct loadavg no"
+-.It "VM_METER struct vmtotal no"
+ .It "VM_PAGEOUT_ALGORITHM integer yes"
+ .It "VM_SWAPPING_ENABLED integer maybe"
+ .It "VM_V_CACHE_MAX integer yes"
+@@ -748,10 +746,6 @@
+ Return the load average history.
+ The returned data consists of a
+ .Va struct loadavg .
+-.It Li VM_METER
+-Return the system wide virtual memory statistics.
+-The returned data consists of a
+-.Va struct vmtotal .
+ .It Li VM_PAGEOUT_ALGORITHM
+ 0 if the statistics-based page management algorithm is in use
+ or 1 if the near-LRU algorithm is in use.
+@@ -848,7 +842,7 @@
definitions for second level network identifiers
.It In sys/gmon.h
definitions for third level profiling identifiers
---- ttyname.c.orig 2007-02-05 14:38:48.000000000 -0800
-+++ ttyname.c 2007-02-05 14:40:13.000000000 -0800
-@@ -48,10 +48,12 @@
+--- ttyname.c.orig 2008-10-09 21:30:31.000000000 -0700
++++ ttyname.c 2008-10-09 22:00:10.000000000 -0700
+@@ -48,10 +48,12 @@ __FBSDID("$FreeBSD: src/lib/libc/gen/tty
#include <string.h>
#include <paths.h>
#include <pthread.h>
static char buf[sizeof(_PATH_DEV) + MAXNAMLEN];
static char *ttyname_threaded(int fd);
static char *ttyname_unthreaded(int fd);
-@@ -59,6 +61,7 @@
- static pthread_mutex_t ttyname_lock = PTHREAD_MUTEX_INITIALIZER;
- static pthread_key_t ttyname_key;
- static int ttyname_init = 0;
-+extern int __pthread_tsd_first;
-
- char *
- ttyname(int fd)
-@@ -71,31 +74,63 @@
+@@ -71,31 +73,63 @@ ttyname(int fd)
ret = ttyname_threaded(fd);
return (ret);
}
static char *
ttyname_threaded(int fd)
{
-@@ -104,8 +139,12 @@
+@@ -104,8 +138,12 @@ ttyname_threaded(int fd)
if (ttyname_init == 0) {
_pthread_mutex_lock(&ttyname_lock);
if (ttyname_init == 0) {
- if (_pthread_key_create(&ttyname_key, free)) {
+ /* __PTK_LIBC_TTYNAME_KEY */
-+ ttyname_key = __pthread_tsd_first+1;
++ ttyname_key = __LIBC_PTHREAD_KEY_TTYNAME;
+ if (pthread_key_init_np(ttyname_key, free)) {
+ int save = errno;
_pthread_mutex_unlock(&ttyname_lock);
return (NULL);
}
ttyname_init = 1;
-@@ -117,14 +156,20 @@
+@@ -117,14 +155,20 @@ ttyname_threaded(int fd)
if ((buf = _pthread_getspecific(ttyname_key)) == NULL) {
if ((buf = malloc(sizeof(_PATH_DEV) + MAXNAMLEN)) != NULL) {
if (_pthread_setspecific(ttyname_key, buf) != 0) {
}
static char *
-@@ -137,11 +182,19 @@
+@@ -137,11 +181,19 @@ ttyname_unthreaded(int fd)
if (tcgetattr(fd, &ttyb) < 0)
return (NULL);
/* Must be a character device. */
GENMIGHDRS += ${GENMIGDEFS:.defs=.h}
GENMIGSRCS += ${GENMIGDEFS:.defs=User.c}
-MISRCS += ${GENMIGSRCS} NSSystemDirectories.c OSSystemInfo.c \
+MISRCS += ${GENMIGSRCS} NSSystemDirectories.c \
asl.c asl_core.c asl_file.c asl_legacy1.c asl_store.c asl_util.c \
backtrace.c \
cache.c confstr.c crypt.c devname.c disklabel.c errlst.c \
filesec.c fts.c \
get_compat.c getloadavg.c getttyent.c getusershell.c getvfsbyname.c \
isinf.c isnan.c \
- malloc.c nanosleep.c nftw.c nlist.c scalable_malloc.c setlogin.c \
+ malloc.c nanosleep.c nftw.c magazine_malloc.c setlogin.c \
sigsetops.c _simple.c stack_logging.c stack_logging_disk.c strtofflags.c syslog.c \
thread_stack_pcs.c uname.c utmpx-darwin.c wordexp.c
.ifdef FEATURE_LEGACY_NXZONE_APIS
MISRCS += zone.c
.endif
+.if !defined(LP64)
+MISRCS += nlist.c
+.endif
+# DTrace USDT probes placed in magazine_malloc.c and malloc.c
+${SYMROOTINC}/magmallocProvider.h: ${.CURDIR}/gen/magmallocProvider.d
+ ${DTRACE} -o ${.TARGET} -C -h -s ${.ALLSRC}
+magazine_malloc.${OBJSUFFIX}: ${SYMROOTINC}/magmallocProvider.h
+malloc.${OBJSUFFIX}: ${SYMROOTINC}/magmallocProvider.h
+
# Force these files to build after the mig stuff
asl.${OBJSUFFIX}: asl_ipcUser.c
+syslog.${OBJSUFFIX}: asl_ipcUser.c
utmpx-darwin.${OBJSUFFIX}: asl_ipcUser.c
CLEANFILES += ${GENMIGHDRS} ${GENMIGSRCS} ${GENMIGDEFS:.defs=Server.c}
${.CURDIR}/gen/asl_private.h ${.CURDIR}/gen/asl_store.h \
${.CURDIR}/gen/_simple.h ${.CURDIR}/gen/stack_logging.h
+CFLAGS-asl_file.c += -D_DARWIN_UNLIMITED_STREAMS
+CFLAGS-asl_legacy1.c += -D_DARWIN_UNLIMITED_STREAMS
+CFLAGS-asl_store.c += -D_DARWIN_UNLIMITED_STREAMS
CFLAGS-confstr.c += -I${.CURDIR}/darwin
-
+CFLAGS-fmtmsg-fbsd.c += -D_DARWIN_UNLIMITED_STREAMS
+CFLAGS-getcap-fbsd.c += -D_DARWIN_UNLIMITED_STREAMS
+CFLAGS-getttyent.c += -D_DARWIN_UNLIMITED_STREAMS
+CFLAGS-getusershell.c += -D_DARWIN_UNLIMITED_STREAMS
CFLAGS-glob-fbsd.c += -UDEBUG
# 4840357: workaround for compiler failure building libc_debug.a
.if make(lib${LIB}_debug.a)
-CFLAGS-scalable_malloc.c += -funit-at-a-time
+CFLAGS-magazine_malloc.c += -funit-at-a-time
.endif
# Malloc uses count leading zeroes, not available in thumb
-.if (${MACHINE_ARCH} == arm)
-CFLAGS-scalable_malloc.c += -mno-thumb
-.endif
+#.if (${MACHINE_ARCH} == arm)
+#CFLAGS-scalable_malloc.c += -mno-thumb
+#.endif
+
+CFLAGS-stack_logging_disk.c += -D_DARWIN_UNLIMITED_STREAMS
+CFLAGS-utmpx-nbsd.c += -D_DARWIN_UNLIMITED_STREAMS
+CFLAGS-wordexp.c += -D_DARWIN_UNLIMITED_STREAMS
# also build 64-bit long double versions (ppc only)
LDBLSRCS += asl.c err.c syslog.c
.ifdef FEATURE_LEGACY_64_APIS
# special case: getmntinfo64-fbsd.c is derived from getmntinfo.c with getmntinfo64.c.patch
.ifmake autopatch
-
-# This .for statement forces evaluation of ${CWD}
-.for _cwd in ${CWD}
+.for _cwd in ${CWD} # This .for statement forces evaluation of ${CWD}
AUTOPATCHSRCS+= ${_cwd}/getmntinfo64-fbsd.c
${_cwd}/getmntinfo64-fbsd.c: ${_cwd}/FreeBSD/getmntinfo.c
- cp ${.ALLSRC} ${.TARGET}
- patch ${.TARGET} ${.ALLSRC:S/getmntinfo/getmntinfo64/}.patch
+ ${CP} ${.ALLSRC} ${.TARGET}
+ ${PATCH} ${.TARGET} ${.ALLSRC:S/getmntinfo/getmntinfo64/}.patch
.endfor # _cwd
-
.else # !autopatch
MISRCS+= getmntinfo64.c
.endif # autopatch
.endif # FEATURE_LEGACY_64_APIS
+# special case: scandir_b-fbsd.c is derived from scandir.c with scandir_b.c.patch
+.ifmake autopatch
+.for _cwd in ${CWD} # This .for statement forces evaluation of ${CWD}
+AUTOPATCHSRCS+= ${_cwd}/scandir_b-fbsd.c
+${_cwd}/scandir_b-fbsd.c: ${_cwd}/FreeBSD/scandir.c
+ ${CP} ${.ALLSRC} ${.TARGET}
+ ${PATCH} ${.TARGET} ${.ALLSRC:S/scandir/scandir_b/}.patch
+.endfor # _cwd
+.else # !autopatch
+.ifdef FEATURE_BLOCKS
+MISRCS+= scandir_b.c
+.endif # FEATURE_BLOCKS
+.endif # autopatch
+
PRE1050SRCS+= daemon.c
.if defined(LP64)
PRE1050SRCS+= pselect.c
LEGACYSRCS += clock.c closedir.c confstr.c crypt.c fnmatch.c \
lockf.c nanosleep.c nftw.c nice.c opendir.c \
- pause.c pselect.c rewinddir.c \
- seekdir.c sleep.c telldir.c termios.c timezone.c ttyname.c \
+ pause.c popen.c pselect.c rewinddir.c \
+ seekdir.c setmode.c sleep.c \
+ telldir.c termios.c timezone.c ttyname.c \
usleep.c wait.c waitpid.c
INODE32SRCS += fts.c getmntinfo.c glob.c nftw.c opendir.c \
readdir.c rewinddir.c scandir.c seekdir.c telldir.c
+.ifdef FEATURE_BLOCKS
+INODE32SRCS += scandir_b.c
+.endif # FEATURE_BLOCKS
CANCELABLESRCS += lockf.c nanosleep.c pause.c pselect.c sleep.c termios.c \
usleep.c wait.c waitpid.c
CANCELABLE-DARWINEXTSNSRCS += pselect.c
-DARWINEXTSNSRCS += pselect.c
+DARWINEXTSNSRCS += popen.c pselect.c
+DYLDSRCS += _simple.c
# include __dirent.h to rename DIR structure elements
.for _src in closedir-fbsd.c opendir-fbsd.c readdir-fbsd.c rewinddir-fbsd.c \
scandir-fbsd.c seekdir-fbsd.c telldir-fbsd.c
CFLAGS-${_src} += -I${.CURDIR}/gen -include __dirent.h
.endfor
+.ifdef FEATURE_BLOCKS
+CFLAGS-scandir_b-fbsd.c += -I${.CURDIR}/gen -include __dirent.h
+.endif # FEATURE_BLOCKS
-# ppc optimizer hacks:
-#drand48-fbsd.c is better with -O1 than -Os.
-#erand48-fbsd.c is better with -O3 than -Os.
+# 4105292/4329702 different optimizations improves performance of
+# drand48 and erand48
.if !make(lib${LIB}_debug.a)
-.if (${MACHINE_ARCH} == ppc)
+.if (${MACHINE_ARCH} == i386)
OPTIMIZE-drand48-fbsd.c += -O1
-OPTIMIZE-erand48-fbsd.c += -O3
+OPTIMIZE-erand48-fbsd.c += -O1
.endif
.endif
CFLAGS-confstr.c += -DLIBC_ALIAS_CONFSTR
CFLAGS-crypt.c += -DLIBC_ALIAS_ENCRYPT -DLIBC_ALIAS_SETKEY
CFLAGS-fnmatch-fbsd.c += -DLIBC_ALIAS_FNMATCH
-CFLAGS-fts.c += -DLIBC_ALIAS_FTS_CHILDREN -DLIBC_ALIAS_FTS_CLOSE -DLIBC_ALIAS_FTS_OPEN -DLIBC_ALIAS_FTS_READ -DLIBC_ALIAS_FTS_SET
+CFLAGS-fts.c += -DLIBC_ALIAS_FTS_CHILDREN -DLIBC_ALIAS_FTS_CLOSE -DLIBC_ALIAS_FTS_OPEN -DLIBC_ALIAS_FTS_OPEN_B -DLIBC_ALIAS_FTS_READ -DLIBC_ALIAS_FTS_SET
CFLAGS-glob-fbsd.c += -DLIBC_ALIAS_GLOB
+CFLAGS-glob_b-fbsd.c += -DLIBC_ALIAS_GLOB_B
CFLAGS-lockf-fbsd.c += -DLIBC_ALIAS_LOCKF
CFLAGS-nanosleep.c += -DLIBC_ALIAS_NANOSLEEP
CFLAGS-nftw.c += -DLIBC_ALIAS_FTW -DLIBC_ALIAS_NFTW
CFLAGS-opendir-fbsd.c += -DLIBC_ALIAS___OPENDIR2 -DLIBC_ALIAS_OPENDIR
CFLAGS-rewinddir-fbsd.c += -DLIBC_ALIAS_REWINDDIR
CFLAGS-pause-fbsd.c += -DLIBC_ALIAS_PAUSE
+CFLAGS-popen-fbsd.c += -DLIBC_ALIAS_POPEN
CFLAGS-pselect-fbsd.c += -DLIBC_ALIAS_PSELECT
CFLAGS-seekdir-fbsd.c += -DLIBC_ALIAS_SEEKDIR
+CFLAGS-setmode-fbsd.c += -DLIBC_ALIAS_SETMODE
CFLAGS-sleep-fbsd.c += -DLIBC_ALIAS_SLEEP
CFLAGS-telldir-fbsd.c += -DLIBC_ALIAS__SEEKDIR -DLIBC_ALIAS_TELLDIR
CFLAGS-termios-fbsd.c += -DLIBC_ALIAS_TCDRAIN
confstr.3 crypt.3 devname.3 directory.3 fts.3 ftw.3 \
getdomainname.3 getloadavg.3 \
getttyent.3 getusershell.3 getvfsbyname.3 \
- malloc.3 malloc_size.3 nlist.3 \
+ malloc.3 malloc_size.3 malloc_zone_malloc.3 nlist.3 posix_memalign.3 \
pwcache.3 setjmp.3 sigsetops.3 strtofflags.3 syslog.3 \
tcgetpgrp.3 tcsendbreak.3 tcsetattr.3 tcsetpgrp.3 tzset.3 \
uname.3 wordexp.3 intro.3
getbsize.3 getcap.3 getcontext.3 getcwd.3 \
gethostname.3 getmntinfo.3 getpagesize.3 getpass.3 \
getpeereid.3 getprogname.3 glob.3 \
- isgreater.3 lockf.3 makecontext.3 nice.3 \
+ lockf.3 makecontext.3 nice.3 \
pause.3 popen.3 pselect.3 psignal.3 \
raise.3 rand48.3 readpassphrase.3 \
scandir.3 setmode.3 \
err.3 warn.3 \
err.3 warnc.3 \
err.3 warnx.3
+.ifdef FEATURE_BLOCKS
+MLINKS+= err.3 err_set_exit_b.3
+.endif # FEATURE_BLOCKS
MLINKS+= exec.3 execl.3 \
exec.3 execle.3 \
fts.3 fts_open.3 \
fts.3 fts_read.3 \
fts.3 fts_set.3
+.ifdef FEATURE_BLOCKS
+MLINKS+= fts.3 fts_open_b.3
+.endif # FEATURE_BLOCKS
MLINKS+= ftw.3 nftw.3
MLINKS+= glob.3 globfree.3
-MLINKS+= isgreater.3 isgreaterequal.3 \
- isgreater.3 isless.3 \
- isgreater.3 islessequal.3 \
- isgreater.3 islessgreater.3 \
- isgreater.3 isunordered.3
-
MLINKS+= malloc.3 calloc.3 \
malloc.3 free.3 \
malloc.3 realloc.3 \
MLINKS+= malloc_size.3 malloc_good_size.3
+MLINKS+= malloc_zone_malloc.3 malloc_create_zone.3 \
+ malloc_zone_malloc.3 malloc_destroy_zone.3 \
+ malloc_zone_malloc.3 malloc_default_zone.3 \
+ malloc_zone_malloc.3 malloc_zone_from_ptr.3 \
+ malloc_zone_malloc.3 malloc_zone_calloc.3 \
+ malloc_zone_malloc.3 malloc_zone_valloc.3 \
+ malloc_zone_malloc.3 malloc_zone_realloc.3 \
+ malloc_zone_malloc.3 malloc_zone_memalign.3 \
+ malloc_zone_malloc.3 malloc_zone_free.3
+
MLINKS+= popen.3 pclose.3
MLINKS+= psignal.3 sys_siglist.3 \
rand48.3 srand48.3
MLINKS+= scandir.3 alphasort.3
+.ifdef FEATURE_BLOCKS
+MLINKS+= scandir.3 scandir_b.3
+.endif # FEATURE_BLOCKS
MLINKS+= strtofflags.3 fflagstostr.3
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
*
* @APPLE_LICENSE_HEADER_END@
*/
-#import <libc.h>
-#import <stdio.h>
-#import <stdlib.h>
-#import <NSSystemDirectories.h>
-
-// Names of directories; index into this with NSSearchPathDirectory - 1
-#define numDirs 15
-static const struct {
- unsigned char invalidDomainMask; // Domains in which this dir does not appear
- unsigned char alternateDomainMask; // Domains in which this dir uses the alternate domain path
- const char *dirPath;
-} dirInfo[numDirs] = {
- {0, 0, "Applications"},
- {0, 0, "Applications/Demos"},
- {0, 0, "Developer/Applications"},
- {0, 0, "Applications/Utilities"},
- {0, 0x8, "Library"}, // Uses alternate form in System domain
- {0, 0, "Developer"},
- {0x9, 0, "Users"}, // Not valid in the System and User domains
- {0, 0x8, "Library/Documentation"}, // Uses alternate form in System domain
- {0xe, 0, "Documents"}, // Only valid in user domain
- {0x7, 0, "Library/CoreServices"}, // Only valid in System domain
- {0xe, 0, "Documents/Autosaved"}, // Only valid in user domain; not public API yet
- {0xe, 0, "Desktop"}, // Only valid in user domain
- {0, 0, "Library/Caches"},
- {0, 0, "Library/Application Support"},
- {0xe, 0, "Downloads"}, // Only valid in user domain
-};
-
-// Unpublicized values for NSSearchPathDirectory
-enum {
- NSAutosavedDocumentsDirectory = 11
-};
-
-// Ordered list of where to find applications in each domain (the numbers are NSSearchPathDirectory)
-#define numApplicationDirs 4
-static const char applicationDirs[numApplicationDirs] = {1, 4, 3, 2};
-
-// Ordered list of where to find resources in each domain (the numbers are NSSearchPathDirectory)
-#define numLibraryDirs 2
-static const char libraryDirs[numLibraryDirs] = {5, 6};
-
-// Names of domains; index into this log2(domainMask). If the search path ordering is ever changed, then we need an indirection (as the domainMask values cannot be changed).
-#define numDomains 4
-static const struct {
- char needsRootPrepended;
- const char *domainPath;
- const char *alternateDomainPath;
-} domainInfo[numDomains] = {
- {0, "~", "~"},
- {1, "", ""},
- {1, "/Network", "/Network"},
- {1, "", "/System"}
-};
-
-#define invalidDomains 0x00 // some domains may be invalid on non-Mach systems
+#include <NSSystemDirectories.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/param.h>
+#include <unistd.h>
+
+#define NSUserDomainIndex 0
+#define NSLocalDomainIndex 1
+#define NSNetworkDomainIndex 2
+#define NSSystemDomainIndex 3
+
+#define numDomains (NSSystemDomainIndex + 1)
+#define DomainMask ((1 << numDomains) - 1)
+
+#define addNextRoot(x) (*(x) == '/' || *(x) == 0)
+
+#define Network "/Network"
+#define System "/System"
+#define Tilde "~"
+
+#define NSApplicationDirectoryBase "/Applications"
+#define NSDemoApplicationDirectoryBase "/Applications/Demos"
+#define NSDeveloperApplicationDirectoryBase "/Developer/Applications"
+#define NSAdminApplicationDirectoryBase "/Applications/Utilities"
+#define NSLibraryDirectoryBase "/Library"
+#define NSDeveloperDirectoryBase "/Developer"
+#define NSUserDirectoryBase "/Users"
+#define NSDocumentationDirectoryBase "/Library/Documentation"
+#define NSDocumentDirectoryBase "/Documents"
+#define NSCoreServiceDirectoryBase "/Library/CoreServices"
+#define NSAutosavedDocumentsDirectoryBase "/Library/Autosave Information"
+#define NSDesktopDirectoryBase "/Desktop"
+#define NSCachesDirectoryBase "/Library/Caches"
+#define NSInputMethodsDirectoryBase "/Library/Input Methods"
+#define NSMoviesDirectoryBase "/Movies"
+#define NSMusicDirectoryBase "/Music"
+#define NSPicturesDirectoryBase "/Pictures"
+#define NSPrinterDescriptionDirectoryBase "/Library/Printers/PPDs"
+#define NSSharedPublicDirectoryBase "/Public"
+#define NSPreferencePanesDirectoryBase "/Library/PreferencePanes"
+#define NSApplicationSupportDirectoryBase "/Library/Application Support"
+#define NSDownloadsDirectoryBase "/Downloads"
+
+static const char * const prefixAll[] = {
+ Tilde,
+ "",
+ Network,
+ ""
+};
+static const char * const prefixAllSystem[] = {
+ Tilde,
+ "",
+ Network,
+ System
+};
+static const char * const prefixNoUserSystem[] = {
+ NULL,
+ "",
+ Network,
+ NULL
+};
+static const char * const prefixNoNetwork[] = {
+ Tilde,
+ "",
+ NULL,
+ System
+};
+static const char * const prefixSystemOnly[] = {
+ NULL,
+ NULL,
+ NULL,
+ System
+};
+static const char * const prefixUserOnly[] = {
+ Tilde,
+ NULL,
+ NULL,
+ NULL
+};
+
+static const char * const _prefixNetwork4[] = {
+ Network,
+ Network,
+ Network,
+ Network
+};
+static const char * const _prefixNone4[] = {
+ "",
+ "",
+ "",
+ ""
+};
+static const char * const _prefixTilde4[] = {
+ Tilde,
+ Tilde,
+ Tilde,
+ Tilde
+};
+static const char * const * const prefixAllApplicationsDirectory[] = {
+ _prefixTilde4,
+ _prefixNone4,
+ _prefixNetwork4,
+ _prefixNone4
+};
+static const char * const baseAllApplicationsDirectory[] = {
+ NSApplicationDirectoryBase,
+ NSAdminApplicationDirectoryBase,
+ NSDeveloperApplicationDirectoryBase,
+ NSDemoApplicationDirectoryBase
+};
+
+static const char * const _prefixNetwork2[] = {
+ Network,
+ Network
+};
+static const char * const _prefixNone2[] = {
+ "",
+ ""
+};
+static const char * const _prefixSystemNone2[] = {
+ System,
+ ""
+};
+static const char * const _prefixTilde2[] = {
+ Tilde,
+ Tilde
+};
+static const char * const * const prefixAllLibrariesDirectory[] = {
+ _prefixTilde2,
+ _prefixNone2,
+ _prefixNetwork2,
+ _prefixSystemNone2
+};
+static const char * const baseAllLibrariesDirectory[] = {
+ NSLibraryDirectoryBase,
+ NSDeveloperDirectoryBase
+};
+
+// The dirInfo table drives path creation
+static struct {
+ int pathsPerDomain;
+ const void * const * const prefix;
+ const void * const base;
+} dirInfo[] = {
+ { // NSApplicationDirectory
+ 1,
+ (const void * const * const)prefixAll,
+ (const void * const)NSApplicationDirectoryBase
+ },
+ { // NSDemoApplicationDirectory
+ 1,
+ (const void * const * const)prefixAll,
+ (const void * const)NSDemoApplicationDirectoryBase
+ },
+ { // NSDeveloperApplicationDirectory
+ 1,
+ (const void * const * const)prefixAll,
+ (const void * const)NSDeveloperApplicationDirectoryBase
+ },
+ { // NSAdminApplicationDirectory
+ 1,
+ (const void * const * const)prefixAll,
+ (const void * const)NSAdminApplicationDirectoryBase
+ },
+ { // NSLibraryDirectory
+ 1,
+ (const void * const * const)prefixAllSystem,
+ (const void * const)NSLibraryDirectoryBase
+ },
+ { // NSDeveloperDirectory
+ 1,
+ (const void * const * const)prefixAll,
+ (const void * const)NSDeveloperDirectoryBase
+ },
+ { // NSUserDirectory
+ 1,
+ (const void * const * const)prefixNoUserSystem,
+ (const void * const)NSUserDirectoryBase
+ },
+ { // NSDocumentationDirectory
+ 1,
+ (const void * const * const)prefixAllSystem,
+ (const void * const)NSDocumentationDirectoryBase
+ },
+ { // NSDocumentDirectory
+ 1,
+ (const void * const * const)prefixUserOnly,
+ (const void * const)NSDocumentDirectoryBase
+ },
+ { // NSCoreServiceDirectory
+ 1,
+ (const void * const * const)prefixSystemOnly,
+ (const void * const)NSCoreServiceDirectoryBase
+ },
+ { // NSAutosavedInformationDirectory
+ 1,
+ (const void * const * const)prefixUserOnly,
+ (const void * const)NSAutosavedDocumentsDirectoryBase
+ },
+ { // NSDesktopDirectory
+ 1,
+ (const void * const * const)prefixUserOnly,
+ (const void * const)NSDesktopDirectoryBase
+ },
+ { // NSCachesDirectory
+ 1,
+ (const void * const * const)prefixAll,
+ (const void * const)NSCachesDirectoryBase
+ },
+ { // NSApplicationSupportDirectory
+ 1,
+ (const void * const * const)prefixAll,
+ (const void * const)NSApplicationSupportDirectoryBase
+ },
+ { // NSDownloadsDirectory
+ 1,
+ (const void * const * const)prefixUserOnly,
+ (const void * const)NSDownloadsDirectoryBase
+ },
+ { // NSInputMethodsDirectory
+ 1,
+ (const void * const * const)prefixAll,
+ (const void * const)NSInputMethodsDirectoryBase
+ },
+ { // NSMoviesDirectory
+ 1,
+ (const void * const * const)prefixUserOnly,
+ (const void * const)NSMoviesDirectoryBase
+ },
+ { // NSMusicDirectory
+ 1,
+ (const void * const * const)prefixUserOnly,
+ (const void * const)NSMusicDirectoryBase
+ },
+ { // NSPicturesDirectory
+ 1,
+ (const void * const * const)prefixUserOnly,
+ (const void * const)NSPicturesDirectoryBase
+ },
+ { // NSPrinterDescriptionDirectory
+ 1,
+ (const void * const * const)prefixSystemOnly,
+ (const void * const)NSPrinterDescriptionDirectoryBase
+ },
+ { // NSSharedPublicDirectory
+ 1,
+ (const void * const * const)prefixUserOnly,
+ (const void * const)NSSharedPublicDirectoryBase
+ },
+ { // NSPreferencePanesDirectory
+ 1,
+ (const void * const * const)prefixNoNetwork,
+ (const void * const)NSPreferencePanesDirectoryBase
+ },
+ { // NSAllApplicationsDirectory
+ 4,
+ (const void * const * const)prefixAllApplicationsDirectory,
+ (const void * const)baseAllApplicationsDirectory
+ },
+ { // NSAllLibrariesDirectory
+ 2,
+ (const void * const * const)prefixAllLibrariesDirectory,
+ (const void * const)baseAllLibrariesDirectory
+ }
+};
+
+#define Index(dir) (((dir) >= NSApplicationDirectory && (dir) <= NSPreferencePanesDirectory) ? ((dir) - 1) : (((dir) >= NSAllApplicationsDirectory && (dir) <= NSAllLibrariesDirectory) ? ((dir) - NSAllApplicationsDirectory + NSPreferencePanesDirectory) : -1))
+
+#define invalidDomains 0x00 // some domains may be invalid on non-Mach systems
+#define ByteMask 0xff
+#define DirShift 24
+#define IndexShift 16
NSSearchPathEnumerationState NSStartSearchPathEnumeration(NSSearchPathDirectory dir, NSSearchPathDomainMask domainMask) {
// The state is AABBCCCC, where
// BB is the current state of dirs (if AA < 100, then this is always 0; otherwise it goes up to number of dirs)
// CCCC is the domains requested
// the state always contains the next item; if CCCC is 0, then we're done
- domainMask = domainMask & ((1 << numDomains) - 1) & ~invalidDomains; // Just leave useful bits in there
- if (dir != NSAllLibrariesDirectory && dir != NSLibraryDirectory && dir != NSUserDirectory && dir != NSDocumentationDirectory && (domainMask & NSLocalDomainMask) && (domainMask & NSSystemDomainMask)) domainMask = domainMask & ~NSSystemDomainMask; // Hack to avoid duplication
- return (((unsigned int)dir) << 24) + ((unsigned int)domainMask);
+ int i;
+
+ if((i = Index(dir)) < 0) {
+ return 0;
+ }
+ domainMask = domainMask & DomainMask & ~invalidDomains; // Just leave useful bits in there
+
+ // Trim Duplicates - This assumes the compiler generates a single address
+ // for multiple occurrences of the same literal strings.
+ if ((domainMask & (NSLocalDomainMask | NSSystemDomainMask)) == (NSLocalDomainMask | NSSystemDomainMask) && dirInfo[i].prefix[NSLocalDomainIndex] == dirInfo[i].prefix[NSSystemDomainIndex]) {
+ domainMask &= ~NSSystemDomainMask;
+ }
+
+ return (dir << DirShift) + domainMask;
}
NSSearchPathEnumerationState NSGetNextSearchPathEnumeration(NSSearchPathEnumerationState state, char *path) {
static const char *nextRoot = NULL;
- unsigned dir = (state >> 24) & 0xff;
- unsigned dirState = (state >> 16) & 0xff;
- unsigned domainMask = state & 0xffff;
- unsigned int curDomain; // The current domain we're at...
- unsigned int curDir = 0; // The current dir...
+ int dir = (state >> DirShift) & ByteMask;
+ int domainMask = state & DomainMask;
+ int domain, i, n;
+ const char *prefix, *base;
- do {
- if (domainMask == 0) return 0; // Looks like we're done
- for (curDomain = 0; curDomain < numDomains; curDomain++) if ((domainMask & (1 << curDomain))) break;
-
- // Determine directory
- if (dir < NSAllApplicationsDirectory) { // One directory per domain, simple...
- curDir = dir;
- } else { // Can return multiple directories for each domain
- if (dir == NSAllApplicationsDirectory) {
- curDir = applicationDirs[dirState];
- if (++dirState == numApplicationDirs) dirState = 0;
- } else if (dir == NSAllLibrariesDirectory) {
- curDir = libraryDirs[dirState];
- if (++dirState == numLibraryDirs) dirState = 0;
- }
- }
- if (dirState == 0) domainMask &= ~(1 << curDomain); // If necessary, jump to next domain
- } while ((dirInfo[curDir - 1].invalidDomainMask & (1 << curDomain))); // If invalid, try again...
-
- // Get NEXT_ROOT, if necessary.
- if (domainInfo[curDomain].needsRootPrepended && nextRoot == 0) {
- if (!issetugid() && (nextRoot = getenv("NEXT_ROOT")) != NULL) {
- nextRoot = strdup(nextRoot);
+ if ((i = Index(dir)) < 0 || (domain = ffs(domainMask)) == 0)
+ return 0;
+ domain--; // adjust to zero-based index
+
+ if ((n = dirInfo[i].pathsPerDomain) == 1) {
+ const char * const *p = (const char * const *)dirInfo[i].prefix;
+ for (;;) { // loop, skipping over invalid domains (prefix is NULL)
+ domainMask &= ~(1 << domain);
+ if ((prefix = p[domain]) != NULL) {
+ break;
+ }
+ if ((domain = ffs(domainMask)) == 0) {
+ return 0;
+ }
+ domain--; // adjust to zero-based index
+ }
+ base = (const char *)dirInfo[i].base;
+ state = (dir << DirShift) + domainMask;
+ } else { // multiple paths per domain
+ const char * const **p = (const char * const **)dirInfo[i].prefix;
+ const char * const *b = (const char * const *)dirInfo[i].base;
+ int dirIndex = (state >> IndexShift) & ByteMask;
+
+ if (dirIndex >= n) { // done with the current domain, go to the next
+ domainMask &= ~(1 << domain);
+ if ((domain = ffs(domainMask)) == 0) {
+ return 0;
+ }
+ domain--; // adjust to zero-based index
+ dirIndex = 0;
}
- if (nextRoot == NULL) {
- nextRoot = "";
- }
+ prefix = p[domain][dirIndex];
+ base = b[dirIndex];
+ state = (dir << DirShift) + (++dirIndex << IndexShift) + domainMask;
}
- snprintf(path, PATH_MAX, "%s%s/%s", domainInfo[curDomain].needsRootPrepended ? nextRoot : "", (dirInfo[curDir - 1].alternateDomainMask & (1 << curDomain)) ? domainInfo[curDomain].alternateDomainPath : domainInfo[curDomain].domainPath, dirInfo[curDir - 1].dirPath);
-
- return (dir << 24) + (dirState << 16) + domainMask;
+ if (addNextRoot(prefix)) {
+ if (nextRoot == NULL) { // Get NEXT_ROOT
+ if (!issetugid() && (nextRoot = getenv("NEXT_ROOT")) != NULL) {
+ nextRoot = strdup(nextRoot);
+ }
+ if (nextRoot == NULL) {
+ nextRoot = "";
+ }
+ }
+ strlcpy(path, nextRoot, PATH_MAX);
+ } else {
+ *path = 0;
+ }
+ strlcat(path, prefix, PATH_MAX);
+ strlcat(path, base, PATH_MAX);
+
+ return state;
}
-
-
---- utmpx.c.orig 2008-07-17 11:34:16.000000000 -0700
-+++ utmpx.c 2008-07-17 12:05:58.000000000 -0700
-@@ -49,34 +49,29 @@ __RCSID("$NetBSD: utmpx.c,v 1.21 2003/09
+--- utmpx.c.orig 2009-04-01 04:01:12.000000000 -0700
++++ utmpx.c 2009-04-01 04:09:50.000000000 -0700
+@@ -49,48 +49,57 @@ __RCSID("$NetBSD: utmpx.c,v 1.21 2003/09
#include <sys/time.h>
#include <sys/wait.h>
static char utfile[MAXPATHLEN] = _PATH_UTMPX;
-static char llfile[MAXPATHLEN] = _PATH_LASTLOGX;
+__private_extern__ int utfile_system = 1; /* are we using _PATH_UTMPX? */
++__private_extern__ pthread_mutex_t utmpx_mutex = PTHREAD_MUTEX_INITIALIZER;
-static struct utmpx *utmp_update(const struct utmpx *);
+static struct utmpx *_getutxid(const struct utmpx *);
-static const char vers[] = "utmpx-1.00";
+__private_extern__ const char _utmpx_vers[] = "utmpx-1.00";
- void
- setutxent()
-@@ -85,7 +80,11 @@ setutxent()
+-void
+-setutxent()
++__private_extern__ void
++_setutxent()
+ {
+
(void)memset(&ut, 0, sizeof(ut));
if (fp == NULL)
return;
}
-@@ -105,6 +104,9 @@ endutxent()
- struct utmpx *
- getutxent()
+ void
+-endutxent()
++setutxent()
++{
++ UTMPX_LOCK;
++ _setutxent();
++ UTMPX_UNLOCK;
++}
++
++
++__private_extern__ void
++_endutxent()
+ {
+
+ (void)memset(&ut, 0, sizeof(ut));
+@@ -102,9 +111,21 @@ endutxent()
+ }
+
+
+-struct utmpx *
+-getutxent()
++void
++endutxent()
{
++ UTMPX_LOCK;
++ _endutxent();
++ UTMPX_UNLOCK;
++}
++
++
++static struct utmpx *
++_getutxent()
++{
+#ifdef __LP64__
+ struct utmpx32 ut32;
+#endif /* __LP64__ */
if (fp == NULL) {
struct stat st;
-@@ -124,42 +126,80 @@ getutxent()
+@@ -116,7 +137,8 @@ getutxent()
+ else
+ readonly = 1;
+ }
+-
++
++ fcntl(fileno(fp), F_SETFD, 1); /* set close-on-exec flag */
+
+ /* get file size in order to check if new file */
+ if (fstat(fileno(fp), &st) == -1)
+@@ -124,27 +146,51 @@ getutxent()
if (st.st_size == 0) {
/* new file, add signature record */
fail:
(void)memset(&ut, 0, sizeof(ut));
return NULL;
- }
+@@ -152,14 +198,45 @@ fail:
+
--
struct utmpx *
++getutxent()
++{
++ struct utmpx *ret;
++ UTMPX_LOCK;
++ ret = _getutxent();
++ UTMPX_UNLOCK;
++ return ret;
++}
++
++struct utmpx *
getutxid(const struct utmpx *utx)
{
+ struct utmpx temp;
+ const struct utmpx *ux;
++ struct utmpx *ret;
_DIAGASSERT(utx != NULL);
if (utx->ut_type == EMPTY)
return NULL;
++ UTMPX_LOCK;
+ /* make a copy as needed, and auto-fill if requested */
+ ux = _utmpx_working_copy(utx, &temp, 1);
-+ if (!ux)
++ if (!ux) {
++ UTMPX_UNLOCK;
+ return NULL;
++ }
+
-+ return _getutxid(ux);
++ ret = _getutxid(ux);
++ UTMPX_UNLOCK;
++ return ret;
+}
+
+
do {
if (ut.ut_type == EMPTY)
continue;
-@@ -225,30 +265,68 @@ getutxline(const struct utmpx *utx)
+@@ -193,7 +270,7 @@ getutxid(const struct utmpx *utx)
+ default:
+ return NULL;
+ }
+- } while (getutxent() != NULL);
++ } while (_getutxent() != NULL);
+ return NULL;
+ }
+
+@@ -204,6 +281,7 @@ getutxline(const struct utmpx *utx)
+
+ _DIAGASSERT(utx != NULL);
+
++ UTMPX_LOCK;
+ do {
+ switch (ut.ut_type) {
+ case EMPTY:
+@@ -211,13 +289,16 @@ getutxline(const struct utmpx *utx)
+ case LOGIN_PROCESS:
+ case USER_PROCESS:
+ if (strncmp(ut.ut_line, utx->ut_line,
+- sizeof(ut.ut_line)) == 0)
++ sizeof(ut.ut_line)) == 0) {
++ UTMPX_UNLOCK;
+ return &ut;
++ }
+ break;
+ default:
+ break;
+ }
+- } while (getutxent() != NULL);
++ } while (_getutxent() != NULL);
++ UTMPX_UNLOCK;
+ return NULL;
+ }
+
+@@ -225,156 +306,180 @@ getutxline(const struct utmpx *utx)
struct utmpx *
pututxline(const struct utmpx *utx)
{
- if (strcmp(_PATH_UTMPX, utfile) == 0)
- if ((fp != NULL && readonly) || (fp == NULL && geteuid() != 0))
- return utmp_update(utx);
+-
++ UTMPX_LOCK;
+ if ((ux = _pututxline(utx)) != NULL && utfile_system) {
+ _utmpx_asl(ux); /* the equivalent of wtmpx and lastlogx */
+#ifdef UTMP_COMPAT
+ _write_utmp_compat(ux);
+#endif /* UTMP_COMPAT */
+ }
++ UTMPX_UNLOCK;
+ return ux;
+}
+- (void)memcpy(&temp, utx, sizeof(temp));
+__private_extern__ struct utmpx *
+_pututxline(const struct utmpx *utx)
+{
+#ifdef __LP64__
+ struct utmpx32 ut32;
+#endif /* __LP64__ */
-+ int gotlock = 0;
-
-- (void)memcpy(&temp, utx, sizeof(temp));
++ struct flock fl;
++#define gotlock (fl.l_start >= 0)
++
++ fl.l_start = -1; /* also means we haven't locked */
+ if (utfile_system)
+ if ((fp != NULL && readonly) || (fp == NULL && geteuid() != 0)) {
+ errno = EPERM;
+ }
if (fp == NULL) {
- (void)getutxent();
+- (void)getutxent();
- if (fp == NULL || readonly)
++ (void)_getutxent();
+ if (fp == NULL || readonly) {
+ errno = EPERM;
return NULL;
}
- if (getutxid(&temp) == NULL) {
+- setutxent();
+- if (getutxid(&temp) == NULL) {
+- if (lockf(fileno(fp), F_LOCK, (off_t)0) == -1)
+ /* make a copy as needed, and auto-fill if requested */
+ ux = _utmpx_working_copy(utx, &temp, 0);
+ if (!ux)
+ return NULL;
+
+ if ((x = _getutxid(ux)) == NULL) {
- setutxent();
-- if (getutxid(&temp) == NULL) {
++ _setutxent();
+ if ((x = _getutxid(ux)) == NULL) {
+ /*
+ * utx->ut_type has any original mask bits, while
+ errno = EINVAL;
+ return NULL;
+ }
- if (lockf(fileno(fp), F_LOCK, (off_t)0) == -1)
++ /*
++ * Replace lockf() with fcntl() and a fixed start
++ * value. We should already be at EOF.
++ */
++ if ((fl.l_start = lseek(fileno(fp), 0, SEEK_CUR)) < 0)
++ return NULL;
++ fl.l_len = 0;
++ fl.l_whence = SEEK_SET;
++ fl.l_type = F_WRLCK;
++ if (fcntl(fileno(fp), F_SETLKW, &fl) == -1)
return NULL;
- gotlock++;
-@@ -258,99 +336,66 @@ pututxline(const struct utmpx *utx)
+- gotlock++;
+ if (fseeko(fp, (off_t)0, SEEK_END) == -1)
+ goto fail;
+ }
}
if (!gotlock) {
+ notify_post(UTMPX_CHANGE_NOTIFICATION);
fail:
if (gotlock) {
+- if (lockf(fileno(fp), F_ULOCK, (off_t)0) == -1)
+ int save = errno;
- if (lockf(fileno(fp), F_ULOCK, (off_t)0) == -1)
++ fl.l_type = F_UNLCK;
++ if (fcntl(fileno(fp), F_SETLK, &fl) == -1)
return NULL;
+ errno = save;
}
size_t len;
- _DIAGASSERT(fname != NULL);
++ UTMPX_LOCK;
+ if (fname == NULL) {
+ strcpy(utfile, _PATH_UTMPX);
+ utfile_system = 1;
-+ endutxent();
++ _endutxent();
++ UTMPX_UNLOCK;
+ return 1;
+ }
len = strlen(fname);
-@@ -363,18 +408,17 @@ utmpxname(const char *fname)
+- if (len >= sizeof(utfile))
++ if (len >= sizeof(utfile)) {
++ UTMPX_UNLOCK;
+ return 0;
++ }
+
+ /* must end in x! */
+- if (fname[len - 1] != 'x')
++ if (fname[len - 1] != 'x') {
++ UTMPX_UNLOCK;
+ return 0;
++ }
(void)strlcpy(utfile, fname, sizeof(utfile));
- endutxent();
+- endutxent();
++ _endutxent();
+ utfile_system = 0;
++ UTMPX_UNLOCK;
return 1;
}
(void)memcpy(u->ut_line, ux->ut_line, sizeof(u->ut_line));
(void)memcpy(u->ut_host, ux->ut_host, sizeof(u->ut_host));
u->ut_time = ux->ut_tv.tv_sec;
-@@ -384,109 +428,16 @@ void
+@@ -384,109 +489,16 @@ void
getutmpx(const struct utmp *u, struct utmpx *ux)
{
+++ /dev/null
-/*
- * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-#include <sys/types.h>
-#include <sys/sysctl.h>
-#include <stdbool.h>
-
-static int osi_oid[2] = {-1, 0};
-
-bool
-OSSystemInfo(int selector, unsigned long long *resultp)
-{
- int oid[3];
- size_t size;
-
- /*
- * Check cached OID, look it up if we haven't already.
- *
- * NB. Whilst this isn't strictly thread safe, since the
- * result as written by any thread will be the same
- * there is no actual risk of corruption.
- */
- if (osi_oid[0] == -1) {
- size = 2;
- if (sysctlnametomib("hw.systeminfo", &osi_oid, &size) ||
- (size != 2))
- return(false);
- }
-
- /* build OID */
- oid[0] = osi_oid[0];
- oid[1] = osi_oid[1];
- oid[2] = selector;
-
- /* make the call */
- size = sizeof(*resultp);
- if (sysctl(oid, 3, resultp, &size, NULL, 0) ||
- (size != sizeof(*resultp)))
- return(false);
-
- return(true);
-}
-
/*
- * Copyright (c) 2005, 2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006, 2009 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
+#ifndef VARIANT_DYLD
#include <setjmp.h>
+#endif /* !VARIANT_DYLD */
#include <sys/types.h>
#include <unistd.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <asl.h>
+#include <fcntl.h>
+#include <sys/syslog.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/un.h>
typedef struct _SBUF {
BUF b;
+#ifndef VARIANT_DYLD
jmp_buf j;
+#endif /* !VARIANT_DYLD */
} SBUF;
static int asl_socket;
/* private extern exports from asl.c */
const char *_asl_escape(unsigned char);
-int _asl_server_socket(int *, struct sockaddr_un *);
/* flush the buffer */
static void
sold = SBUF_SIZE(b);
snew = (sold + VM_PAGE_SIZE) & ~(VM_PAGE_SIZE - 1);
if(vm_allocate(mach_task_self(), &new, snew, 1) != 0)
+#ifndef VARIANT_DYLD
longjmp(((SBUF *)b)->j, 1); /* out of memory */
+#else /* VARIANT_DYLD */
+ abort(); /* out of memory */
+#endif /* !VARIANT_DYLD */
diff = new - (vm_address_t)b->buf;
memcpy((void *)new, b->buf, sold);
if((intptr_t)(b->buf) & (VM_PAGE_SIZE - 1)) {
int
_simple_vesprintf(_SIMPLE_STRING b, _esc_func esc, const char *fmt, va_list ap)
{
+#ifndef VARIANT_DYLD
if(setjmp(((SBUF *)b)->j))
return -1;
+#endif /* !VARIANT_DYLD */
__simple_bprintf((BUF *)b, esc, fmt, ap);
return 0;
}
*/
int _simple_esappend(_SIMPLE_STRING b, _esc_func esc, const char *str)
{
+#ifndef VARIANT_DYLD
if(setjmp(((SBUF *)b)->j))
return -1;
+#endif /* !VARIANT_DYLD */
put_s((BUF *)b, esc, str);
return 0;
}
socket_init(void)
{
struct sockaddr_un server;
- _asl_server_socket(&asl_socket, &server);
+
+ server.sun_family = AF_UNIX;
+ strncpy(server.sun_path, _PATH_LOG, sizeof(server.sun_path));
+ asl_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
+ if (asl_socket < 0) return;
+
+ fcntl(asl_socket, F_SETFD, 1);
+
+ if (connect(asl_socket, (struct sockaddr *)&server, sizeof(server)) == -1)
+ {
+ close(asl_socket);
+ asl_socket = -1;
+ }
}
void
-.\" Copyright (c) 2005-2008 Apple Inc.
+.\" Copyright (c) 2005-2007 Apple Inc.
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
to set or change the master filter mask,
and that only root may change a per-client remote-control filter mask
for a root (UID 0) process.
+.Pp
+The per-process remote control filter value is kept as a state value
+associated with a key managed by
+.Nm notifyd .
+The key is protected by an access control mechanism that only permits the
+filter value to be accessed and modified by the same effective UID as the
+ASL client at the time that the first ASL connection was created.
+Remote filter control using
+.Nm syslog Fl c
+will fail for processes that change effective UID after starting an ASL connection.
+Those processes should close all ASL client handles and then re-open ASL connections
+if remote filter control support is desired.
.Sh HISTORY
These functions first appeared in
Mac OS X 10.4.
#include <pthread.h>
#include <asl_ipc.h>
-#define ASL_SERVICE_NAME "com.apple.system.logger"
-
#define streq(A, B) (strcmp(A, B) == 0)
#define strcaseeq(A, B) (strcasecmp(A, B) == 0)
+#ifndef ASL_QUERY_OP_FALSE
+#define ASL_QUERY_OP_FALSE 0
+#endif
+
#define forever for(;;)
#define TOKEN_NULL 0
uint32_t notify_register_plain(const char *name, int *out_token);
/* from asl_util.c */
-int _asl_server_socket(int *sock, struct sockaddr_un *server);
int asl_is_utf8(const char *str);
uint8_t *asl_b64_encode(const uint8_t *buf, size_t len);
+/* fork handling in syslog.c */
+extern void _syslog_fork_child();
+
/* character encoding lengths */
static const uint8_t char_encode_len[128] =
{
typedef struct
{
int notify_count;
+ int rc_change_token;
int notify_token;
int master_token;
+ uint64_t proc_filter;
+ uint64_t master_filter;
+ int port_count;
+ mach_port_t server_port;
char *sender;
pthread_mutex_t lock;
+ pthread_mutex_t port_lock;
asl_client_t *asl;
} _asl_global_t;
#ifndef BUILDING_VARIANT
-__private_extern__ _asl_global_t _asl_global = {0, -1, -1, NULL, PTHREAD_MUTEX_INITIALIZER, NULL};
+__private_extern__ _asl_global_t _asl_global = {0, -1, -1, -1, 0LL, 0LL, 0, MACH_PORT_NULL, NULL, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL};
-static mach_port_t asl_server_port = MACH_PORT_NULL;
+#define ASL_SERVICE_NAME "com.apple.system.logger"
-static int
-_asl_connect(asl_client_t *asl)
+/*
+ * Called from the child process inside fork() to clean up
+ * inherited state from the parent process.
+ *
+ * NB. A lock isn't required, since we're single threaded in this call.
+ */
+__private_extern__ void
+_asl_fork_child()
{
- if (asl->sock >= 0) return 0;
+ _asl_global.notify_count = 0;
+ _asl_global.rc_change_token = -1;
+ _asl_global.master_token = -1;
+ _asl_global.notify_token = -1;
+
+ _asl_global.port_count = 0;
+ _asl_global.server_port = MACH_PORT_NULL;
- return _asl_server_socket(&asl->sock, &asl->server);
+ /* clean up in syslog.c */
+ _syslog_fork_child();
}
static int
_asl_notify_open(int do_lock)
{
char *notify_name;
- const char *prefix;
uint32_t status;
+ uint32_t euid;
if (do_lock != 0) pthread_mutex_lock(&_asl_global.lock);
return 0;
}
- notify_name = NULL;
-
- prefix = NOTIFY_PREFIX_USER;
- if (getuid() == 0) prefix = NOTIFY_PREFIX_SYSTEM;
+ if (_asl_global.rc_change_token == -1)
+ {
+ status = notify_register_check(NOTIFY_RC, &_asl_global.rc_change_token);
+ if (status != NOTIFY_STATUS_OK) _asl_global.rc_change_token = -1;
+ }
if (_asl_global.master_token == -1)
{
if (status != NOTIFY_STATUS_OK) _asl_global.master_token = -1;
}
- asprintf(¬ify_name, "%s.%d", prefix, getpid());
+ euid = geteuid();
+ notify_name = NULL;
+ if (euid == 0) asprintf(¬ify_name, "%s.%d", NOTIFY_PREFIX_SYSTEM, getpid());
+ else asprintf(¬ify_name, "user.uid.%d.syslog.%d", euid, getpid());
if (notify_name != NULL)
{
return;
}
+ if (_asl_global.rc_change_token > 0) notify_cancel(_asl_global.rc_change_token);
+ _asl_global.rc_change_token = -1;
+
if (_asl_global.master_token > 0) notify_cancel(_asl_global.master_token);
_asl_global.master_token = -1;
{
char *name, *x;
asl_client_t *asl;
+ kern_return_t kstatus;
asl = (asl_client_t *)calloc(1, sizeof(asl_client_t));
if (asl == NULL)
asl->sock = -1;
- if (asl->options & ASL_OPT_NO_DELAY)
+ pthread_mutex_lock(&(_asl_global.port_lock));
+
+ if (_asl_global.server_port == MACH_PORT_NULL)
{
- if (_asl_connect(asl) < 0)
- {
- free(asl);
- return NULL;
- }
+ _asl_global.port_count = 0;
+
+ kstatus = bootstrap_look_up(bootstrap_port, ASL_SERVICE_NAME, &_asl_global.server_port);
+ if (kstatus == KERN_SUCCESS) _asl_global.port_count = 1;
+ else _asl_global.server_port = MACH_PORT_NULL;
+ }
+ else
+ {
+ _asl_global.port_count++;
}
+ pthread_mutex_unlock(&(_asl_global.port_lock));
+
asl->pid = getpid();
asl->uid = getuid();
asl->gid = getgid();
asl->name = strdup(ident);
if (asl->name == NULL)
{
- close(asl->sock);
+ if (asl->sock >= 0) close(asl->sock);
free(asl);
return NULL;
}
asl->name = strdup(x);
if (asl->name == NULL)
{
- close(asl->sock);
+ if (asl->sock >= 0) close(asl->sock);
free(asl);
return NULL;
}
else asl->facility = strdup(asl_syslog_faciliy_num_to_name(LOG_USER));
if (asl->facility == NULL)
{
- close(asl->sock);
+ if (asl->sock >= 0) close(asl->sock);
free(asl);
return NULL;
}
if (asl == NULL) return;
if (asl->sock >= 0) close(asl->sock);
+
+ pthread_mutex_lock(&(_asl_global.port_lock));
+
+ if (_asl_global.port_count > 0) _asl_global.port_count--;
+ if (_asl_global.port_count == 0)
+ {
+ mach_port_deallocate(mach_task_self(), _asl_global.server_port);
+ _asl_global.server_port = MACH_PORT_NULL;
+ }
+
+ pthread_mutex_unlock(&(_asl_global.port_lock));
+
if (asl->name != NULL) free(asl->name);
if (asl->facility != NULL) free(asl->facility);
if (!(asl->options & ASL_OPT_NO_REMOTE)) _asl_notify_close();
__private_extern__ asl_client_t *
_asl_open_default()
{
+ if (_asl_global.asl != NULL) return _asl_global.asl;
+
pthread_mutex_lock(&_asl_global.lock);
if (_asl_global.asl != NULL)
{
}
static int
-_asl_msg_op_test(uint32_t op, char *q, char *m, uint32_t n)
+_asl_msg_basic_test(uint32_t op, char *q, char *m, uint32_t n)
{
int cmp;
uint32_t t;
t = op & ASL_QUERY_OP_TRUE;
+ /* NULL value from query or message string fails */
+ if ((q == NULL) || (m == NULL)) return (t & ASL_QUERY_OP_NOT_EQUAL);
+
if (op & ASL_QUERY_OP_REGEX)
{
+ /* greater than or less than make no sense in substring search */
+ if ((t == ASL_QUERY_OP_GREATER) || (t == ASL_QUERY_OP_LESS)) return 0;
+
memset(&rex, 0, sizeof(regex_t));
rflags = REG_EXTENDED | REG_NOSUB;
if (op & ASL_QUERY_OP_CASEFOLD) rflags |= REG_ICASE;
- if (regcomp(&rex, q, rflags) != 0) return 0;
+ /* A bad reqular expression matches nothing */
+ if (regcomp(&rex, q, rflags) != 0) return (t & ASL_QUERY_OP_NOT_EQUAL);
+
cmp = regexec(&rex, m, 0, NULL, 0);
regfree(&rex);
+
+ if (t == ASL_QUERY_OP_NOT_EQUAL) return (cmp != 0);
return (cmp == 0);
}
if (op & ASL_QUERY_OP_NUMERIC)
{
- /* We assume the query contains a numeric string */
- if (_asl_isanumber(m) == 0) return 0;
+ if (_asl_isanumber(q) == 0) return (t == ASL_QUERY_OP_NOT_EQUAL);
+ if (_asl_isanumber(m) == 0) return (t == ASL_QUERY_OP_NOT_EQUAL);
nq = atoi(q);
nm = atoi(m);
case ASL_QUERY_OP_LESS: return (nm < nq);
case ASL_QUERY_OP_LESS_EQUAL: return (nm <= nq);
case ASL_QUERY_OP_NOT_EQUAL: return (nm != nq);
- default: return 0;
+ default: return (t == ASL_QUERY_OP_NOT_EQUAL);
}
}
case ASL_QUERY_OP_LESS: return (cmp < 0);
case ASL_QUERY_OP_LESS_EQUAL: return (cmp <= 0);
case ASL_QUERY_OP_NOT_EQUAL: return (cmp != 0);
- default: return 0;
}
- return 0;
+ return (t == ASL_QUERY_OP_NOT_EQUAL);
}
static int
-_asl_msg_test_op_substr(uint32_t op, char *q, char *m)
+_asl_msg_test_substring(uint32_t op, char *q, char *m)
{
- uint32_t i, d, lm, lq;
+ uint32_t t, i, d, lm, lq, match, newop;
- lm = strlen(m);
- lq = strlen(q);
+ t = op & ASL_QUERY_OP_TRUE;
+
+ lm = 0;
+ if (m != NULL) lm = strlen(m);
+
+ lq = 0;
+ if (q != NULL) lq = strlen(q);
+
+ /* NULL is a substring of any string */
+ if (lq == 0) return (t & ASL_QUERY_OP_EQUAL);
- if (lq > lm) return 0;
+ /* A long string is defined to be not equal to a short string */
+ if (lq > lm) return (t == ASL_QUERY_OP_NOT_EQUAL);
+ /* greater than or less than make no sense in substring search */
+ if ((t == ASL_QUERY_OP_GREATER) || (t == ASL_QUERY_OP_LESS)) return 0;
+
+ /*
+ * We scan the string doing an equality test.
+ * If the input test is equality, we stop as soon as we hit a match.
+ * Otherwise we keep scanning the whole message string.
+ */
+ newop = op & 0xff0;
+ newop |= ASL_QUERY_OP_EQUAL;
+
+ match = 0;
d = lm - lq;
for (i = 0; i <= d; i++)
{
- if (_asl_msg_op_test(op, q, m + i, lq) != 0) return 1;
+ if (_asl_msg_basic_test(newop, q, m + i, lq) != 0)
+ {
+ if (t & ASL_QUERY_OP_EQUAL) return 1;
+ match++;
+ }
}
- return 0;
+ /* If the input test was for equality, no matches were found */
+ if (t & ASL_QUERY_OP_EQUAL) return 0;
+
+ /* The input test was for not equal. Return true if no matches were found */
+ return (match == 0);
}
static int
-_asl_msg_test_op_prefix(uint32_t op, char *q, char *m)
+_asl_msg_test_prefix(uint32_t op, char *q, char *m)
{
- uint32_t lm, lq;
+ uint32_t lm, lq, t;
+
+ t = op & ASL_QUERY_OP_TRUE;
+
+ lm = 0;
+ if (m != NULL) lm = strlen(m);
- lm = strlen(m);
- lq = strlen(q);
+ lq = 0;
+ if (q != NULL) lq = strlen(q);
- if (lq > lm) return 0;
+ /* NULL is a prefix of any string */
+ if (lq == 0) return (t & ASL_QUERY_OP_EQUAL);
- return _asl_msg_op_test(op, q, m, lq);
+ /* A long string is defined to be not equal to a short string */
+ if (lq > lm) return (t == ASL_QUERY_OP_NOT_EQUAL);
+
+ /* Compare two equal-length strings */
+ return _asl_msg_basic_test(op, q, m, lq);
}
static int
-_asl_msg_test_op_suffix(uint32_t op, char *q, char *m)
+_asl_msg_test_suffix(uint32_t op, char *q, char *m)
{
- uint32_t lm, lq, d;
+ uint32_t lm, lq, d, t;
+
+ t = op & ASL_QUERY_OP_TRUE;
+
+ lm = 0;
+ if (m != NULL) lm = strlen(m);
+
+ lq = 0;
+ if (q != NULL) lq = strlen(q);
- lm = strlen(m);
- lq = strlen(q);
+ /* NULL is a suffix of any string */
+ if (lq == 0) return (t & ASL_QUERY_OP_EQUAL);
- if (lq > lm) return 0;
+ /* A long string is defined to be not equal to a short string */
+ if (lq > lm) return (t == ASL_QUERY_OP_NOT_EQUAL);
+ /* Compare two equal-length strings */
d = lm - lq;
- return _asl_msg_op_test(op, q, m + d, lq);
+ return _asl_msg_basic_test(op, q, m + d, lq);
}
+/*
+ * Splits out prefix, suffix, and substring tests.
+ * Sends the rest to _asl_msg_basic_test().
+ */
static int
-_asl_msg_test_op(uint32_t op, char *q, char *m)
+_asl_msg_test_expression(uint32_t op, char *q, char *m)
{
uint32_t t;
if (op & ASL_QUERY_OP_PREFIX)
{
- if (op & ASL_QUERY_OP_SUFFIX) return _asl_msg_test_op_substr(op, q, m);
- return _asl_msg_test_op_prefix(op, q, m);
+ if (op & ASL_QUERY_OP_SUFFIX) return _asl_msg_test_substring(op, q, m);
+ return _asl_msg_test_prefix(op, q, m);
}
- if (op & ASL_QUERY_OP_SUFFIX) return _asl_msg_test_op_suffix(op, q, m);
+ if (op & ASL_QUERY_OP_SUFFIX) return _asl_msg_test_suffix(op, q, m);
- return _asl_msg_op_test(op, q, m, 0);
+ return _asl_msg_basic_test(op, q, m, 0);
}
+/*
+ * Special case for comparing time values.
+ * If both inputs are time strings, this compares the time
+ * value in seconds. Otherwise it just does normal matching.
+ */
static int
-_asl_msg_test_time_op(uint32_t op, char *q, char *m)
+_asl_msg_test_time_expression(uint32_t op, char *q, char *m)
{
time_t tq, tm;
- char *vq, *vm;
- struct tm gtime;
- uint32_t t, do_numeric;
- int cmp;
-
- do_numeric = 1;
+ uint32_t t;
- if ((op & ASL_QUERY_OP_PREFIX) || (op & ASL_QUERY_OP_SUFFIX) || (op & ASL_QUERY_OP_REGEX) || (op & ASL_QUERY_OP_CASEFOLD)) do_numeric = 0;
+ if ((op & ASL_QUERY_OP_PREFIX) || (op & ASL_QUERY_OP_SUFFIX) || (op & ASL_QUERY_OP_REGEX)) return _asl_msg_test_expression(op, q, m);
+ if ((q == NULL) || (m == NULL)) return _asl_msg_test_expression(op, q, m);
tq = asl_parse_time(q);
- if (tq < 0) return _asl_msg_test_op(op, q, m);
+ if (tq < 0) return _asl_msg_test_expression(op, q, m);
tm = asl_parse_time(m);
- if (tm < 0) return _asl_msg_test_op(op, q, m);
+ if (tm < 0) return _asl_msg_test_expression(op, q, m);
- if (do_numeric == 1)
+ t = op & ASL_QUERY_OP_TRUE;
+
+ switch (t)
{
- t = op & ASL_QUERY_OP_TRUE;
- switch (t)
+ case ASL_QUERY_OP_FALSE:
{
- case ASL_QUERY_OP_EQUAL:
- if (tm == tq) return 1;
- return 0;
- case ASL_QUERY_OP_GREATER:
- if (tm > tq) return 1;
- return 0;
- case ASL_QUERY_OP_GREATER_EQUAL:
- if (tm >= tq) return 1;
- return 0;
- case ASL_QUERY_OP_LESS:
- if (tm < tq) return 1;
- return 0;
- case ASL_QUERY_OP_LESS_EQUAL:
- if (tm <= tq) return 1;
- return 0;
- case ASL_QUERY_OP_NOT_EQUAL:
- if (tm != tq) return 1;
- return 0;
- default:
- return 0;
+ return 0;
+ }
+ case ASL_QUERY_OP_EQUAL:
+ {
+ if (tm == tq) return 1;
+ return 0;
+ }
+ case ASL_QUERY_OP_GREATER:
+ {
+ if (tm > tq) return 1;
+ return 0;
+ }
+ case ASL_QUERY_OP_GREATER_EQUAL:
+ {
+ if (tm >= tq) return 1;
+ return 0;
+ }
+ case ASL_QUERY_OP_LESS:
+ {
+ if (tm < tq) return 1;
+ return 0;
+ }
+ case ASL_QUERY_OP_LESS_EQUAL:
+ {
+ if (tm <= tq) return 1;
+ return 0;
+ }
+ case ASL_QUERY_OP_NOT_EQUAL:
+ {
+ if (tm != tq) return 1;
+ return 0;
+ }
+ case ASL_QUERY_OP_TRUE:
+ {
+ return 1;
}
-
- return 0;
}
- memset(>ime, 0, sizeof(struct tm));
- gmtime_r(&tq, >ime);
-
- /* Canonical form: YYYY.MM.DD hh:mm:ss UTC */
- vq = NULL;
- asprintf(&vq, "%d.%02d.%02d %02d:%02d:%02d UTC", gtime.tm_year + 1900, gtime.tm_mon + 1, gtime.tm_mday, gtime.tm_hour, gtime.tm_min, gtime.tm_sec);
- if (vq == NULL) return 0;
-
- memset(>ime, 0, sizeof(struct tm));
- gmtime_r(&tm, >ime);
-
- /* Canonical form: YYYY.MM.DD hh:mm:ss UTC */
- vm = NULL;
- asprintf(&vm, "%d.%02d.%02d %02d:%02d:%02d UTC", gtime.tm_year + 1900, gtime.tm_mon + 1, gtime.tm_mday, gtime.tm_hour, gtime.tm_min, gtime.tm_sec);
- if (vm == NULL) return 0;
-
- cmp = _asl_msg_test_op(op, q, m);
-
- free(vq);
- free(vm);
-
- return cmp;
+ /* NOTREACHED */
+ return 0;
}
+/* test a query against a message */
static int
_asl_msg_test(asl_msg_t *q, asl_msg_t *m)
{
- uint32_t i, j;
+ uint32_t i, j, t;
int cmp;
- char *val;
+ /*
+ * Check each simple expression (key op val) separately.
+ * The query suceeds (returns 1) if all simple expressions
+ * succeed (i.e. AND the simple expressions).
+ */
for (i = 0; i < q->count; i++)
{
+ /* Find query key[i] in the message */
j = _asl_msg_index(m, q->key[i]);
- if (j == (uint32_t)-1) return 0;
- if (q->val[i] == NULL) continue;
+ /* NULL op is meaningless, but we allow it to succeed */
if (q->op == NULL) continue;
- if ((q->op[i] & ASL_QUERY_OP_TRUE) == ASL_QUERY_OP_TRUE) continue;
+ /* ASL_QUERY_OP_TRUE tests if key[i] is present in the message */
+ t = q->op[i] & ASL_QUERY_OP_TRUE;
+ if (t == ASL_QUERY_OP_TRUE)
+ {
+ if (j == (uint32_t)-1) return 0;
+ continue;
+ }
- if (m->val[j] == NULL) return 0;
+ /* ASL_QUERY_OP_FALSE tests if the key is NOT present in the message */
+ if (t == ASL_QUERY_OP_FALSE)
+ {
+ if (j != (uint32_t)-1) return 0;
+ continue;
+ }
- val = q->val[i];
+ if (j == (uint32_t)-1)
+ {
+ /* the message does NOT have query key[i] - fail unless we are testing not equal */
+ if (t == ASL_QUERY_OP_NOT_EQUAL) continue;
+ return 0;
+ }
cmp = 1;
if (streq(q->key[i], ASL_KEY_TIME))
{
- cmp = _asl_msg_test_time_op(q->op[i], q->val[i], m->val[j]);
+ cmp = _asl_msg_test_time_expression(q->op[i], q->val[i], m->val[j]);
}
else
{
- cmp = _asl_msg_test_op(q->op[i], val, m->val[j]);
+ cmp = _asl_msg_test_expression(q->op[i], q->val[i], m->val[j]);
}
if (cmp == 0) return 0;
int
asl_send(aslclient ac, aslmsg msg)
{
- char *str, *out_raw, *out;
- uint32_t i, len, level, lmask, outstatus, filter, senderx, facilityx;
+ char *str, *out_raw;
+ caddr_t out;
+ uint32_t i, len, outlen, level, lmask, outstatus, filter, check, senderx, facilityx;
uint64_t v64;
const char *val;
char *name, *x;
int use_global_lock;
asl_msg_t *mt;
char hname[_POSIX_HOST_NAME_MAX];
+ kern_return_t kstatus;
use_global_lock = 0;
asl = (asl_client_t *)ac;
lmask = ASL_FILTER_MASK(level);
- filter = asl->filter;
- rc_filter = 0;
-
if (!(asl->options & ASL_OPT_NO_REMOTE))
{
pthread_mutex_lock(&_asl_global.lock);
- if (_asl_global.notify_token >= 0)
+ if (_asl_global.rc_change_token >= 0)
{
- v64 = 0;
-
- status = notify_get_state(_asl_global.notify_token, &v64);
- if ((status == NOTIFY_STATUS_OK) && (v64 != 0))
+ /* initialize or re-check process-specific and master filters */
+ check = 0;
+ status = notify_check(_asl_global.rc_change_token, &check);
+ if ((status == NOTIFY_STATUS_OK) && (check != 0))
{
- filter = v64;
- rc_filter = 1;
- }
- }
-
- if ((rc_filter == 0) && (_asl_global.master_token >= 0))
- {
- v64 = 0;
+ if (_asl_global.master_token >= 0)
+ {
+ v64 = 0;
+ status = notify_get_state(_asl_global.master_token, &v64);
+ if (status == NOTIFY_STATUS_OK) _asl_global.master_filter = v64;
+ }
- status = notify_get_state(_asl_global.master_token, &v64);
- if ((status == NOTIFY_STATUS_OK) && (v64 != 0))
- {
- filter = v64;
+ if (_asl_global.notify_token >= 0)
+ {
+ v64 = 0;
+ status = notify_get_state(_asl_global.notify_token, &v64);
+ if (status == NOTIFY_STATUS_OK) _asl_global.proc_filter = v64;
+ }
}
}
pthread_mutex_unlock(&_asl_global.lock);
}
+ filter = asl->filter;
+ rc_filter = 0;
+
+ /* master filter overrides local filter */
+ if (_asl_global.master_filter != 0)
+ {
+ filter = _asl_global.master_filter;
+ rc_filter = 1;
+ }
+
+ /* process-specific filter overrides local and master */
+ if (_asl_global.proc_filter != 0)
+ {
+ filter = _asl_global.proc_filter;
+ rc_filter = 1;
+ }
+
/*
* Time, TimeNanoSec, Host, PID, UID, and GID values get set here
*/
}
}
- outstatus = 0;
+ /* Set "ASLOption store" if remote control is active */
+ if (rc_filter != 0)
+ {
+ val = asl_get(msg, ASL_KEY_OPTION);
+ if (val == NULL)
+ {
+ asl_set(msg, ASL_KEY_OPTION, ASL_OPT_STORE);
+ }
+ else
+ {
+ str = NULL;
+ asprintf(&str, "%s %s", ASL_OPT_STORE, val);
+ if (str != NULL)
+ {
+ asl_set(msg, ASL_KEY_OPTION, str);
+ free(str);
+ str = NULL;
+ }
+ }
+ }
+
+ outstatus = -1;
if (use_global_lock != 0) pthread_mutex_lock(&_asl_global.lock);
if ((out_raw != NULL) && (len != 0))
{
- asprintf(&out, "%10u %s\n", len + 1, out_raw);
- if (out != NULL)
+ /* send a mach message to syslogd */
+ outlen = len + 11;
+ kstatus = vm_allocate(mach_task_self(), (vm_address_t *)&out, outlen + 1, TRUE);
+ if (kstatus == KERN_SUCCESS)
{
- if (asl->sock == -1) _asl_connect(asl);
+ memset(out, 0, outlen + 1);
+ snprintf((char *)out, outlen, "%10u %s", len, out_raw);
+
+ status = 0;
- if (asl->sock >= 0)
+ pthread_mutex_lock(&(_asl_global.port_lock));
+
+ if (_asl_global.server_port == MACH_PORT_NULL)
{
- status = write(asl->sock, out, len + 12);
- if (status < 0)
- {
- /* Write failed - try resetting */
- close(asl->sock);
- asl->sock = -1;
- _asl_connect(asl);
- if (asl->sock >= 0) status = write(asl->sock, out, len + 12);
- if (status < 0) outstatus = -1;
- }
+ _asl_global.port_count = 0;
+
+ kstatus = bootstrap_look_up(bootstrap_port, ASL_SERVICE_NAME, &_asl_global.server_port);
+ if (kstatus == KERN_SUCCESS) _asl_global.port_count = 1;
+ else _asl_global.server_port = MACH_PORT_NULL;
}
- else outstatus = -1;
- free(out);
+ pthread_mutex_unlock(&(_asl_global.port_lock));
+
+ if (kstatus == KERN_SUCCESS) kstatus = _asl_server_message(_asl_global.server_port, (caddr_t)out, outlen + 1);
+ else vm_deallocate(mach_task_self(), (vm_address_t)out, outlen + 1);
+
+ if (kstatus == KERN_SUCCESS) outstatus = 0;
}
free(out_raw);
}
}
+ outstatus = 0;
+
+ /* write to file descriptors */
for (i = 0; i < asl->fd_count; i++)
{
if (asl->fd_list[i] < 0) continue;
/*
* Called if there's a malloc error while manipulating a message in asl_set_query.
- * Cleans up the key, kap, and op fields, sets count to zero.
+ * Cleans up the key, val, and op fields, sets count to zero.
*/
static void
_asl_clear_msg(asl_msg_t *msg)
int
asl_set_query(aslmsg a, const char *key, const char *val, uint32_t op)
{
- uint32_t i;
+ uint32_t i, len;
char *dk, *dv;
asl_msg_t *msg;
msg = (asl_msg_t *)a;
if (msg == NULL) return 0;
-
if (key == NULL) return -1;
dv = NULL;
- if (streq(key, ASL_KEY_LEVEL))
+ if ((streq(key, ASL_KEY_MSG)) && (val != NULL))
+ {
+ /* strip trailing newlines */
+ dv = strdup(val);
+ if (dv == NULL) return -1;
+
+ len = strlen(dv);
+ i = len - 1;
+ while ((len > 0) && (dv[i] == '\n'))
+ {
+ dv[i] = '\0';
+ i--;
+ len--;
+ }
+ }
+ else if (streq(key, ASL_KEY_LEVEL))
{
if (val == NULL) return -1;
if (val[0] == '\0') return -1;
}
dk = strdup(key);
- if (dk == NULL) return -1;
+ if (dk == NULL)
+ {
+ if (dv != NULL) free(dv);
+ _asl_clear_msg(msg);
+ return -1;
+ }
msg->key[msg->count] = dk;
msg->val[msg->count] = dv;
asl_search(aslclient ac, aslmsg a)
{
asl_search_result_t query, *out;
- asl_msg_t *qlist[1];
- uint32_t status;
- uint64_t last_id;
+ asl_msg_t *q, *qlist[1];
+ uint32_t status, x;
+ uint64_t last_id, start_id;
asl_store_t *store;
if (a == NULL) return NULL;
+ q = (asl_msg_t *)a;
+
+ /* check for "ASLMessageId >[=] n" and set start_id */
+ start_id = 0;
+ x = _asl_msg_index(q, ASL_KEY_MSG_ID);
+ if ((x != (uint32_t)-1) && (q->val[x] != NULL) && (q->op != NULL) && (q->op[x] & ASL_QUERY_OP_GREATER))
+ {
+ if (q->op[x] & ASL_QUERY_OP_EQUAL) start_id = atoi(q->val[x]);
+ else start_id = atoi(q->val[x]) + 1;
+ }
+
store = NULL;
status = asl_store_open_read(NULL, &store);
if (status != 0) return NULL;
query.count = 1;
query.msg = qlist;
- status = asl_store_match(store, &query, &out, &last_id, 0, 0, 1);
+ status = asl_store_match(store, &query, &out, &last_id, start_id, 0, 1);
asl_store_close(store);
return out;
return -1;
}
-#ifdef ASL_SYSLOG_COMPAT
-
-__private_extern__ void
-asl_syslog_syslog(int pri, const char *fmt, ...)
-{
- va_list ap;
- asl_msg_t *m;
-
- if (fmt == NULL) return;
-
- m = asl_new(ASL_TYPE_MSG);
-
- va_start(ap, fmt);
- asl_vlog(NULL, m, pri, fmt, ap);
- va_end(ap);
-
- asl_free(m);
-}
-
-__private_extern__ void
-asl_syslog_vsyslog(int pri, const char *fmt, va_list ap)
-{
- asl_msg_t *m;
-
- m = asl_new(ASL_TYPE_MSG);
- asl_vlog(NULL, m, pri, fmt, ap);
- asl_free(m);
-}
-
-__private_extern__ void
-asl_syslog_openlog(const char *ident, int flags, int facility)
-{
- const char *fname;
- uint32_t opts;
-
- opts = 0;
-
- if (flags & LOG_NDELAY) opts |= ASL_OPT_NO_DELAY;
- if (flags & LOG_PERROR) opts |= ASL_OPT_STDERR;
-
- fname = asl_syslog_faciliy_num_to_name(facility);
- if (fname == NULL) fname = "user";
-
- asl_global_client = asl_open(ident, fname, opts);
-}
-
-__private_extern__ void
-asl_syslog_closelog()
-{
- asl_close();
-}
-
-__private_extern__ int
-asl_syslog_setlogmask(int p)
-{
- return asl_set_filter(p);
-}
-
-#endif ASL_SYSLOG_COMPAT
-
#endif /* BUILDING_VARIANT */
if (s == NULL) return 0;
l = inlen;
+ if (l == 0)
+ {
+ if (s[0] == '\0') return 0;
+ l = strlen(s);
+ }
len = l;
a = b = 0x9e3779b9;
/*
- * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
*/
#define RECORD_COMMON_LEN 6
#define RECORD_TYPE_LEN 2
-#define RECORD_OFFSET_FLAGS 30
-#define RECORD_OFFSET_RUID 44
-#define RECORD_OFFSET_RGID 48
+#define BUFFER_OFFSET_KVCOUNT 56
#define SCRATCH_BUFFER_SIZE (MSG_RECORD_FIXED_LENGTH + (20 * sizeof(uint64_t)))
if (s == NULL) return ASL_STATUS_INVALID_STORE;
if (s->store == NULL) return ASL_STATUS_INVALID_STORE;
+ if ((off + sizeof(uint32_t)) > s->file_size) return ASL_STATUS_READ_FAILED;
status = fseeko(s->store, off, SEEK_SET);
if (status != 0) return ASL_STATUS_READ_FAILED;
if (s == NULL) return ASL_STATUS_INVALID_STORE;
if (s->store == NULL) return ASL_STATUS_INVALID_STORE;
+ if ((off + sizeof(uint64_t)) > s->file_size) return ASL_STATUS_READ_FAILED;
status = fseeko(s->store, off, SEEK_SET);
if (status != 0) return ASL_STATUS_READ_FAILED;
}
if (s->store != NULL) fclose(s->store);
+ if (s->scratch != NULL) free(s->scratch);
memset(s, 0, sizeof(asl_file_t));
free(s);
return ASL_STATUS_FAILED;
}
- out->file_size = sb.st_size;
-
i = fread(buf, DB_HEADER_LEN, 1, out->store);
if (i < 1)
{
return ASL_STATUS_READ_FAILED;
}
+ /* check cookie */
+ if (strncmp(buf, ASL_DB_COOKIE, ASL_DB_COOKIE_LEN))
+ {
+ asl_file_close(out);
+ return ASL_STATUS_INVALID_STORE;
+ }
+
/* check version */
vers = _asl_get_32(buf + DB_HEADER_VERS_OFFSET);
if (vers != DB_VERSION)
out->dob = _asl_get_64(buf + DB_HEADER_TIME_OFFSET);
out->first = _asl_get_64(buf + DB_HEADER_FIRST_OFFSET);
out->last = _asl_get_64(buf + DB_HEADER_LAST_OFFSET);
+ out->file_size = (size_t)sb.st_size;
+
+ /* detect bogus last pointer */
+ if (out->last >= out->file_size) out->last = 0;
aslstatus = asl_file_read_set_position(out, ASL_FILE_POSITION_LAST);
if (aslstatus != ASL_STATUS_OK)
return ASL_STATUS_READ_FAILED;
}
- out->file_size = ftello(out->store);
+ out->file_size = (size_t)ftello(out->store);
/* scratch buffer for file writes (we test for NULL before using it) */
out->scratch = malloc(SCRATCH_BUFFER_SIZE);
type = htons(ASL_FILE_TYPE_STR);
i = fwrite(&type, sizeof(uint16_t), 1, s->store);
if (i != 1) return ASL_STATUS_WRITE_FAILED;
- s->file_size += sizeof(uint16_t);
/* Length (includes trailing nul) */
x32 = htonl(len + 1);
i = fwrite(&x32, sizeof(uint32_t), 1, s->store);
if (i != 1) return ASL_STATUS_WRITE_FAILED;
- s->file_size += sizeof(uint32_t);
/* String data (nul terminated) */
i = fwrite(str, len + 1, 1, s->store);
if (i != 1) return ASL_STATUS_WRITE_FAILED;
- s->file_size += len;
/* create file_string_t and insert into the cache */
sx = (file_string_t *)calloc(1, offsetof(file_string_t, str) + len + 1);
{
if (s->flags & ASL_FILE_FLAG_PRESERVE_MSG_ID) *mid = atoll(msg->val[i]);
}
+ else if (!strcmp(msg->key[i], ASL_KEY_OPTION))
+ {
+ /* ignore - we don't save ASLOption */
+ }
else
{
status = asl_file_string_encode(s, msg->key[i], &k);
free(kvlist);
kvlist = NULL;
- if (status != 0) return ASL_STATUS_WRITE_FAILED;
-
/* write record at end of file */
status = fseeko(s->store, 0, SEEK_END);
if (status != 0) return ASL_STATUS_WRITE_FAILED;
s->last = (uint64_t)ftello(s->store);
+
v = asl_core_htonq(s->last);
status = fwrite(buf, len, 1, s->store);
status = fseeko(s->store, 0, SEEK_END);
if (status != 0) return ASL_STATUS_WRITE_FAILED;
+ s->file_size = (size_t)ftello(s->store);
+
s->prev = s->last;
return ASL_STATUS_OK;
}
static uint32_t
-asl_file_fetch_object(asl_file_t *s, uint64_t where, char **out)
+asl_file_fetch_object(asl_file_t *s, uint64_t where, char **out, uint32_t *outlen)
{
static char ils[9];
char *p;
uint16_t type;
off_t off;
+ *out = NULL;
+ *outlen = 0;
+
if (s == NULL) return ASL_STATUS_INVALID_STORE;
if (out == NULL) return ASL_STATUS_INVALID_ARG;
if (where == 0) return ASL_STATUS_INVALID_ARG;
- *out = NULL;
-
inls = 0;
x64 = asl_core_htonq(where);
memcpy(&inls, &x64, 1);
if (inls & 0x80)
{
/* inline string */
- memset(ils, 0, sizeof(ils));
inls &= 0x0f;
+ if (inls > 7) return ASL_STATUS_INVALID_STORE;
+
p = 1 + (char *)&x64;
+ memset(ils, 0, sizeof(ils));
memcpy(ils, p, inls);
*out = strdup(ils);
-
if (*out == NULL) return ASL_STATUS_NO_MEMORY;
+
+ *outlen = inls;
return ASL_STATUS_OK;
}
off = where;
+ if ((off + sizeof(uint16_t) + sizeof(uint32_t)) > s->file_size) return ASL_STATUS_READ_FAILED;
+
status = fseeko(s->store, off, SEEK_SET);
if (status != 0) return ASL_STATUS_READ_FAILED;
/* Type */
status = fread(&type, sizeof(uint16_t), 1, s->store);
if (status != 1) return ASL_STATUS_READ_FAILED;
+ off += sizeof(uint16_t);
/* Length */
len = 0;
status = fread(&len, sizeof(uint32_t), 1, s->store);
if (status != 1) return ASL_STATUS_READ_FAILED;
+ off += sizeof(uint32_t);
+
len = ntohl(len);
+ if ((off + len) > s->file_size) return ASL_STATUS_READ_FAILED;
*out = calloc(1, len);
if (*out == NULL) return ASL_STATUS_NO_MEMORY;
return ASL_STATUS_READ_FAILED;
}
+ *outlen = len;
return ASL_STATUS_OK;
}
}
static uint64_t
-asl_file_fetch_helper_str(asl_file_t *s, char **p, aslmsg m, const char *key)
+asl_file_fetch_helper_str(asl_file_t *s, char **p, aslmsg m, const char *key, uint32_t *err)
{
uint64_t out;
char *val;
- uint32_t status;
+ uint32_t status, len;
out = _asl_get_64(*p);
*p += sizeof(uint64_t);
val = NULL;
- status = asl_file_fetch_object(s, out, &val);
+ len = 0;
+ status = ASL_STATUS_OK;
+ if (out != 0) status = asl_file_fetch_object(s, out, &val, &len);
+
+ if (err != NULL) *err = status;
if ((status == ASL_STATUS_OK) && (val != NULL))
{
asl_set(m, key, val);
{
char *buf, *p, *k, *v;
file_record_t r;
- uint32_t i, status;
+ uint32_t i, status, len, buflen, kvn;
uint64_t x64, kv;
aslmsg out;
off_t off;
if ((s->flags & ASL_FILE_FLAG_READ_ONLY) == 0) return ASL_STATUS_WRITE_ONLY;
buf = NULL;
- status = asl_file_fetch_object(s, where, &buf);
- if (buf == NULL) return status;
+ buflen = 0;
+ status = asl_file_fetch_object(s, where, &buf, &buflen);
+ if ((status != ASL_STATUS_OK) || (buf == NULL))
+ {
+ s->cursor = 0;
+ s->cursor_xid = 0;
+ return status;
+ }
+
+ /* check buffer size */
+ kvn = _asl_get_32(buf + BUFFER_OFFSET_KVCOUNT);
+ if (buflen < (MSG_RECORD_FIXED_LENGTH - RECORD_COMMON_LEN + (kvn * sizeof(uint64_t))))
+ {
+ free(buf);
+ s->cursor = 0;
+ s->cursor_xid = 0;
+ return ASL_STATUS_READ_FAILED;
+ }
out = asl_new(ASL_TYPE_MSG);
if (out == NULL) return ASL_STATUS_NO_MEMORY;
r.rgid = asl_file_fetch_helper_32(s, &p, out, ASL_KEY_READ_GID, 1, (uint32_t)-1);
r.refpid = asl_file_fetch_helper_32(s, &p, out, ASL_KEY_REF_PID, 1, 0);
r.kvcount = asl_file_fetch_helper_32(s, &p, NULL, NULL, 0, 0);
- r.host = asl_file_fetch_helper_str(s, &p, out, ASL_KEY_HOST);
- r.sender = asl_file_fetch_helper_str(s, &p, out, ASL_KEY_SENDER);
- r.facility = asl_file_fetch_helper_str(s, &p, out, ASL_KEY_FACILITY);
- r.message = asl_file_fetch_helper_str(s, &p, out, ASL_KEY_MSG);
- r.refproc = asl_file_fetch_helper_str(s, &p, out, ASL_KEY_REF_PROC);
- r.session = asl_file_fetch_helper_str(s, &p, out, ASL_KEY_SESSION);
-
- for (i = 0; i < r.kvcount / 2; i++)
+
+ status = ASL_STATUS_OK;
+ r.host = asl_file_fetch_helper_str(s, &p, out, ASL_KEY_HOST, &status); /* 68 */
+ if (status == ASL_STATUS_OK) r.sender = asl_file_fetch_helper_str(s, &p, out, ASL_KEY_SENDER, &status); /* 76 */
+ if (status == ASL_STATUS_OK) r.facility = asl_file_fetch_helper_str(s, &p, out, ASL_KEY_FACILITY, &status); /* 84 */
+ if (status == ASL_STATUS_OK) r.message = asl_file_fetch_helper_str(s, &p, out, ASL_KEY_MSG, &status); /* 92 */
+ if (status == ASL_STATUS_OK) r.refproc = asl_file_fetch_helper_str(s, &p, out, ASL_KEY_REF_PROC, &status); /* 100 */
+ if (status == ASL_STATUS_OK) r.session = asl_file_fetch_helper_str(s, &p, out, ASL_KEY_SESSION, &status); /* 108 */
+
+ if (status != ASL_STATUS_OK)
+ {
+ asl_free(out);
+ free(buf);
+ s->cursor = 0;
+ s->cursor_xid = 0;
+ return status;
+ }
+
+ kvn = r.kvcount / 2;
+
+ for (i = 0; i < kvn; i++)
{
kv = _asl_get_64(p);
p += sizeof(uint64_t);
k = NULL;
- status = asl_file_fetch_object(s, kv, &k);
+ len = 0;
+ status = asl_file_fetch_object(s, kv, &k, &len);
+ if (status != ASL_STATUS_OK)
+ {
+ asl_free(out);
+ free(buf);
+ s->cursor = 0;
+ s->cursor_xid = 0;
+ return status;
+ }
kv = _asl_get_64(p);
p += sizeof(uint64_t);
v = NULL;
- status = asl_file_fetch_object(s, kv, &v);
+ len = 0;
+
+ if (kv != 0)
+ {
+ status = asl_file_fetch_object(s, kv, &v, &len);
+ if (status != ASL_STATUS_OK)
+ {
+ asl_free(out);
+ free(buf);
+ s->cursor = 0;
+ s->cursor_xid = 0;
+ return status;
+ }
+ }
if ((status == ASL_STATUS_OK) && (k != NULL))
{
}
}
- r.prev = asl_file_fetch_helper_64(s, &p, NULL, NULL);
+ r.prev = asl_file_fetch_helper_64(s, &p, NULL, NULL); /* 116 */
free(buf);
if (s->cursor != 0)
{
off = s->cursor + RECORD_COMMON_LEN + sizeof(uint64_t);
+ if (off > s->file_size)
+ {
+ asl_free(out);
+ s->cursor = 0;
+ s->cursor_xid = 0;
+ /*
+ * Next record offset is past the end of the file.
+ * This is an error, but we allow it to fail quietly
+ * so that the current record fetch succeeds.
+ */
+ return ASL_STATUS_OK;
+ }
+
status = fseeko(s->store, off, SEEK_SET);
- if (status != 0) return ASL_STATUS_READ_FAILED;
+ if (status != 0)
+ {
+ asl_free(out);
+ s->cursor = 0;
+ s->cursor_xid = 0;
+ return ASL_STATUS_READ_FAILED;
+ }
status = fread(&x64, sizeof(uint64_t), 1, s->store);
- if (status != 1) return ASL_STATUS_READ_FAILED;
+ if (status != 1)
+ {
+ asl_free(out);
+ s->cursor = 0;
+ s->cursor_xid = 0;
+ return ASL_STATUS_READ_FAILED;
+ }
s->cursor_xid = asl_core_ntohq(x64);
}
char buf[DB_HEADER_LEN];
off_t off;
asl_legacy1_t *legacy;
+ struct stat sb;
+
+ memset(&sb, 0, sizeof(struct stat));
+ if (stat(path, &sb) != 0) return ASL_STATUS_FAILED;
f = fopen(path, "r");
if (f == NULL)
out->first = _asl_get_64(buf + DB_HEADER_FIRST_OFFSET);
out->last = _asl_get_64(buf + DB_HEADER_LAST_OFFSET);
+ out->file_size = (size_t)sb.st_size;
+
+ /* detect bogus last pointer */
+ if (out->last >= out->file_size) out->last = 0;
out->store = f;
status = asl_file_read_uint64(s, off, &next);
if (status != ASL_STATUS_OK) return status;
+ /* detect bogus next pointer */
+ if (next >= s->file_size) next = 0;
+
if (next == 0)
{
if (s->cursor == 0) return ASL_STATUS_OK;
if (s->cursor == s->last) return ASL_STATUS_NO_RECORDS;
if (s->cursor == 0) return ASL_STATUS_NO_RECORDS;
+ /* set offset to read the "next" field in the current record */
off = s->cursor + RECORD_COMMON_LEN;
}
else return ASL_STATUS_INVALID_ARG;
status = asl_file_read_uint64(s, off, &(s->cursor));
if (status != ASL_STATUS_OK) return ASL_STATUS_READ_FAILED;
+ /* detect bogus next pointer */
+ if (s->cursor >= s->file_size) s->cursor = 0;
+
if (s->cursor == 0) return ASL_STATUS_NO_RECORDS;
/* read ID of the record */
}
else
{
- (*res)->msg = (asl_msg_t **)realloc((*res)->msg, ((*res)->count + 1) * sizeof(asl_msg_t *));
+ (*res)->msg = (asl_msg_t **)reallocf((*res)->msg, ((*res)->count + 1) * sizeof(asl_msg_t *));
if ((*res)->msg == NULL)
{
free(*res);
rescount++;
}
- if (work->list->file->cursor_xid == 0)
+ if ((status != ASL_STATUS_OK) || (work->list->file->cursor_xid == 0))
{
n = work->list->next;
free(work->list);
#define __ASL_FILE_H__
/*
- * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
+/*
+ * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
#include <mach/std_types.defs>
#include <mach/mach_types.defs>
out status : int;
SecToken token : security_token_t
);
+
+simpleroutine _asl_server_message
+(
+ server : mach_port_t;
+ message : ooline_data, dealloc;
+ ServerAuditToken token : audit_token_t
+);
/*
- * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
/*
- * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#define NOTIFY_SYSTEM_ASL_FILTER "com.apple.system.syslog.asl_filter"
#define NOTIFY_PREFIX_SYSTEM "com.apple.system.syslog"
#define NOTIFY_PREFIX_USER "user.syslog"
+#define NOTIFY_RC "com.apple.asl.remote"
#define ASL_MSG_FMT_RAW "raw"
#define ASL_MSG_FMT_STD "std"
#define ASL_KEY_REF_PID "RefPID"
#define ASL_KEY_REF_PROC "RefProc"
+#define ASL_KEY_OPTION "ASLOption"
+
+#define ASL_OPT_IGNORE "ignore"
+#define ASL_OPT_STORE "store"
typedef struct __aslclient
{
/*
- * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
/*
* The ASL Store is organized as a set of files in a common directory.
* Files are prefixed by the date (YYYY.MM.DD) of their contents.
- * There are also files for long-TTL (> 1 day) messages.
*
* Messages with no access controls are saved in YYYY.MM.DD.asl
- * Messages with access limited to UID U are saved in YYYY.MM.DD.uU.asl
- * Messages with access limited to GID G are saved in YYYY.MM.DD.gG.asl
- * Messages with access limited to UID U and GID G are saved in YYYY.MM.DD.uU.gG.asl
+ * Messages with access limited to UID uuu are saved in YYYY.MM.DD.Uuuu.asl
+ * Messages with access limited to GID ggg are saved in YYYY.MM.DD.Gggg.asl
+ * Messages with access limited to UID uuu and GID ggg are saved in YYYY.MM.DD.Uuuu.Gggg.asl
+ *
+ * Messages that have a value for ASLExpireTime are saved in BB.YYYY.MM.DD.asl
+ * where the timestamp is the "Best Before" date of the file. Access controls
+ * are implemented as above with Uuuu and Gggg in the file name. Note that the
+ * Best Before files are for the last day of the month, so a single file contains
+ * messages that expire in that month.
*
* An external tool runs daily and deletes "old" files.
*/
+static time_t
+_asl_start_today()
+{
+ time_t now;
+ struct tm ctm;
+
+ memset(&ctm, 0, sizeof(struct tm));
+ now = time(NULL);
+
+ if (localtime_r((const time_t *)&now, &ctm) == NULL) return 0;
+
+ ctm.tm_sec = 0;
+ ctm.tm_min = 0;
+ ctm.tm_hour = 0;
+
+ return mktime(&ctm);
+}
+
/*
* The base directory contains a data file which stores
* the last record ID.
asl_store_open_write(const char *basedir, asl_store_t **s)
{
asl_store_t *out;
- asl_file_t *db;
struct stat sb;
- uint32_t i, status;
- char *path, *subpath;
- time_t now;
- struct tm ctm;
+ uint32_t i, flags;
+ char *path;
FILE *sd;
uint64_t last_id;
+ time_t start;
if (s == NULL) return ASL_STATUS_INVALID_ARG;
+ start = _asl_start_today();
+ if (start == 0) return ASL_STATUS_FAILED;
+
if (basedir == NULL) basedir = PATH_ASL_STORE;
memset(&sb, 0, sizeof(struct stat));
last_id = 0;
+ /* Create new StoreData file (8 bytes ID + 4 bytes flags) */
+
if (fwrite(&last_id, sizeof(uint64_t), 1, sd) != 1)
{
fclose(sd);
return ASL_STATUS_WRITE_FAILED;
}
+
+ flags = 0;
+ if (fwrite(&flags, sizeof(uint32_t), 1, sd) != 1)
+ {
+ fclose(sd);
+ return ASL_STATUS_WRITE_FAILED;
+ }
}
else
{
last_id = asl_core_ntohq(last_id);
}
- memset(&ctm, 0, sizeof(struct tm));
- now = time(NULL);
-
- if (localtime_r((const time_t *)&now, &ctm) == NULL)
- {
- fclose(sd);
- return ASL_STATUS_FAILED;
- }
-
- subpath = NULL;
- asprintf(&subpath, "%s/%d.%02d.%02d", basedir, ctm.tm_year + 1900, ctm.tm_mon + 1, ctm.tm_mday);
- if (subpath == NULL)
- {
- fclose(sd);
- return ASL_STATUS_NO_MEMORY;
- }
-
- path = NULL;
- asprintf(&path, "%s.asl", subpath);
- free(subpath);
- if (path == NULL)
- {
- fclose(sd);
- return ASL_STATUS_NO_MEMORY;
- }
-
- db = NULL;
- status = asl_file_open_write(path, 0644, 0, 0, &db);
- free(path);
- if ((status != ASL_STATUS_OK) || (db == NULL))
- {
- fclose(sd);
- return ASL_STATUS_FAILED;
- }
-
out = (asl_store_t *)calloc(1, sizeof(asl_store_t));
if (out == NULL)
{
fclose(sd);
- asl_file_close(db);
return ASL_STATUS_NO_MEMORY;
}
if (out->base_dir == NULL)
{
fclose(sd);
- asl_file_close(db);
free(out);
return ASL_STATUS_NO_MEMORY;
}
- ctm.tm_sec = 0;
- ctm.tm_min = 0;
- ctm.tm_hour = 0;
-
- out->start_today = mktime(&ctm);
+ out->start_today = start;
out->start_tomorrow = out->start_today + SECONDS_PER_DAY;
- out->db = db;
out->storedata = sd;
out->next_id = last_id + 1;
asl_store_statistics(asl_store_t *s, aslmsg *msg)
{
aslmsg out;
- char str[256];
if (s == NULL) return ASL_STATUS_INVALID_STORE;
if (msg == NULL) return ASL_STATUS_INVALID_ARG;
out = (aslmsg)calloc(1, sizeof(asl_msg_t));
if (out == NULL) return ASL_STATUS_NO_MEMORY;
- snprintf(str, sizeof(str), "%u", s->db->string_count);
- asl_set(out, "StringCount", str);
+ /* does nothing for now */
*msg = out;
return ASL_STATUS_OK;
s->file_cache[i].path = NULL;
s->file_cache[i].u = -1;
s->file_cache[i].g = -1;
+ s->file_cache[i].bb = 0;
s->file_cache[i].ts = 0;
}
}
if (s->base_dir != NULL) free(s->base_dir);
s->base_dir = NULL;
- asl_file_close(s->db);
asl_store_file_closeall(s);
if (s->storedata != NULL) fclose(s->storedata);
{
char *str;
int semfd;
+ uint64_t xid;
+ uint32_t status;
if (s == NULL) return ASL_STATUS_INVALID_STORE;
if (semfd < 0) return ASL_STATUS_WRITE_FAILED;
+ status = ASL_STATUS_OK;
+
+ /* write the current message ID in the SweepStore file */
+ xid = asl_core_htonq(s->next_id - 1);
+ if (write(semfd, &xid, sizeof(uint64_t)) != sizeof(uint64_t)) status = ASL_STATUS_WRITE_FAILED;
+
close(semfd);
- return ASL_STATUS_OK;
+ return status;
}
/*
* Returns least recently used or unused cache slot.
*/
static uint32_t
-asl_store_file_cache_lru(asl_store_t *s, time_t now)
+asl_store_file_cache_lru(asl_store_t *s, time_t now, uint32_t ignorex)
{
time_t min;
uint32_t i, x;
x = 0;
min = now - FILE_CACHE_TTL;
-
+
for (i = 0; i < FILE_CACHE_SIZE; i++)
{
- if (s->file_cache[i].ts < min)
+ if ((i != ignorex) && (s->file_cache[i].ts < min))
{
asl_file_close(s->file_cache[i].f);
s->file_cache[i].f = NULL;
s->file_cache[i].path = NULL;
s->file_cache[i].u = -1;
s->file_cache[i].g = -1;
- s->file_cache[i].ts = 0;
+ s->file_cache[i].bb = 0;
+ s->file_cache[i].ts = 0;
}
-
+
if (s->file_cache[i].ts < s->file_cache[x].ts) x = i;
}
{
if (s == NULL) return ASL_STATUS_INVALID_STORE;
- asl_store_file_cache_lru(s, time(NULL));
+ asl_store_file_cache_lru(s, time(NULL), FILE_CACHE_SIZE);
return ASL_STATUS_OK;
}
static uint32_t
-asl_store_file_open_write(asl_store_t *s, char *subpath, int32_t ruid, int32_t rgid, asl_file_t **f, time_t now, uint32_t check_cache)
+asl_store_file_open_write(asl_store_t *s, char *tstring, int32_t ruid, int32_t rgid, time_t bb, asl_file_t **f, time_t now, uint32_t check_cache)
{
char *path;
mode_t m;
/* see if the file is already open and in the cache */
for (i = 0; i < FILE_CACHE_SIZE; i++)
{
- if ((s->file_cache[i].u == ruid) && (s->file_cache[i].g == rgid) && (s->file_cache[i].f != NULL))
+ if ((s->file_cache[i].u == ruid) && (s->file_cache[i].g == rgid) && (s->file_cache[i].bb == bb) && (s->file_cache[i].f != NULL))
{
s->file_cache[i].ts = now;
*f = s->file_cache[i].f;
- if (check_cache == 1) asl_store_file_cache_lru(s, now);
+ if (check_cache == 1) asl_store_file_cache_lru(s, now, i);
return ASL_STATUS_OK;
}
}
{
if (rgid == -1)
{
- asprintf(&path, "%s.asl", subpath);
+ asprintf(&path, "%s/%s.asl", s->base_dir, tstring);
}
else
{
g = rgid;
m = 0640;
- asprintf(&path, "%s.G%d.asl", subpath, g);
+ asprintf(&path, "%s/%s.G%d.asl", s->base_dir, tstring, g);
}
}
else
if (rgid == -1)
{
m = 0600;
- asprintf(&path, "%s.U%d.asl", subpath, u);
+ asprintf(&path, "%s/%s.U%d.asl", s->base_dir, tstring, u);
}
else
{
g = rgid;
m = 0640;
- asprintf(&path, "%s.U%d.G%u.asl", subpath, u, g);
+ asprintf(&path, "%s/%s.U%d.G%u.asl", s->base_dir, tstring, u, g);
}
}
return status;
}
- x = asl_store_file_cache_lru(s, now);
+ x = asl_store_file_cache_lru(s, now, FILE_CACHE_SIZE);
if (s->file_cache[x].f != NULL) asl_file_close(s->file_cache[x].f);
if (s->file_cache[x].path != NULL) free(s->file_cache[x].path);
s->file_cache[x].path = path;
s->file_cache[x].u = ruid;
s->file_cache[x].g = rgid;
+ s->file_cache[x].bb = bb;
s->file_cache[x].ts = time(NULL);
*f = out;
uint32_t i;
if (s == NULL) return;
+ if (f == NULL) return;
for (i = 0; i < FILE_CACHE_SIZE; i++)
{
s->file_cache[i].path = NULL;
s->file_cache[i].u = -1;
s->file_cache[i].g = -1;
+ s->file_cache[i].bb = 0;
s->file_cache[i].ts = 0;
return;
}
asl_store_save(asl_store_t *s, aslmsg msg)
{
struct tm ctm;
- time_t t, now;
- char *path, *subpath;
+ time_t msg_time, now, bb;
+ char *path, *tmp_path, *tstring, *scratch;
const char *val;
uid_t ruid;
gid_t rgid;
asl_file_t *f;
- uint32_t status, check_cache;
- asl_store_t *tmp;
+ uint32_t status, check_cache, signal_sweep, len;
uint64_t xid, ftime;
size_t fsize;
now = time(NULL);
+ check_cache = 0;
+ if ((s->last_write + FILE_CACHE_TTL) <= now) check_cache = 1;
+
+ signal_sweep = 0;
+
+ msg_time = 0;
val = asl_get(msg, ASL_KEY_TIME);
- t = 0;
- if (val == NULL) t = now;
- else t = asl_parse_time(val);
+ if (val == NULL) msg_time = now;
+ else msg_time = asl_parse_time(val);
- if (t >= s->start_tomorrow)
+ if (msg_time >= s->start_tomorrow)
{
if (now >= s->start_tomorrow)
{
/* new day begins */
- tmp = NULL;
- status = asl_store_open_write(s->base_dir, &tmp);
- asl_file_close(s->db);
- s->db = NULL;
- if (status != ASL_STATUS_OK)
- {
- fclose(s->storedata);
- free(s->base_dir);
- free(s);
- return status;
- }
-
- s->db = tmp->db;
- s->start_today = tmp->start_today;
- s->start_tomorrow = tmp->start_tomorrow;
- free(tmp->base_dir);
- fclose(tmp->storedata);
- free(tmp);
-
- status = asl_store_signal_sweep(s);
- /* allow this to fail quietly */
+ check_cache = 0;
+ signal_sweep = 1;
+ asl_store_file_closeall(s);
+
+ /*
+ * _asl_start_today should never fail, but if it does,
+ * just push forward one day. That will probably be correct, and if
+ * it isn't, the next message that gets saved will push it ahead again
+ * until we get to the right date.
+ */
+ s->start_today = _asl_start_today();
+ if (s->start_today == 0) s->start_today = s->start_tomorrow;
+
+ s->start_tomorrow = s->start_today + SECONDS_PER_DAY;
}
}
rgid = -1;
if (val != NULL) rgid = atoi(val);
+ bb = 0;
+ val = asl_get(msg, ASL_KEY_EXPIRE_TIME);
+ if (val != NULL)
+ {
+ bb = 1;
+ msg_time = asl_parse_time(val);
+ }
+
if (fseeko(s->storedata, 0, SEEK_SET) != 0) return ASL_STATUS_WRITE_FAILED;
xid = asl_core_htonq(s->next_id);
xid = s->next_id;
s->next_id++;
- check_cache = 0;
- if ((s->last_write + FILE_CACHE_TTL) <= now) check_cache = 1;
-
s->last_write = now;
-
- if ((t >= s->start_today) && (t < s->start_tomorrow) && (ruid == -1) && (rgid == -1))
+
+ if (localtime_r((const time_t *)&msg_time, &ctm) == NULL) return ASL_STATUS_FAILED;
+
+ tstring = NULL;
+ if (bb == 1)
{
- status = asl_file_save(s->db, msg, &xid);
- if (check_cache == 1) asl_store_file_cache_lru(s, now);
- return status;
+ /*
+	 * This supports 12 monthly "Best Before" buckets.
+ * We advance the actual expiry time to day zero of the following month.
+ * mktime() is clever enough to know that you actually mean the last day
+ * of the previous month. What we get back from localtime is the last
+ * day of the month in which the message expires, which we use in the name.
+ */
+ ctm.tm_sec = 0;
+ ctm.tm_min = 0;
+ ctm.tm_hour = 0;
+ ctm.tm_mday = 0;
+ ctm.tm_mon += 1;
+
+ bb = mktime(&ctm);
+
+ if (localtime_r((const time_t *)&bb, &ctm) == NULL) return ASL_STATUS_FAILED;
+ asprintf(&tstring, "BB.%d.%02d.%02d", ctm.tm_year + 1900, ctm.tm_mon + 1, ctm.tm_mday);
+ }
+ else
+ {
+ asprintf(&tstring, "%d.%02d.%02d", ctm.tm_year + 1900, ctm.tm_mon + 1, ctm.tm_mday);
}
- if (localtime_r((const time_t *)&t, &ctm) == NULL) return ASL_STATUS_FAILED;
-
- asprintf(&subpath, "%s/%d.%02d.%02d", s->base_dir, ctm.tm_year + 1900, ctm.tm_mon + 1, ctm.tm_mday);
- if (subpath == NULL) return ASL_STATUS_NO_MEMORY;
+ if (tstring == NULL) return ASL_STATUS_NO_MEMORY;
- f = NULL;
- status = asl_store_file_open_write(s, subpath, ruid, rgid, &f, now, check_cache);
- free(subpath);
- subpath = NULL;
+ status = asl_store_file_open_write(s, tstring, ruid, rgid, bb, &f, now, check_cache);
+ free(tstring);
+ tstring = NULL;
if (status != ASL_STATUS_OK) return status;
fsize = asl_file_size(f);
ftime = asl_file_ctime(f);
- /* if file is larger than max_file_size, rename it and create semaphore file in the store */
+ /* if file is larger than max_file_size, rename it and touch semaphore file in the store */
if ((s->max_file_size != 0) && (fsize > s->max_file_size))
{
+ signal_sweep = 1;
status = ASL_STATUS_OK;
path = asl_store_file_path(s, f);
- subpath = NULL;
asl_store_file_close(s, f);
if (path != NULL)
{
- asprintf(&subpath, "%s.%llu", path, ftime);
- if (subpath == NULL)
+ tmp_path = NULL;
+
+ len = strlen(path);
+ if ((len >= 4) && (!strcmp(path + len - 4, ".asl")))
+ {
+ /* rename xxxxxxx.asl to xxxxxxx.timestamp.asl */
+ scratch = strdup(path);
+ if (scratch != NULL)
+ {
+ scratch[len - 4] = '\0';
+ asprintf(&tmp_path, "%s.%llu.asl", scratch, ftime);
+ free(scratch);
+
+ }
+ }
+ else
+ {
+ /* append timestamp */
+ asprintf(&tmp_path, "%s.%llu", path, ftime);
+ }
+
+ if (tmp_path == NULL)
{
status = ASL_STATUS_NO_MEMORY;
}
else
{
- if (rename(path, subpath) != 0) status = ASL_STATUS_FAILED;
- free(subpath);
+ if (rename(path, tmp_path) != 0) status = ASL_STATUS_FAILED;
+ free(tmp_path);
}
free(path);
}
-
- if (status == ASL_STATUS_OK) status = asl_store_signal_sweep(s);
}
+ if (signal_sweep != 0) asl_store_signal_sweep(s);
+
return status;
}
#define __ASL_STORE_H__
/*
- * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
time_t ts;
uid_t u;
gid_t g;
+ time_t bb;
char *path;
asl_file_t *f;
} asl_cached_file_t;
char *base_dir;
FILE *storedata;
uint64_t next_id;
- asl_file_t *db;
asl_cached_file_t file_cache[FILE_CACHE_SIZE];
void *work;
time_t start_today;
#include <errno.h>
#include <unistd.h>
-#define _PATH_ASL_IN "/var/run/asl_input"
-
static uint8_t *b64charset = (uint8_t *)"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
-__private_extern__ int
-_asl_server_socket(int *sock, struct sockaddr_un *server)
-{
- socklen_t len;
- int status, flags;
-
- *sock = socket(AF_UNIX, SOCK_STREAM, 0);
- if (*sock < 0) return -1;
-
- memset(server, 0, sizeof(struct sockaddr_un));
- server->sun_family = AF_UNIX;
-
- strcpy(server->sun_path, _PATH_ASL_IN);
- server->sun_len = strlen(server->sun_path) + 1;
- len = sizeof(server->sun_len) + sizeof(server->sun_family) + server->sun_len;
-
- status = connect(*sock, (const struct sockaddr *)server, len);
-
- if (status < 0)
- {
- close(*sock);
- *sock = -1;
- return -1;
- }
-
- /* set close-on-exec flag */
- fcntl(*sock, F_SETFD, 1);
-
- /* set non-blocking flag */
- flags = fcntl(*sock, F_GETFL, 0);
- if (flags >= 0) fcntl(*sock, F_SETFL, flags | O_NONBLOCK);
-
- return 0;
-}
-
__private_extern__ const char *
_asl_escape(unsigned char c)
{
#include <stdio.h>
#include <stdlib.h>
+extern const char *__crashreporter_info__;
+static const char badasprintf[] =
+ "Assertion failed and asprintf also failed to create full error string";
+
void
__assert_rtn(func, file, line, failedexpr)
const char *func, *file;
int line;
const char *failedexpr;
{
- if (func == NULL)
+ char *str = NULL;
+
+ if (func == NULL) {
(void)fprintf(stderr,
"Assertion failed: (%s), file %s, line %d.\n", failedexpr,
file, line);
- else
+ if (!__crashreporter_info__) {
+ asprintf(&str,
+ "Assertion failed: (%s), file %s, line %d.\n",
+ failedexpr, file, line);
+ __crashreporter_info__ = str ? str : badasprintf;
+ }
+ } else {
(void)fprintf(stderr,
"Assertion failed: (%s), function %s, file %s, line %d.\n",
failedexpr, func, file, line);
+ if (!__crashreporter_info__) {
+ asprintf(&str,
+ "Assertion failed: (%s), function %s, file %s, line %d.\n",
+ failedexpr, func, file, line);
+ __crashreporter_info__ = str ? str : badasprintf;
+ }
+ }
abort();
/* NOTREACHED */
}
/*
- * Copyright (c) 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <sys/uio.h>
#include <dlfcn.h>
+#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
}
#if __LP64__
-#define _BACKTRACE_FORMAT "%-4d%-35s 0x%016x %s + %u"
+#define _BACKTRACE_FORMAT "%-4d%-35s 0x%016lx %s + %lu"
#define _BACKTRACE_FORMAT_SIZE 82
#else
-#define _BACKTRACE_FORMAT "%-4d%-35s 0x%08x %s + %u"
+#define _BACKTRACE_FORMAT "%-4d%-35s 0x%08lx %s + %lu"
#define _BACKTRACE_FORMAT_SIZE 65
#endif
if (info->dli_sname) {
symbol = info->dli_sname;
} else {
- snprintf(symbuf, sizeof(symbuf), "0x%x", info->dli_saddr);
+ snprintf(symbuf, sizeof(symbuf), "0x%lx", (uintptr_t)info->dli_saddr);
}
return snprintf(buf, size,
_BACKTRACE_FORMAT,
frame,
image,
- addr,
+ (uintptr_t)addr,
symbol,
- addr - info->dli_saddr) + 1;
+ (uintptr_t)addr - (uintptr_t)info->dli_saddr) + 1;
}
char** backtrace_symbols(void* const* buffer, int size) {
.Pp
Setting the environment variable
.Ev COMMAND_MODE
-to the value unix03 causes utility programs to obey the
+to the value unix2003 causes utility programs to obey the
.St -susv3
standards even if doing so would alter the behavior of flags used in 10.3.
.Pp
The value of
.Ev COMMAND_MODE
-is case insensitive and if it is unset or set to something other than legacy or unix03 it behaves as if it were set to unix03.
+is case insensitive and if it is unset or set to something other than legacy or unix2003 it behaves as if it were set to unix2003.
.Sh 32-BIT COMPILATION
Defining
.Dv _NONSTD_SOURCE
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <stdio.h> /* for P_tmpdir */
#include <dirhelper_priv.h>
buf[len - 1] = '\0';
free(p);
}
- return (tlen + 1);
+ return (tlen);
case _CS_POSIX_V6_ILP32_OFF32_CFLAGS:
case _CS_XBS5_ILP32_OFF32_CFLAGS: /* legacy */
errno = ENOMEM;
return (CONFSTR_ERR_RET);
}
- if (_dirhelper(DIRHELPER_USER_LOCAL_TEMP, p, PATH_MAX) == NULL)
+ if (_dirhelper(DIRHELPER_USER_LOCAL_TEMP, p, PATH_MAX) == NULL) {
+ /*
+ * If _dirhelper() fails, try TMPDIR and P_tmpdir,
+ * finally failing otherwise.
+ */
+ if ((p = getenv("TMPDIR")) && access(p, W_OK) == 0)
+ goto docopy;
+ if (access(p = P_tmpdir, W_OK) == 0)
+ goto docopy;
return (CONFSTR_ERR_RET);
+ }
goto docopy;
case _CS_DARWIN_USER_CACHE_DIR:
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <sys/time.h>
#define DKTYPENAMES
#include <sys/disklabel.h>
-#include <ufs/ufs/dinode.h>
-#include <ufs/ffs/fs.h>
+/* from ufs/ffs/fs.h */
+#define BBSIZE 8192
+#define SBSIZE 8192
#include <errno.h>
#include <fcntl.h>
#include <vis.h>
#include "un-namespace.h"
+#ifdef __BLOCKS__
+#include <Block.h>
+#endif /* __BLOCKS__ */
#include "libc_private.h"
+#define ERR_EXIT_UNDEF 0
+#ifdef __BLOCKS__
+#define ERR_EXIT_BLOCK 1
+#endif /* __BLOCKS__ */
+#define ERR_EXIT_FUNC 2
+struct _e_err_exit {
+ unsigned int type;
+#ifdef __BLOCKS__
+ union {
+#endif /* __BLOCKS__ */
+ void (*func)(int);
+#ifdef __BLOCKS__
+ void (^block)(int);
+ };
+#endif /* __BLOCKS__ */
+};
+
#ifdef BUILDING_VARIANT
__private_extern__ FILE *_e_err_file; /* file to use for error output */
-__private_extern__ void (*_e_err_exit)(int);
+__private_extern__ struct _e_err_exit _e_err_exit;
__private_extern__ void _e_visprintf(FILE * __restrict, const char * __restrict, va_list);
#else /* !BUILDING_VARIANT */
__private_extern__ FILE *_e_err_file = NULL; /* file to use for error output */
-__private_extern__ void (*_e_err_exit)(int) = NULL;
+__private_extern__ struct _e_err_exit _e_err_exit = {ERR_EXIT_UNDEF};
/*
* zero means pass as is
void
err_set_exit(void (*ef)(int))
{
- _e_err_exit = ef;
+ _e_err_exit.type = ERR_EXIT_FUNC;
+ _e_err_exit.func = ef;
+}
+
+#ifdef __BLOCKS__
+void
+err_set_exit_b(void (^ef)(int))
+{
+ _e_err_exit.type = ERR_EXIT_BLOCK;
+ _e_err_exit.block = Block_copy(ef);
}
+#endif /* __BLOCKS__ */
#endif /* !BUILDING_VARIANT */
__weak_reference(_err, err);
fprintf(_e_err_file, ": ");
}
fprintf(_e_err_file, "%s\n", strerror(code));
- if (_e_err_exit)
- _e_err_exit(eval);
+ if (_e_err_exit.type)
+#ifdef __BLOCKS__
+ if (_e_err_exit.type == ERR_EXIT_BLOCK)
+ _e_err_exit.block(eval);
+ else
+#endif /* __BLOCKS__ */
+ _e_err_exit.func(eval);
exit(eval);
}
if (fmt != NULL)
_e_visprintf(_e_err_file, fmt, ap);
fprintf(_e_err_file, "\n");
- if (_e_err_exit)
- _e_err_exit(eval);
+ if (_e_err_exit.type)
+#ifdef __BLOCKS__
+ if (_e_err_exit.type == ERR_EXIT_BLOCK)
+ _e_err_exit.block(eval);
+ else
+#endif /* __BLOCKS__ */
+ _e_err_exit.func(eval);
exit(eval);
}
+++ /dev/null
-./err.3
\ No newline at end of file
--- /dev/null
+.\" Copyright (c) 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. All advertising materials mentioning features or use of this software
+.\" must display the following acknowledgement:
+.\" This product includes software developed by the University of
+.\" California, Berkeley and its contributors.
+.\" 4. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" From: @(#)err.3 8.1 (Berkeley) 6/9/93
+.\" $FreeBSD: src/lib/libc/gen/err.3,v 1.20 2004/10/04 14:04:37 jkoshy Exp $
+.\"
+.Dd May 20, 2008
+.Dt ERR 3
+.Os
+.Sh NAME
+.Nm err ,
+.Nm verr ,
+.Nm errc ,
+.Nm verrc ,
+.Nm errx ,
+.Nm verrx ,
+.Nm warn ,
+.Nm vwarn ,
+.Nm warnc ,
+.Nm vwarnc ,
+.Nm warnx ,
+.Nm vwarnx ,
+.Nm err_set_exit ,
+#ifdef UNIFDEF_BLOCKS
+.Nm err_set_exit_b ,
+#endif
+.Nm err_set_file
+.Nd formatted error messages
+.Sh LIBRARY
+.Lb libc
+.Sh SYNOPSIS
+.In err.h
+.Ft void
+.Fn err "int eval" "const char *fmt" "..."
+.Ft void
+.Fn err_set_exit "void (*exitf)(int)"
+#ifdef UNIFDEF_BLOCKS
+.Ft void
+.Fn err_set_exit_b "void (^exitb)(int)"
+#endif
+.Ft void
+.Fn err_set_file "void *vfp"
+.Ft void
+.Fn errc "int eval" "int code" "const char *fmt" "..."
+.Ft void
+.Fn errx "int eval" "const char *fmt" "..."
+.Ft void
+.Fn warn "const char *fmt" "..."
+.Ft void
+.Fn warnc "int code" "const char *fmt" "..."
+.Ft void
+.Fn warnx "const char *fmt" "..."
+.In stdarg.h
+.Ft void
+.Fn verr "int eval" "const char *fmt" "va_list args"
+.Ft void
+.Fn verrc "int eval" "int code" "const char *fmt" "va_list args"
+.Ft void
+.Fn verrx "int eval" "const char *fmt" "va_list args"
+.Ft void
+.Fn vwarn "const char *fmt" "va_list args"
+.Ft void
+.Fn vwarnc "int code" "const char *fmt" "va_list args"
+.Ft void
+.Fn vwarnx "const char *fmt" "va_list args"
+.Sh DESCRIPTION
+The
+.Fn err
+and
+.Fn warn
+family of functions display a formatted error message on the standard
+error output, or on another file specified using the
+.Fn err_set_file
+function.
+In all cases, the last component of the program name, a colon character,
+and a space are output.
+If the
+.Fa fmt
+argument is not NULL, the
+.Xr printf 3
+-like formatted error message is output.
+The output is terminated by a newline character.
+.Pp
+The
+.Fn err ,
+.Fn errc ,
+.Fn verr ,
+.Fn verrc ,
+.Fn warn ,
+.Fn warnc ,
+.Fn vwarn ,
+and
+.Fn vwarnc
+functions append an error message obtained from
+.Xr strerror 3
+based on a code or the global variable
+.Va errno ,
+preceded by another colon and space unless the
+.Fa fmt
+argument is
+.Dv NULL .
+.Pp
+In the case of the
+.Fn errc ,
+.Fn verrc ,
+.Fn warnc ,
+and
+.Fn vwarnc
+functions,
+the
+.Fa code
+argument is used to look up the error message.
+.Pp
+The
+.Fn err ,
+.Fn verr ,
+.Fn warn ,
+and
+.Fn vwarn
+functions use the global variable
+.Va errno
+to look up the error message.
+.Pp
+The
+.Fn errx
+and
+.Fn warnx
+functions do not append an error message.
+.Pp
+The
+.Fn err ,
+.Fn verr ,
+.Fn errc ,
+.Fn verrc ,
+.Fn errx ,
+and
+.Fn verrx
+functions do not return, but exit with the value of the argument
+.Fa eval .
+It is recommended that the standard values defined in
+.Xr sysexits 3
+be used for the value of
+.Fa eval .
+The
+.Fn err_set_exit
+function can be used to specify a function which is called before
+.Xr exit 3
+to perform any necessary cleanup; passing a null function pointer for
+.Va exitf
+resets the hook to do nothing.
+#ifdef UNIFDEF_BLOCKS
+The
+.Fn err_set_exit_b
+function is like
+.Fn err_set_exit
+except it takes a block pointer instead of a function pointer.
+.Bd -ragged -offset indent
+Note: The
+.Fn Block_copy
+function (defined in
+.In Block.h )
+is used by
+.Fn err_set_exit_b
+to make a copy of the block, especially for the case when a stack-based
+block might go out of scope when the subroutine returns.
+.Ed
+.Pp
+#endif
+The
+.Fn err_set_file
+function sets the output stream used by the other functions.
+Its
+.Fa vfp
+argument must be either a pointer to an open stream
+(possibly already converted to void *)
+or a null pointer
+(in which case the output stream is set to standard error).
+.Sh EXAMPLES
+Display the current errno information string and exit:
+.Bd -literal -offset indent
+if ((p = malloc(size)) == NULL)
+ err(1, NULL);
+if ((fd = open(file_name, O_RDONLY, 0)) == -1)
+ err(1, "%s", file_name);
+.Ed
+.Pp
+Display an error message and exit:
+.Bd -literal -offset indent
+if (tm.tm_hour < START_TIME)
+ errx(1, "too early, wait until %s", start_time_string);
+.Ed
+.Pp
+Warn of an error:
+.Bd -literal -offset indent
+if ((fd = open(raw_device, O_RDONLY, 0)) == -1)
+ warnx("%s: %s: trying the block device",
+ raw_device, strerror(errno));
+if ((fd = open(block_device, O_RDONLY, 0)) == -1)
+ err(1, "%s", block_device);
+.Ed
+.Pp
+Warn of an error without using the global variable
+.Va errno :
+.Bd -literal -offset indent
+error = my_function(); /* returns a value from <errno.h> */
+if (error != 0)
+ warnc(error, "my_function");
+.Ed
+.Sh SEE ALSO
+.Xr exit 3 ,
+.Xr fmtmsg 3 ,
+.Xr printf 3 ,
+.Xr strerror 3 ,
+.Xr sysexits 3
+.Sh HISTORY
+The
+.Fn err
+and
+.Fn warn
+functions first appeared in
+.Bx 4.4 .
+The
+.Fn err_set_exit
+and
+.Fn err_set_file
+functions first appeared in
+.Fx 2.1 .
+The
+.Fn errc
+and
+.Fn warnc
+functions first appeared in
+.Fx 3.0 .
+#ifdef UNIFDEF_BLOCKS
+The
+.Fn err_set_exit_b
+function first appeared in Mac OS X 10.6.
+#endif
return (0);
}
+int
+filesec_unset_property(filesec_t fsec, filesec_property_t property)
+{
+ return filesec_set_property(fsec, property, _FILESEC_UNSET_PROPERTY);
+}
+
int
filesec_query_property(filesec_t fsec, filesec_property_t property, int *validptr)
{
.\" @(#)fts.3 8.5 (Berkeley) 4/16/94
.\" $FreeBSD: src/lib/libc/gen/fts.3,v 1.13 2001/09/20 12:32:45 ru Exp $
.\"
-.Dd April 16, 1994
+.Dd May 20, 2008
.Dt FTS 3
.Os
.Sh NAME
.In fts.h
.Ft FTS *
.Fn fts_open "char * const *path_argv" "int options" "int (*compar)(const FTSENT **, const FTSENT **)"
+.Ft FTS *
+.Fn fts_open_b "char * const *path_argv" "int options" "int (^compar)(const FTSENT **, const FTSENT **)"
.Ft FTSENT *
.Fn fts_read "FTS *ftsp"
.Ft FTSENT *
file hierarchies.
A simple overview is that the
.Fn fts_open
-function returns a
+and
+.Fn fts_open_b
+functions return a
.Dq handle
on a file hierarchy, which is then supplied to
the other
.Ql ..\&
which was not specified as a file name to
.Fn fts_open
+or
+.Fn fts_open_b
(see
.Dv FTS_SEEDOT ) .
.It Dv FTS_DP
The path for the file relative to the root of the traversal.
This path contains the path specified to
.Fn fts_open
+or
+.Fn fts_open_b
as a prefix.
.It Fa fts_pathlen
The length of the string referenced by
.Fa path_argv
for the root paths, and in the order listed in the directory for
everything else.
+.Sh FTS_OPEN_B
+The
+.Fn fts_open_b
+function is like
+.Fn fts_open
+except
+.Fa compar
+is a block pointer instead of a function pointer.
+This block is passed to
+.Xr qsort_b 3
+(whereas
+.Fn fts_open
+passes its function pointer to
+.Xr qsort 3 ) .
+.Bd -ragged -offset indent
+Note: The
+.Fn Block_copy
+function (defined in
+.In Block.h )
+is used by
+.Fn fts_open_b
+to make a copy of the block, especially for the case when a stack-based
+block might go out of scope when the subroutine returns.
+.Ed
.Sh FTS_READ
The
.Fn fts_read
.Xr find 1 ,
.Xr chdir 2 ,
.Xr stat 2 ,
-.Xr qsort 3
+.Xr qsort 3 ,
+.Xr qsort_b 3
.Sh STANDARDS
The
.Nm
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2000, 2003, 2005, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#ifdef __BLOCKS__
+#include <Block.h>
+#endif /* __BLOCKS__ */
static FTSENT *fts_alloc(FTS *, char *, int);
static FTSENT *fts_build(FTS *, int);
#define BNAMES 2 /* fts_children, names only */
#define BREAD 3 /* fts_read */
-FTS *
-fts_open(argv, options, compar)
+/* 5653270
+ * For directories containing > 64k subdirectories (or HFS+ with > 64k files
+ * and subdirectories), struct stat's st_nlink (16 bits) will overflow. This
+ * causes the case with FTS_NOSTAT and FTS_PHYSICAL set to prematurely stop
+ * recursing into subdirectories, because of an optimization that expects
+ * st_nlink to be the number of subdirectories (once that number has been
+ * encountered, no further calls to stat should be needed).
+ *
+ * However, on Mac OS X, another optimization largely nullifies the st_nlink
+ * optimization. struct dirent contains d_type, which can distinguish
+ * directories from files without initially calling stat. So stat is only
+ * called on known directories, rather than on other files. With this
+ * optimization, the difference in also using the st_nlink optimization is
+ * pretty minimal (tests show an improvement of a percent or two, probably
+ * due to additional if statement clauses that need to be evaluated).
+ *
+ * So removing the st_nlink optimization code will fix the > 64k subdirectories
+ * problem. And if we replace the multiple if clause logic with a single
+ * switch statement, we can recover the minimal performance loss. We can
+ * go even further and for the case of FTS_NOSTAT and FTS_LOGICAL set, we
+ * can use d_type to also distinguish symbolic links, and so we only need to
+ * call stat on directories and symlinks, not on all files. This provides
+ * a significant performance boost in that special case.
+ */
+/*
+ * The following macros defines values of the dostat variable, which is or-ed
+ * with the value of d_type, and the result used in a switch statement to
+ * determine whether to call stat or not. (We order the macros to minimize
+ * the size of any jump table that the compiler may generate.)
+ */
+#define F_SHIFT 4 /* shift to leave space for d_type */
+#define F_NOSTAT (0 << F_SHIFT) /* don't do any stat's */
+#define F_STATDIRSYM (1 << F_SHIFT) /* only stat directories and symlinks (and unknowns) */
+#define F_ALWAYSSTAT (2 << F_SHIFT) /* always stat */
+#define F_STATDIR (3 << F_SHIFT) /* only stat directories (and unknowns) */
+
+static FTS *
+__fts_open(argv, sp)
char * const *argv;
- register int options;
- int (*compar)();
-{
register FTS *sp;
+{
register FTSENT *p, *root;
register int nitems;
FTSENT *parent, *tmp;
int len;
- /* Options check. */
- if (options & ~FTS_OPTIONMASK) {
- errno = EINVAL;
- return (NULL);
- }
-
- /* Allocate/initialize the stream */
- if ((sp = malloc((u_int)sizeof(FTS))) == NULL)
- return (NULL);
- memset(sp, 0, sizeof(FTS));
- sp->fts_compar = compar;
- sp->fts_options = options;
-
/* Logical walks turn on NOCHDIR; symbolic links are too hard. */
if (ISSET(FTS_LOGICAL))
SET(FTS_NOCHDIR);
* If comparison routine supplied, traverse in sorted
* order; otherwise traverse in the order specified.
*/
- if (compar) {
+ if (sp->fts_compar) {
p->fts_link = root;
root = p;
} else {
}
}
}
- if (compar && nitems > 1)
+ if (sp->fts_compar && nitems > 1)
root = fts_sort(sp, root, nitems);
/*
return (NULL);
}
+FTS *
+fts_open(argv, options, compar)
+ char * const *argv;
+ int options;
+ int (*compar)();
+{
+ register FTS *sp;
+
+ /* Options check. */
+ if (options & ~FTS_OPTIONMASK) {
+ errno = EINVAL;
+ return (NULL);
+ }
+
+ /* Allocate/initialize the stream */
+ if ((sp = malloc((u_int)sizeof(FTS))) == NULL)
+ return (NULL);
+ memset(sp, 0, sizeof(FTS));
+ sp->fts_compar = compar;
+ sp->fts_options = options;
+
+ return __fts_open(argv, sp);
+}
+
+#ifdef __BLOCKS__
+FTS *
+fts_open_b(argv, options, compar)
+ char * const *argv;
+ int options;
+ int (^compar)(const FTSENT **, const FTSENT **);
+{
+ register FTS *sp;
+
+ /* Options check. */
+ if (options & ~FTS_OPTIONMASK) {
+ errno = EINVAL;
+ return (NULL);
+ }
+
+ /* Allocate/initialize the stream */
+ if ((sp = malloc((u_int)sizeof(FTS))) == NULL)
+ return (NULL);
+ memset(sp, 0, sizeof(FTS));
+ sp->fts_compar_b = (int (^)())Block_copy(compar);
+ sp->fts_options = options | FTS_BLOCK_COMPAR;
+
+ return __fts_open(argv, sp);
+}
+#endif /* __BLOCKS__ */
+
static void
fts_load(sp, p)
FTS *sp;
(void)close(sp->fts_rfd);
}
+#ifdef __BLOCKS__
+ /* Free up any block pointer. */
+ if (ISSET(FTS_BLOCK_COMPAR) && sp->fts_compar_b != NULL)
+ Block_release(sp->fts_compar_b);
+#endif /* __BLOCKS__ */
+
/* Free up the stream pointer. */
free(sp);
* and fts_read. There are lots of special cases.
*
* The real slowdown in walking the tree is the stat calls. If FTS_NOSTAT is
- * set and it's a physical walk (so that symbolic links can't be directories),
- * we can do things quickly. First, if it's a 4.4BSD file system, the type
- * of the file is in the directory entry. Otherwise, we assume that the number
- * of subdirectories in a node is equal to the number of links to the parent.
- * The former skips all stat calls. The latter skips stat calls in any leaf
- * directories and for any files after the subdirectories in the directory have
- * been found, cutting the stat calls by about 2/3.
+ * set, we can use d_type to determine if the entry is a directory (or for
+ * logical walks, a directory or symlink) and not call stat for other file
+ * types. This cuts the number of stat calls significantly.
*/
static FTSENT *
fts_build(sp, type)
FTSENT *cur, *tail;
DIR *dirp;
void *adjaddr;
- int cderrno, descend, len, level, maxlen, nlinks, oflag, saved_errno;
+ int cderrno, descend, len, level, maxlen, dostat, oflag, saved_errno;
char *cp;
/* Set current node pointer. */
return (NULL);
}
- /*
- * Nlinks is the number of possible entries of type directory in the
- * directory if we're cheating on stat calls, 0 if we're not doing
- * any stat calls at all, -1 if we're doing stats on everything.
- */
if (type == BNAMES)
- nlinks = 0;
- else if (ISSET(FTS_NOSTAT) && ISSET(FTS_PHYSICAL))
- nlinks = cur->fts_nlink - (ISSET(FTS_SEEDOT) ? 0 : 2);
+ dostat = F_NOSTAT;
+ else if (ISSET(FTS_NOSTAT))
+ dostat = ISSET(FTS_PHYSICAL) ? F_STATDIR : F_STATDIRSYM;
else
- nlinks = -1;
+ dostat = F_ALWAYSSTAT;
#ifdef notdef
- (void)printf("nlinks == %d (cur: %d)\n", nlinks, cur->fts_nlink);
+ (void)printf("dostat == %d\n", dostat);
(void)printf("NOSTAT %d PHYSICAL %d SEEDOT %d\n",
ISSET(FTS_NOSTAT), ISSET(FTS_PHYSICAL), ISSET(FTS_SEEDOT));
#endif
* checking FTS_NS on the returned nodes.
*/
cderrno = 0;
- if (nlinks || type == BREAD)
+ if (dostat || type == BREAD)
if (FCHDIR(sp, dirfd(dirp))) {
- if (nlinks && type == BREAD)
+ if (dostat && type == BREAD)
cur->fts_errno = errno;
cur->fts_flags |= FTS_DONTCHDIR;
descend = 0;
#endif
if (cderrno) {
- if (nlinks) {
+ if (dostat) {
p->fts_info = FTS_NS;
p->fts_errno = cderrno;
} else
p->fts_info = FTS_NSOK;
p->fts_accpath = cur->fts_accpath;
- } else if (nlinks == 0
-#ifdef DT_DIR
- || nlinks > 0 &&
- dp->d_type != DT_DIR && dp->d_type != DT_UNKNOWN
-#endif
- ) {
- p->fts_accpath =
- ISSET(FTS_NOCHDIR) ? p->fts_path : p->fts_name;
- p->fts_info = FTS_NSOK;
} else {
- /* Build a file name for fts_stat to stat. */
- if (ISSET(FTS_NOCHDIR)) {
- p->fts_accpath = p->fts_path;
- memmove(cp, p->fts_name, p->fts_namelen + 1);
- } else
- p->fts_accpath = p->fts_name;
- /* Stat it. */
- p->fts_info = fts_stat(sp, p, 0);
-
- /* Decrement link count if applicable. */
- if (nlinks > 0 && (p->fts_info == FTS_D ||
- p->fts_info == FTS_DC || p->fts_info == FTS_DOT))
- --nlinks;
+ /*
+ * We need to know all file types values that d_type may
+ * be set to. So if that changes, the following needs
+ * to be modified appropriately.
+ */
+ switch(dostat | dp->d_type) {
+ case (F_STATDIR | DT_UNKNOWN):
+ case (F_STATDIR | DT_DIR):
+ case (F_STATDIRSYM | DT_UNKNOWN):
+ case (F_STATDIRSYM | DT_DIR):
+ case (F_STATDIRSYM | DT_LNK):
+ case (F_ALWAYSSTAT | DT_UNKNOWN):
+ case (F_ALWAYSSTAT | DT_FIFO):
+ case (F_ALWAYSSTAT | DT_CHR):
+ case (F_ALWAYSSTAT | DT_DIR):
+ case (F_ALWAYSSTAT | DT_BLK):
+ case (F_ALWAYSSTAT | DT_REG):
+ case (F_ALWAYSSTAT | DT_LNK):
+ case (F_ALWAYSSTAT | DT_SOCK):
+ case (F_ALWAYSSTAT | DT_WHT):
+ /* Build a file name for fts_stat to stat. */
+ if (ISSET(FTS_NOCHDIR)) {
+ p->fts_accpath = p->fts_path;
+ memmove(cp, p->fts_name, p->fts_namelen + 1);
+ } else
+ p->fts_accpath = p->fts_name;
+ /* Stat it. */
+ p->fts_info = fts_stat(sp, p, 0);
+ break;
+ default:
+ /* No stat necessary */
+ p->fts_accpath =
+ ISSET(FTS_NOCHDIR) ? p->fts_path : p->fts_name;
+ p->fts_info = FTS_NSOK;
+ break;
+ }
}
/* We walk in directory order so "ls -f" doesn't get upset. */
}
for (ap = sp->fts_array, p = head; p; p = p->fts_link)
*ap++ = p;
- qsort((void *)sp->fts_array, nitems, sizeof(FTSENT *), sp->fts_compar);
+#ifdef __BLOCKS__
+ if (ISSET(FTS_BLOCK_COMPAR))
+ qsort_b((void *)sp->fts_array, nitems, sizeof(FTSENT *), (int (^)(const void *, const void *))sp->fts_compar_b);
+ else
+#endif /* __BLOCKS__ */
+ qsort((void *)sp->fts_array, nitems, sizeof(FTSENT *), sp->fts_compar);
for (head = *(ap = sp->fts_array); --nitems; ++ap)
ap[0]->fts_link = ap[1];
ap[0]->fts_link = NULL;
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/gen/gethostname.c,v 1.5 2003/08/19 23:01:46 wollman Exp $");
+#include <string.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#include <limits.h>
.\" @(#)getmntinfo.3 8.1 (Berkeley) 6/9/93
.\" $FreeBSD: src/lib/libc/gen/getmntinfo.3,v 1.12 2002/12/19 09:40:21 ru Exp $
.\"
-.Dd June 9, 1993
+.Dd May 15, 2008
.Dt GETMNTINFO 3
.Os
.Sh NAME
.Nm getmntinfo
.Nd get information about mounted file systems
-.Sh LIBRARY
-.Lb libc
.Sh SYNOPSIS
.In sys/param.h
.In sys/ucred.h
.Ft int
.Fn getmntinfo "struct statfs **mntbufp" "int flags"
#ifdef UNIFDEF_LEGACY_64_APIS
+.Sh TRANSITIONAL SYNOPSIS (NOW DEPRECATED)
.Ft int
-.Fn getmntinfo64 "struct statfs64 **mntbufp" "int flags"
+.br
+.Fn getmntinfo64 "struct statfs64 **mntbufp" "int flags" ;
#endif /* UNIFDEF_LEGACY_64_APIS */
.Sh DESCRIPTION
The
.Ft statfs
structures describing each currently mounted file system (see
.Xr statfs 2 ) .
-#ifdef UNIFDEF_LEGACY_64_APIS
-Likewise, the
-.Fn getmntinfo64
-function
-returns an array of
-.Ft statfs64
-structures describing each currently mounted file system.
-#endif /* UNIFDEF_LEGACY_64_APIS */
.Pp
The
.Fn getmntinfo
passes its
.Fa flags
argument transparently to
+.Xr getfsstat 2 .
#ifdef UNIFDEF_LEGACY_64_APIS
+.Pp
+Like
.Xr getfsstat 2 ,
-while the
-.Fn getmntinfo64
-function
-passes its
-.Fa flags
-argument transparently to
-.Fn getfsstat64 .
-#else /* !UNIFDEF_LEGACY_64_APIS */
-.Xr getfsstat 2 .
+when the macro
+.Dv _DARWIN_FEATURE_64_BIT_INODE
+is defined, the
+.Ft ino_t
+type will be 64-bits (force 64-bit inode mode by defining the
+.Dv _DARWIN_USE_64_BIT_INODE
+macro before including header files).
+This will cause the symbol variant of
+.Fn getmntinfo ,
+with the
+.Fa $INODE64
+suffixes, to be automatically linked in.
+In addition, the
+.Ft statfs
+structure will be the 64-bit inode version.
+If
+.Dv _DARWIN_USE_64_BIT_INODE
+is not defined, both
+.Fn getmntinfo
+and the
+.Ft statfs
+structure will refer to the 32-bit inode versions.
#endif /* UNIFDEF_LEGACY_64_APIS */
.Sh RETURN VALUES
On successful completion,
.Fn getmntinfo
-#ifdef UNIFDEF_LEGACY_64_APIS
-and
-.Fn getmntinfo64
-return a count of the number of elements in the array.
-#else /* !UNIFDEF_LEGACY_64_APIS */
returns a count of the number of elements in the array.
-#endif /* UNIFDEF_LEGACY_64_APIS */
The pointer to the array is stored into
.Fa mntbufp .
.Pp
.Fa mntbufp
will be unmodified, any information previously returned by
.Fn getmntinfo
-#ifdef UNIFDEF_LEGACY_64_APIS
-or
-.Fn getmntinfo64
-#endif /* UNIFDEF_LEGACY_64_APIS */
will be lost.
.Sh ERRORS
The
.Fn getmntinfo
-#ifdef UNIFDEF_LEGACY_64_APIS
-and
-.Fn getmntinfo64
-functions
-#else /* !UNIFDEF_LEGACY_64_APIS */
function
-#endif /* UNIFDEF_LEGACY_64_APIS */
may fail and set errno for any of the errors specified for the library
routines
.Xr getfsstat 2
or
.Xr malloc 3 .
+#ifdef UNIFDEF_LEGACY_64_APIS
+.Sh TRANSITIONAL DESCRIPTION (NOW DEPRECATED)
+The
+.Fn getmntinfo64
+routine is equivalent to its corresponding non-64-suffixed routine,
+when 64-bit inodes are in effect.
+It was added before there was support for the symbol variants, and so is
+now deprecated.
+Instead of using it, set the
+.Dv _DARWIN_USE_64_BIT_INODE
+macro before including header files to force 64-bit inode support.
+.Pp
+The
+.Ft statfs64
+structure used by this deprecated routine is the same as the
+.Ft statfs
+structure when 64-bit inodes are in effect.
+#endif /* UNIFDEF_LEGACY_64_APIS */
.Sh SEE ALSO
.Xr getfsstat 2 ,
.Xr mount 2 ,
.Fn getmntinfo64
functions write the array of structures to an internal static object
#else /* !UNIFDEF_LEGACY_64_APIS */
-function write the array of structures to an internal static object
+function writes the array of structures to an internal static object
#endif /* UNIFDEF_LEGACY_64_APIS */
and returns
a pointer to that object.
static void qprintf(const char *, Char *);
#endif
-int
-glob(pattern, flags, errfunc, pglob)
+static int
+__glob(pattern, pglob)
const char *pattern;
- int flags, (*errfunc)(const char *, int);
glob_t *pglob;
{
const u_char *patnext;
int mb_cur_max = MB_CUR_MAX_L(loc);
patnext = (u_char *) pattern;
- if (!(flags & GLOB_APPEND)) {
+ if (!(pglob->gl_flags & GLOB_APPEND)) {
pglob->gl_pathc = 0;
pglob->gl_pathv = NULL;
- if (!(flags & GLOB_DOOFFS))
+ if (!(pglob->gl_flags & GLOB_DOOFFS))
pglob->gl_offs = 0;
}
- if (flags & GLOB_LIMIT) {
+ if (pglob->gl_flags & GLOB_LIMIT) {
limit = pglob->gl_matchc;
if (limit == 0)
limit = ARG_MAX;
} else
limit = 0;
- pglob->gl_flags = flags & ~GLOB_MAGCHAR;
- pglob->gl_errfunc = errfunc;
pglob->gl_matchc = 0;
bufnext = patbuf;
bufend = bufnext + MAXPATHLEN - 1;
- if (flags & GLOB_NOESCAPE) {
+ if (pglob->gl_flags & GLOB_NOESCAPE) {
memset(&mbs, 0, sizeof(mbs));
while (bufend - bufnext >= mb_cur_max) {
clen = mbrtowc_l(&wc, (const char *)patnext, MB_LEN_MAX, &mbs, loc);
}
*bufnext = EOS;
- if (flags & GLOB_BRACE)
+ if (pglob->gl_flags & GLOB_BRACE)
return globexp1(patbuf, pglob, &limit, loc);
else
return glob0(patbuf, pglob, &limit, loc);
}
+int
+glob(pattern, flags, errfunc, pglob)
+ const char *pattern;
+ int flags, (*errfunc)(const char *, int);
+ glob_t *pglob;
+{
+#ifdef __BLOCKS__
+ pglob->gl_flags = flags & ~(GLOB_MAGCHAR | _GLOB_ERR_BLOCK);
+#else /* !__BLOCKS__ */
+ pglob->gl_flags = flags & ~GLOB_MAGCHAR;
+#endif /* __BLOCKS__ */
+ pglob->gl_errfunc = errfunc;
+ return __glob(pattern, pglob);
+}
+
+#ifdef __BLOCKS__
+int
+glob_b(pattern, flags, errblk, pglob)
+ const char *pattern;
+ int flags, (^errblk)(const char *, int);
+ glob_t *pglob;
+{
+ pglob->gl_flags = flags & ~GLOB_MAGCHAR;
+ pglob->gl_flags |= _GLOB_ERR_BLOCK;
+ pglob->gl_errblk = errblk;
+ return __glob(pattern, pglob);
+}
+#endif /* __BLOCKS__ */
+
/*
* Expand recursively a glob {} pattern. When there is no more expansion
* invoke the standard globbing routine to glob the rest of the magic
if (pglob->gl_errfunc) {
if (g_Ctoc(pathbuf, buf, sizeof(buf), loc))
return (GLOB_ABORTED);
+#ifdef __BLOCKS__
+ if (pglob->gl_flags & _GLOB_ERR_BLOCK) {
+ if (pglob->gl_errblk(buf, errno))
+ return (GLOB_ABORTED);
+ } else
+#endif /* __BLOCKS__ */
if (pglob->gl_errfunc(buf, errno))
return (GLOB_ABORTED);
}
.\" @(#)glob.3 8.3 (Berkeley) 4/16/94
.\" $FreeBSD: src/lib/libc/gen/glob.3,v 1.30 2004/09/01 23:28:27 tjr Exp $
.\"
-.Dd September 1, 2004
+.Dd May 20, 2008
.Dt GLOB 3
.Os
.Sh NAME
.Nm glob ,
+#ifdef UNIFDEF_BLOCKS
+.Nm glob_b ,
+#endif
.Nm globfree
.Nd generate pathnames matching a pattern
-.Sh LIBRARY
-.Lb libc
.Sh SYNOPSIS
.In glob.h
.Ft int
.Fo glob
.Fa "const char *restrict pattern"
.Fa "int flags"
-.Fa "int (*errfunc)(const char *epath, int eerno)"
+.Fa "int (*errfunc)(const char *epath, int errno)"
.Fa "glob_t *restrict pglob"
.Fc
+#ifdef UNIFDEF_BLOCKS
+.Ft int
+.Fo glob_b
+.Fa "const char *restrict pattern"
+.Fa "int flags"
+.Fa "int (^errblk)(const char *epath, int errno)"
+.Fa "glob_t *restrict pglob"
+.Fc
+#endif
.Ft void
.Fo globfree
.Fa "glob_t *pglob"
or
.Fa errfunc
returns zero, the error is ignored.
+#ifdef UNIFDEF_BLOCKS
+.Pp
+The
+.Fn glob_b
+function is like
+.Fn glob
+except that the error callback is a block pointer instead of a function
+pointer.
+#endif
.Pp
The
.Fn globfree
function frees any space associated with
.Fa pglob
from a previous call(s) to
+#ifdef UNIFDEF_BLOCKS
+.Fn glob
+or
+.Fn glob_b .
+#else
.Fn glob .
+#endif
.Sh RETURN VALUES
On successful completion,
.Fn glob
+#ifdef UNIFDEF_BLOCKS
+and
+.Fn glob_b
+return zero.
+#else
returns zero.
+#endif
In addition, the fields of
.Fa pglob
contain the values described below:
contains the total number of matched pathnames so far.
This includes other matches from previous invocations of
.Fn glob
+#ifdef UNIFDEF_BLOCKS
+or
+.Fn glob_b
+#endif
if
.Dv GLOB_APPEND
was specified.
.It Fa gl_matchc
contains the number of matched pathnames in the current invocation of
+#ifdef UNIFDEF_BLOCKS
+.Fn glob
+or
+.Fn glob_b .
+#else
.Fn glob .
+#endif
.It Fa gl_flags
contains a copy of the
.Fa flags
.Pp
If
.Fn glob
+#ifdef UNIFDEF_BLOCKS
+or
+.Fn glob_b
+#endif
terminates due to an error, it sets errno and returns one of the
following non-zero constants, which are defined in the include
file
.Sh CAVEATS
The
.Fn glob
-function will not match filenames that begin with a period
+#ifdef UNIFDEF_BLOCKS
+and
+.Fn glob_b
+functions
+#else
+function
+#endif
+will not match filenames that begin with a period
unless this is specifically requested (e.g., by ".*").
.Sh SEE ALSO
.Xr sh 1 ,
.Fn globfree
functions first appeared in
.Bx 4.4 .
+#ifdef UNIFDEF_BLOCKS
+The
+.Fn glob_b
+function first appeared in Mac OS X 10.6.
+#endif
.Sh BUGS
Patterns longer than
.Dv MAXPATHLEN
.Pp
The
.Fn glob
-argument
+#ifdef UNIFDEF_BLOCKS
+and
+.Fn glob_b
+functions
+#else
+function
+#endif
may fail and set errno for any of the errors specified for the
library routines
.Xr stat 2 ,
#include <unistd.h>
#include <sys/filio.h>
#include <sys/conf.h>
+#include <sys/ioctl.h>
#include <errno.h>
int
+++ /dev/null
-.\" Copyright (c) 2003 David Schultz <dschultz@uclink.Berkeley.EDU>
-.\" All rights reserved.
-.\"
-.\" Redistribution and use in source and binary forms, with or without
-.\" modification, are permitted provided that the following conditions
-.\" are met:
-.\" 1. Redistributions of source code must retain the above copyright
-.\" notice, this list of conditions and the following disclaimer.
-.\" 2. Redistributions in binary form must reproduce the above copyright
-.\" notice, this list of conditions and the following disclaimer in the
-.\" documentation and/or other materials provided with the distribution.
-.\"
-.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-.\" SUCH DAMAGE.
-.\"
-.\" $FreeBSD: src/lib/libc/gen/isgreater.3,v 1.2 2003/06/01 19:19:59 ru Exp $
-.\"
-.Dd February 12, 2003
-.Dt ISGREATER 3
-.Os
-.Sh NAME
-.Nm isgreater , isgreaterequal , isless , islessequal ,
-.Nm islessgreater , isunordered
-.Nd "compare two floating-point numbers"
-.Sh LIBRARY
-.Lb libc
-.Sh SYNOPSIS
-.In math.h
-.Ft int
-.Fn isgreater "real-floating x" "real-floating y"
-.Ft int
-.Fn isgreaterequal "real-floating x" "real-floating y"
-.Ft int
-.Fn isless "real-floating x" "real-floating y"
-.Ft int
-.Fn islessequal "real-floating x" "real-floating y"
-.Ft int
-.Fn islessgreater "real-floating x" "real-floating y"
-.Ft int
-.Fn isunordered "real-floating x" "real-floating y"
-.Sh DESCRIPTION
-Each of the macros
-.Fn isgreater ,
-.Fn isgreaterequal ,
-.Fn isless ,
-.Fn islessequal ,
-and
-.Fn islessgreater
-takes arguments
-.Fa x
-and
-.Fa y
-and returns a non-zero value if and only if its nominal
-relation on
-.Fa x
-and
-.Fa y
-is true.
-These macros always return zero if either
-argument is not a number (NaN), but unlike the corresponding C
-operators, they never raise a floating point exception.
-.Pp
-The
-.Fn isunordered
-macro takes arguments
-.Fa x
-and
-.Fa y ,
-returning non-zero if and only if neither
-.Fa x
-nor
-.Fa y
-are NaNs.
-For any pair of floating-point values, one
-of the relationships (less, greater, equal, unordered) holds.
-.Sh SEE ALSO
-.Xr fpclassify 3 ,
-.Xr math 3 ,
-.Xr signbit 3
-.Sh STANDARDS
-The
-.Fn isgreater ,
-.Fn isgreaterequal ,
-.Fn isless ,
-.Fn islessequal ,
-.Fn islessgreater ,
-and
-.Fn isunordered
-macros conform to
-.St -isoC-99 .
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
--- /dev/null
+/*
+ * Copyright (c) 1999, 2006, 2008 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/* Author: Bertrand Serlet, August 1999 */
+
+/*
+ Multithread enhancements for "tiny" allocations introduced February 2008.
+ These are in the spirit of "Hoard". See:
+ Berger, E.D.; McKinley, K.S.; Blumofe, R.D.; Wilson, P.R. (2000).
+ "Hoard: a scalable memory allocator for multithreaded applications".
+ ACM SIGPLAN Notices 35 (11): 117-128. Berger2000.
+ <http://portal.acm.org/citation.cfm?id=356989.357000>
+ Retrieved on 2008-02-22.
+*/
+
+/* gcc -g -O3 magazine_malloc.c malloc.c -o libmagmalloc.dylib -I. \
+ -I/System/Library/Frameworks/System.framework/PrivateHeaders/ -funit-at-a-time \
+ -dynamiclib -Wall -arch x86_64 -arch i386 -arch ppc */
+
+#include "scalable_malloc.h"
+#include "malloc_printf.h"
+#include "_simple.h"
+#include "magmallocProvider.h"
+
+#include <pthread_internals.h> /* for pthread_lock_t SPI */
+#include <pthread.h> /* for pthread API */
+
+#include <stdint.h>
+#include <unistd.h>
+#include <mach/vm_statistics.h>
+#include <mach/mach_init.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/sysctl.h>
+#include <libkern/OSAtomic.h>
+#include <mach-o/dyld.h> /* for NSVersionOfLinkTimeLibrary() */
+
+/********************* DEFINITIONS ************************/
+
+#define DEBUG_MALLOC 0 // set to one to debug malloc itself
+
+#define DEBUG_CLIENT 0 // set to one to debug malloc client
+
+#if DEBUG_MALLOC
+#warning DEBUG_MALLOC ENABLED
+# define INLINE
+# define ALWAYSINLINE
+# define CHECK_MAGAZINE_PTR_LOCKED(szone, mag_ptr, fun) \
+do { \
+ if (__is_threaded && TRY_LOCK(mag_ptr->magazine_lock)) { \
+ malloc_printf("*** magazine_lock was not set %p in %s\n", \
+ mag_ptr->magazine_lock, fun); \
+ } \
+} while (0)
+#else
+# define INLINE __inline__
+# define ALWAYSINLINE __attribute__((always_inline))
+# define CHECK_MAGAZINE_PTR_LOCKED(szone, mag_ptr, fun) {}
+#endif
+
+# define NOINLINE __attribute__((noinline))
+
+#if defined(__i386__) || defined(__x86_64__)
+#define CACHE_ALIGN __attribute__ ((aligned (128) )) /* Future-proofing at 128B */
+#elif defined(__ppc__) || defined(__ppc64__)
+#define CACHE_ALIGN __attribute__ ((aligned (128) ))
+#else
+#define CACHE_ALIGN
+#endif
+
+/*
+ * Access to global variables is slow, so optimise our handling of vm_page_size
+ * and vm_page_shift.
+ */
+#define _vm_page_size vm_page_size /* to get to the originals */
+#define _vm_page_shift vm_page_shift
+#define vm_page_size 4096 /* our normal working sizes */
+#define vm_page_shift 12
+
+/*
+ * msize - a type to refer to the number of quanta of a tiny or small
+ * allocation. A tiny block with an msize of 3 would be 3 << SHIFT_TINY_QUANTUM
+ * bytes in size.
+ */
+typedef unsigned short msize_t;
+
+typedef union {
+ void *p;
+ uintptr_t u;
+} ptr_union;
+
+typedef struct {
+ ptr_union previous;
+ ptr_union next;
+} free_list_t;
+
+typedef unsigned int grain_t; // N.B. wide enough to index all free slots
+
+typedef int mag_index_t;
+
+#define CHECK_REGIONS (1 << 31)
+
+#define MAX_RECORDER_BUFFER 256
+
+/********************* DEFINITIONS for tiny ************************/
+
+/*
+ * Memory in the Tiny range is allocated from regions (heaps) pointed to by the
+ * szone's hashed_regions pointer.
+ *
+ * Each region is laid out as a heap, followed by a header block, all within
+ * a 1MB (2^20) block. This means there are 64520 16-byte blocks and the header
+ * is 16138 bytes, making the total 1048458 bytes, leaving 118 bytes unused.
+ *
+ * The header block is arranged as in struct tiny_region defined just below, and
+ * consists of two bitfields (or bit arrays) interleaved 32 bits by 32 bits.
+ *
+ * Each bitfield comprises NUM_TINY_BLOCKS bits, and refers to the corresponding
+ * TINY_QUANTUM block within the heap.
+ *
+ * The bitfields are used to encode the state of memory within the heap. The header bit indicates
+ * that the corresponding quantum is the first quantum in a block (either in use or free). The
+ * in-use bit is set for the header if the block has been handed out (allocated). If the header
+ * bit is not set, the in-use bit is invalid.
+ *
+ * The szone maintains an array of NUM_TINY_SLOTS freelists, each of which is used to hold
+ * free objects of the corresponding quantum size.
+ *
+ * A free block is laid out depending on its size, in order to fit all free
+ * blocks in 16 bytes, on both 32 and 64 bit platforms. One quantum blocks do
+ * not store their size in the block, instead relying on the header information
+ * to determine their size. Blocks of two or more quanta have room to store
+ * their size in the block, and store it both after the 'next' pointer, and in
+ * the last 2 bytes of the block.
+ *
+ * 1-quantum block
+ * Offset (32-bit mode) (64-bit mode)
+ * 0x0 0x0 : previous
+ * 0x4 0x08 : next
+ * end end
+ *
+ * >1-quantum block
+ * Offset (32-bit mode) (64-bit mode)
+ * 0x0 0x0 : previous
+ * 0x4 0x08 : next
+ * 0x8 0x10 : size (in quantum counts)
+ * end - 2 end - 2 : size (in quantum counts)
+ * end end
+ *
+ * All fields are pointer-sized, except for the size which is an unsigned short.
+ *
+ */
+
+#define SHIFT_TINY_QUANTUM 4 // Required for AltiVec
+#define TINY_QUANTUM (1 << SHIFT_TINY_QUANTUM)
+
+#define FOLLOWING_TINY_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_TINY_QUANTUM))
+
+#ifdef __LP64__
+#define NUM_TINY_SLOTS 64 // number of slots for free-lists
+#else
+#define NUM_TINY_SLOTS 32 // number of slots for free-lists
+#endif
+
+#define NUM_TINY_BLOCKS 64520
+#define SHIFT_TINY_CEIL_BLOCKS 16 // ceil(log2(NUM_TINY_BLOCKS))
+#define NUM_TINY_CEIL_BLOCKS (1 << SHIFT_TINY_CEIL_BLOCKS)
+#define TINY_BLOCKS_ALIGN (SHIFT_TINY_CEIL_BLOCKS + SHIFT_TINY_QUANTUM) // 20
+
+/*
+ * Enough room for the data, followed by the bit arrays (2-bits per block)
+ * plus rounding to the nearest page.
+ */
+#define CEIL_NUM_TINY_BLOCKS_WORDS (((NUM_TINY_BLOCKS + 31) & ~31) >> 5)
+#define TINY_METADATA_SIZE (sizeof(region_trailer_t) + sizeof(tiny_header_inuse_pair_t) * CEIL_NUM_TINY_BLOCKS_WORDS)
+#define TINY_REGION_SIZE \
+ ((NUM_TINY_BLOCKS * TINY_QUANTUM + TINY_METADATA_SIZE + vm_page_size - 1) & ~ (vm_page_size - 1))
+
+#define TINY_METADATA_START (NUM_TINY_BLOCKS * TINY_QUANTUM)
+
+/*
+ * Beginning and end pointers for a region's heap.
+ */
+#define TINY_REGION_ADDRESS(region) ((void *)(region))
+#define TINY_REGION_END(region) ((void *)(((uintptr_t)(region)) + (NUM_TINY_BLOCKS * TINY_QUANTUM)))
+
+/*
+ * Locate the heap base for a pointer known to be within a tiny region.
+ */
+#define TINY_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << TINY_BLOCKS_ALIGN) - 1)))
+
+/*
+ * Convert between byte and msize units.
+ */
+#define TINY_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_TINY_QUANTUM)
+#define TINY_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_TINY_QUANTUM)
+
+#ifdef __LP64__
+# define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[8])
+#else
+# define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[4])
+#endif
+#define TINY_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]
+
+/*
+ * Layout of a tiny region
+ */
+typedef uint32_t tiny_block_t[4]; // assert(TINY_QUANTUM == sizeof(tiny_block_t))
+
+typedef struct tiny_header_inuse_pair
+{
+ uint32_t header;
+ uint32_t inuse;
+} tiny_header_inuse_pair_t;
+
+typedef struct region_trailer
+{
+ struct region_trailer *prev;
+ struct region_trailer *next;
+ boolean_t recirc_suitable;
+ unsigned bytes_used;
+ mag_index_t mag_index;
+} region_trailer_t;
+
+typedef struct tiny_region
+{
+ tiny_block_t blocks[NUM_TINY_BLOCKS];
+
+ region_trailer_t trailer;
+
+ // The interleaved bit arrays comprising the header and inuse bitfields.
+ // The unused bits of each component in the last pair will be initialized to sentinel values.
+ tiny_header_inuse_pair_t pairs[CEIL_NUM_TINY_BLOCKS_WORDS];
+
+ uint8_t pad[TINY_REGION_SIZE - (NUM_TINY_BLOCKS * sizeof(tiny_block_t)) - TINY_METADATA_SIZE];
+} *tiny_region_t;
+
+/*
+ * Per-region meta data for tiny allocator
+ */
+#define REGION_TRAILER_FOR_TINY_REGION(r) (&(((tiny_region_t)(r))->trailer))
+#define MAGAZINE_INDEX_FOR_TINY_REGION(r) (REGION_TRAILER_FOR_TINY_REGION(r)->mag_index)
+#define BYTES_USED_FOR_TINY_REGION(r) (REGION_TRAILER_FOR_TINY_REGION(r)->bytes_used)
+
+/*
+ * Locate the block header for a pointer known to be within a tiny region.
+ */
+#define TINY_BLOCK_HEADER_FOR_PTR(_p) ((void *)&(((tiny_region_t)TINY_REGION_FOR_PTR(_p))->pairs))
+
+/*
+ * Locate the inuse map for a given block header pointer.
+ */
+#define TINY_INUSE_FOR_HEADER(_h) ((void *)&(((tiny_header_inuse_pair_t *)(_h))->inuse))
+
+/*
+ * Compute the bitmap index for a pointer known to be within a tiny region.
+ */
+#define TINY_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_CEIL_BLOCKS - 1))
+
+#define TINY_CACHE 1 // This governs a last-free cache of 1 that bypasses the free-list
+
+#if ! TINY_CACHE
+#warning TINY_CACHE turned off
+#endif
+
+#define TINY_REGION_PAYLOAD_BYTES (NUM_TINY_BLOCKS * TINY_QUANTUM)
+
+/********************* DEFINITIONS for small ************************/
+
+/*
+ * Memory in the Small range is allocated from regions (heaps) pointed to by the szone's hashed_regions
+ * pointer.
+ *
+ * Each region is laid out as a heap, followed by the metadata array, all within an 8MB (2^23) block.
+ * The array is arranged as an array of shorts, one for each SMALL_QUANTUM in the heap.
+ * This means there are 16320 512-blocks and the array is 16320*2 bytes, which totals 8388480, leaving
+ * 128 bytes unused.
+ *
+ * The MSB of each short is set for the first quantum in a free block. The low 15 bits encode the
+ * block size (in SMALL_QUANTUM units), or are zero if the quantum is not the first in a block.
+ *
+ * The szone maintains an array of 32 freelists, each of which is used to hold free objects
+ * of the corresponding quantum size.
+ *
+ * A free block is laid out as:
+ *
+ * Offset (32-bit mode) (64-bit mode)
+ * 0x0 0x0 : previous
+ * 0x4 0x08 : next
+ * 0x8 0x10 : size (in quantum counts)
+ * end - 2 end - 2 : size (in quantum counts)
+ * end end
+ *
+ * All fields are pointer-sized, except for the size which is an unsigned short.
+ *
+ */
+
+#define SMALL_IS_FREE (1 << 15)
+
+#define SHIFT_SMALL_QUANTUM (SHIFT_TINY_QUANTUM + 5) // 9
+#define SMALL_QUANTUM (1 << SHIFT_SMALL_QUANTUM) // 512 bytes
+
+#define FOLLOWING_SMALL_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_SMALL_QUANTUM))
+
+/*
+ * The number of slots in the free-list for small blocks. To avoid going to
+ * vm system as often on large memory machines, increase the number of free list
+ * spots above some amount of RAM installed in the system.
+ */
+#define NUM_SMALL_SLOTS 32
+#define NUM_SMALL_SLOTS_LARGEMEM 256
+#define SMALL_BITMAP_WORDS 8
+
+/*
+ * We can only represent up to 1<<15 for msize; but we choose to stay even below that to avoid the
+ * convention msize=0 => msize = (1<<15)
+ */
+#define NUM_SMALL_BLOCKS 16320
+#define SHIFT_SMALL_CEIL_BLOCKS 14 // ceil(log2(NUM_SMALL_BLOCKS))
+#define NUM_SMALL_CEIL_BLOCKS (1 << SHIFT_SMALL_CEIL_BLOCKS)
+#define SMALL_BLOCKS_ALIGN (SHIFT_SMALL_CEIL_BLOCKS + SHIFT_SMALL_QUANTUM) // 23
+
+#define SMALL_METADATA_SIZE (sizeof(region_trailer_t) + NUM_SMALL_BLOCKS * sizeof(msize_t))
+#define SMALL_REGION_SIZE \
+ ((NUM_SMALL_BLOCKS * SMALL_QUANTUM + SMALL_METADATA_SIZE + vm_page_size - 1) & ~ (vm_page_size - 1))
+
+#define SMALL_METADATA_START (NUM_SMALL_BLOCKS * SMALL_QUANTUM)
+
+/*
+ * Beginning and end pointers for a region's heap.
+ */
+#define SMALL_REGION_ADDRESS(region) ((unsigned char *)region)
+#define SMALL_REGION_END(region) (SMALL_REGION_ADDRESS(region) + (NUM_SMALL_BLOCKS * SMALL_QUANTUM))
+
+/*
+ * Locate the heap base for a pointer known to be within a small region.
+ */
+#define SMALL_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << SMALL_BLOCKS_ALIGN) - 1)))
+
+/*
+ * Convert between byte and msize units.
+ */
+#define SMALL_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_SMALL_QUANTUM)
+#define SMALL_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_SMALL_QUANTUM)
+
+#define SMALL_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]
+
+/*
+ * Layout of a small region
+ */
+typedef uint32_t small_block_t[SMALL_QUANTUM/sizeof(uint32_t)];
+
+typedef struct small_region
+{
+ small_block_t blocks[NUM_SMALL_BLOCKS];
+
+ region_trailer_t trailer;
+
+ msize_t small_meta_words[NUM_SMALL_BLOCKS];
+
+ uint8_t pad[SMALL_REGION_SIZE - (NUM_SMALL_BLOCKS * sizeof(small_block_t)) - SMALL_METADATA_SIZE];
+} *small_region_t;
+
+/*
+ * Per-region meta data for small allocator
+ */
+#define REGION_TRAILER_FOR_SMALL_REGION(r) (&(((small_region_t)(r))->trailer))
+#define MAGAZINE_INDEX_FOR_SMALL_REGION(r) (REGION_TRAILER_FOR_SMALL_REGION(r)->mag_index)
+#define BYTES_USED_FOR_SMALL_REGION(r) (REGION_TRAILER_FOR_SMALL_REGION(r)->bytes_used)
+
+/*
+ * Locate the metadata base for a pointer known to be within a small region.
+ */
+#define SMALL_META_HEADER_FOR_PTR(_p) (((small_region_t)SMALL_REGION_FOR_PTR(_p))->small_meta_words)
+
+/*
+ * Compute the metadata index for a pointer known to be within a small region.
+ */
+#define SMALL_META_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_SMALL_QUANTUM) & (NUM_SMALL_CEIL_BLOCKS - 1))
+
+/*
+ * Find the metadata word for a pointer known to be within a small region.
+ */
+#define SMALL_METADATA_FOR_PTR(_p) (SMALL_META_HEADER_FOR_PTR(_p) + SMALL_META_INDEX_FOR_PTR(_p))
+
+/*
+ * Determine whether a pointer known to be within a small region points to memory which is free.
+ */
+#define SMALL_PTR_IS_FREE(_p) (*SMALL_METADATA_FOR_PTR(_p) & SMALL_IS_FREE)
+
+/*
+ * Extract the msize value for a pointer known to be within a small region.
+ */
+#define SMALL_PTR_SIZE(_p) (*SMALL_METADATA_FOR_PTR(_p) & ~SMALL_IS_FREE)
+
+#define PROTECT_SMALL 0 // Should be 0: 1 is too slow for normal use
+
+#define SMALL_CACHE 1
+#if !SMALL_CACHE
+#warning SMALL_CACHE turned off
+#endif
+
+#define SMALL_REGION_PAYLOAD_BYTES (NUM_SMALL_BLOCKS * SMALL_QUANTUM)
+
+/************************* DEFINITIONS for large ****************************/
+
+#define LARGE_THRESHOLD (15 * 1024) // strictly above this use "large"
+#define LARGE_THRESHOLD_LARGEMEM (127 * 1024)
+
+#if (LARGE_THRESHOLD > NUM_SMALL_SLOTS * SMALL_QUANTUM)
+#error LARGE_THRESHOLD should always be less than NUM_SMALL_SLOTS * SMALL_QUANTUM
+#endif
+
+#if (LARGE_THRESHOLD_LARGEMEM > NUM_SMALL_SLOTS_LARGEMEM * SMALL_QUANTUM)
+#error LARGE_THRESHOLD_LARGEMEM should always be less than NUM_SMALL_SLOTS_LARGEMEM * SMALL_QUANTUM
+#endif
+
+/*
+ * When all memory is touched after a copy, vm_copy() is always a lose
+ * But if the memory is only read, vm_copy() wins over memmove() at 3 or 4 pages
+ * (on a G3/300MHz)
+ *
+ * This must be larger than LARGE_THRESHOLD
+ */
+#define VM_COPY_THRESHOLD (40 * 1024)
+#define VM_COPY_THRESHOLD_LARGEMEM (128 * 1024)
+
+typedef struct {
+ vm_address_t address;
+ vm_size_t size;
+ boolean_t did_madvise_reusable;
+} large_entry_t;
+
+#define LARGE_CACHE 1
+#if !LARGE_CACHE
+#warning LARGE_CACHE turned off
+#endif
+#if defined(__LP64__)
+#define LARGE_ENTRY_CACHE_SIZE 16
+#define LARGE_CACHE_SIZE_LIMIT ((vm_size_t)0x80000000) /* 2Gb */
+#else
+#define LARGE_ENTRY_CACHE_SIZE 8
+#define LARGE_CACHE_SIZE_LIMIT ((vm_size_t)0x02000000) /* 32Mb */
+#endif
+#define LARGE_CACHE_SIZE_ENTRY_LIMIT (LARGE_CACHE_SIZE_LIMIT/LARGE_ENTRY_CACHE_SIZE)
+
+/*******************************************************************************
+ * Definitions for region hash
+ ******************************************************************************/
+
+typedef void * region_t;
+typedef region_t * rgnhdl_t; /* A pointer into hashed_regions array. */
+
+#define INITIAL_NUM_REGIONS_SHIFT 6 // log2(INITIAL_NUM_REGIONS)
+#define INITIAL_NUM_REGIONS (1 << INITIAL_NUM_REGIONS_SHIFT) // Must be a power of 2!
+#define HASHRING_OPEN_ENTRY ((region_t) 0) // Initial value and sentinel marking end of collision chain
+#define HASHRING_REGION_DEALLOCATED ((region_t)-1) // Region at this slot reclaimed by OS
+#define HASH_BLOCKS_ALIGN TINY_BLOCKS_ALIGN // MIN( TINY_BLOCKS_ALIGN, SMALL_BLOCKS_ALIGN, ... )
+
+typedef struct region_hash_generation {
+ size_t num_regions_allocated;
+ size_t num_regions_allocated_shift; // log2(num_regions_allocated)
+ region_t *hashed_regions; // hashed by location
+ struct region_hash_generation *nextgen;
+} region_hash_generation_t;
+
+/*******************************************************************************
+ * Per-processor magazine for tiny and small allocators
+ ******************************************************************************/
+
+typedef struct { // vm_allocate()'d, so the array of magazines is page-aligned to begin with.
+ // Take magazine_lock first, Depot lock when needed for recirc, then szone->{tiny,small}_regions_lock when needed for alloc
+ pthread_lock_t magazine_lock CACHE_ALIGN;
+
+ // One element deep "death row", optimizes malloc/free/malloc for identical size.
+ void *mag_last_free; // low SHIFT_{TINY,SMALL}_QUANTUM bits indicate the msize
+ region_t mag_last_free_rgn; // holds the region for mag_last_free
+
+ free_list_t *mag_free_list[256]; // assert( 256 >= MAX( NUM_TINY_SLOTS, NUM_SMALL_SLOTS_LARGEMEM ))
+ unsigned mag_bitmap[8]; // assert( sizeof(mag_bitmap) << 3 >= sizeof(mag_free_list)/sizeof(free_list_t) )
+
+ // the last free region in the last block is treated as a big block in use that is not accounted for
+ size_t mag_bytes_free_at_end;
+ region_t mag_last_region; // Valid iff mag_bytes_free_at_end > 0
+
+ // bean counting ...
+ unsigned mag_num_objects;
+ size_t mag_num_bytes_in_objects;
+ size_t num_bytes_in_magazine;
+
+ // recirculation list -- invariant: all regions owned by this magazine that meet the emptiness criteria
+ // are located nearer to the head of the list than any region that doesn't satisfy that criteria.
+ // Doubly linked list for efficient extraction.
+ unsigned recirculation_entries;
+ region_trailer_t *firstNode;
+ region_trailer_t *lastNode;
+
+#if __LP64__
+ uint64_t pad[49]; // So sizeof(magazine_t) is 2560 bytes. FIXME: assert this at compile time
+#else
+ uint32_t pad[45]; // So sizeof(magazine_t) is 1280 bytes. FIXME: assert this at compile time
+#endif
+} magazine_t;
+
+#define TINY_MAX_MAGAZINES 16 /* MUST BE A POWER OF 2! */
+#define TINY_MAGAZINE_PAGED_SIZE \
+ (((sizeof(magazine_t) * (TINY_MAX_MAGAZINES + 1)) + vm_page_size - 1) &\
+ ~ (vm_page_size - 1)) /* + 1 for the Depot */
+
+#define SMALL_MAX_MAGAZINES 16 /* MUST BE A POWER OF 2! */
+#define SMALL_MAGAZINE_PAGED_SIZE \
+ (((sizeof(magazine_t) * (SMALL_MAX_MAGAZINES + 1)) + vm_page_size - 1) &\
+ ~ (vm_page_size - 1)) /* + 1 for the Depot */
+
+#define DEPOT_MAGAZINE_INDEX -1
+
+/****************************** zone itself ***********************************/
+
+/*
+ * Note that objects whose addresses are held in pointers here must be pursued
+ * individually in the {tiny,small}_in_use_enumeration() routines. See for
+ * example the treatment of region_hash_generation and tiny_magazines below.
+ */
+
+typedef struct szone_s { // vm_allocate()'d, so page-aligned to begin with.
+ malloc_zone_t basic_zone;
+ pthread_key_t cpu_id_key;
+ unsigned debug_flags;
+ void *log_address;
+
+ /* Regions for tiny objects */
+ pthread_lock_t tiny_regions_lock CACHE_ALIGN;
+ size_t num_tiny_regions;
+ size_t num_tiny_regions_dealloc;
+ region_hash_generation_t *tiny_region_generation;
+ region_hash_generation_t trg[2];
+
+ int num_tiny_magazines;
+ unsigned num_tiny_magazines_mask;
+ int num_tiny_magazines_mask_shift;
+ magazine_t *tiny_magazines; // array of per-processor magazines
+
+ /* Regions for small objects */
+ pthread_lock_t small_regions_lock CACHE_ALIGN;
+ size_t num_small_regions;
+ size_t num_small_regions_dealloc;
+ region_hash_generation_t *small_region_generation;
+ region_hash_generation_t srg[2];
+
+ unsigned num_small_slots; // determined by physmem size
+
+ int num_small_magazines;
+ unsigned num_small_magazines_mask;
+ int num_small_magazines_mask_shift;
+ magazine_t *small_magazines; // array of per-processor magazines
+
+ /* large objects: all the rest */
+ pthread_lock_t large_szone_lock CACHE_ALIGN; // One customer at a time for large
+ unsigned num_large_objects_in_use;
+ unsigned num_large_entries;
+ large_entry_t *large_entries; // hashed by location; null entries don't count
+ size_t num_bytes_in_large_objects;
+
+#if LARGE_CACHE
+ int large_entry_cache_oldest;
+ int large_entry_cache_newest;
+ large_entry_t large_entry_cache[LARGE_ENTRY_CACHE_SIZE]; // "death row" for large malloc/free
+ boolean_t large_legacy_reset_mprotect;
+ size_t large_entry_cache_hoard_bytes;
+ size_t large_entry_cache_hoard_lmit;
+#endif
+
+ /* flag and limits pertaining to altered malloc behavior for systems with
+ large amounts of physical memory */
+ unsigned is_largemem;
+ unsigned large_threshold;
+ unsigned vm_copy_threshold;
+
+ /* security cookie */
+ uintptr_t cookie;
+
+ /* Initial region list */
+ region_t initial_tiny_regions[INITIAL_NUM_REGIONS];
+ region_t initial_small_regions[INITIAL_NUM_REGIONS];
+
+ /* The purgeable zone constructed by create_purgeable_zone() would like to hand off tiny and small
+ * allocations to the default scalable zone. Record the latter as the "helper" zone here. */
+ struct szone_s *helper_zone;
+} szone_t;
+
+#define SZONE_PAGED_SIZE ((sizeof(szone_t) + vm_page_size - 1) & ~ (vm_page_size - 1))
+
+#if DEBUG_MALLOC || DEBUG_CLIENT
+static void szone_sleep(void);
+#endif
+__private_extern__ void malloc_error_break(void);
+
+// msg prints after fmt, ...
+static NOINLINE void szone_error(szone_t *szone, int is_corruption, const char *msg, const void *ptr, const char *fmt, ...)
+ __printflike(5, 6);
+
+static void protect(void *address, size_t size, unsigned protection, unsigned debug_flags);
+static void *allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags,
+ int vm_page_label);
+static void deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags);
+static int madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi);
+static kern_return_t _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr);
+
+static INLINE mag_index_t mag_get_thread_index(szone_t *szone) ALWAYSINLINE;
+static magazine_t *mag_lock_zine_for_region_trailer(szone_t *szone, magazine_t *magazines, region_trailer_t *trailer,
+ mag_index_t mag_index);
+
+static INLINE rgnhdl_t hash_lookup_region_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r)
+ ALWAYSINLINE;
+static void hash_region_insert_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r);
+static region_t *hash_regions_alloc_no_lock(szone_t *szone, size_t num_entries);
+static region_t *hash_regions_grow_no_lock(szone_t *szone, region_t *regions, size_t old_size,
+ size_t *mutable_shift, size_t *new_size);
+
+static INLINE uintptr_t free_list_gen_checksum(uintptr_t ptr) ALWAYSINLINE;
+static INLINE uintptr_t free_list_checksum_ptr(szone_t *szone, void *p) ALWAYSINLINE;
+static INLINE void *free_list_unchecksum_ptr(szone_t *szone, ptr_union *ptr) ALWAYSINLINE;
+static unsigned free_list_count(szone_t *szone, free_list_t *ptr);
+
+static INLINE void recirc_list_extract(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node) ALWAYSINLINE;
+static INLINE void recirc_list_splice_last(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node) ALWAYSINLINE;
+static INLINE void recirc_list_splice_first(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node) ALWAYSINLINE;
+
+static INLINE void BITARRAY_SET(uint32_t *bits, msize_t index) ALWAYSINLINE;
+static INLINE void BITARRAY_CLR(uint32_t *bits, msize_t index) ALWAYSINLINE;
+static INLINE boolean_t BITARRAY_BIT(uint32_t *bits, msize_t index) ALWAYSINLINE;
+
+static msize_t get_tiny_free_size(const void *ptr);
+static msize_t get_tiny_previous_free_msize(const void *ptr);
+static INLINE msize_t get_tiny_meta_header(const void *ptr, boolean_t *is_free) ALWAYSINLINE;
+static INLINE void set_tiny_meta_header_in_use(const void *ptr, msize_t msize) ALWAYSINLINE;
+static INLINE void set_tiny_meta_header_in_use_1(const void *ptr) ALWAYSINLINE;
+static INLINE void set_tiny_meta_header_middle(const void *ptr) ALWAYSINLINE;
+static INLINE void set_tiny_meta_header_free(const void *ptr, msize_t msize) ALWAYSINLINE;
+static INLINE boolean_t tiny_meta_header_is_free(const void *ptr) ALWAYSINLINE;
+static INLINE void *tiny_previous_preceding_free(void *ptr, msize_t *prev_msize) ALWAYSINLINE;
+
+static void tiny_free_list_add_ptr(szone_t *szone, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize);
+static void tiny_free_list_remove_ptr(szone_t *szone, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize);
+static INLINE region_t tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr) ALWAYSINLINE;
+
+static void tiny_finalize_region(szone_t *szone, magazine_t *tiny_mag_ptr);
+static int tiny_free_detach_region(szone_t *szone, magazine_t *tiny_mag_ptr, region_t r);
+static size_t tiny_free_reattach_region(szone_t *szone, magazine_t *tiny_mag_ptr, region_t r);
+static void tiny_free_scan_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r);
+static void tiny_free_try_depot_unmap_no_lock(szone_t *szone, magazine_t *depot_ptr, region_trailer_t *node);
+static void tiny_free_do_recirc_to_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index);
+static boolean_t tiny_get_region_from_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index);
+
+static INLINE void tiny_free_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, region_t region,
+ void *ptr, msize_t msize) ALWAYSINLINE;
+static void *tiny_malloc_from_region_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index,
+ msize_t msize);
+static boolean_t tiny_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size);
+static boolean_t tiny_check_region(szone_t *szone, region_t region);
+static kern_return_t tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone,
+ memory_reader_t reader, vm_range_recorder_t recorder);
+static void *tiny_malloc_from_free_list(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index,
+ msize_t msize);
+static INLINE void *tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) ALWAYSINLINE;
+static INLINE void free_tiny(szone_t *szone, void *ptr, region_t tiny_region, size_t known_size) ALWAYSINLINE;
+static void print_tiny_free_list(szone_t *szone);
+static void print_tiny_region(boolean_t verbose, region_t region, size_t bytes_at_end);
+static boolean_t tiny_free_list_check(szone_t *szone, grain_t slot);
+
+static INLINE void small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize) ALWAYSINLINE;
+static INLINE void small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize) ALWAYSINLINE;
+static INLINE void small_meta_header_set_middle(msize_t *meta_headers, msize_t index) ALWAYSINLINE;
+static void small_free_list_add_ptr(szone_t *szone, magazine_t *small_mag_ptr, void *ptr, msize_t msize);
+static void small_free_list_remove_ptr(szone_t *szone, magazine_t *small_mag_ptr, void *ptr, msize_t msize);
+static INLINE region_t small_region_for_ptr_no_lock(szone_t *szone, const void *ptr) ALWAYSINLINE;
+
+static void small_finalize_region(szone_t *szone, magazine_t *small_mag_ptr);
+static int small_free_detach_region(szone_t *szone, magazine_t *small_mag_ptr, region_t r);
+static size_t small_free_reattach_region(szone_t *szone, magazine_t *small_mag_ptr, region_t r);
+static void small_free_scan_depot_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r);
+static void small_free_try_depot_unmap_no_lock(szone_t *szone, magazine_t *depot_ptr, region_trailer_t *node);
+static void small_free_do_recirc_to_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index);
+static boolean_t small_get_region_from_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index);
+static INLINE void small_free_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, region_t region,
+ void *ptr, msize_t msize) ALWAYSINLINE;
+static void *small_malloc_from_region_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index,
+ msize_t msize);
+static boolean_t small_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size);
+static boolean_t small_check_region(szone_t *szone, region_t region);
+static kern_return_t small_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone,
+ memory_reader_t reader, vm_range_recorder_t recorder);
+static void *small_malloc_from_free_list(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index,
+ msize_t msize);
+static INLINE void *small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) ALWAYSINLINE;
+static INLINE void free_small(szone_t *szone, void *ptr, region_t small_region, size_t known_size) ALWAYSINLINE;
+static void print_small_free_list(szone_t *szone);
+static void print_small_region(szone_t *szone, boolean_t verbose, region_t region, size_t bytes_at_end);
+static boolean_t small_free_list_check(szone_t *szone, grain_t grain);
+
+#if DEBUG_MALLOC
+static void large_debug_print(szone_t *szone);
+#endif
+static large_entry_t *large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
+static void large_entry_insert_no_lock(szone_t *szone, large_entry_t range);
+static INLINE void large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) ALWAYSINLINE;
+static INLINE large_entry_t *large_entries_alloc_no_lock(szone_t *szone, unsigned num) ALWAYSINLINE;
+static void large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num,
+ vm_range_t *range_to_deallocate);
+static large_entry_t *large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate);
+static vm_range_t large_entry_free_no_lock(szone_t *szone, large_entry_t *entry);
+static NOINLINE kern_return_t large_in_use_enumerator(task_t task, void *context,
+ unsigned type_mask, vm_address_t large_entries_address,
+ unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder);
+static void *large_malloc(szone_t *szone, size_t num_pages, unsigned char alignment, boolean_t cleared_requested);
+static NOINLINE void free_large(szone_t *szone, void *ptr);
+static INLINE int large_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) ALWAYSINLINE;
+
+/*
+ * Mark these NOINLINE to avoid bloating the purgeable zone callbacks
+ */
+static NOINLINE void szone_free(szone_t *szone, void *ptr);
+static NOINLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested);
+static NOINLINE void *szone_malloc(szone_t *szone, size_t size);
+static NOINLINE void *szone_calloc(szone_t *szone, size_t num_items, size_t size);
+static NOINLINE void *szone_valloc(szone_t *szone, size_t size);
+static NOINLINE size_t szone_size_try_large(szone_t *szone, const void *ptr);
+static NOINLINE size_t szone_size(szone_t *szone, const void *ptr);
+static NOINLINE void *szone_realloc(szone_t *szone, void *ptr, size_t new_size);
+static NOINLINE void *szone_memalign(szone_t *szone, size_t alignment, size_t size);
+static NOINLINE void szone_free_definite_size(szone_t *szone, void *ptr, size_t size);
+static NOINLINE unsigned szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count);
+static NOINLINE void szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count);
+static void szone_destroy(szone_t *szone);
+static NOINLINE size_t szone_good_size(szone_t *szone, size_t size);
+
+static NOINLINE boolean_t szone_check_all(szone_t *szone, const char *function);
+static boolean_t szone_check(szone_t *szone);
+static kern_return_t szone_ptr_in_use_enumerator(task_t task, void *context,
+ unsigned type_mask, vm_address_t zone_address,
+ memory_reader_t reader, vm_range_recorder_t recorder);
+static NOINLINE void szone_print(szone_t *szone, boolean_t verbose);
+static void szone_log(malloc_zone_t *zone, void *log_address);
+static void szone_force_lock(szone_t *szone);
+static void szone_force_unlock(szone_t *szone);
+static boolean_t szone_locked(szone_t *szone);
+
+static void szone_statistics(szone_t *szone, malloc_statistics_t *stats);
+
+static void purgeable_free(szone_t *szone, void *ptr);
+static void *purgeable_malloc(szone_t *szone, size_t size);
+static void *purgeable_calloc(szone_t *szone, size_t num_items, size_t size);
+static void *purgeable_valloc(szone_t *szone, size_t size);
+static size_t purgeable_size(szone_t *szone, const void *ptr);
+static void *purgeable_realloc(szone_t *szone, void *ptr, size_t new_size);
+static void *purgeable_memalign(szone_t *szone, size_t alignment, size_t size);
+static void purgeable_free_definite_size(szone_t *szone, void *ptr, size_t size);
+static unsigned purgeable_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count);
+static void purgeable_batch_free(szone_t *szone, void **to_be_freed, unsigned count);
+static void purgeable_destroy(szone_t *szone);
+static size_t purgeable_good_size(szone_t *szone, size_t size);
+
+static boolean_t purgeable_check(szone_t *szone);
+static kern_return_t purgeable_ptr_in_use_enumerator(task_t task, void *context,
+ unsigned type_mask, vm_address_t zone_address,
+ memory_reader_t reader, vm_range_recorder_t recorder);
+static void purgeable_print(szone_t *szone, boolean_t verbose);
+static void purgeable_log(malloc_zone_t *zone, void *log_address);
+static void purgeable_force_lock(szone_t *szone);
+static void purgeable_force_unlock(szone_t *szone);
+static boolean_t purgeable_locked(szone_t *szone);
+
+static void purgeable_statistics(szone_t *szone, malloc_statistics_t *stats);
+
+static void *frozen_malloc(szone_t *zone, size_t new_size);
+static void *frozen_calloc(szone_t *zone, size_t num_items, size_t size);
+static void *frozen_valloc(szone_t *zone, size_t new_size);
+static void *frozen_realloc(szone_t *zone, void *ptr, size_t new_size);
+static void frozen_free(szone_t *zone, void *ptr);
+static void frozen_destroy(szone_t *zone);
+
+#define SZONE_LOCK(szone) \
+ do { \
+ LOCK(szone->large_szone_lock); \
+ } while (0)
+
+#define SZONE_UNLOCK(szone) \
+ do { \
+ UNLOCK(szone->large_szone_lock); \
+ } while (0)
+
+#define SZONE_TRY_LOCK(szone) \
+ TRY_LOCK(szone->large_szone_lock);
+
+#define SZONE_MAGAZINE_PTR_LOCK(szone, mag_ptr) \
+ do { \
+ LOCK(mag_ptr->magazine_lock); \
+ } while(0)
+
+#define SZONE_MAGAZINE_PTR_UNLOCK(szone, mag_ptr) \
+ do { \
+ UNLOCK(mag_ptr->magazine_lock); \
+ } while(0)
+
+#define SZONE_MAGAZINE_PTR_TRY_LOCK(szone, mag_ptr) \
+ TRY_LOCK(mag_ptr->magazine_lock);
+
+#if DEBUG_MALLOC
+# define LOG(szone,ptr) \
+ (szone->log_address && (((uintptr_t)szone->log_address == -1) || \
+ (szone->log_address == (void *)(ptr))))
+#else
+# define LOG(szone,ptr) 0
+#endif
+
+#if DEBUG_MALLOC || DEBUG_CLIENT
+# define CHECK(szone,fun) \
+ if ((szone)->debug_flags & CHECK_REGIONS) \
+ szone_check_all(szone, fun)
+#else
+# define CHECK(szone,fun) \
+ do {} while (0)
+#endif
+
+/********************* VERY LOW LEVEL UTILITIES ************************/
+
+#if DEBUG_MALLOC || DEBUG_CLIENT
+static void
+szone_sleep(void)
+{
+
+ if (getenv("MallocErrorSleep")) {
+ _malloc_printf(ASL_LEVEL_NOTICE, "*** sleeping to help debug\n");
+ sleep(3600); // to help debug
+ }
+}
+#endif
+
+extern const char *__crashreporter_info__;
+
+// msg prints after fmt, ...
+static NOINLINE void
+szone_error(szone_t *szone, int is_corruption, const char *msg, const void *ptr, const char *fmt, ...)
+{
+ va_list ap;
+ _SIMPLE_STRING b = _simple_salloc();
+
+ if (szone) SZONE_UNLOCK(szone); // FIXME: unlock magazine and region locks?
+ if (b) {
+ if (fmt) {
+ va_start(ap, fmt);
+ _simple_vsprintf(b, fmt, ap);
+ va_end(ap);
+ }
+ if (ptr) {
+ _simple_sprintf(b, "*** error for object %p: %s\n", ptr, msg);
+ } else {
+ _simple_sprintf(b, "*** error: %s\n", msg);
+ }
+ malloc_printf("%s*** set a breakpoint in malloc_error_break to debug\n", _simple_string(b));
+ } else {
+ /*
+ * Should only get here if vm_allocate() can't get a single page of
+ * memory, implying _simple_asl_log() would also fail. So we just
+ * print to the file descriptor.
+ */
+ if (fmt) {
+ va_start(ap, fmt);
+ _malloc_vprintf(MALLOC_PRINTF_NOLOG, fmt, ap);
+ va_end(ap);
+ }
+ if (ptr) {
+ _malloc_printf(MALLOC_PRINTF_NOLOG, "*** error for object %p: %s\n", ptr, msg);
+ } else {
+ _malloc_printf(MALLOC_PRINTF_NOLOG, "*** error: %s\n", msg);
+ }
+ _malloc_printf(MALLOC_PRINTF_NOLOG, "*** set a breakpoint in malloc_error_break to debug\n");
+ }
+ malloc_error_break();
+#if DEBUG_MALLOC
+ szone_print(szone, 1);
+ szone_sleep();
+#endif
+#if DEBUG_CLIENT
+ szone_sleep();
+#endif
+ // Call abort() if this is a memory corruption error and the abort on
+ // corruption flag is set, or if any error should abort.
+ if ((is_corruption && (szone->debug_flags & SCALABLE_MALLOC_ABORT_ON_CORRUPTION)) ||
+ (szone->debug_flags & SCALABLE_MALLOC_ABORT_ON_ERROR)) {
+ __crashreporter_info__ = b ? _simple_string(b) : msg;
+ abort();
+ } else if (b) {
+ _simple_sfree(b);
+ }
+}
+
+static void
+protect(void *address, size_t size, unsigned protection, unsigned debug_flags)
+{
+ kern_return_t err;
+
+ if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) {
+ err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address - vm_page_size, vm_page_size, 0, protection);
+ if (err) {
+ malloc_printf("*** can't protect(%p) region for prelude guard page at %p\n",
+ protection,(uintptr_t)address - (1 << vm_page_shift));
+ }
+ }
+ if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) {
+ err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address + size, vm_page_size, 0, protection);
+ if (err) {
+ malloc_printf("*** can't protect(%p) region for postlude guard page at %p\n",
+ protection, (uintptr_t)address + size);
+ }
+ }
+}
+
+static void *
+allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label)
+{
+ // align specifies a desired alignment (as a log) or 0 if no alignment requested
+ void *vm_addr;
+ uintptr_t addr = 0, aligned_address;
+ boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
+ boolean_t purgeable = debug_flags & SCALABLE_MALLOC_PURGEABLE;
+ size_t allocation_size = round_page(size);
+ size_t delta;
+ int flags = VM_MAKE_TAG(vm_page_label);
+
+ if (align) add_guard_pages = 0; // too cumbersome to deal with that
+ if (!allocation_size) allocation_size = 1 << vm_page_shift;
+ if (add_guard_pages) allocation_size += 2 * (1 << vm_page_shift);
+ if (align) allocation_size += (size_t)1 << align;
+ if (purgeable) flags |= VM_FLAGS_PURGABLE;
+
+ if (allocation_size < size) // size_t arithmetic wrapped!
+ return NULL;
+
+ vm_addr = mmap(0, allocation_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, flags, 0);
+ if ((uintptr_t)vm_addr == -1) {
+ szone_error(szone, 0, "can't allocate region", NULL, "*** mmap(size=%lu) failed (error code=%d)\n",
+ allocation_size, errno);
+ return NULL;
+ }
+ addr = (uintptr_t)vm_addr;
+
+ if (align) {
+ aligned_address = (addr + ((uintptr_t)1 << align) - 1) & ~ (((uintptr_t)1 << align) - 1);
+ if (aligned_address != addr) {
+ delta = aligned_address - addr;
+ if (munmap((void *)addr, delta) == -1)
+ malloc_printf("*** munmap unaligned header failed with %d\n", errno);
+ addr = aligned_address;
+ allocation_size -= delta;
+ }
+ if (allocation_size > size) {
+ if (munmap((void *)(addr + size), allocation_size - size) == -1)
+ malloc_printf("*** munmap unaligned footer failed with %d\n", errno);
+ }
+ }
+ if (add_guard_pages) {
+ addr += (uintptr_t)1 << vm_page_shift;
+ protect((void *)addr, size, 0, debug_flags);
+ }
+ return (void *)addr;
+}
+
+static void
+deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags)
+{
+ int err;
+ boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
+
+ if (add_guard_pages) {
+ addr = (void *)((uintptr_t)addr - (1 << vm_page_shift));
+ size += 2 * (1 << vm_page_shift);
+ }
+ err = munmap(addr, size);
+ if ((err == -1) && szone)
+ szone_error(szone, 0, "Can't deallocate_pages region", addr, NULL);
+}
+
+static int
+madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi)
+{
+ if (pgHi > pgLo) {
+ size_t len = pgHi - pgLo;
+
+#if DEBUG_MALLOC
+ if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE)
+ memset((void *)pgLo, 0xed, len); // Scribble on MADV_FREEd memory
+#endif
+ MAGMALLOC_MADVFREEREGION((void *)szone, (void *)r, (void *)pgLo, len); // DTrace USDT Probe
+ if (-1 == madvise((void *)pgLo, len, MADV_FREE_REUSABLE)) {
+ /* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */
+#if DEBUG_MALLOC
+ szone_error(szone, 1, "madvise_free_range madvise(..., MADV_FREE_REUSABLE) failed", (void *)pgLo, NULL);
+#endif
+ }
+ }
+ return 0;
+}
+
+static kern_return_t
+_szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr)
+{
+ *ptr = (void *)address;
+ return 0;
+}
+
+// Multiplicative hash where the multiplier is a prime near (ULONG_MAX / phi). [phi = 1.618033...]
+// pthread_t's are page aligned, (sometimes even in ascending sequence). These hash well.
+// See Knuth TAOCP, Vol. 3.
+#if __LP64__
+#define HASH_SELF() \
+ ((((uintptr_t)pthread_self()) >> vm_page_shift) * 11400714819323198549ULL) >> (64 - szone->num_tiny_magazines_mask_shift)
+#else
+#define HASH_SELF() \
+ ((((uintptr_t)pthread_self()) >> vm_page_shift) * 2654435761UL) >> (32 - szone->num_tiny_magazines_mask_shift)
+#endif
+
+#if defined(__i386__) || defined(__x86_64__)
+#define __APPLE_API_PRIVATE
+#include <machine/cpu_capabilities.h>
+#define _COMM_PAGE_VERSION_REQD 9
+#undef __APPLE_API_PRIVATE
+
+/*
+ * These commpage routines provide fast access to the logical cpu number
+ * of the calling processor assuming no pre-emption occurs.
+ */
+#define CPU_NUMBER() (((int (*)()) _COMM_PAGE_CPU_NUMBER)()) /* Zero-based */
+
+static INLINE mag_index_t
+mag_get_thread_index(szone_t *szone)
+{
+ if (!__is_threaded)
+ return 0;
+ else
+ return CPU_NUMBER() & (TINY_MAX_MAGAZINES - 1);
+}
+
+#else
+#warning deriving magazine index from pthread_self() [want processor number]
+
+static INLINE mag_index_t
+mag_get_thread_index(szone_t *szone)
+{
+ if (!__is_threaded)
+ return 0;
+ else if ((pthread_key_t) -1 == szone->cpu_id_key) { // In case pthread_key_create() failed.
+ return HASH_SELF();
+ } else {
+ mag_index_t idx = (mag_index_t)(intptr_t)pthread_getspecific(szone->cpu_id_key);
+
+ // Has this thread been hinted with a non-zero value [i.e. 1 + cpuid()] ?
+ // If so, bump down the hint to a zero-based magazine index and return it.
+ if (idx) {
+ return idx - 1;
+ } else {
+ // No hint available. Construct a magazine index for this thread ...
+ idx = HASH_SELF();
+
+ // bump up the hint to exclude zero and try to memorize it ...
+ pthread_setspecific(szone->cpu_id_key, (const void *)((uintptr_t)idx + 1));
+
+ // and return the (zero-based) magazine index.
+ return idx;
+ }
+ }
+}
+#endif
+
+static magazine_t *
+mag_lock_zine_for_region_trailer(szone_t *szone, magazine_t *magazines, region_trailer_t *trailer, mag_index_t mag_index)
+{
+ mag_index_t refreshed_index;
+ magazine_t *mag_ptr = &(magazines[mag_index]);
+
+ // Take the lock on entry.
+ SZONE_MAGAZINE_PTR_LOCK(szone, mag_ptr);
+
+ // Now in the time it took to acquire the lock, the region may have migrated
+ // from one magazine to another. In which case the magazine lock we obtained
+ // (namely magazines[mag_index].mag_lock) is stale. If so, keep on tryin' ...
+ while (mag_index != (refreshed_index = trailer->mag_index)) { // Note assignment
+
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, mag_ptr);
+
+ mag_index = refreshed_index;
+ mag_ptr = &(magazines[mag_index]);
+ SZONE_MAGAZINE_PTR_LOCK(szone, mag_ptr);
+ }
+
+ return mag_ptr;
+}
+
+/*******************************************************************************
+ * Region hash implementation
+ *
+ * This is essentially a duplicate of the existing Large allocator hash, minus
+ * the ability to remove entries. The two should be combined eventually.
+ ******************************************************************************/
+#pragma mark region hash
+
+/*
+ * hash_lookup_region_no_lock - Scan a hash ring looking for an entry for a
+ * given region.
+ *
+ * FIXME: If consecutive queries of the same region are likely, a one-entry
+ * cache would likely be a significant performance win here.
+ */
+static INLINE rgnhdl_t
+hash_lookup_region_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r) {
+ size_t index, hash_index;
+ rgnhdl_t entry;
+
+ if (!num_entries)
+ return 0;
+
+ // Multiplicative hash where the multiplier is a prime near (ULONG_MAX / phi). [phi = 1.618033...]
+ // Since the values of (((uintptr_t)r >> HASH_BLOCKS_ALIGN) are (roughly) an ascending sequence of integers,
+ // this hash works really well. See Knuth TAOCP, Vol. 3.
+#if __LP64__
+ index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 11400714819323198549ULL) >> (64 - shift);
+#else
+ index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 2654435761UL) >> (32 - shift);
+#endif
+ do {
+ entry = regions + index;
+ if (*entry == 0)
+ return 0;
+ if (*entry == r)
+ return entry;
+ if (++index == num_entries)
+ index = 0;
+ } while (index != hash_index);
+ return 0;
+}
+
+/*
+ * hash_region_insert_no_lock - Insert a region into the hash ring.
+ */
+static void
+hash_region_insert_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r) {
+ size_t index, hash_index;
+ rgnhdl_t entry;
+
+ // Multiplicative hash where the multiplier is a prime near (ULONG_MAX / phi). [phi = 1.618033...]
+ // Since the values of (((uintptr_t)r >> HASH_BLOCKS_ALIGN) are (roughly) an ascending sequence of integers,
+ // this hash works really well. See Knuth TAOCP, Vol. 3.
+#if __LP64__
+ index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 11400714819323198549ULL) >> (64 - shift);
+#else
+ index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 2654435761UL) >> (32 - shift);
+#endif
+ do {
+ entry = regions + index;
+ if (*entry == HASHRING_OPEN_ENTRY || *entry == HASHRING_REGION_DEALLOCATED) {
+ *entry = r;
+ return;
+ }
+ if (++index == num_entries)
+ index = 0;
+ } while (index != hash_index);
+}
+
+/*
+ * hash_regions_alloc_no_lock - Allocate space for a number of entries. This
+ * must be a VM allocation so as to avoid recursing between allocating a new small
+ * region, and asking the small region to allocate space for the new list of
+ * regions.
+ */
+static region_t *
+hash_regions_alloc_no_lock(szone_t *szone, size_t num_entries)
+{
+ size_t size = num_entries * sizeof(region_t);
+
+ return allocate_pages(szone, round_page(size), 0, 0, VM_MEMORY_MALLOC);
+}
+
+/*
+ * hash_regions_grow_no_lock - Grow the hash ring, and rehash the entries.
+ * Return the new region and new size to update the szone. Do not deallocate
+ * the old entries since someone may still be allocating them.
+ */
+static region_t *
+hash_regions_grow_no_lock(szone_t *szone, region_t *regions, size_t old_size, size_t *mutable_shift,
+ size_t *new_size)
+{
+ // double in size and allocate memory for the regions
+ *new_size = old_size + old_size;
+ *mutable_shift = *mutable_shift + 1;
+ region_t *new_regions = hash_regions_alloc_no_lock(szone, *new_size);
+
+ // rehash the entries into the new list
+ size_t index;
+ for (index = 0; index < old_size; ++index) {
+ region_t r = regions[index];
+ if (r != HASHRING_OPEN_ENTRY && r != HASHRING_REGION_DEALLOCATED)
+ hash_region_insert_no_lock(new_regions, *new_size, *mutable_shift, r);
+ }
+ return new_regions;
+}
+
+/********************* FREE LIST UTILITIES ************************/
+
+// A free list entry is comprised of a pair of pointers, previous and next.
+// These are used to implement a doubly-linked list, which permits efficient
+// extraction.
+//
+// Because the free list entries are previously freed objects, a misbehaved
+// program may write to a pointer after it has called free() on that pointer,
+// either by dereference or buffer overflow from an adjacent pointer. This write
+// would then corrupt the free list's previous and next pointers, leading to a
+// crash. In order to detect this case, we take advantage of the fact that
+// malloc'd pointers are known to be at least 16 byte aligned, and thus have
+// at least 4 trailing zero bits.
+//
+// When an entry is added to the free list, a checksum of the previous and next
+// pointers is calculated and written to the low four bits of the respective
+// pointers. Upon detection of an invalid checksum, an error is logged and NULL
+// is returned. Since all code which un-checksums pointers checks for a NULL
+// return, a potentially crashing or malicious dereference is avoided at the
+// cost of leaking the corrupted block, and any subsequent blocks on the free
+// list of that size.
+
+static NOINLINE void
+free_list_checksum_botch(szone_t *szone, free_list_t *ptr)
+{
+ szone_error(szone, 1, "incorrect checksum for freed object "
+ "- object was probably modified after being freed.", ptr, NULL);
+}
+
+static INLINE uintptr_t free_list_gen_checksum(uintptr_t ptr)
+{
+ uint8_t chk;
+
+ chk = (unsigned char)(ptr >> 0);
+ chk += (unsigned char)(ptr >> 8);
+ chk += (unsigned char)(ptr >> 16);
+ chk += (unsigned char)(ptr >> 24);
+#if __LP64__
+ chk += (unsigned char)(ptr >> 32);
+ chk += (unsigned char)(ptr >> 40);
+ chk += (unsigned char)(ptr >> 48);
+ chk += (unsigned char)(ptr >> 56);
+#endif
+
+ return chk & (uintptr_t)0xF;
+}
+
+static INLINE uintptr_t
+free_list_checksum_ptr(szone_t *szone, void *ptr)
+{
+ uintptr_t p = (uintptr_t)ptr;
+ return p | free_list_gen_checksum(p ^ szone->cookie);
+}
+
+static INLINE void *
+free_list_unchecksum_ptr(szone_t *szone, ptr_union *ptr)
+{
+ ptr_union p;
+ p.u = (ptr->u >> 4) << 4;
+
+ if ((ptr->u & (uintptr_t)0xF) != free_list_gen_checksum(p.u ^ szone->cookie))
+ {
+ free_list_checksum_botch(szone, (free_list_t *)ptr);
+ return NULL;
+ }
+ return p.p;
+}
+
+static unsigned
+free_list_count(szone_t *szone, free_list_t *ptr)
+{
+ unsigned count = 0;
+
+ while (ptr) {
+ count++;
+ ptr = free_list_unchecksum_ptr(szone, &ptr->next);
+ }
+ return count;
+}
+
+static INLINE void
+recirc_list_extract(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node)
+{
+ // excise node from list
+ if (NULL == node->prev)
+ mag_ptr->firstNode = node->next;
+ else
+ node->prev->next = node->next;
+
+ if (NULL == node->next)
+ mag_ptr->lastNode = node->prev;
+ else
+ node->next->prev = node->prev;
+
+ mag_ptr->recirculation_entries--;
+}
+
+static INLINE void
+recirc_list_splice_last(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node)
+{
+ if (NULL == mag_ptr->lastNode) {
+ mag_ptr->firstNode = node;
+ node->prev = NULL;
+ } else {
+ node->prev = mag_ptr->lastNode;
+ mag_ptr->lastNode->next = node;
+ }
+ mag_ptr->lastNode = node;
+ node->next = NULL;
+ node->recirc_suitable = FALSE;
+ mag_ptr->recirculation_entries++;
+}
+
+static INLINE void
+recirc_list_splice_first(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node)
+{
+ if (NULL == mag_ptr->firstNode) {
+ mag_ptr->lastNode = node;
+ node->next = NULL;
+ } else {
+ node->next = mag_ptr->firstNode;
+ mag_ptr->firstNode->prev = node;
+ }
+ mag_ptr->firstNode = node;
+ node->prev = NULL;
+ node->recirc_suitable = FALSE;
+ mag_ptr->recirculation_entries++;
+}
+
+/* Macros used to manipulate the uint32_t quantity mag_bitmap. */
+
+/* BITMAPV variants are used by tiny. */
+#if defined(__LP64__)
+// assert(NUM_SLOTS == 64) in which case (slot >> 5) is either 0 or 1
+#define BITMAPV_SET(bitmap,slot) (bitmap[(slot) >> 5] |= 1 << ((slot) & 31))
+#define BITMAPV_CLR(bitmap,slot) (bitmap[(slot) >> 5] &= ~ (1 << ((slot) & 31)))
+#define BITMAPV_BIT(bitmap,slot) ((bitmap[(slot) >> 5] >> ((slot) & 31)) & 1)
+#define BITMAPV_CTZ(bitmap) (__builtin_ctzl(bitmap))
+#else
+// assert(NUM_SLOTS == 32) in which case (slot >> 5) is always 0, so code it that way
+#define BITMAPV_SET(bitmap,slot) (bitmap[0] |= 1 << (slot))
+#define BITMAPV_CLR(bitmap,slot) (bitmap[0] &= ~ (1 << (slot)))
+#define BITMAPV_BIT(bitmap,slot) ((bitmap[0] >> (slot)) & 1)
+#define BITMAPV_CTZ(bitmap) (__builtin_ctz(bitmap))
+#endif
+
+/* BITMAPN is used by small. (slot >> 5) takes on values from 0 to 7. */
+#define BITMAPN_SET(bitmap,slot) (bitmap[(slot) >> 5] |= 1 << ((slot) & 31))
+#define BITMAPN_CLR(bitmap,slot) (bitmap[(slot) >> 5] &= ~ (1 << ((slot) & 31)))
+#define BITMAPN_BIT(bitmap,slot) ((bitmap[(slot) >> 5] >> ((slot) & 31)) & 1)
+
+/* returns bit # of least-significant one bit, starting at 0 (undefined if !bitmap) */
+#define BITMAP32_CTZ(bitmap) (__builtin_ctz(bitmap[0]))
+
+/********************* TINY FREE LIST UTILITIES ************************/
+
+// We encode the meta-headers as follows:
+// Each quantum has an associated set of 2 bits:
+// block_header when 1 says this block is the beginning of a block
+// in_use when 1 says this block is in use
+// so a block in use of size 3 is 1-1 0-X 0-X
+// for a free block TINY_FREE_SIZE(ptr) carries the size and the bits are 1-0 X-X X-X
+// for a block middle the bits are 0-0
+
+// We store the meta-header bit arrays by interleaving them 32 bits at a time.
+// Initial 32 bits of block_header, followed by initial 32 bits of in_use, followed
+// by next 32 bits of block_header, followed by next 32 bits of in_use, etc.
+// This localizes memory references thereby reducing cache and TLB pressures.
+
+static INLINE void
+BITARRAY_SET(uint32_t *bits, msize_t index)
+{
+ // index >> 5 identifies the uint32_t to manipulate in the conceptually contiguous bits array
+ // (index >> 5) << 1 identifies the uint32_t allowing for the actual interleaving
+ bits[(index >> 5) << 1] |= (1 << (index & 31));
+}
+
+static INLINE void
+BITARRAY_CLR(uint32_t *bits, msize_t index)
+{
+ bits[(index >> 5) << 1] &= ~(1 << (index & 31));
+}
+
+static INLINE boolean_t
+BITARRAY_BIT(uint32_t *bits, msize_t index)
+{
+ return ((bits[(index >> 5) << 1]) >> (index & 31)) & 1;
+}
+
+#if 0
+static INLINE void bitarray_mclr(uint32_t *bits, unsigned start, unsigned end) ALWAYSINLINE;
+
+static INLINE void
+bitarray_mclr(uint32_t *bits, unsigned start, unsigned end)
+{
+ // start >> 5 identifies the uint32_t to manipulate in the conceptually contiguous bits array
+ // (start >> 5) << 1 identifies the uint32_t allowing for the actual interleaving
+ uint32_t *addr = bits + ((start >> 5) << 1);
+
+ uint32_t span = end - start;
+ start = start & 31;
+ end = start + span;
+
+ if (end > 31) {
+ addr[0] &= (0xFFFFFFFFU >> (31 - start)) >> 1;
+ addr[2] &= (0xFFFFFFFFU << (end - 32));
+ } else {
+ unsigned mask = (0xFFFFFFFFU >> (31 - start)) >> 1;
+ mask |= (0xFFFFFFFFU << end);
+ addr[0] &= mask;
+ }
+}
+#endif
+
+/*
+ * Obtain the size of a free tiny block (in msize_t units).
+ */
+static msize_t
+get_tiny_free_size(const void *ptr)
+{
+ void *next_block = (void *)((uintptr_t)ptr + TINY_QUANTUM);
+ void *region_end = TINY_REGION_END(TINY_REGION_FOR_PTR(ptr));
+
+ // check whether the next block is outside the tiny region or a block header
+ // if so, then the size of this block is one, and there is no stored size.
+ if (next_block < region_end)
+ {
+ uint32_t *next_header = TINY_BLOCK_HEADER_FOR_PTR(next_block);
+ msize_t next_index = TINY_INDEX_FOR_PTR(next_block);
+
+ if (!BITARRAY_BIT(next_header, next_index))
+ return TINY_FREE_SIZE(ptr);
+ }
+ return 1;
+}
+
+/*
+ * Get the size of the previous free block, which is stored in the last two
+ * bytes of the block. If the previous block is not free, then the result is
+ * undefined.
+ */
+static msize_t
+get_tiny_previous_free_msize(const void *ptr)
+{
+ // check whether the previous block is in the tiny region and a block header
+ // if so, then the size of the previous block is one, and there is no stored
+ // size.
+ if (ptr != TINY_REGION_FOR_PTR(ptr))
+ {
+ void *prev_block = (void *)((uintptr_t)ptr - TINY_QUANTUM);
+ uint32_t *prev_header = TINY_BLOCK_HEADER_FOR_PTR(prev_block);
+ msize_t prev_index = TINY_INDEX_FOR_PTR(prev_block);
+ if (BITARRAY_BIT(prev_header, prev_index))
+ return 1;
+ return TINY_PREVIOUS_MSIZE(ptr);
+ }
+ // don't read possibly unmapped memory before the beginning of the region
+ return 0;
+}
+
+static INLINE msize_t
+get_tiny_meta_header(const void *ptr, boolean_t *is_free)
+{
+ // returns msize and is_free
+ // may return 0 for the msize component (meaning 65536)
+ uint32_t *block_header;
+ msize_t index;
+
+ block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
+ index = TINY_INDEX_FOR_PTR(ptr);
+
+ msize_t midx = (index >> 5) << 1;
+ uint32_t mask = 1 << (index & 31);
+ *is_free = 0;
+ if (0 == (block_header[midx] & mask)) // if (!BITARRAY_BIT(block_header, index))
+ return 0;
+ if (0 == (block_header[midx + 1] & mask)) { // if (!BITARRAY_BIT(in_use, index))
+ *is_free = 1;
+ return get_tiny_free_size(ptr);
+ }
+
+ // index >> 5 identifies the uint32_t to manipulate in the conceptually contiguous bits array
+ // (index >> 5) << 1 identifies the uint32_t allowing for the actual interleaving
+#if defined(__LP64__)
+ // The return value, msize, is computed as the distance to the next 1 bit in block_header.
+ // That's guaranteed to be somewhere in the next 64 bits. And those bits could span three
+ // uint32_t block_header elements. Collect the bits into a single uint64_t and measure up with ffsl.
+ uint32_t *addr = ((uint32_t *)block_header) + ((index >> 5) << 1);
+ uint32_t bitidx = index & 31;
+ uint64_t word_lo = addr[0];
+ uint64_t word_mid = addr[2];
+ uint64_t word_hi = addr[4];
+ uint64_t word_lomid = (word_lo >> bitidx) | (word_mid << (32 - bitidx));
+ uint64_t word = bitidx ? word_lomid | (word_hi << (64 - bitidx)) : word_lomid;
+ uint32_t result = __builtin_ffsl(word >> 1);
+#else
+ // The return value, msize, is computed as the distance to the next 1 bit in block_header.
+ // That's guaranteed to be somewhere in the next 32 bits. And those bits could span two
+ // uint32_t block_header elements. Collect the bits into a single uint32_t and measure up with ffs.
+ uint32_t *addr = ((uint32_t *)block_header) + ((index >> 5) << 1);
+ uint32_t bitidx = index & 31;
+ uint32_t word = bitidx ? (addr[0] >> bitidx) | (addr[2] << (32 - bitidx)) : addr[0];
+ uint32_t result = __builtin_ffs(word >> 1);
+#endif
+ return result;
+}
+
+/*
+ * set_tiny_meta_header_in_use
+ *
+ * Mark ptr as the start of an in-use block of msize quanta: set the header
+ * and in_use bits at ptr's index, clear the following (msize - 1) pairs of
+ * bits so the run reads as a single block, then set the header bit of the
+ * quantum just past the run so the following block is still recognized as
+ * a block boundary (needed for coalescing).
+ */
+static INLINE void
+set_tiny_meta_header_in_use(const void *ptr, msize_t msize)
+{
+	uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
+	msize_t index = TINY_INDEX_FOR_PTR(ptr);
+	msize_t clr_msize = msize - 1;
+	// header and in_use words are interleaved in pairs: word pair for bit
+	// range [32k, 32k+31] lives at block_header[2k] / block_header[2k + 1].
+	msize_t midx = (index >> 5) << 1;
+	uint32_t val = (1 << (index & 31));
+
+#if DEBUG_MALLOC
+	if (msize >= NUM_TINY_SLOTS)
+		malloc_printf("set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize);
+	if ((unsigned)index + (unsigned)msize > 0x10000)
+		malloc_printf("set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize);
+#endif
+
+	block_header[midx] |= val; // BITARRAY_SET(block_header, index);
+	block_header[midx + 1] |= val; // BITARRAY_SET(in_use, index);
+
+	// bitarray_mclr(block_header, index, end_bit);
+	// bitarray_mclr(in_use, index, end_bit);
+
+	// Clear the clr_msize bit pairs following the start bit. The run
+	// [start, end) may spill into the next one (32-bit) or next two
+	// (64-bit) word pairs, handled by the mask cases below.
+	index++;
+	midx = (index >> 5) << 1;
+
+	unsigned start = index & 31;
+	unsigned end = start + clr_msize;
+
+#if defined(__LP64__)
+	if (end > 63) {
+		// mask0 keeps bits below 'start'; mask1 keeps bits at/above 'end' (mod 64).
+		unsigned mask0 = (0xFFFFFFFFU >> (31 - start)) >> 1;
+		unsigned mask1 = (0xFFFFFFFFU << (end - 64));
+		block_header[midx + 0] &= mask0; // clear header
+		block_header[midx + 1] &= mask0; // clear in_use
+		block_header[midx + 2] = 0; // clear header
+		block_header[midx + 3] = 0; // clear in_use
+		block_header[midx + 4] &= mask1; // clear header
+		block_header[midx + 5] &= mask1; // clear in_use
+	} else
+#endif
+	if (end > 31) {
+		// run spans this word pair and the next
+		unsigned mask0 = (0xFFFFFFFFU >> (31 - start)) >> 1;
+		unsigned mask1 = (0xFFFFFFFFU << (end - 32));
+		block_header[midx + 0] &= mask0;
+		block_header[midx + 1] &= mask0;
+		block_header[midx + 2] &= mask1;
+		block_header[midx + 3] &= mask1;
+	} else {
+		// run is contained in a single word pair: keep bits outside [start, end)
+		unsigned mask = (0xFFFFFFFFU >> (31 - start)) >> 1;
+		mask |= (0xFFFFFFFFU << end);
+		block_header[midx + 0] &= mask;
+		block_header[midx + 1] &= mask;
+	}
+
+	// we set the block_header bit for the following block to reaffirm next block is a block
+	index += clr_msize;
+	midx = (index >> 5) << 1;
+	val = (1 << (index & 31));
+	block_header[midx] |= val; // BITARRAY_SET(block_header, (index+clr_msize));
+#if DEBUG_MALLOC
+	{
+	boolean_t ff;
+	msize_t	mf;
+
+	mf = get_tiny_meta_header(ptr, &ff);
+	if (msize != mf) {
+		malloc_printf("setting header for tiny in_use %p : %d\n", ptr, msize);
+		malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
+	}
+	}
+#endif
+}
+
+/*
+ * set_tiny_meta_header_in_use_1
+ *
+ * Specialization of set_tiny_meta_header_in_use for msize == 1: with no
+ * interior bits to clear, just set the header/in_use bits at index and the
+ * header bit of the immediately following quantum.
+ */
+static INLINE void
+set_tiny_meta_header_in_use_1(const void *ptr) // As above with msize == 1
+{
+	uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
+	msize_t index = TINY_INDEX_FOR_PTR(ptr);
+	msize_t midx = (index >> 5) << 1;
+	uint32_t val = (1 << (index & 31));
+
+	block_header[midx] |= val; // BITARRAY_SET(block_header, index);
+	block_header[midx + 1] |= val; // BITARRAY_SET(in_use, index);
+
+	index++;
+	midx = (index >> 5) << 1;
+	val = (1 << (index & 31));
+
+	block_header[midx] |= val; // BITARRAY_SET(block_header, (index+clr_msize))
+}
+
+/*
+ * set_tiny_meta_header_middle
+ *
+ * Clear both the header and in_use bits for ptr's quantum, marking it as
+ * the interior of a larger block (not a block start).
+ */
+static INLINE void
+set_tiny_meta_header_middle(const void *ptr)
+{
+	// indicates this block is in the middle of an in use block
+	uint32_t	*block_header;
+	uint32_t	*in_use;
+	msize_t	index;
+
+	block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
+	in_use = TINY_INUSE_FOR_HEADER(block_header);
+	index = TINY_INDEX_FOR_PTR(ptr);
+
+	BITARRAY_CLR(block_header, index);
+	BITARRAY_CLR(in_use, index);
+}
+
+/*
+ * set_tiny_meta_header_free
+ *
+ * Mark ptr as the start of a free block of msize quanta: set the header bit
+ * and clear the in_use bit, then record the block's size in-band (at the
+ * start of the block and just before its follower) when there is room.
+ */
+static INLINE void
+set_tiny_meta_header_free(const void *ptr, msize_t msize)
+{
+	// !msize is acceptable and means 65536
+	uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
+	msize_t index = TINY_INDEX_FOR_PTR(ptr);
+	msize_t midx = (index >> 5) << 1;
+	uint32_t val = (1 << (index & 31));
+
+#if DEBUG_MALLOC
+	if ((unsigned)index + (unsigned)msize > 0x10000) {
+		malloc_printf("setting header for tiny free %p msize too large: %d\n", ptr, msize);
+	}
+#endif
+
+	block_header[midx] |= val; // BITARRAY_SET(block_header, index);
+	block_header[midx + 1] &= ~val; // BITARRAY_CLR(in_use, index);
+
+	// mark the end of this block if msize is > 1.  For msize == 0, the whole
+	// region is free, so there is no following block. For msize == 1, there is
+	// no space to write the size on 64 bit systems.  The size for 1 quantum
+	// blocks is computed from the metadata bitmaps.
+	if (msize > 1) {
+		void	*follower = FOLLOWING_TINY_PTR(ptr, msize);
+		TINY_PREVIOUS_MSIZE(follower) = msize;
+		TINY_FREE_SIZE(ptr) = msize;
+	}
+	if (msize == 0) {
+		TINY_FREE_SIZE(ptr) = msize;
+	}
+#if DEBUG_MALLOC
+	boolean_t	ff;
+	msize_t	mf = get_tiny_meta_header(ptr, &ff);
+	if ((msize != mf) || !ff) {
+		malloc_printf("setting header for tiny free %p : %u\n", ptr, msize);
+		malloc_printf("reading header for tiny %p : %u %u\n", ptr, mf, ff);
+	}
+#endif
+}
+
+/*
+ * tiny_meta_header_is_free
+ *
+ * Returns true iff ptr's quantum is a block start (header bit set) whose
+ * in_use bit is clear, i.e. the start of a free block.
+ */
+static INLINE boolean_t
+tiny_meta_header_is_free(const void *ptr)
+{
+	uint32_t	*block_header;
+	uint32_t	*in_use;
+	msize_t	index;
+
+	block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
+	in_use = TINY_INUSE_FOR_HEADER(block_header);
+	index = TINY_INDEX_FOR_PTR(ptr);
+	if (!BITARRAY_BIT(block_header, index))
+		return 0;	// not a block start at all
+	return !BITARRAY_BIT(in_use, index);
+}
+
+/*
+ * tiny_previous_preceding_free
+ *
+ * If the block immediately preceding ptr is free, return its address and
+ * store its msize through prev_msize; otherwise return NULL. The in-band
+ * previous-size word is treated as a hint and cross-checked against the
+ * metadata bitmaps and the free block's own recorded size before trusting it.
+ */
+static INLINE void *
+tiny_previous_preceding_free(void *ptr, msize_t *prev_msize)
+{
+	// returns the previous block, assuming and verifying it's free
+	uint32_t	*block_header;
+	uint32_t	*in_use;
+	msize_t	index;
+	msize_t	previous_msize;
+	msize_t	previous_index;
+	void	*previous_ptr;
+
+	block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
+	in_use = TINY_INUSE_FOR_HEADER(block_header);
+	index = TINY_INDEX_FOR_PTR(ptr);
+
+	if (!index)
+		return NULL;	// first block in the region: nothing precedes it
+	if ((previous_msize = get_tiny_previous_free_msize(ptr)) > index)
+		return NULL;	// hint would point before the region start
+
+	previous_index = index - previous_msize;
+	previous_ptr = (void *)((uintptr_t)TINY_REGION_FOR_PTR(ptr) + TINY_BYTES_FOR_MSIZE(previous_index));
+	if (!BITARRAY_BIT(block_header, previous_index))
+		return NULL;	// candidate is not a block start
+	if (BITARRAY_BIT(in_use, previous_index))
+		return NULL;	// candidate is in use, not free
+	if (get_tiny_free_size(previous_ptr) != previous_msize)
+		return NULL;	// sizes disagree: hint was stale
+
+	// conservative check did match true check
+	*prev_msize = previous_msize;
+	return previous_ptr;
+}
+
+/*
+ * Adds an item to the proper free list, and also marks the meta-header of the
+ * block properly.
+ * The free list slot is msize - 1, with all blocks of msize 0 or
+ * >= NUM_TINY_SLOTS collected in the last slot. The new block is pushed at
+ * the head of its slot's list, and the slot's bitmap bit is set if the slot
+ * was previously empty. Free-list links are checksummed.
+ * Assumes szone has been locked
+ */
+static void
+tiny_free_list_add_ptr(szone_t *szone, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize)
+{
+	grain_t	slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
+	free_list_t	*free_ptr = ptr;
+	free_list_t	*free_head = tiny_mag_ptr->mag_free_list[slot];
+
+#if DEBUG_MALLOC
+	if (LOG(szone,ptr)) {
+		malloc_printf("in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
+	}
+	if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
+		szone_error(szone, 1, "tiny_free_list_add_ptr: Unaligned ptr", ptr, NULL);
+	}
+#endif
+	set_tiny_meta_header_free(ptr, msize);
+	if (free_head) {
+#if DEBUG_MALLOC
+		if (free_list_unchecksum_ptr(szone, &free_head->previous)) {
+			szone_error(szone, 1, "tiny_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr,
+				"ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, (void *)free_head, free_head->previous.p);
+		}
+		if (! tiny_meta_header_is_free(free_head)) {
+			szone_error(szone, 1, "tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr,
+				"ptr=%p slot=%d free_head=%p\n", ptr, slot, (void *)free_head);
+		}
+#endif
+		free_head->previous.u = free_list_checksum_ptr(szone, free_ptr);
+	} else {
+		// slot was empty: flag it as non-empty in the magazine's slot bitmap
+		BITMAPV_SET(tiny_mag_ptr->mag_bitmap, slot);
+	}
+	free_ptr->previous.u = free_list_checksum_ptr(szone, NULL);
+	free_ptr->next.u = free_list_checksum_ptr(szone, free_head);
+
+	tiny_mag_ptr->mag_free_list[slot] = free_ptr;
+}
+
+/*
+ * Removes the item pointed to by ptr in the proper free list.
+ * Unlinks ptr from its (doubly linked, checksummed) slot list; when ptr was
+ * the head and the list becomes empty, the slot's bitmap bit is cleared.
+ * Assumes szone has been locked
+ */
+static void
+tiny_free_list_remove_ptr(szone_t *szone, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize)
+{
+	grain_t	slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
+	free_list_t	*free_ptr = ptr, *next, *previous;
+
+	next = free_list_unchecksum_ptr(szone, &free_ptr->next);
+	previous = free_list_unchecksum_ptr(szone, &free_ptr->previous);
+
+#if DEBUG_MALLOC
+	if (LOG(szone,ptr)) {
+		malloc_printf("In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
+	}
+#endif
+	if (!previous) {
+		// The block to remove is the head of the free list
+#if DEBUG_MALLOC
+		if (tiny_mag_ptr->mag_free_list[slot] != ptr) {
+			szone_error(szone, 1, "tiny_free_list_remove_ptr: Internal invariant broken (tiny_mag_ptr->mag_free_list[slot])", ptr,
+				"ptr=%p slot=%d msize=%d tiny_mag_ptr->mag_free_list[slot]=%p\n",
+				ptr, slot, msize, (void *)tiny_mag_ptr->mag_free_list[slot]);
+			return;
+		}
+#endif
+		tiny_mag_ptr->mag_free_list[slot] = next;
+		if (!next) BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot);
+	} else {
+		// We know free_ptr is already checksummed, so we don't need to do it
+		// again.
+		previous->next = free_ptr->next;
+	}
+	if (next) {
+		// We know free_ptr is already checksummed, so we don't need to do it
+		// again.
+		next->previous = free_ptr->previous;
+	}
+}
+
+/*
+ * tiny_region_for_ptr_no_lock - Returns the tiny region containing the pointer,
+ * or NULL if not found.
+ *
+ * Looks up TINY_REGION_FOR_PTR(ptr) in the current generation of the region
+ * hash ring; read-only, so no lock is taken.
+ */
+static INLINE region_t
+tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
+{
+	rgnhdl_t r = hash_lookup_region_no_lock(szone->tiny_region_generation->hashed_regions,
+					szone->tiny_region_generation->num_regions_allocated,
+					szone->tiny_region_generation->num_regions_allocated_shift,
+					TINY_REGION_FOR_PTR(ptr));
+	return r ? *r : r;
+}
+
+/*
+ * tiny_finalize_region
+ *
+ * Converts the magazine's trailing unclaimed space (mag_bytes_free_at_end of
+ * mag_last_region) into an ordinary free-list block, coalescing it with a
+ * free block immediately preceding it if present, then resets the
+ * "bytes free at end" bookkeeping.
+ */
+static void
+tiny_finalize_region(szone_t *szone, magazine_t *tiny_mag_ptr) {
+	void	*last_block, *previous_block;
+	uint32_t	*last_header;
+	msize_t	last_msize, previous_msize, last_index;
+
+	last_block = (void *)
+		((uintptr_t)TINY_REGION_END(tiny_mag_ptr->mag_last_region) - tiny_mag_ptr->mag_bytes_free_at_end);
+	last_msize = TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_end);
+	last_header = TINY_BLOCK_HEADER_FOR_PTR(last_block);
+	last_index  = TINY_INDEX_FOR_PTR(last_block);
+
+	// Before anything we transform any remaining mag_bytes_free_at_end into a
+	// regular free block.  We take special care here to update the bitfield
+	// information, since we are bypassing the normal free codepath.  If there
+	// is more than one quanta worth of memory in mag_bytes_free_at_end, then
+	// there will be two block headers:
+	// 1) header for the free space at end, msize = 1
+	// 2) header inserted by set_tiny_meta_header_in_use after block
+	// We must clear the second one so that when the free block's size is
+	// queried, we do not think the block is only 1 quantum in size because
+	// of the second set header bit.
+	if (last_index != (NUM_TINY_BLOCKS - 1))
+		BITARRAY_CLR(last_header, (last_index + 1));
+
+	// It is possible that the block prior to the last block in the region has
+	// been free'd, but was not coalesced with the free bytes at the end of the
+	// block, since we treat the bytes at the end of the region as "in use" in
+	// the meta headers.  Attempt to coalesce the last block with the previous
+	// block, so we don't violate the "no consecutive free blocks" invariant.
+	//
+	// FIXME: Need to investigate how much work would be required to increase
+	//        'mag_bytes_free_at_end' when freeing the preceding block, rather
+	//        than performing this workaround.
+	//
+	previous_block = tiny_previous_preceding_free(last_block, &previous_msize);
+	if (previous_block) {
+		set_tiny_meta_header_middle(last_block);
+		tiny_free_list_remove_ptr(szone, tiny_mag_ptr, previous_block, previous_msize);
+		last_block = previous_block;
+		last_msize += previous_msize;
+	}
+
+	// splice last_block into the free list
+	tiny_free_list_add_ptr(szone, tiny_mag_ptr, last_block, last_msize);
+	tiny_mag_ptr->mag_bytes_free_at_end = 0;
+	tiny_mag_ptr->mag_last_region = NULL;
+}
+
+/*
+ * tiny_free_detach_region
+ *
+ * Walk region r block by block, removing every free block from the (locked)
+ * magazine's free lists. Returns the count of blocks still in use (0 means
+ * the region is entirely free). Stops early on a corrupt zero-msize block.
+ */
+static int
+tiny_free_detach_region(szone_t *szone, magazine_t *tiny_mag_ptr, region_t r) {
+	uintptr_t	start = (uintptr_t)TINY_REGION_ADDRESS(r);
+	uintptr_t	current = start;
+	uintptr_t	limit =  (uintptr_t)TINY_REGION_END(r);
+	boolean_t	is_free;
+	msize_t	msize;
+	int	total_alloc = 0;
+
+	while (current < limit) {
+		msize = get_tiny_meta_header((void *)current, &is_free);
+		if (is_free && !msize && (current == start)) {
+			// first block is all free
+			break;
+		}
+		if (!msize) {
+#if DEBUG_MALLOC
+			malloc_printf("*** tiny_free_detach_region error with %p: msize=%d is_free =%d\n",
+				(void *)current, msize, is_free);
+#endif
+			break;
+		}
+		if (is_free) {
+			tiny_free_list_remove_ptr(szone, tiny_mag_ptr, (void *)current, msize);
+		} else {
+			total_alloc++;
+		}
+		current += TINY_BYTES_FOR_MSIZE(msize);
+	}
+	return total_alloc;
+}
+
+/*
+ * tiny_free_reattach_region
+ *
+ * Counterpart of tiny_free_detach_region: walk region r adding each free
+ * block to the (locked) magazine's free lists. Returns the total bytes of
+ * blocks still in use. Stops early on a corrupt zero-msize block.
+ */
+static size_t
+tiny_free_reattach_region(szone_t *szone, magazine_t *tiny_mag_ptr, region_t r) {
+	uintptr_t	start = (uintptr_t)TINY_REGION_ADDRESS(r);
+	uintptr_t	current = start;
+	uintptr_t	limit =  (uintptr_t)TINY_REGION_END(r);
+	boolean_t	is_free;
+	msize_t	msize;
+	size_t	total_alloc = 0;
+
+	while (current < limit) {
+		msize = get_tiny_meta_header((void *)current, &is_free);
+		if (is_free && !msize && (current == start)) {
+			// first block is all free
+			break;
+		}
+		if (!msize) {
+#if DEBUG_MALLOC
+			malloc_printf("*** tiny_free_reattach_region error with %p: msize=%d is_free =%d\n",
+				(void *)current, msize, is_free);
+#endif
+			break;
+		}
+		if (is_free) {
+			tiny_free_list_add_ptr(szone, tiny_mag_ptr, (void *)current, msize);
+		} else {
+			total_alloc += TINY_BYTES_FOR_MSIZE(msize);
+		}
+		current += TINY_BYTES_FOR_MSIZE(msize);
+	}
+	return total_alloc;
+}
+
+/*
+ * tiny_free_scan_madvise_free
+ *
+ * Scan region r for free blocks that span whole pages and advise those pages
+ * MADV_FREE, carefully excluding the in-band free-list/size metadata at each
+ * block's edges. If any pages were advised, the region's node is moved to
+ * the tail of the Depot's recirculation list to delay its re-use.
+ */
+static void
+tiny_free_scan_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r) {
+	uintptr_t	start = (uintptr_t)TINY_REGION_ADDRESS(r);
+	uintptr_t	current = start;
+	uintptr_t	limit =  (uintptr_t)TINY_REGION_END(r);
+	boolean_t	is_free;
+	msize_t	msize;
+	boolean_t	did_advise = FALSE;
+
+	// Scan the metadata identifying blocks which span one or more pages. Mark the pages MADV_FREE taking care to preserve free list
+	// management data.
+	while (current < limit) {
+		msize = get_tiny_meta_header((void *)current, &is_free);
+		if (is_free && !msize && (current == start)) {
+			// first block is all free
+#if DEBUG_MALLOC
+			malloc_printf("*** tiny_free_scan_madvise_free first block is all free! %p: msize=%d is_free =%d\n",
+				(void *)current, msize, is_free);
+#endif
+			uintptr_t pgLo = round_page(start + sizeof(free_list_t) + sizeof(msize_t));
+			uintptr_t pgHi = trunc_page(start + TINY_REGION_SIZE - sizeof(msize_t));
+
+			if (pgLo < pgHi) {
+				madvise_free_range(szone, r, pgLo, pgHi);
+				did_advise = TRUE;
+			}
+			break;
+		}
+		if (!msize) {
+#if DEBUG_MALLOC
+			malloc_printf("*** tiny_free_scan_madvise_free error with %p: msize=%d is_free =%d\n",
+				(void *)current, msize, is_free);
+#endif
+			break;
+		}
+		if (is_free) {
+			// round past the free-list header / trailing size word so they survive the advise
+			uintptr_t pgLo = round_page(current + sizeof(free_list_t) + sizeof(msize_t));
+			uintptr_t pgHi = trunc_page(current + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
+
+			if (pgLo < pgHi) {
+				madvise_free_range(szone, r, pgLo, pgHi);
+				did_advise = TRUE;
+			}
+		}
+		current += TINY_BYTES_FOR_MSIZE(msize);
+	}
+
+	if (did_advise) {
+		/* Move the node to the tail of the Depot's recirculation list to delay its re-use. */
+		region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(r);
+		recirc_list_extract(szone, depot_ptr, node); // excise node from list
+		recirc_list_splice_last(szone, depot_ptr, node); // connect to magazine as last node
+	}
+}
+
+/*
+ * tiny_free_try_depot_unmap_no_lock
+ *
+ * If the Depot'd region behind node is completely empty (no bytes in use)
+ * and the Depot retains enough headroom, detach the region, invalidate its
+ * hash-ring entry, and return its pages to the OS. The Depot lock is
+ * dropped around deallocate_pages() to avoid stalling other threads while
+ * in the kernel.
+ */
+static void
+tiny_free_try_depot_unmap_no_lock(szone_t *szone, magazine_t *depot_ptr, region_trailer_t *node)
+{
+#warning Tune Depot headroom
+	if (0 < node->bytes_used ||
+		depot_ptr->recirculation_entries < (szone->num_tiny_magazines * 2)) {
+		return;
+	}
+
+	// disconnect node from Depot
+	recirc_list_extract(szone, depot_ptr, node);
+
+	// Iterate the region pulling its free entries off the (locked) Depot's free list
+	region_t sparse_region = TINY_REGION_FOR_PTR(node);
+	int objects_in_use = tiny_free_detach_region(szone, depot_ptr, sparse_region);
+
+	if (0 == objects_in_use) {
+		// Invalidate the hash table entry for this region with HASHRING_REGION_DEALLOCATED.
+		// Using HASHRING_REGION_DEALLOCATED preserves the collision chain, using HASHRING_OPEN_ENTRY (0) would not.
+		rgnhdl_t pSlot = hash_lookup_region_no_lock(szone->tiny_region_generation->hashed_regions,
+					szone->tiny_region_generation->num_regions_allocated,
+					szone->tiny_region_generation->num_regions_allocated_shift, sparse_region);
+		*pSlot = HASHRING_REGION_DEALLOCATED;
+		depot_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES;
+#if ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1))) /* GCC 4.1 and forward supports atomic builtins */
+		__sync_fetch_and_add( &(szone->num_tiny_regions_dealloc), 1); // Atomically increment num_tiny_regions_dealloc
+#else
+#ifdef __LP64__
+		OSAtomicIncrement64( (volatile int64_t *)&(szone->num_tiny_regions_dealloc) );
+#else
+		OSAtomicIncrement32( (volatile int32_t *)&(szone->num_tiny_regions_dealloc) );
+#endif
+#endif
+
+		// Transfer ownership of the region back to the OS
+		SZONE_MAGAZINE_PTR_UNLOCK(szone, depot_ptr); // Avoid denial of service to Depot while in kernel
+		deallocate_pages(szone, sparse_region, TINY_REGION_SIZE, 0);
+		SZONE_MAGAZINE_PTR_LOCK(szone, depot_ptr);
+
+		MAGMALLOC_DEALLOCREGION((void *)szone, (void *)sparse_region); // DTrace USDT Probe
+
+	} else {
+		szone_error(szone, 1, "tiny_free_try_depot_unmap_no_lock objects_in_use not zero:", NULL, "%d\n", objects_in_use);
+	}
+}
+
+/*
+ * tiny_free_do_recirc_to_depot
+ *
+ * Called when a magazine has crossed the emptiness threshold: pick the first
+ * "recirc_suitable" region on its recirculation list, detach it (migrating
+ * its free blocks and statistics), and hand it to the Depot, madvising its
+ * free pages and unmapping it entirely if possible.
+ */
+static void
+tiny_free_do_recirc_to_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index)
+{
+	// The entire magazine crossed the "emptiness threshold". Transfer a region
+	// from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e
+	// is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list.
+	region_trailer_t *node = tiny_mag_ptr->firstNode;
+
+	while (node && !node->recirc_suitable) {
+		node = node->next;
+	}
+
+	if (NULL == node) {
+#if DEBUG_MALLOC
+		malloc_printf("*** tiny_free_do_recirc_to_depot end of list\n");
+#endif
+		return;
+	}
+
+	region_t sparse_region = TINY_REGION_FOR_PTR(node);
+
+	// Deal with unclaimed memory -- mag_bytes_free_at_end
+	if (sparse_region == tiny_mag_ptr->mag_last_region && tiny_mag_ptr->mag_bytes_free_at_end) {
+		tiny_finalize_region(szone, tiny_mag_ptr);
+	}
+
+	// disconnect "suitable" node from magazine
+	recirc_list_extract(szone, tiny_mag_ptr, node);
+
+	// Iterate the region pulling its free entries off its (locked) magazine's free list
+	int objects_in_use = tiny_free_detach_region(szone, tiny_mag_ptr, sparse_region);
+	magazine_t *depot_ptr = &(szone->tiny_magazines[DEPOT_MAGAZINE_INDEX]);
+
+	// hand over the region to the (locked) Depot
+	SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr);
+	// this will cause tiny_free_list_add_ptr called by tiny_free_reattach_region to use
+	// the depot as its target magazine, rather than magazine formerly associated with sparse_region
+	MAGAZINE_INDEX_FOR_TINY_REGION(sparse_region) = DEPOT_MAGAZINE_INDEX;
+
+	// Iterate the region putting its free entries on Depot's free list
+	size_t bytes_inplay = tiny_free_reattach_region(szone, depot_ptr, sparse_region);
+
+	// migrate the region's statistics from the donor magazine to the Depot
+	tiny_mag_ptr->mag_num_bytes_in_objects -= bytes_inplay;
+	tiny_mag_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES;
+	tiny_mag_ptr->mag_num_objects -= objects_in_use;
+
+	depot_ptr->mag_num_bytes_in_objects += bytes_inplay;
+	depot_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES;
+	depot_ptr->mag_num_objects += objects_in_use;
+
+	// connect to Depot as first (MRU) node
+	recirc_list_splice_first(szone, depot_ptr, node);
+
+	MAGMALLOC_RECIRCREGION((void *)szone, (int)mag_index, (int)BYTES_USED_FOR_TINY_REGION(sparse_region)); // DTrace USDT Probe
+
+	// Mark free'd dirty pages with MADV_FREE to reduce memory pressure
+	tiny_free_scan_madvise_free(szone, depot_ptr, sparse_region);
+
+	// If the region is entirely empty vm_deallocate() it
+	tiny_free_try_depot_unmap_no_lock(szone, depot_ptr, node);
+
+	SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
+}
+
+/*
+ * tiny_get_region_from_depot
+ *
+ * Try to appropriate a region from the Depot for the calling magazine
+ * (mag_index): detach the Depot's first (LIFO) region, re-tag it with
+ * mag_index, splice its free blocks and statistics into tiny_mag_ptr, and
+ * reclaim its MADV_FREE'd pages with MADV_FREE_REUSE. Returns 1 on success,
+ * 0 when the Depot is empty, on uniprocessor, or if madvise fails.
+ */
+static boolean_t
+tiny_get_region_from_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index)
+{
+	magazine_t *depot_ptr = &(szone->tiny_magazines[DEPOT_MAGAZINE_INDEX]);
+
+	/* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */
+	if (szone->num_tiny_magazines == 1) // Uniprocessor, single magazine, so no recirculation necessary
+		return 0;
+
+#if DEBUG_MALLOC
+	if (DEPOT_MAGAZINE_INDEX == mag_index) {
+		szone_error(szone, 1, "tiny_get_region_from_depot called for magazine index -1", NULL, NULL);
+		return 0;
+	}
+#endif
+
+	SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr);
+
+	// Appropriate one of the Depot's regions. Prefer LIFO selection for best cache utilization.
+	region_trailer_t *node = depot_ptr->firstNode;
+
+	if (NULL == node) { // Depot empty?
+		SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
+		return 0;
+	}
+
+	// disconnect first node from Depot
+	recirc_list_extract(szone, depot_ptr, node);
+
+	// Iterate the region pulling its free entries off the (locked) Depot's free list
+	region_t sparse_region = TINY_REGION_FOR_PTR(node);
+	int objects_in_use = tiny_free_detach_region(szone, depot_ptr, sparse_region);
+
+	// Transfer ownership of the region
+	MAGAZINE_INDEX_FOR_TINY_REGION(sparse_region) = mag_index;
+
+	// Iterate the region putting its free entries on its new (locked) magazine's free list
+	size_t bytes_inplay = tiny_free_reattach_region(szone, tiny_mag_ptr, sparse_region);
+
+	// migrate the region's statistics from the Depot to the recipient magazine
+	depot_ptr->mag_num_bytes_in_objects -= bytes_inplay;
+	depot_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES;
+	depot_ptr->mag_num_objects -= objects_in_use;
+
+	tiny_mag_ptr->mag_num_bytes_in_objects += bytes_inplay;
+	tiny_mag_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES;
+	tiny_mag_ptr->mag_num_objects += objects_in_use;
+
+	// connect to magazine as first node (it's maximally sparse at this moment)
+	recirc_list_splice_first(szone, tiny_mag_ptr, node);
+
+	SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
+
+	MAGMALLOC_DEPOTREGION((void *)szone, (int)mag_index, (int)BYTES_USED_FOR_TINY_REGION(sparse_region)); // DTrace USDT Probe
+
+	if (-1 == madvise((void *)sparse_region, TINY_REGION_PAYLOAD_BYTES, MADV_FREE_REUSE)) {
+		/* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */
+#if DEBUG_MALLOC
+		szone_error(szone, 1, "tiny_get_region_from_depot madvise(..., MADV_FREE_REUSE) failed", sparse_region, NULL);
+#endif
+		return 0;
+	}
+
+	return 1;
+}
+
+#warning Tune K and f!
+#define K 1.5 // headroom measured in number of 1Mb regions
+#define DENSITY_THRESHOLD(a) \
+	((a) - ((a) >> 2)) // "Emptiness" f = 0.25, so "Density" is (1 - f)*a. Generally: ((a) - ((a) >> -log2(f)))
+
+/*
+ * tiny_free_no_lock
+ *
+ * Core free path for a tiny block (magazine lock held): coalesce with the
+ * preceding and/or following free block, splice the result onto the free
+ * list (with a fast path when the follower lives in the oversize slot),
+ * update object/byte accounting, and drive the recirculation policy --
+ * either marking the region recirc-suitable / recirculating to the Depot,
+ * or (when freeing into the Depot itself) madvising newly page-spanning
+ * free ranges and considering returning an empty region to the OS.
+ */
+static INLINE void
+tiny_free_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, region_t region, void *ptr,
+		  msize_t msize)
+{
+	void	*original_ptr = ptr;
+	size_t	original_size = TINY_BYTES_FOR_MSIZE(msize);
+	void	*next_block = ((unsigned char *)ptr + original_size);
+	msize_t	previous_msize, next_msize;
+	void	*previous;
+	free_list_t	*big_free_block;
+	free_list_t	*after_next_block;
+	free_list_t	*before_next_block;
+	boolean_t	did_prepend = FALSE;
+	boolean_t	did_append = FALSE;
+
+#if DEBUG_MALLOC
+	if (LOG(szone,ptr)) {
+		malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
+	}
+	if (! msize) {
+		szone_error(szone, 1, "trying to free tiny block that is too small", ptr,
+			"in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
+	}
+#endif
+
+	// We try to coalesce this block with the preceding one
+	previous = tiny_previous_preceding_free(ptr, &previous_msize);
+	if (previous) {
+#if DEBUG_MALLOC
+		if (LOG(szone, ptr) || LOG(szone,previous)) {
+			malloc_printf("in tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
+		}
+#endif
+		did_prepend = TRUE;
+
+		// clear the meta_header since this is no longer the start of a block
+		set_tiny_meta_header_middle(ptr);
+		tiny_free_list_remove_ptr(szone, tiny_mag_ptr, previous, previous_msize);
+		ptr = previous;
+		msize += previous_msize;
+	}
+	// We try to coalesce with the next block
+	if ((next_block < TINY_REGION_END(region)) && tiny_meta_header_is_free(next_block)) {
+		did_append = TRUE;
+		next_msize = get_tiny_free_size(next_block);
+#if DEBUG_MALLOC
+		if (LOG(szone, ptr) || LOG(szone, next_block)) {
+			malloc_printf("in tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n",
+				ptr, msize, next_block, next_msize);
+		}
+#endif
+		// If we are coalescing with the next block, and the next block is in
+		// the last slot of the free list, then we optimize this case here to
+		// avoid removing next_block from the slot (NUM_TINY_SLOTS - 1) and then adding ptr back
+		// to slot (NUM_TINY_SLOTS - 1).
+		if (next_msize >= NUM_TINY_SLOTS) {
+			msize += next_msize;
+
+			// take over next_block's position in the oversize slot in place
+			big_free_block = (free_list_t *)next_block;
+			after_next_block = free_list_unchecksum_ptr(szone, &big_free_block->next);
+			before_next_block = free_list_unchecksum_ptr(szone, &big_free_block->previous);
+
+			if (!before_next_block) {
+				tiny_mag_ptr->mag_free_list[NUM_TINY_SLOTS-1] = ptr;
+			} else {
+				before_next_block->next.u = free_list_checksum_ptr(szone, ptr);
+			}
+
+			if (after_next_block) {
+				after_next_block->previous.u = free_list_checksum_ptr(szone, ptr);
+			}
+
+			// we don't need to checksum these since they are already checksummed
+			((free_list_t *)ptr)->previous = big_free_block->previous;
+			((free_list_t *)ptr)->next = big_free_block->next;
+
+			// clear the meta_header to enable coalescing backwards
+			set_tiny_meta_header_middle(big_free_block);
+			set_tiny_meta_header_free(ptr, msize);
+
+			goto tiny_free_ending;
+		}
+		tiny_free_list_remove_ptr(szone, tiny_mag_ptr, next_block, next_msize);
+		set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
+		msize += next_msize;
+	}
+#if !TINY_CACHE
+	// The tiny cache already scribbles free blocks as they go through the
+	// cache, so we do not need to do it here.
+	if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize)
+		memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize));
+
+#endif
+	tiny_free_list_add_ptr(szone, tiny_mag_ptr, ptr, msize);
+  tiny_free_ending:
+	// When in proper debug mode we write on the memory to help debug memory smashers
+
+	tiny_mag_ptr->mag_num_objects--;
+	// we use original_size and not msize to avoid double counting the coalesced blocks
+	tiny_mag_ptr->mag_num_bytes_in_objects -= original_size;
+
+	// Update this region's bytes in use count
+	region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(region);
+	size_t bytes_used = node->bytes_used - original_size;
+	node->bytes_used = bytes_used;
+
+	/* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */
+	if (szone->num_tiny_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary
+		/* NOTHING */
+	} else if (DEPOT_MAGAZINE_INDEX != mag_index) {
+		// Emptiness discriminant
+		if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) {
+			/* Region has crossed threshold from density to sparsity. Mark it "suitable" on the
+			   recirculation candidates list. */
+			node->recirc_suitable = TRUE;
+		} else {
+			/* After this free, we've found the region is still dense, so it must have been even more so before
+			   the free. That implies the region is already correctly marked. Do nothing. */
+		}
+
+		// Has the entire magazine crossed the "emptiness threshold"? If so, transfer a region
+		// from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e
+		// is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list.
+		size_t a = tiny_mag_ptr->num_bytes_in_magazine; // Total bytes allocated to this magazine
+		size_t u = tiny_mag_ptr->mag_num_bytes_in_objects; // In use (malloc'd) from this magazine
+
+		if (a - u > ((3 * TINY_REGION_PAYLOAD_BYTES) / 2) && u < DENSITY_THRESHOLD(a))
+			tiny_free_do_recirc_to_depot(szone, tiny_mag_ptr, mag_index);
+
+	} else {
+		// Freed to Depot. N.B. Lock on tiny_magazines[DEPOT_MAGAZINE_INDEX] is already held
+		// Compute the page span of the coalesced block, excluding the in-band
+		// free-list header and trailing size word which must stay resident.
+		uintptr_t safe_ptr = (uintptr_t)ptr + sizeof(free_list_t) + sizeof(msize_t);
+		uintptr_t round_safe = round_page(safe_ptr);
+
+		uintptr_t safe_extent = (uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t);
+		uintptr_t trunc_extent = trunc_page(safe_extent);
+
+		// The newly freed block may complete a span of bytes that cover a page. Mark it with MADV_FREE.
+		if (round_safe < trunc_extent) { // Safe area covers a page
+			if (did_prepend & did_append) { // Coalesced preceding with original_ptr *and* with following
+				uintptr_t trunc_safe_prev = trunc_page((uintptr_t)original_ptr - sizeof(msize_t));
+				uintptr_t rnd_safe_follow =
+					round_page((uintptr_t)original_ptr + original_size + sizeof(free_list_t) + sizeof(msize_t));
+
+				madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), MIN(rnd_safe_follow, trunc_extent));
+			} else if (did_prepend) { // Coalesced preceding with original_ptr
+				uintptr_t trunc_safe_prev = trunc_page((uintptr_t)original_ptr - sizeof(msize_t));
+
+				madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), trunc_extent);
+			} else if (did_append) { // Coalesced original_ptr with following
+				uintptr_t rnd_safe_follow =
+					round_page((uintptr_t)original_ptr + original_size + sizeof(free_list_t) + sizeof(msize_t));
+
+				madvise_free_range(szone, region, round_safe, MIN(rnd_safe_follow, trunc_extent));
+			} else { // Isolated free cannot exceed 496 bytes, thus round_safe == trunc_extent, and so never get here.
+				/* madvise_free_range(szone, region, round_safe, trunc_extent); */
+			}
+		}
+
+		if (0 < bytes_used) {
+			/* Depot'd region is still live. Leave it in place on the Depot's recirculation list
+			   so as to avoid thrashing between the Depot's free list and a magazines's free list
+			   with detach_region/reattach_region */
+		} else {
+			/* Depot'd region is just now empty. Consider return to OS. */
+			region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(region);
+			magazine_t *depot_ptr = &(szone->tiny_magazines[DEPOT_MAGAZINE_INDEX]);
+			tiny_free_try_depot_unmap_no_lock(szone, depot_ptr, node); // FIXME: depot_ptr is simply tiny_mag_ptr?
+		}
+	}
+}
+
+// Allocates from the last region or a freshly allocated region
+//
+// Finalizes any leftover space in the magazine's last region, maps a new
+// TINY_REGION_SIZE region, initializes its trailing metadata words, inserts
+// it into the region hash ring (growing the ring under tiny_regions_lock if
+// it is becoming too dense), carves the first msize quanta out of it for the
+// caller, and records the remainder as mag_bytes_free_at_end.
+static void *
+tiny_malloc_from_region_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize)
+{
+	void	*ptr, *aligned_address;
+
+	// Deal with unclaimed memory -- mag_bytes_free_at_end
+	if (tiny_mag_ptr->mag_bytes_free_at_end)
+		tiny_finalize_region(szone, tiny_mag_ptr);
+
+	// time to create a new region
+	aligned_address = allocate_pages(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, 0, VM_MEMORY_MALLOC_TINY);
+	if (!aligned_address) // out of memory!
+		return NULL;
+
+	MAGMALLOC_ALLOCREGION((void *)szone, (int)mag_index); // DTrace USDT Probe
+
+	// We set the unused bits of the header in the last pair to be all ones, and those of the inuse to zeroes.
+	((tiny_region_t)aligned_address)->pairs[CEIL_NUM_TINY_BLOCKS_WORDS-1].header =
+		(NUM_TINY_BLOCKS & 31) ? (0xFFFFFFFFU << (NUM_TINY_BLOCKS & 31)) : 0;
+	((tiny_region_t)aligned_address)->pairs[CEIL_NUM_TINY_BLOCKS_WORDS-1].inuse = 0;
+
+	// Here find the only place in tinyland that (infrequently) takes the tiny_regions_lock.
+	// Only one thread at a time should be permitted to assess the density of the hash
+	// ring and adjust if needed.
+	// Only one thread at a time should be permitted to insert its new region on
+	// the hash ring.
+	// It is safe for all other threads to read the hash ring (hashed_regions) and
+	// the associated sizes (num_regions_allocated and num_tiny_regions).
+
+	LOCK(szone->tiny_regions_lock);
+
+	// Check to see if the hash ring of tiny regions needs to grow.  Try to
+	// avoid the hash ring becoming too dense.
+	if (szone->tiny_region_generation->num_regions_allocated < (2 * szone->num_tiny_regions)) {
+		region_t *new_regions;
+		size_t new_size;
+		size_t new_shift = szone->tiny_region_generation->num_regions_allocated_shift; // In/Out parameter
+		new_regions = hash_regions_grow_no_lock(szone, szone->tiny_region_generation->hashed_regions,
+						szone->tiny_region_generation->num_regions_allocated,
+						&new_shift,
+						&new_size);
+		// Do not deallocate the current hashed_regions allocation since someone may
+		// be iterating it.  Instead, just leak it.
+
+		// Prepare to advance to the "next generation" of the hash ring.
+		szone->tiny_region_generation->nextgen->hashed_regions = new_regions;
+		szone->tiny_region_generation->nextgen->num_regions_allocated = new_size;
+		szone->tiny_region_generation->nextgen->num_regions_allocated_shift = new_shift;
+
+		// Throw the switch to atomically advance to the next generation.
+		szone->tiny_region_generation = szone->tiny_region_generation->nextgen;
+		// Ensure everyone sees the advance.
+#if ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1))) /* GCC 4.1 and forward supports atomic builtins */
+		__sync_synchronize();
+#else
+		OSMemoryBarrier();
+#endif
+	}
+	// Tag the region at "aligned_address" as belonging to us,
+	// and so put it under the protection of the magazine lock we are holding.
+	// Do this before advertising "aligned_address" on the hash ring(!)
+	MAGAZINE_INDEX_FOR_TINY_REGION(aligned_address) = mag_index;
+
+	// Insert the new region into the hash ring, and update malloc statistics
+	hash_region_insert_no_lock(szone->tiny_region_generation->hashed_regions,
+				   szone->tiny_region_generation->num_regions_allocated,
+				   szone->tiny_region_generation->num_regions_allocated_shift,
+				   aligned_address);
+
+	szone->num_tiny_regions++;
+	UNLOCK(szone->tiny_regions_lock);
+
+	tiny_mag_ptr->mag_last_region = aligned_address;
+	BYTES_USED_FOR_TINY_REGION(aligned_address) = TINY_BYTES_FOR_MSIZE(msize);
+	ptr = aligned_address;
+	set_tiny_meta_header_in_use(ptr, msize);
+	tiny_mag_ptr->mag_num_objects++;
+	tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(msize);
+	tiny_mag_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES;
+
+	// We put a header on the last block so that it appears in use (for coalescing, etc...)
+	set_tiny_meta_header_in_use_1((void *)((uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(msize)));
+	tiny_mag_ptr->mag_bytes_free_at_end = TINY_BYTES_FOR_MSIZE(NUM_TINY_BLOCKS - msize);
+
+	// connect to magazine as first node (it's maximally sparse at this moment)
+	recirc_list_splice_first(szone, tiny_mag_ptr, REGION_TRAILER_FOR_TINY_REGION(aligned_address));
+
+#if DEBUG_MALLOC
+	if (LOG(szone,ptr)) {
+		malloc_printf("in tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
+	}
+#endif
+	return ptr;
+}
+
+/*
+ * tiny_try_realloc_in_place - attempt to grow the tiny block "ptr" from
+ * old_size to new_size without moving it, by consuming (part of) a free
+ * block immediately following it in the same tiny region.
+ * Returns 1 on success; 0 when the next block would lie past the region
+ * end, is in use, or is too small.  Acquires and releases the magazine
+ * lock guarding the region that owns "ptr".
+ */
+static INLINE boolean_t
+tiny_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
+{
+ // returns 1 on success
+ msize_t index;
+ msize_t old_msize;
+ unsigned next_index;
+ void *next_block;
+ boolean_t is_free;
+ msize_t next_msize, coalesced_msize, leftover_msize;
+ void *leftover;
+
+ index = TINY_INDEX_FOR_PTR(ptr);
+ old_msize = TINY_MSIZE_FOR_BYTES(old_size);
+ next_index = index + old_msize;
+
+ // The quantum after "ptr" would fall beyond this region: cannot grow here.
+ if (next_index >= NUM_TINY_BLOCKS) {
+ return 0;
+ }
+ next_block = (char *)ptr + old_size;
+
+ // Lock the magazine that currently owns this region; the helper re-checks
+ // the region's (volatile) magazine index in case the region migrated.
+ magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines,
+ REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)),
+ MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)));
+
+ /*
+ * Look for a free block immediately afterwards. If it's large enough, we can consume (part of)
+ * it.
+ */
+ is_free = tiny_meta_header_is_free(next_block);
+ if (!is_free) {
+ SZONE_MAGAZINE_PTR_UNLOCK(szone,tiny_mag_ptr);
+ return 0; // next_block is in use;
+ }
+ next_msize = get_tiny_free_size(next_block);
+ if (old_size + TINY_BYTES_FOR_MSIZE(next_msize) < new_size) {
+ SZONE_MAGAZINE_PTR_UNLOCK(szone,tiny_mag_ptr);
+ return 0; // even with next block, not enough
+ }
+ /*
+ * The following block is big enough; pull it from its freelist and chop off enough to satisfy
+ * our needs.
+ */
+ tiny_free_list_remove_ptr(szone, tiny_mag_ptr, next_block, next_msize);
+ set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
+ // Round the extra bytes needed up to a whole number of tiny quanta.
+ coalesced_msize = TINY_MSIZE_FOR_BYTES(new_size - old_size + TINY_QUANTUM - 1);
+ leftover_msize = next_msize - coalesced_msize;
+ if (leftover_msize) {
+ /* there's some left, so put the remainder back */
+ leftover = (void *)((uintptr_t)next_block + TINY_BYTES_FOR_MSIZE(coalesced_msize));
+
+ tiny_free_list_add_ptr(szone, tiny_mag_ptr, leftover, leftover_msize);
+ }
+ set_tiny_meta_header_in_use(ptr, old_msize + coalesced_msize);
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in tiny_try_realloc_in_place(), ptr=%p, msize=%d\n", ptr, old_msize + coalesced_msize);
+ }
+#endif
+ tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(coalesced_msize);
+
+ // Update this region's bytes in use count
+ region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr));
+ size_t bytes_used = node->bytes_used + TINY_BYTES_FOR_MSIZE(coalesced_msize);
+ node->bytes_used = bytes_used;
+
+ // Emptiness discriminant
+ if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) {
+ /* After this reallocation the region is still sparse, so it must have been even more so before
+ the reallocation. That implies the region is already correctly marked. Do nothing. */
+ } else {
+ /* Region has crossed threshold from sparsity to density. Mark it not "suitable" on the
+ recirculation candidates list. */
+ node->recirc_suitable = FALSE;
+ }
+
+ SZONE_MAGAZINE_PTR_UNLOCK(szone,tiny_mag_ptr);
+ CHECK(szone, __PRETTY_FUNCTION__);
+ return 1;
+}
+
+/*
+ * tiny_check_region - walk every block of a single tiny region and verify
+ * the allocator invariants: no zero-sized blocks (except an entirely free
+ * region), in-use blocks bounded by NUM_TINY_SLOTS - 1 quanta, no two
+ * adjacent free blocks, intact free-list links, and a correct trailing
+ * size marker on each free block.
+ * Returns 1 if all invariants hold, 0 (after printing a diagnostic)
+ * otherwise.  The magazine lock for this region must already be held.
+ */
+static boolean_t
+tiny_check_region(szone_t *szone, region_t region)
+{
+ uintptr_t start, ptr, region_end;
+ boolean_t prev_free = 0;
+ boolean_t is_free;
+ msize_t msize;
+ free_list_t *free_head;
+ void *follower, *previous, *next;
+ mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(region);
+ magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
+
+ // Assumes locked
+ CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__);
+
+ /* establish region limits */
+ start = (uintptr_t)TINY_REGION_ADDRESS(region);
+ ptr = start;
+ region_end = (uintptr_t)TINY_REGION_END(region);
+
+ /*
+ * The last region may have a trailing chunk which has not been converted into inuse/freelist
+ * blocks yet.
+ */
+ if (region == tiny_mag_ptr->mag_last_region)
+ region_end -= tiny_mag_ptr->mag_bytes_free_at_end;
+
+ /*
+ * Scan blocks within the region.
+ */
+ while (ptr < region_end) {
+ /*
+ * If the first block is free, and its size is 65536 (msize = 0) then the entire region is
+ * free.
+ */
+ msize = get_tiny_meta_header((void *)ptr, &is_free);
+ if (is_free && !msize && (ptr == start)) {
+ return 1;
+ }
+
+ /*
+ * If the block's size is 65536 (msize = 0) then since we're not the first entry the size is
+ * corrupt.
+ */
+ if (!msize) {
+ malloc_printf("*** invariant broken for tiny block %p this msize=%d - size is too small\n",
+ ptr, msize);
+ return 0;
+ }
+
+ if (!is_free) {
+ /*
+ * In use blocks cannot be more than (NUM_TINY_SLOTS - 1) quanta large.
+ */
+ prev_free = 0;
+ if (msize > (NUM_TINY_SLOTS - 1)) {
+ malloc_printf("*** invariant broken for %p this tiny msize=%d - size is too large\n",
+ ptr, msize);
+ return 0;
+ }
+ /* move to next block */
+ ptr += TINY_BYTES_FOR_MSIZE(msize);
+ } else {
+ /*
+ * Free blocks must have been coalesced, we cannot have a free block following another
+ * free block.
+ */
+ if (prev_free) {
+ malloc_printf("*** invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n",
+ ptr, msize);
+ return 0;
+ }
+ prev_free = 1;
+ /*
+ * Check the integrity of this block's entry in its freelist.
+ */
+ free_head = (free_list_t *)ptr;
+ previous = free_list_unchecksum_ptr(szone, &free_head->previous);
+ next = free_list_unchecksum_ptr(szone, &free_head->next);
+ if (previous && !tiny_meta_header_is_free(previous)) {
+ malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n",
+ ptr, previous);
+ return 0;
+ }
+ if (next && !tiny_meta_header_is_free(next)) {
+ malloc_printf("*** invariant broken for %p (next in free list %p is not a free pointer)\n",
+ ptr, next);
+ return 0;
+ }
+ /*
+ * Check the free block's trailing size value.
+ */
+ follower = FOLLOWING_TINY_PTR(ptr, msize);
+ if (((uintptr_t)follower != region_end) && (get_tiny_previous_free_msize(follower) != msize)) {
+ malloc_printf("*** invariant broken for tiny free %p followed by %p in region [%p-%p] "
+ "(end marker incorrect) should be %d; in fact %d\n",
+ ptr, follower, TINY_REGION_ADDRESS(region), region_end, msize, get_tiny_previous_free_msize(follower));
+ return 0;
+ }
+ /* move to next block */
+ ptr = (uintptr_t)follower;
+ }
+ }
+ /*
+ * Ensure that we scanned the entire region
+ */
+ if (ptr != region_end) {
+ malloc_printf("*** invariant broken for region end %p - %p\n", ptr, region_end);
+ return 0;
+ }
+ /*
+ * Check the trailing block's integrity.
+ */
+ if (region == tiny_mag_ptr->mag_last_region) {
+ if (tiny_mag_ptr->mag_bytes_free_at_end) {
+ msize = get_tiny_meta_header((void *)ptr, &is_free);
+ // Diagnostic only: a bad blocker block is reported but does not
+ // fail the overall region check.
+ if (is_free || (msize != 1)) {
+ malloc_printf("*** invariant broken for blocker block %p - %d %d\n", ptr, msize, is_free);
+ }
+ }
+ }
+ return 1;
+}
+
+/*
+ * tiny_in_use_enumerator - enumerate the tiny regions of a (possibly remote)
+ * szone on behalf of a memory-analysis tool, reporting admin ranges, pointer
+ * ranges, and/or individual in-use blocks according to "type_mask".
+ * All target-task memory is accessed through "reader" (the zone may live in
+ * another task); discovered ranges are delivered via "recorder".
+ * Returns a kern_return_t error propagated from the reader, or 0 on success.
+ */
+static kern_return_t
+tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone,
+ memory_reader_t reader, vm_range_recorder_t recorder)
+{
+ size_t num_regions;
+ size_t index;
+ region_t *regions;
+ vm_range_t buffer[MAX_RECORDER_BUFFER];
+ unsigned count = 0;
+ kern_return_t err;
+ region_t region;
+ vm_range_t range;
+ vm_range_t admin_range;
+ vm_range_t ptr_range;
+ unsigned char *mapped_region;
+ uint32_t *block_header;
+ uint32_t *in_use;
+ unsigned block_index;
+ unsigned block_limit;
+ boolean_t is_free;
+ msize_t msize;
+ void *mapped_ptr;
+ unsigned bit;
+ vm_address_t mag_last_free_ptr = 0;
+ msize_t mag_last_free_msize = 0;
+
+ region_hash_generation_t *trg_ptr;
+ err = reader(task, (vm_address_t)szone->tiny_region_generation, sizeof(region_hash_generation_t), (void **)&trg_ptr);
+ if (err) return err;
+
+ num_regions = trg_ptr->num_regions_allocated;
+ // Fixed: "&regions" had been mangled to the mojibake "®ions"
+ // (an HTML "&reg;" entity), which is not valid C.
+ err = reader(task, (vm_address_t)trg_ptr->hashed_regions, sizeof(region_t) * num_regions, (void **)&regions);
+ if (err) return err;
+
+ for (index = 0; index < num_regions; ++index) {
+ region = regions[index];
+ // Skip empty hash-ring slots and regions already returned to the OS.
+ if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) {
+ range.address = (vm_address_t)TINY_REGION_ADDRESS(region);
+ range.size = (vm_size_t)TINY_REGION_SIZE;
+ if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
+ admin_range.address = range.address + TINY_METADATA_START;
+ admin_range.size = TINY_METADATA_SIZE;
+ recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
+ }
+ if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
+ ptr_range.address = range.address;
+ ptr_range.size = NUM_TINY_BLOCKS * TINY_QUANTUM;
+ recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
+ }
+ if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
+ err = reader(task, range.address, range.size, (void **)&mapped_region);
+ if (err)
+ return err;
+
+ mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(mapped_region);
+ magazine_t *tiny_mag_ptr;
+ err = reader(task, (vm_address_t)&(szone->tiny_magazines[mag_index]), sizeof(magazine_t),
+ (void **)&tiny_mag_ptr);
+ if (err) return err;
+
+ // mag_last_free packs the cached block's msize into the low bits
+ // of its pointer; split it apart here.
+ void *mag_last_free = tiny_mag_ptr->mag_last_free;
+ if (mag_last_free) {
+ mag_last_free_ptr = (uintptr_t) mag_last_free & ~(TINY_QUANTUM - 1);
+ mag_last_free_msize = (uintptr_t) mag_last_free & (TINY_QUANTUM - 1);
+ }
+
+ block_header = (uint32_t *)(mapped_region + TINY_METADATA_START + sizeof(region_trailer_t));
+ in_use = TINY_INUSE_FOR_HEADER(block_header);
+ block_index = 0;
+ block_limit = NUM_TINY_BLOCKS;
+ if (region == tiny_mag_ptr->mag_last_region)
+ block_limit -= TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_end);
+
+ while (block_index < block_limit) {
+ vm_size_t block_offset = TINY_BYTES_FOR_MSIZE(block_index);
+ is_free = !BITARRAY_BIT(in_use, block_index);
+ if (is_free) {
+ mapped_ptr = mapped_region + block_offset;
+
+ // mapped_region, the address at which 'range' in 'task' has been
+ // mapped into our process, is not necessarily aligned to
+ // TINY_BLOCKS_ALIGN.
+ //
+ // Since the code in get_tiny_free_size() assumes the pointer came
+ // from a properly aligned tiny region, and mapped_region is not
+ // necessarily aligned, then do the size calculation directly.
+ // If the next bit is set in the header bitmap, then the size is one
+ // quantum. Otherwise, read the size field.
+ if (!BITARRAY_BIT(block_header, (block_index+1)))
+ msize = TINY_FREE_SIZE(mapped_ptr);
+ else
+ msize = 1;
+
+ if (!msize)
+ break;
+ } else if (range.address + block_offset != mag_last_free_ptr) {
+ // In-use block: its extent runs to the next set header bit.
+ msize = 1;
+ bit = block_index + 1;
+ while (! BITARRAY_BIT(block_header, bit)) {
+ bit++;
+ msize ++;
+ }
+ buffer[count].address = range.address + block_offset;
+ buffer[count].size = TINY_BYTES_FOR_MSIZE(msize);
+ count++;
+ if (count >= MAX_RECORDER_BUFFER) {
+ recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
+ count = 0;
+ }
+ } else {
+ // Block is not free but it matches mag_last_free_ptr so even
+ // though it is not marked free in the bitmap, we treat it as if
+ // it is and move on
+ msize = mag_last_free_msize;
+ }
+ block_index += msize;
+ }
+ if (count) {
+ recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
+ count = 0;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * tiny_malloc_from_free_list - satisfy an allocation of "msize" quanta from
+ * the magazine's free lists.  Tries, in order: the exact-size slot; the
+ * smallest larger slot (returning the remainder to a free list); the final
+ * oversized slot (splitting the head block when profitable); and finally
+ * the untouched space at the end of the magazine's last region.
+ * Returns a pointer to the block marked in use, or NULL if nothing fits.
+ * Caller must hold the magazine lock.
+ */
+static void *
+tiny_malloc_from_free_list(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize)
+{
+ free_list_t *ptr;
+ msize_t this_msize;
+ grain_t slot = msize - 1;
+ free_list_t **free_list = tiny_mag_ptr->mag_free_list;
+ free_list_t **the_slot = free_list + slot;
+ free_list_t *next;
+ free_list_t **limit;
+#if defined(__LP64__)
+ uint64_t bitmap;
+#else
+ uint32_t bitmap;
+#endif
+ msize_t leftover_msize;
+ free_list_t *leftover_ptr;
+
+ // Assumes we've locked the region
+ CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__);
+
+ // Look for an exact match by checking the freelist for this msize.
+ //
+ ptr = *the_slot;
+ if (ptr) {
+ next = free_list_unchecksum_ptr(szone, &ptr->next);
+ if (next) {
+ next->previous = ptr->previous;
+ } else {
+ // Slot is now empty; clear its bit in the occupancy bitmap.
+ BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot);
+ }
+ *the_slot = next;
+ this_msize = msize;
+#if DEBUG_MALLOC
+ if (LOG(szone, ptr)) {
+ malloc_printf("in tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize);
+ }
+#endif
+ goto return_tiny_alloc;
+ }
+
+ // Mask off the bits representing slots holding free blocks smaller than the
+ // size we need. If there are no larger free blocks, try allocating from
+ // the free space at the end of the tiny region.
+#if defined(__LP64__)
+ bitmap = ((uint64_t *)(tiny_mag_ptr->mag_bitmap))[0] & ~ ((1ULL << slot) - 1);
+#else
+ bitmap = tiny_mag_ptr->mag_bitmap[0] & ~ ((1 << slot) - 1);
+#endif
+ if (!bitmap)
+ goto try_tiny_malloc_from_end;
+
+ // Lowest set bit = smallest slot that can satisfy the request.
+ slot = BITMAPV_CTZ(bitmap);
+ limit = free_list + NUM_TINY_SLOTS - 1;
+ free_list += slot;
+
+ if (free_list < limit) {
+ ptr = *free_list;
+ if (ptr) {
+ next = free_list_unchecksum_ptr(szone, &ptr->next);
+ *free_list = next;
+ if (next) {
+ next->previous = ptr->previous;
+ } else {
+ BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot);
+ }
+ this_msize = get_tiny_free_size(ptr);
+ goto add_leftover_and_proceed;
+ }
+#if DEBUG_MALLOC
+ malloc_printf("in tiny_malloc_from_free_list(), mag_bitmap out of sync, slot=%d\n",slot);
+#endif
+ }
+
+ // We are now looking at the last slot, which contains blocks equal to, or
+ // due to coalescing of free blocks, larger than (NUM_TINY_SLOTS - 1) * tiny quantum size.
+ // If the last freelist is not empty, and the head contains a block that is
+ // larger than our request, then the remainder is put back on the free list.
+ ptr = *limit;
+ if (ptr) {
+ this_msize = get_tiny_free_size(ptr);
+ next = free_list_unchecksum_ptr(szone, &ptr->next);
+ if (this_msize - msize >= NUM_TINY_SLOTS) {
+ // the leftover will go back to the free list, so we optimize by
+ // modifying the free list rather than a pop and push of the head
+ leftover_msize = this_msize - msize;
+ leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
+ *limit = leftover_ptr;
+ if (next) {
+ next->previous.u = free_list_checksum_ptr(szone, leftover_ptr);
+ }
+ leftover_ptr->previous = ptr->previous;
+ leftover_ptr->next = ptr->next;
+ set_tiny_meta_header_free(leftover_ptr, leftover_msize);
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n",
+ ptr, msize, this_msize);
+ }
+#endif
+ this_msize = msize;
+ goto return_tiny_alloc;
+ }
+ if (next) {
+ next->previous = ptr->previous;
+ }
+ *limit = next;
+ goto add_leftover_and_proceed;
+ /* NOTREACHED */
+ }
+
+try_tiny_malloc_from_end:
+ // Let's see if we can use tiny_mag_ptr->mag_bytes_free_at_end
+ if (tiny_mag_ptr->mag_bytes_free_at_end >= TINY_BYTES_FOR_MSIZE(msize)) {
+ ptr = (free_list_t *)((uintptr_t)TINY_REGION_END(tiny_mag_ptr->mag_last_region) -
+ tiny_mag_ptr->mag_bytes_free_at_end);
+ tiny_mag_ptr->mag_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(msize);
+ if (tiny_mag_ptr->mag_bytes_free_at_end) {
+ // let's add an in use block after ptr to serve as boundary
+ set_tiny_meta_header_in_use_1((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
+ }
+ this_msize = msize;
+#if DEBUG_MALLOC
+ if (LOG(szone, ptr)) {
+ malloc_printf("in tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize);
+ }
+#endif
+ goto return_tiny_alloc;
+ }
+ return NULL;
+
+add_leftover_and_proceed:
+ if (!this_msize || (this_msize > msize)) {
+ leftover_msize = this_msize - msize;
+ leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
+ }
+#endif
+ tiny_free_list_add_ptr(szone, tiny_mag_ptr, leftover_ptr, leftover_msize);
+ this_msize = msize;
+ }
+
+return_tiny_alloc:
+ tiny_mag_ptr->mag_num_objects++;
+ tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(this_msize);
+
+ // Update this region's bytes in use count
+ region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr));
+ size_t bytes_used = node->bytes_used + TINY_BYTES_FOR_MSIZE(this_msize);
+ node->bytes_used = bytes_used;
+
+ // Emptiness discriminant
+ if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) {
+ /* After this allocation the region is still sparse, so it must have been even more so before
+ the allocation. That implies the region is already correctly marked. Do nothing. */
+ } else {
+ /* Region has crossed threshold from sparsity to density. Mark it not "suitable" on the
+ recirculation candidates list. */
+ node->recirc_suitable = FALSE;
+ }
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
+ }
+#endif
+ if (this_msize > 1)
+ set_tiny_meta_header_in_use(ptr, this_msize);
+ else
+ set_tiny_meta_header_in_use_1(ptr);
+ return ptr;
+}
+#undef DENSITY_THRESHOLD
+#undef K
+
+/*
+ * tiny_malloc_should_clear - top-level tiny allocation path for the calling
+ * thread's magazine.  Tries, in order: the one-deep mag_last_free cache
+ * (TINY_CACHE), the magazine's free lists, a region recirculated from the
+ * depot, and finally a brand-new region.  Zeroes the block when
+ * "cleared_requested" is set, except for fresh-region memory which is
+ * already pristine.  Returns NULL only if a new region cannot be obtained.
+ */
+static INLINE void *
+tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested)
+{
+ void *ptr;
+ mag_index_t mag_index = mag_get_thread_index(szone);
+ magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
+
+#if DEBUG_MALLOC
+ if (DEPOT_MAGAZINE_INDEX == mag_index) {
+ szone_error(szone, 1, "malloc called for magazine index -1", NULL, NULL);
+ return(NULL);
+ }
+
+ if (!msize) {
+ szone_error(szone, 1, "invariant broken (!msize) in allocation (region)", NULL, NULL);
+ return(NULL);
+ }
+#endif
+
+ SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
+
+#if TINY_CACHE
+ ptr = tiny_mag_ptr->mag_last_free;
+
+ // mag_last_free packs the cached block's msize into the low bits of its
+ // pointer; a match here means the cached block is exactly the right size.
+ if ((((uintptr_t)ptr) & (TINY_QUANTUM - 1)) == msize) {
+ // we have a winner
+ tiny_mag_ptr->mag_last_free = NULL;
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ CHECK(szone, __PRETTY_FUNCTION__);
+ ptr = (void *)((uintptr_t)ptr & ~ (TINY_QUANTUM - 1));
+ if (cleared_requested) {
+ memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
+ }
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize);
+ }
+#endif
+ return ptr;
+ }
+#endif /* TINY_CACHE */
+
+ ptr = tiny_malloc_from_free_list(szone, tiny_mag_ptr, mag_index, msize);
+ if (ptr) {
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ CHECK(szone, __PRETTY_FUNCTION__);
+ if (cleared_requested) {
+ memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
+ }
+ return ptr;
+ }
+
+ // Free lists exhausted: try pulling a region back from the depot, then
+ // retry the free lists.
+ if (tiny_get_region_from_depot(szone, tiny_mag_ptr, mag_index)) {
+ ptr = tiny_malloc_from_free_list(szone, tiny_mag_ptr, mag_index, msize);
+ if (ptr) {
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ CHECK(szone, __PRETTY_FUNCTION__);
+ if (cleared_requested) {
+ memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
+ }
+ return ptr;
+ }
+ }
+
+ ptr = tiny_malloc_from_region_no_lock(szone, tiny_mag_ptr, mag_index, msize);
+ // we don't clear because this freshly allocated space is pristine
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ CHECK(szone, __PRETTY_FUNCTION__);
+ return ptr;
+}
+
+/*
+ * free_tiny_botch - report a double free of "ptr".  Called with the
+ * owning magazine's lock held; releases it before raising the (possibly
+ * aborting) szone error so the error path cannot deadlock.
+ */
+static NOINLINE void
+free_tiny_botch(szone_t *szone, free_list_t *ptr)
+{
+ mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr));
+ magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ szone_error(szone, 1, "double free", ptr, NULL);
+}
+
+/*
+ * free_tiny - free a tiny block known to reside in "tiny_region".
+ * When known_size is zero the size is read from the block's meta header
+ * (detecting double frees).  With TINY_CACHE, a small block is parked in
+ * the magazine's one-deep mag_last_free cache and the previously cached
+ * block (if any) is the one actually freed.  Handles the race where the
+ * region migrates to another magazine between lookup and lock acquisition.
+ */
+static INLINE void
+free_tiny(szone_t *szone, void *ptr, region_t tiny_region, size_t known_size)
+{
+ msize_t msize;
+ boolean_t is_free;
+ mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region);
+ magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
+
+ // ptr is known to be in tiny_region
+ if (known_size) {
+ msize = TINY_MSIZE_FOR_BYTES(known_size + TINY_QUANTUM - 1);
+ } else {
+ msize = get_tiny_meta_header(ptr, &is_free);
+ if (is_free) {
+ // Block is already marked free: double free.
+ free_tiny_botch(szone, ptr);
+ return;
+ }
+ }
+#if DEBUG_MALLOC
+ if (!msize) {
+ malloc_printf("*** free_tiny() block in use is too large: %p\n", ptr);
+ return;
+ }
+#endif
+
+ SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
+
+#if TINY_CACHE
+ // Depot does not participate in TINY_CACHE since it can't be directly malloc()'d
+ if (DEPOT_MAGAZINE_INDEX != mag_index) {
+ if (msize < TINY_QUANTUM) { // to see if the bits fit in the last 4 bits
+ void *ptr2 = tiny_mag_ptr->mag_last_free; // Might be NULL
+ region_t rgn2 = tiny_mag_ptr->mag_last_free_rgn;
+
+ /* check that we don't already have this pointer in the cache */
+ if (ptr == (void *)((uintptr_t)ptr2 & ~ (TINY_QUANTUM - 1))) {
+ free_tiny_botch(szone, ptr);
+ return;
+ }
+
+ if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize)
+ memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize));
+
+ // Park this block in the cache, packing msize into the pointer's
+ // low bits; then fall through to free the previously cached block.
+ tiny_mag_ptr->mag_last_free = (void *)(((uintptr_t)ptr) | msize);
+ tiny_mag_ptr->mag_last_free_rgn = tiny_region;
+
+ if (!ptr2) {
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ CHECK(szone, __PRETTY_FUNCTION__);
+ return;
+ }
+
+ msize = (uintptr_t)ptr2 & (TINY_QUANTUM - 1);
+ ptr = (void *)(((uintptr_t)ptr2) & ~(TINY_QUANTUM - 1));
+ tiny_region = rgn2;
+ }
+ }
+#endif /* TINY_CACHE */
+
+ // Now in the time it took to acquire the lock, the region may have migrated
+ // from one magazine to another. I.e. trailer->mag_index is volatile.
+ // In which case the magazine lock we obtained (namely magazines[mag_index].mag_lock)
+ // is stale. If so, keep on tryin' ...
+ region_trailer_t *trailer = REGION_TRAILER_FOR_TINY_REGION(tiny_region);
+ mag_index_t refreshed_index;
+
+ while (mag_index != (refreshed_index = trailer->mag_index)) { // Note assignment
+
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+
+ mag_index = refreshed_index;
+ tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
+ SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
+ }
+
+ tiny_free_no_lock(szone, tiny_mag_ptr, mag_index, tiny_region, ptr, msize);
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ CHECK(szone, __PRETTY_FUNCTION__);
+}
+
+/*
+ * print_tiny_free_list - print, per magazine, a summary of each non-empty
+ * tiny free-list slot (block size and count).  The loop starts at
+ * mag_index -1 to include the depot magazine (see DEPOT_MAGAZINE_INDEX)
+ * ahead of the per-CPU magazines.
+ */
+static void
+print_tiny_free_list(szone_t *szone)
+{
+ free_list_t *ptr;
+ _SIMPLE_STRING b = _simple_salloc();
+ mag_index_t mag_index;
+
+ if (b) {
+ _simple_sappend(b, "tiny free sizes:\n");
+ for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) {
+ grain_t slot = 0;
+ _simple_sprintf(b,"\tMagazine %d: ", mag_index);
+ while (slot < NUM_TINY_SLOTS) {
+ ptr = szone->tiny_magazines[mag_index].mag_free_list[slot];
+ if (ptr) {
+ // Last slot holds all blocks >= its size, hence the ">=" tag.
+ _simple_sprintf(b, "%s%y[%d]; ", (slot == NUM_TINY_SLOTS-1) ? ">=" : "",
+ (slot+1)*TINY_QUANTUM, free_list_count(szone, ptr));
+ }
+ slot++;
+ }
+ _simple_sappend(b,"\n");
+ }
+ _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
+ _simple_sfree(b);
+ }
+}
+
+/*
+ * print_tiny_region - print a human-readable summary of one tiny region:
+ * address range, owning magazine, allocations and bytes in use, untouched
+ * trailing bytes, and the total whole-page span inside free blocks (advised
+ * MADV_FREE when the region is in the depot).  With "verbose", also prints
+ * a histogram of in-use block sizes (sizes below 1024 quanta only, the
+ * capacity of the local counts[] array).
+ */
+static void
+print_tiny_region(boolean_t verbose, region_t region, size_t bytes_at_end)
+{
+ unsigned counts[1024];
+ unsigned in_use = 0;
+ uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(region);
+ uintptr_t current = start;
+ uintptr_t limit = (uintptr_t)TINY_REGION_END(region) - bytes_at_end;
+ boolean_t is_free;
+ msize_t msize;
+ unsigned ci;
+ _SIMPLE_STRING b;
+ uintptr_t pgTot = 0;
+
+ if (region == HASHRING_REGION_DEALLOCATED) {
+ if ((b = _simple_salloc()) != NULL) {
+ _simple_sprintf(b, "Tiny region [unknown address] was returned to the OS\n");
+ _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
+ _simple_sfree(b);
+ }
+ return;
+ }
+
+ memset(counts, 0, sizeof(counts));
+ while (current < limit) {
+ msize = get_tiny_meta_header((void *)current, &is_free);
+ // Fixed: was "is_free & !msize" (bitwise &); logical && matches the
+ // identical entirely-free-region test in tiny_check_region().
+ if (is_free && !msize && (current == start)) {
+ // first block is all free
+ uintptr_t pgLo = round_page(start + sizeof(free_list_t) + sizeof(msize_t));
+ uintptr_t pgHi = trunc_page(start + TINY_REGION_SIZE - sizeof(msize_t));
+
+ if (pgLo < pgHi) {
+ pgTot += (pgHi - pgLo);
+ }
+ break;
+ }
+ if (!msize) {
+ malloc_printf("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize);
+ break;
+ }
+ if (!is_free) {
+ // block in use
+ if (msize > NUM_TINY_SLOTS)
+ malloc_printf("*** error at %p msize for in_use is %d\n", (void *)current, msize);
+ if (msize < 1024)
+ counts[msize]++;
+ in_use++;
+ } else {
+ // Free block: tally the whole pages lying strictly inside it,
+ // excluding its free-list header and trailing size marker.
+ uintptr_t pgLo = round_page(current + sizeof(free_list_t) + sizeof(msize_t));
+ uintptr_t pgHi = trunc_page(current + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
+
+ if (pgLo < pgHi) {
+ pgTot += (pgHi - pgLo);
+ }
+ }
+ current += TINY_BYTES_FOR_MSIZE(msize);
+ }
+ if ((b = _simple_salloc()) != NULL) {
+ _simple_sprintf(b, "Tiny region [%p-%p, %y] \t", (void *)start, TINY_REGION_END(region), (int)TINY_REGION_SIZE);
+ _simple_sprintf(b, "Magazine=%d \t", MAGAZINE_INDEX_FOR_TINY_REGION(region));
+ _simple_sprintf(b, "Allocations in use=%d \t Bytes in use=%ly \t", in_use, BYTES_USED_FOR_TINY_REGION(region));
+ if (bytes_at_end)
+ _simple_sprintf(b, "Untouched=%ly ", bytes_at_end);
+ if (DEPOT_MAGAZINE_INDEX == MAGAZINE_INDEX_FOR_TINY_REGION(region)) {
+ _simple_sprintf(b, "Advised MADV_FREE=%ly", pgTot);
+ } else {
+ _simple_sprintf(b, "Fragments subject to reclamation=%ly", pgTot);
+ }
+ if (verbose && in_use) {
+ _simple_sappend(b, "\n\tSizes in use: ");
+ for (ci = 0; ci < 1024; ci++)
+ if (counts[ci])
+ _simple_sprintf(b, "%d[%d] ", TINY_BYTES_FOR_MSIZE(ci), counts[ci]);
+ }
+ _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
+ _simple_sfree(b);
+ }
+}
+
+/*
+ * tiny_free_list_check - validate free list "slot" across every tiny
+ * magazine (including the depot at index -1): each entry must be marked
+ * free, quantum-aligned, inside a known tiny region, and correctly
+ * back-linked to its predecessor.  Locks each magazine around its scan.
+ * Returns 1 if all lists are sound, 0 after printing a diagnostic.
+ */
+static boolean_t
+tiny_free_list_check(szone_t *szone, grain_t slot)
+{
+ mag_index_t mag_index;
+
+ for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) {
+ magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
+ SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
+
+ unsigned count = 0;
+ free_list_t *ptr = szone->tiny_magazines[mag_index].mag_free_list[slot];
+ boolean_t is_free;
+ free_list_t *previous = NULL;
+
+ while (ptr) {
+ is_free = tiny_meta_header_is_free(ptr);
+ if (! is_free) {
+ malloc_printf("*** in-use ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ return 0;
+ }
+ if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
+ malloc_printf("*** unaligned ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ return 0;
+ }
+ if (!tiny_region_for_ptr_no_lock(szone, ptr)) {
+ malloc_printf("*** ptr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr);
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ return 0;
+ }
+ if (free_list_unchecksum_ptr(szone, &ptr->previous) != previous) {
+ malloc_printf("*** previous incorrectly set slot=%d count=%d ptr=%p\n", slot, count, ptr);
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ return 0;
+ }
+ previous = ptr;
+ ptr = free_list_unchecksum_ptr(szone, &ptr->next);
+ count++;
+ }
+
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ }
+ return 1;
+}
+
+/********************* SMALL FREE LIST UTILITIES ************************/
+
+/*
+ * Mark a block as free. Only the first quantum of a block is marked thusly,
+ * the remainder are marked "middle".
+ */
+static INLINE void
+small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize)
+{
+ // Record the block's size in its leading quantum, tagged with the free bit.
+ msize_t *entry = meta_headers + index;
+ *entry = msize | SMALL_IS_FREE;
+}
+
+/*
+ * Mark a block as in use. Only the first quantum of a block is marked thusly,
+ * the remainder are marked "middle".
+ */
+static INLINE void
+small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize)
+{
+ // An in-use block stores its bare size (no SMALL_IS_FREE bit set).
+ msize_t *entry = meta_headers + index;
+ *entry = msize;
+}
+
+/*
+ * Mark a quantum as being the second or later in a block.
+ */
+static INLINE void
+small_meta_header_set_middle(msize_t *meta_headers, msize_t index)
+{
+ // A zero meta-header marks a quantum as interior to some block.
+ msize_t *entry = meta_headers + index;
+ *entry = 0;
+}
+
+/*
+ * Adds an item to the proper free list, and also marks the meta-header of the
+ * block properly.
+ * Assumes szone has been locked
+ */
+/*
+ * small_free_list_add_ptr - push "ptr" (of "msize" quanta) onto the head of
+ * the magazine's free list for its size class (oversize blocks share the
+ * final slot), mark its meta header free, set the slot's bitmap bit if the
+ * list was empty, and record msize at the block's tail for backward
+ * coalescing.  Free-list links are stored checksummed.
+ * Assumes szone has been locked.
+ */
+static void
+small_free_list_add_ptr(szone_t *szone, magazine_t *small_mag_ptr, void *ptr, msize_t msize)
+{
+ grain_t slot = (msize <= szone->num_small_slots) ? msize - 1 : szone->num_small_slots - 1;
+ free_list_t *free_ptr = ptr;
+ free_list_t *free_head = small_mag_ptr->mag_free_list[slot];
+ void *follower;
+
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
+ }
+ if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) {
+ szone_error(szone, 1, "small_free_list_add_ptr: Unaligned ptr", ptr, NULL);
+ }
+#endif
+ small_meta_header_set_is_free(SMALL_META_HEADER_FOR_PTR(ptr), SMALL_META_INDEX_FOR_PTR(ptr), msize);
+
+ if (free_head) {
+#if DEBUG_MALLOC
+ if (free_list_unchecksum_ptr(szone, &free_head->previous)) {
+ szone_error(szone, 1, "small_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr,
+ "ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, (void *)free_head, free_head->previous.p);
+ }
+ if (!SMALL_PTR_IS_FREE(free_head)) {
+ szone_error(szone, 1, "small_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr,
+ "ptr=%p slot=%d free_head=%p\n", ptr, slot, (void *)free_head);
+ }
+#endif
+ free_head->previous.u = free_list_checksum_ptr(szone, free_ptr);
+ } else {
+ // List was empty: flag this slot as occupied in the bitmap.
+ BITMAPN_SET(small_mag_ptr->mag_bitmap, slot);
+ }
+ free_ptr->previous.u = free_list_checksum_ptr(szone, NULL);
+ free_ptr->next.u = free_list_checksum_ptr(szone, free_head);
+
+ small_mag_ptr->mag_free_list[slot] = free_ptr;
+
+ // Store msize at the end of the block denoted by "ptr" (i.e. at a negative offset from "follower")
+ follower = (void *)((uintptr_t)ptr + SMALL_BYTES_FOR_MSIZE(msize));
+ SMALL_PREVIOUS_MSIZE(follower) = msize;
+}
+
+/*
+ * Removes the item pointed to by ptr in the proper free list.
+ * Assumes szone has been locked
+ */
+/*
+ * small_free_list_remove_ptr - unlink "ptr" (of "msize" quanta) from its
+ * size class's free list, clearing the slot's bitmap bit when the list
+ * becomes empty.  The neighbors' links are transplanted without
+ * re-checksumming since ptr's stored links are already checksummed.
+ * Assumes szone has been locked.
+ */
+static void
+small_free_list_remove_ptr(szone_t *szone, magazine_t *small_mag_ptr, void *ptr, msize_t msize)
+{
+ grain_t slot = (msize <= szone->num_small_slots) ? msize - 1 : szone->num_small_slots - 1;
+ free_list_t *free_ptr = ptr, *next, *previous;
+
+ next = free_list_unchecksum_ptr(szone, &free_ptr->next);
+ previous = free_list_unchecksum_ptr(szone, &free_ptr->previous);
+
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
+ }
+#endif
+
+ if (!previous) {
+ // The block to remove is the head of the free list
+#if DEBUG_MALLOC
+ if (small_mag_ptr->mag_free_list[slot] != ptr) {
+ szone_error(szone, 1, "small_free_list_remove_ptr: Internal invariant broken (small_mag_ptr->mag_free_list[slot])", ptr,
+ "ptr=%p slot=%d msize=%d small_mag_ptr->mag_free_list[slot]=%p\n",
+ ptr, slot, msize, (void *)small_mag_ptr->mag_free_list[slot]);
+ return;
+ }
+#endif
+ small_mag_ptr->mag_free_list[slot] = next;
+ if (!next) BITMAPN_CLR(small_mag_ptr->mag_bitmap, slot);
+ } else {
+ // We know free_ptr is already checksummed, so we don't need to do it
+ // again.
+ previous->next = free_ptr->next;
+ }
+ if (next) {
+ // We know free_ptr is already checksummed, so we don't need to do it
+ // again.
+ next->previous = free_ptr->previous;
+ }
+}
+
+/*
+ * small_region_for_ptr_no_lock - Returns the small region containing the pointer,
+ * or NULL if not found.
+ */
+static INLINE region_t
+small_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
+{
+ rgnhdl_t r = hash_lookup_region_no_lock(szone->small_region_generation->hashed_regions,
+ szone->small_region_generation->num_regions_allocated,
+ szone->small_region_generation->num_regions_allocated_shift,
+ SMALL_REGION_FOR_PTR(ptr));
+ return r ? *r : r;
+}
+
+/*
+ * small_finalize_region
+ * Converts the magazine's trailing "bytes free at end" of its last region into
+ * a regular free-list block, coalescing it with an immediately preceding free
+ * block if one exists.  Clears mag_bytes_free_at_end and mag_last_region.
+ * Assumes the magazine lock is held.
+ */
+static void
+small_finalize_region(szone_t *szone, magazine_t *small_mag_ptr) {
+    void      *last_block, *previous_block;
+    msize_t   last_msize, previous_msize, last_index;
+
+    last_block = SMALL_REGION_END(small_mag_ptr->mag_last_region) - small_mag_ptr->mag_bytes_free_at_end;
+    last_msize = SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end);
+
+    // It is possible that the block prior to the last block in the region has
+    // been free'd, but was not coalesced with the free bytes at the end of the
+    // block, since we treat the bytes at the end of the region as "in use" in
+    // the meta headers. Attempt to coalesce the last block with the previous
+    // block, so we don't violate the "no consecutive free blocks" invariant.
+    //
+    // FIXME: If we could calculate the previous small free size in the same
+    //        manner as tiny_previous_preceding_free, it would eliminate the
+    //        index & previous msize checks, which are a guard against reading
+    //        bogus data out of in-use or written-on-freed memory.
+    //
+    // FIXME: Need to investigate how much work would be required to increase
+    //        'mag_bytes_free_at_end' when freeing the preceding block, rather
+    //        than performing this workaround.
+    //
+    last_index = SMALL_META_INDEX_FOR_PTR(last_block);
+    previous_msize = SMALL_PREVIOUS_MSIZE(last_block);
+
+    // Guard: previous_msize may be garbage (see FIXME above), so only trust it
+    // if it stays within the region (previous_msize <= last_index).
+    if (last_index && (previous_msize <= last_index)) {
+	previous_block = (void *)((uintptr_t)last_block - SMALL_BYTES_FOR_MSIZE(previous_msize));
+	if (*SMALL_METADATA_FOR_PTR(previous_block) == (previous_msize | SMALL_IS_FREE)) {
+	    msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(last_block);
+
+	    // Absorb the preceding free block: unhook it and extend last_block
+	    // backwards over it.
+	    small_meta_header_set_middle(meta_headers, last_index);
+	    small_free_list_remove_ptr(szone, small_mag_ptr, previous_block, previous_msize);
+	    last_block = (void *)((uintptr_t)last_block - SMALL_BYTES_FOR_MSIZE(previous_msize));
+	    last_msize += previous_msize;
+	}
+    }
+
+    // splice last_block into the free list
+    small_free_list_add_ptr(szone, small_mag_ptr, last_block, last_msize);
+    small_mag_ptr->mag_bytes_free_at_end = 0;
+    small_mag_ptr->mag_last_region = NULL;
+}
+
+/*
+ * small_free_detach_region
+ * Walks region r unhooking every free block from the magazine's free lists,
+ * and counts the blocks still in use.  Returns that in-use count.
+ * Assumes the magazine lock is held.
+ */
+static int
+small_free_detach_region(szone_t *szone, magazine_t *small_mag_ptr, region_t r) {
+    msize_t	*meta_headers = SMALL_META_HEADER_FOR_PTR(SMALL_REGION_ADDRESS(r));
+    uintptr_t	limit = (uintptr_t)SMALL_REGION_END(r);
+    uintptr_t	cursor;
+    int		in_use_count = 0;
+
+    for (cursor = (uintptr_t)SMALL_REGION_ADDRESS(r); cursor < limit; ) {
+	msize_t		header = meta_headers[SMALL_META_INDEX_FOR_PTR(cursor)];
+	msize_t		blk_msize = header & ~ SMALL_IS_FREE;
+
+	if (!blk_msize) {
+#if DEBUG_MALLOC
+	    malloc_printf("*** small_free_detach_region error with %p: msize=%d is_free =%d\n",
+			  (void *)cursor, blk_msize, (boolean_t)(header & SMALL_IS_FREE));
+#endif
+	    break;	// zero msize means damaged metadata; stop scanning
+	}
+	if (header & SMALL_IS_FREE) {
+	    small_free_list_remove_ptr(szone, small_mag_ptr, (void *)cursor, blk_msize);
+	} else {
+	    in_use_count++;
+	}
+	cursor += SMALL_BYTES_FOR_MSIZE(blk_msize);
+    }
+    return in_use_count;
+}
+
+/*
+ * small_free_reattach_region
+ * Walks region r hooking every free block onto the magazine's free lists,
+ * and sums the bytes of the blocks still in use.  Returns that byte total.
+ * Assumes the magazine lock is held.
+ */
+static size_t
+small_free_reattach_region(szone_t *szone, magazine_t *small_mag_ptr, region_t r) {
+    msize_t	*meta_headers = SMALL_META_HEADER_FOR_PTR(SMALL_REGION_ADDRESS(r));
+    uintptr_t	limit = (uintptr_t)SMALL_REGION_END(r);
+    uintptr_t	cursor;
+    size_t	bytes_in_use = 0;
+
+    for (cursor = (uintptr_t)SMALL_REGION_ADDRESS(r); cursor < limit; ) {
+	msize_t		header = meta_headers[SMALL_META_INDEX_FOR_PTR(cursor)];
+	msize_t		blk_msize = header & ~ SMALL_IS_FREE;
+
+	if (!blk_msize) {
+#if DEBUG_MALLOC
+	    malloc_printf("*** small_free_reattach_region error with %p: msize=%d is_free =%d\n",
+			  (void *)cursor, blk_msize, (boolean_t)(header & SMALL_IS_FREE));
+#endif
+	    break;	// zero msize means damaged metadata; stop scanning
+	}
+	if (header & SMALL_IS_FREE) {
+	    small_free_list_add_ptr(szone, small_mag_ptr, (void *)cursor, blk_msize);
+	} else {
+	    bytes_in_use += SMALL_BYTES_FOR_MSIZE(blk_msize);
+	}
+	cursor += SMALL_BYTES_FOR_MSIZE(blk_msize);
+    }
+    return bytes_in_use;
+}
+
+/*
+ * small_free_scan_depot_madvise_free
+ * Scans a Depot-resident region marking the whole pages inside its free blocks
+ * MADV_FREE, while preserving the free-list management words (the free_list_t
+ * header at the front of each free block and the trailing msize_t).  If any
+ * pages were advised, the region's node is moved to the tail of the Depot's
+ * recirculation list to delay its re-use.
+ * Assumes the Depot's magazine lock is held.
+ */
+static void
+small_free_scan_depot_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r) {
+    uintptr_t	start = (uintptr_t)SMALL_REGION_ADDRESS(r);
+    uintptr_t	current = start;
+    uintptr_t	limit =  (uintptr_t)SMALL_REGION_END(r);
+    msize_t	*meta_headers = SMALL_META_HEADER_FOR_PTR(start);
+    boolean_t	did_advise = FALSE;
+
+    // Scan the metadata identifying blocks which span one or more pages. Mark the pages MADV_FREE taking care to preserve free list
+    // management data.
+    while (current < limit) {
+	unsigned	index = SMALL_META_INDEX_FOR_PTR(current);
+	msize_t		msize_and_free = meta_headers[index];
+	boolean_t	is_free = msize_and_free & SMALL_IS_FREE;
+	msize_t		msize = msize_and_free & ~ SMALL_IS_FREE;
+
+	// A free first block with msize 0 means the entire region is one free
+	// block: advise everything between the (rounded) management words.
+	if (is_free && !msize && (current == start)) {
+#if DEBUG_MALLOC
+	    // first block is all free
+	    malloc_printf("*** small_free_scan_depot_madvise_free first block is all free! %p: msize=%d is_free =%d\n",
+			  (void *)current, msize, is_free);
+#endif
+	    uintptr_t pgLo = round_page(start + sizeof(free_list_t) + sizeof(msize_t));
+	    uintptr_t pgHi = trunc_page(start + SMALL_REGION_SIZE - sizeof(msize_t));
+
+	    if (pgLo < pgHi) {
+		madvise_free_range(szone, r, pgLo, pgHi);
+		did_advise = TRUE;
+	    }
+	    break;
+	}
+	if (!msize) {
+#if DEBUG_MALLOC
+	    malloc_printf("*** small_free_scan_depot_madvise_free error with %p: msize=%d is_free =%d\n",
+			  (void *)current, msize, is_free);
+#endif
+	    break;	// zero msize elsewhere means damaged metadata; stop scanning
+	}
+	if (is_free) {
+	    // Round inward past the free-list header and the trailing msize so
+	    // those management words survive the madvise.
+	    uintptr_t pgLo = round_page(current + sizeof(free_list_t) + sizeof(msize_t));
+	    uintptr_t pgHi = trunc_page(current + SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
+
+	    if (pgLo < pgHi) {
+		madvise_free_range(szone, r, pgLo, pgHi);
+		did_advise = TRUE;
+	    }
+	}
+	current += SMALL_BYTES_FOR_MSIZE(msize);
+    }
+
+    if (did_advise) {
+	/* Move the node to the tail of the Depot's recirculation list to delay its re-use. */
+	region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(r);
+	recirc_list_extract(szone, depot_ptr, node); // excise node from list
+	recirc_list_splice_last(szone, depot_ptr, node); // connect to magazine as last node
+    }
+}
+
+/*
+ * small_free_try_depot_unmap_no_lock
+ * If the Depot-resident region owning `node` holds no live allocations and the
+ * Depot retains enough headroom, detach the region and return its pages to the
+ * OS.  The Depot lock is held on entry and exit, but is dropped around the
+ * deallocate_pages() call to avoid stalling other threads while in the kernel.
+ */
+static void
+small_free_try_depot_unmap_no_lock(szone_t *szone, magazine_t *depot_ptr, region_trailer_t *node)
+{
+#warning Tune Depot headroom
+    // Keep the region if it still has bytes in use, or if unmapping would drop
+    // the Depot below its headroom of 2 regions per magazine.
+    if (0 < node->bytes_used ||
+	depot_ptr->recirculation_entries < (szone->num_small_magazines * 2)) {
+	return;
+    }
+
+    // disconnect first node from Depot
+    recirc_list_extract(szone, depot_ptr, node);
+
+    // Iterate the region pulling its free entries off the (locked) Depot's free list
+    region_t sparse_region = SMALL_REGION_FOR_PTR(node);
+    int objects_in_use = small_free_detach_region(szone, depot_ptr, sparse_region);
+
+    if (0 == objects_in_use) {
+	// Invalidate the hash table entry for this region with HASHRING_REGION_DEALLOCATED.
+	// Using HASHRING_REGION_DEALLOCATED preserves the collision chain, using HASHRING_OPEN_ENTRY (0) would not.
+	rgnhdl_t pSlot = hash_lookup_region_no_lock(szone->small_region_generation->hashed_regions,
+				    szone->small_region_generation->num_regions_allocated,
+				    szone->small_region_generation->num_regions_allocated_shift, sparse_region);
+	*pSlot = HASHRING_REGION_DEALLOCATED;
+	depot_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES;
+	// Atomic increment: readers inspect this counter without the Depot lock.
+#if ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1))) /* GCC 4.1 and forward supports atomic builtins */
+	__sync_fetch_and_add( &(szone->num_small_regions_dealloc), 1); // Atomically increment num_small_regions_dealloc
+#else
+#ifdef __LP64__
+	OSAtomicIncrement64( (volatile int64_t *)&(szone->num_small_regions_dealloc) );
+#else
+	OSAtomicIncrement32( (volatile int32_t *)&(szone->num_small_regions_dealloc) );
+#endif
+#endif
+
+	// Transfer ownership of the region back to the OS
+	SZONE_MAGAZINE_PTR_UNLOCK(szone, depot_ptr); // Avoid denial of service to Depot while in kernel
+	deallocate_pages(szone, sparse_region, SMALL_REGION_SIZE, 0);
+	SZONE_MAGAZINE_PTR_LOCK(szone, depot_ptr);
+
+	MAGMALLOC_DEALLOCREGION((void *)szone, (void *)sparse_region); // DTrace USDT Probe
+
+    } else {
+	// bytes_used said empty but the scan found live objects: metadata is inconsistent.
+	szone_error(szone, 1, "small_free_try_depot_unmap_no_lock objects_in_use not zero:", NULL, "%d\n", objects_in_use);
+    }
+}
+
+/*
+ * small_free_do_recirc_to_depot
+ * Moves one "recirc suitable" (sufficiently empty) region from this magazine
+ * to the Depot, transferring its free blocks to the Depot's free lists and
+ * adjusting both magazines' accounting.  Called with this magazine's lock
+ * held; additionally takes (and releases) the Depot's lock.
+ */
+static void
+small_free_do_recirc_to_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index)
+{
+    // The entire magazine crossed the "emptiness threshold". Transfer a region
+    // from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e
+    // is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list.
+    region_trailer_t *node = small_mag_ptr->firstNode;
+
+    while (node && !node->recirc_suitable) {
+	node = node->next;
+    }
+
+    if (NULL == node) {
+#if DEBUG_MALLOC
+	malloc_printf("*** small_free_do_recirc_to_depot end of list\n");
+#endif
+	return;
+    }
+
+    region_t sparse_region = SMALL_REGION_FOR_PTR(node);
+
+    // Deal with unclaimed memory -- mag_bytes_free_at_end
+    // (must become a real free block before the region can be handed over)
+    if (sparse_region == small_mag_ptr->mag_last_region && small_mag_ptr->mag_bytes_free_at_end) {
+	small_finalize_region(szone, small_mag_ptr);
+    }
+
+    // disconnect first node from magazine
+    recirc_list_extract(szone, small_mag_ptr, node);
+
+    // Iterate the region pulling its free entries off its (locked) magazine's free list
+    int objects_in_use = small_free_detach_region(szone, small_mag_ptr, sparse_region);
+    magazine_t *depot_ptr = &(szone->small_magazines[DEPOT_MAGAZINE_INDEX]);
+
+    // hand over the region to the (locked) Depot
+    SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr);
+    // this will cause small_free_list_add_ptr called by small_free_reattach_region to use
+    // the depot as its target magazine, rather than magazine formerly associated with sparse_region
+    MAGAZINE_INDEX_FOR_SMALL_REGION(sparse_region) = DEPOT_MAGAZINE_INDEX;
+
+    // Iterate the region putting its free entries on Depot's free list
+    size_t bytes_inplay = small_free_reattach_region(szone, depot_ptr, sparse_region);
+
+    // Move the region's accounting from the source magazine to the Depot.
+    small_mag_ptr->mag_num_bytes_in_objects -= bytes_inplay;
+    small_mag_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES;
+    small_mag_ptr->mag_num_objects -= objects_in_use;
+
+    depot_ptr->mag_num_bytes_in_objects += bytes_inplay;
+    depot_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES;
+    depot_ptr->mag_num_objects += objects_in_use;
+
+    // connect to Depot as first node
+    recirc_list_splice_first(szone, depot_ptr, node);
+
+    MAGMALLOC_RECIRCREGION((void *)szone, (int)mag_index, (int)BYTES_USED_FOR_SMALL_REGION(sparse_region)); // DTrace USDT Probe
+
+    // Mark free'd dirty pages with MADV_FREE to reduce memory pressure
+    small_free_scan_depot_madvise_free(szone, depot_ptr, sparse_region);
+
+    // If the region is entirely empty vm_deallocate() it
+    small_free_try_depot_unmap_no_lock(szone, depot_ptr, node);
+
+    SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
+}
+
+/*
+ * small_get_region_from_depot
+ * Appropriates one region from the Depot into the calling magazine: detaches
+ * its free blocks from the Depot, reassigns region ownership, reattaches the
+ * blocks to this magazine, and marks the pages MADV_FREE_REUSE.
+ * Returns 1 on success, 0 if the Depot is empty / unavailable / madvise fails.
+ * Called with this magazine's lock held; takes the Depot's lock internally.
+ */
+static boolean_t
+small_get_region_from_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index)
+{
+    magazine_t *depot_ptr = &(szone->small_magazines[DEPOT_MAGAZINE_INDEX]);
+
+    /* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */
+    if (szone->num_small_magazines == 1) // Uniprocessor, single magazine, so no recirculation necessary
+	return 0;
+
+#if DEBUG_MALLOC
+    if (DEPOT_MAGAZINE_INDEX == mag_index) {
+	szone_error(szone, 1, "small_get_region_from_depot called for magazine index -1", NULL, NULL);
+	return 0;
+    }
+#endif
+
+    SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr);
+
+    // Appropriate one of the Depot's regions. Prefer LIFO selection for best cache utilization.
+    region_trailer_t *node = depot_ptr->firstNode;
+
+    if (NULL == node) { // Depot empty?
+	SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
+	return 0;
+    }
+
+    // disconnect first node from Depot
+    recirc_list_extract(szone, depot_ptr, node);
+
+    // Iterate the region pulling its free entries off the (locked) Depot's free list
+    region_t sparse_region = SMALL_REGION_FOR_PTR(node);
+    int objects_in_use = small_free_detach_region(szone, depot_ptr, sparse_region);
+
+    // Transfer ownership of the region
+    // (must precede reattach so small_free_list_add_ptr targets this magazine)
+    MAGAZINE_INDEX_FOR_SMALL_REGION(sparse_region) = mag_index;
+
+    // Iterate the region putting its free entries on its new (locked) magazine's free list
+    size_t bytes_inplay = small_free_reattach_region(szone, small_mag_ptr, sparse_region);
+
+    // Move the region's accounting from the Depot to the destination magazine.
+    depot_ptr->mag_num_bytes_in_objects -= bytes_inplay;
+    depot_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES;
+    depot_ptr->mag_num_objects -= objects_in_use;
+
+    small_mag_ptr->mag_num_bytes_in_objects += bytes_inplay;
+    small_mag_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES;
+    small_mag_ptr->mag_num_objects += objects_in_use;
+
+    // connect to magazine as first node (it's maximally sparse at this moment)
+    recirc_list_splice_first(szone, small_mag_ptr, node);
+
+    SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
+
+    MAGMALLOC_DEPOTREGION((void *)szone, (int)mag_index, (int)BYTES_USED_FOR_SMALL_REGION(sparse_region)); // DTrace USDT Probe
+
+    // Reclaim any pages previously marked MADV_FREE before the region is reused.
+    if (-1 == madvise((void *)sparse_region, SMALL_REGION_PAYLOAD_BYTES, MADV_FREE_REUSE)) {
+	/* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */
+#if DEBUG_MALLOC
+	szone_error(szone, 1, "small_get_region_from_depot madvise(..., MADV_FREE_REUSE) failed", sparse_region, NULL);
+#endif
+	return 0;
+    }
+
+    return 1;
+}
+
+#warning Tune K and f!
+// K: Depot headroom used by the emptiness discriminant in small_free_no_lock.
+#define K 1.5 // headroom measured in number of 8Mb regions
+// DENSITY_THRESHOLD(a) = (1 - f) * a with f = 0.25, computed shift-only.
+#define DENSITY_THRESHOLD(a) \
+    ((a) - ((a) >> 2)) // "Emptiness" f = 0.25, so "Density" is (1 - f)*a. Generally: ((a) - ((a) >> -log2(f)))
+
+/*
+ * small_free_no_lock
+ * Frees a small block: coalesces it with free neighbors, splices the result
+ * onto the magazine's free list, updates accounting, and then applies the
+ * recirculation policy (magazine -> Depot) or, for Depot frees, marks the
+ * freed pages MADV_FREE and possibly unmaps a now-empty region.
+ * Assumes the magazine lock for mag_index is held.
+ */
+static INLINE void
+small_free_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, region_t region, void *ptr, msize_t msize)
+{
+    msize_t	*meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
+    unsigned	index = SMALL_META_INDEX_FOR_PTR(ptr);
+    void	*original_ptr = ptr;
+    size_t	original_size = SMALL_BYTES_FOR_MSIZE(msize);
+    unsigned char	*next_block = ((unsigned char *)ptr + original_size);
+    msize_t	next_index = index + msize;
+    msize_t	previous_msize, next_msize;
+    void	*previous;
+    boolean_t	did_prepend = FALSE;
+    boolean_t	did_append = FALSE;
+
+#if DEBUG_MALLOC
+    if (LOG(szone,ptr)) {
+	malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
+    }
+    if (! msize) {
+	szone_error(szone, 1, "trying to free small block that is too small", ptr,
+		    "in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
+    }
+#endif
+
+    // We try to coalesce this block with the preceding one
+    // Guard: SMALL_PREVIOUS_MSIZE may be stale, so only trust it when it stays
+    // within the region (<= index).
+    if (index && (SMALL_PREVIOUS_MSIZE(ptr) <= index)) {
+	previous_msize = SMALL_PREVIOUS_MSIZE(ptr);
+	if (meta_headers[index - previous_msize] == (previous_msize | SMALL_IS_FREE)) {
+	    previous = (void *)((uintptr_t)ptr - SMALL_BYTES_FOR_MSIZE(previous_msize));
+	    // previous is really to be coalesced
+	    did_prepend = TRUE;
+#if DEBUG_MALLOC
+	    if (LOG(szone, ptr) || LOG(szone,previous)) {
+		malloc_printf("in small_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
+	    }
+#endif
+	    small_free_list_remove_ptr(szone, small_mag_ptr, previous, previous_msize);
+	    small_meta_header_set_middle(meta_headers, index);
+	    ptr = previous;
+	    msize += previous_msize;
+	    index -= previous_msize;
+	}
+    }
+    // We try to coalesce with the next block
+    if ((next_block < SMALL_REGION_END(region)) && (meta_headers[next_index] & SMALL_IS_FREE)) {
+	// next block is free, we coalesce
+	did_append = TRUE;
+	next_msize = meta_headers[next_index] & ~ SMALL_IS_FREE;
+#if DEBUG_MALLOC
+	if (LOG(szone,ptr))
+	    malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n",
+			  ptr, msize, next_block, next_msize);
+#endif
+	small_free_list_remove_ptr(szone, small_mag_ptr, next_block, next_msize);
+	small_meta_header_set_middle(meta_headers, next_index);
+	msize += next_msize;
+    }
+    if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) {
+	if (!msize) {
+	    szone_error(szone, 1, "incorrect size information - block header was damaged", ptr, NULL);
+	} else {
+	    memset(ptr, 0x55, SMALL_BYTES_FOR_MSIZE(msize));
+	}
+    }
+    small_free_list_add_ptr(szone, small_mag_ptr, ptr, msize);
+    small_mag_ptr->mag_num_objects--;
+    // we use original_size and not msize to avoid double counting the coalesced blocks
+    small_mag_ptr->mag_num_bytes_in_objects -= original_size;
+
+    // Update this region's bytes in use count
+    region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(region);
+    size_t bytes_used = node->bytes_used - original_size;
+    node->bytes_used = bytes_used;
+
+    /* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */
+    if (szone->num_small_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary
+	/* NOTHING */
+    } else if (DEPOT_MAGAZINE_INDEX != mag_index) {
+	// Emptiness discriminant
+	if (bytes_used < DENSITY_THRESHOLD(SMALL_REGION_PAYLOAD_BYTES)) {
+	    /* Region has crossed threshold from density to sparsity. Mark it "suitable" on the
+	       recirculation candidates list. */
+	    node->recirc_suitable = TRUE;
+	} else {
+	    /* After this free, we've found the region is still dense, so it must have been even more so before
+	       the free. That implies the region is already correctly marked. Do nothing. */
+	}
+
+	// Has the entire magazine crossed the "emptiness threshold"? If so, transfer a region
+	// from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e
+	// is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list.
+
+	size_t a = small_mag_ptr->num_bytes_in_magazine; // Total bytes allocated to this magazine
+	size_t u = small_mag_ptr->mag_num_bytes_in_objects; // In use (malloc'd) from this magazine
+
+	if (a - u > ((3 * SMALL_REGION_PAYLOAD_BYTES) / 2) && u < DENSITY_THRESHOLD(a))
+	    small_free_do_recirc_to_depot(szone, small_mag_ptr, mag_index);
+
+    } else {
+	// Freed to Depot. N.B. Lock on small_magazines[DEPOT_MAGAZINE_INDEX] is already held
+	// Compute the page span of the freed block that excludes the free-list
+	// header and trailing msize, so those management words are preserved.
+	uintptr_t safe_ptr = (uintptr_t)ptr + sizeof(free_list_t) + sizeof(msize_t);
+	uintptr_t round_safe = round_page(safe_ptr);
+
+	uintptr_t safe_extent = (uintptr_t)ptr + SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t);
+	uintptr_t trunc_extent = trunc_page(safe_extent);
+
+	// The newly freed block may complete a span of bytes that cover a page. Mark it with MADV_FREE.
+	// Neighbors that were already free have already had their interiors
+	// advised, so clamp to the pages made newly advisable by this free.
+	if (round_safe < trunc_extent) { // Safe area covers a page (perhaps many)
+	    if (did_prepend && did_append) { // Coalesced preceding with original_ptr *and* with following
+		uintptr_t trunc_safe_prev = trunc_page((uintptr_t)original_ptr - sizeof(msize_t));
+		uintptr_t rnd_safe_follow =
+		    round_page((uintptr_t)original_ptr + original_size + sizeof(free_list_t) + sizeof(msize_t));
+
+		madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), MIN(rnd_safe_follow, trunc_extent));
+	    } else if (did_prepend) { // Coalesced preceding with original_ptr
+		uintptr_t trunc_safe_prev = trunc_page((uintptr_t)original_ptr - sizeof(msize_t));
+
+		madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), trunc_extent);
+	    } else if (did_append) { // Coalesced original_ptr with following
+		uintptr_t rnd_safe_follow =
+		    round_page((uintptr_t)original_ptr + original_size + sizeof(free_list_t) + sizeof(msize_t));
+
+		madvise_free_range(szone, region, round_safe, MIN(rnd_safe_follow, trunc_extent));
+	    } else // Isolated free
+		madvise_free_range(szone, region, round_safe, trunc_extent);
+	}
+
+	if (0 < bytes_used) {
+	    /* Depot'd region is still live. Leave it in place on the Depot's recirculation list
+	       so as to avoid thrashing between the Depot's free list and a magazines's free list
+	       with detach_region/reattach_region */
+	} else {
+	    /* Depot'd region is just now empty. Consider return to OS. */
+	    region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(region);
+	    magazine_t *depot_ptr = &(szone->small_magazines[DEPOT_MAGAZINE_INDEX]);
+	    small_free_try_depot_unmap_no_lock(szone, depot_ptr, node);
+	}
+    }
+}
+
+// Allocates from the last region or a freshly allocated region
+//
+// Maps a new SMALL_REGION_SIZE region, publishes it on the (briefly locked)
+// region hash ring, carves off the first msize quanta for the caller, and
+// leaves the remainder as this magazine's mag_bytes_free_at_end.
+// Returns NULL if page allocation fails.  Assumes the magazine lock is held.
+static void *
+small_malloc_from_region_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize)
+{
+    void	*ptr, *aligned_address;
+
+    // Before anything we transform the mag_bytes_free_at_end - if any - to a regular free block
+    /* FIXME: last_block needs to be coalesced with previous entry if free, <rdar://5462322> */
+    if (small_mag_ptr->mag_bytes_free_at_end)
+	small_finalize_region(szone, small_mag_ptr);
+
+    // time to create a new region
+    aligned_address = allocate_pages(szone, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MEMORY_MALLOC_SMALL);
+    if (!aligned_address)
+	return NULL;
+
+    MAGMALLOC_ALLOCREGION((void *)szone, (int)mag_index); // DTrace USDT Probe
+
+    // Here find the only place in smallville that (infrequently) takes the small_regions_lock.
+    // Only one thread at a time should be permitted to assess the density of the hash
+    // ring and adjust if needed.
+    // Only one thread at a time should be permitted to insert its new region on
+    // the hash ring.
+    // It is safe for all other threads to read the hash ring (hashed_regions) and
+    // the associated sizes (num_regions_allocated and num_small_regions).
+
+    LOCK(szone->small_regions_lock);
+    // Check to see if the hash ring of small regions needs to grow. Try to
+    // avoid the hash ring becoming too dense.
+    if (szone->small_region_generation->num_regions_allocated < (2 * szone->num_small_regions)) {
+	region_t *new_regions;
+	size_t new_size;
+	size_t new_shift = szone->small_region_generation->num_regions_allocated_shift; // In/Out parameter
+	new_regions = hash_regions_grow_no_lock(szone, szone->small_region_generation->hashed_regions,
+						szone->small_region_generation->num_regions_allocated,
+						&new_shift,
+						&new_size);
+	// Do not deallocate the current hashed_regions allocation since someone
+	// may be iterating it. Instead, just leak it.
+
+	// Prepare to advance to the "next generation" of the hash ring.
+	szone->small_region_generation->nextgen->hashed_regions = new_regions;
+	szone->small_region_generation->nextgen->num_regions_allocated = new_size;
+	szone->small_region_generation->nextgen->num_regions_allocated_shift = new_shift;
+
+	// Throw the switch to atomically advance to the next generation.
+	szone->small_region_generation = szone->small_region_generation->nextgen;
+	// Ensure everyone sees the advance.
+	// (Full memory barrier so the nextgen fields are visible before the
+	// generation pointer swap is observed by lock-free readers.)
+#if ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1))) /* GCC 4.1 and forward supports atomic builtins */
+	__sync_synchronize();
+#else
+	OSMemoryBarrier();
+#endif
+    }
+    // Tag the region at "aligned_address" as belonging to us,
+    // and so put it under the protection of the magazine lock we are holding.
+    // Do this before advertising "aligned_address" on the hash ring(!)
+    MAGAZINE_INDEX_FOR_SMALL_REGION(aligned_address) = mag_index;
+
+    // Insert the new region into the hash ring, and update malloc statistics
+    hash_region_insert_no_lock(szone->small_region_generation->hashed_regions,
+			       szone->small_region_generation->num_regions_allocated,
+			       szone->small_region_generation->num_regions_allocated_shift,
+			       aligned_address);
+
+    szone->num_small_regions++;
+
+    UNLOCK(szone->small_regions_lock);
+
+    small_mag_ptr->mag_last_region = aligned_address;
+    BYTES_USED_FOR_SMALL_REGION(aligned_address) = SMALL_BYTES_FOR_MSIZE(msize);
+    ptr = aligned_address;
+    // Mark the caller's block in use and account for it.
+    small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), 0, msize);
+    small_mag_ptr->mag_num_objects++;
+    small_mag_ptr->mag_num_bytes_in_objects += SMALL_BYTES_FOR_MSIZE(msize);
+    small_mag_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES;
+
+    // add a big free block
+    // (tracked via mag_bytes_free_at_end rather than the free lists)
+    small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr) , msize, NUM_SMALL_BLOCKS - msize);
+    small_mag_ptr->mag_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(NUM_SMALL_BLOCKS - msize);
+
+    // connect to magazine as first node (it's maximally sparse at this moment)
+    recirc_list_splice_first(szone, small_mag_ptr, REGION_TRAILER_FOR_SMALL_REGION(aligned_address));
+
+    return ptr;
+}
+
+/*
+ * small_try_realloc_in_place
+ * Attempts to grow the allocation at ptr from old_size to new_size without
+ * moving it, by consuming (part of) an immediately following free block.
+ * Returns 1 on success, 0 if no suitable free neighbor exists.
+ * Takes and releases the owning magazine's lock internally.
+ */
+static INLINE boolean_t
+small_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
+{
+    // returns 1 on success
+    msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
+    unsigned index;
+    msize_t old_msize, new_msize;
+    unsigned next_index;
+    void *next_block;
+    msize_t next_msize_and_free;
+    boolean_t is_free;
+    msize_t next_msize, leftover_msize;
+    void *leftover;
+
+    index = SMALL_META_INDEX_FOR_PTR(ptr);
+    old_msize = SMALL_MSIZE_FOR_BYTES(old_size);
+    new_msize = SMALL_MSIZE_FOR_BYTES(new_size + SMALL_QUANTUM - 1);  // round up to whole quanta
+    next_index = index + old_msize;
+
+    if (next_index >= NUM_SMALL_BLOCKS) {
+	return 0;  // block ends at region boundary; nothing follows
+    }
+    next_block = (char *)ptr + old_size;
+
+#if DEBUG_MALLOC
+    if ((uintptr_t)next_block & (SMALL_QUANTUM - 1)) {
+	szone_error(szone, 1, "internal invariant broken in realloc(next_block)", next_block, NULL);
+    }
+    if (meta_headers[index] != old_msize)
+	malloc_printf("*** small_try_realloc_in_place incorrect old %d %d\n",
+		      meta_headers[index], old_msize);
+#endif
+
+    magazine_t	*small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines,
+								  REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)),
+								  MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)));
+
+    /*
+     * Look for a free block immediately afterwards. If it's large enough, we can consume (part of)
+     * it.
+     */
+    next_msize_and_free = meta_headers[next_index];
+    is_free = next_msize_and_free & SMALL_IS_FREE;
+    if (!is_free) {
+	SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+	return 0; // next_block is in use;
+    }
+    next_msize = next_msize_and_free & ~ SMALL_IS_FREE;
+    if (old_msize + next_msize < new_msize) {
+	SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+	return 0; // even with next block, not enough
+    }
+    /*
+     * The following block is big enough; pull it from its freelist and chop off enough to satisfy
+     * our needs.
+     */
+    small_free_list_remove_ptr(szone, small_mag_ptr, next_block, next_msize);
+    small_meta_header_set_middle(meta_headers, next_index);
+    leftover_msize = old_msize + next_msize - new_msize;
+    if (leftover_msize) {
+	/* there's some left, so put the remainder back */
+	leftover = (unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(new_msize);
+
+	small_free_list_add_ptr(szone, small_mag_ptr, leftover, leftover_msize);
+    }
+#if DEBUG_MALLOC
+    if (SMALL_BYTES_FOR_MSIZE(new_msize) > szone->large_threshold) {
+	// BUGFIX: the format has two conversions (%p, %d) but only one
+	// argument was passed, which is undefined behavior; supply ptr.
+	malloc_printf("*** realloc in place for %p exceeded msize=%d\n", ptr, new_msize);
+    }
+#endif
+    small_meta_header_set_in_use(meta_headers, index, new_msize);
+#if DEBUG_MALLOC
+    if (LOG(szone,ptr)) {
+	malloc_printf("in szone_realloc(), ptr=%p, msize=%d\n", ptr, *SMALL_METADATA_FOR_PTR(ptr));
+    }
+#endif
+    small_mag_ptr->mag_num_bytes_in_objects += SMALL_BYTES_FOR_MSIZE(new_msize - old_msize);
+
+    // Update this region's bytes in use count
+    region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr));
+    size_t bytes_used = node->bytes_used + SMALL_BYTES_FOR_MSIZE(new_msize - old_msize);
+    node->bytes_used = bytes_used;
+
+    // Emptiness discriminant
+    if (bytes_used < DENSITY_THRESHOLD(SMALL_REGION_PAYLOAD_BYTES)) {
+	/* After this reallocation the region is still sparse, so it must have been even more so before
+	   the reallocation. That implies the region is already correctly marked. Do nothing. */
+    } else {
+	/* Region has crossed threshold from sparsity to density. Mark it not "suitable" on the
+	   recirculation candidates list. */
+	node->recirc_suitable = FALSE;
+    }
+
+    SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+    CHECK(szone, __PRETTY_FUNCTION__);
+    return 1;
+}
+
+/*
+ * small_check_region
+ * Walks every block in a region validating the metadata invariants: non-zero
+ * msize, in-use sizes below the large threshold, no two consecutive free
+ * blocks, free-list links pointing at free blocks, and matching trailing
+ * msize markers.  Returns 1 if the region is consistent, 0 (after printing
+ * a diagnostic) otherwise.  Assumes the owning magazine's lock is held.
+ */
+static boolean_t
+small_check_region(szone_t *szone, region_t region)
+{
+    unsigned char	*ptr = SMALL_REGION_ADDRESS(region);
+    msize_t		*meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
+    unsigned char	*region_end = SMALL_REGION_END(region);
+    msize_t		prev_free = 0;
+    unsigned		index;
+    msize_t		msize_and_free;
+    msize_t		msize;
+    free_list_t		*free_head;
+    void		*previous, *next;
+    msize_t		*follower;
+    mag_index_t		mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr));
+    magazine_t		*small_mag_ptr = &(szone->small_magazines[mag_index]);
+
+    // Assumes locked
+    CHECK_MAGAZINE_PTR_LOCKED(szone, small_mag_ptr, __PRETTY_FUNCTION__);
+
+    // The trailing free-at-end bytes of the last region carry no metadata;
+    // exclude them from the scan.
+    if (region == small_mag_ptr->mag_last_region)
+	region_end -= small_mag_ptr->mag_bytes_free_at_end;
+
+    while (ptr < region_end) {
+	index = SMALL_META_INDEX_FOR_PTR(ptr);
+	msize_and_free = meta_headers[index];
+	if (!(msize_and_free & SMALL_IS_FREE)) {
+	    // block is in use
+	    msize = msize_and_free;
+	    if (!msize) {
+		malloc_printf("*** invariant broken: null msize ptr=%p num_small_regions=%d end=%p\n",
+			      ptr, szone->num_small_regions, region_end);
+		return 0;
+	    }
+	    if (SMALL_BYTES_FOR_MSIZE(msize) > szone->large_threshold) {
+		malloc_printf("*** invariant broken for %p this small msize=%d - size is too large\n",
+			      ptr, msize_and_free);
+		return 0;
+	    }
+	    ptr += SMALL_BYTES_FOR_MSIZE(msize);
+	    prev_free = 0;
+	} else {
+	    // free pointer
+	    msize = msize_and_free & ~ SMALL_IS_FREE;
+	    free_head = (free_list_t *)ptr;
+	    follower = (msize_t *)FOLLOWING_SMALL_PTR(ptr, msize);
+	    if (!msize) {
+		malloc_printf("*** invariant broken for free block %p this msize=%d\n", ptr, msize);
+		return 0;
+	    }
+	    // "No consecutive free blocks" invariant (they should have coalesced).
+	    if (prev_free) {
+		malloc_printf("*** invariant broken for %p (2 free in a row)\n", ptr);
+		return 0;
+	    }
+	    previous = free_list_unchecksum_ptr(szone, &free_head->previous);
+	    next = free_list_unchecksum_ptr(szone, &free_head->next);
+	    if (previous && !SMALL_PTR_IS_FREE(previous)) {
+		malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n",
+			      ptr, free_head->previous);
+		return 0;
+	    }
+	    if (next && !SMALL_PTR_IS_FREE(next)) {
+		malloc_printf("*** invariant broken for %p (next is not a free pointer)\n", ptr);
+		return 0;
+	    }
+	    // The msize_t just before the following block must echo this
+	    // block's msize (written by small_free_list_add_ptr).
+	    if (SMALL_PREVIOUS_MSIZE(follower) != msize) {
+		malloc_printf("*** invariant broken for small free %p followed by %p in region [%p-%p] "
+			      "(end marker incorrect) should be %d; in fact %d\n",
+			      ptr, follower, SMALL_REGION_ADDRESS(region), region_end, msize, SMALL_PREVIOUS_MSIZE(follower));
+		return 0;
+	    }
+	    ptr = (unsigned char *)follower;
+	    prev_free = SMALL_IS_FREE;
+	}
+    }
+    return 1;
+}
+
+// Enumerate the small-allocator memory of a (possibly remote) task's szone
+// for the malloc zone introspection API.  All target memory is accessed
+// through `reader`; discovered ranges are reported through `recorder`
+// filtered by `type_mask`.  Returns 0 on success, or the first error
+// returned by `reader`.
+static kern_return_t
+small_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone,
+                        memory_reader_t reader, vm_range_recorder_t recorder)
+{
+    size_t num_regions;
+    size_t index;
+    region_t *regions;
+    vm_range_t buffer[MAX_RECORDER_BUFFER];
+    unsigned count = 0;
+    kern_return_t err;
+    region_t region;
+    vm_range_t range;
+    vm_range_t admin_range;
+    vm_range_t ptr_range;
+    unsigned char *mapped_region;
+    msize_t *block_header;
+    unsigned block_index;
+    unsigned block_limit;
+    msize_t msize_and_free;
+    msize_t msize;
+    vm_address_t mag_last_free_ptr = 0;
+    msize_t mag_last_free_msize = 0;
+
+    region_hash_generation_t *srg_ptr;
+    err = reader(task, (vm_address_t)szone->small_region_generation, sizeof(region_hash_generation_t), (void **)&srg_ptr);
+    if (err) return err;
+
+    num_regions = srg_ptr->num_regions_allocated;
+    // FIX: was "(void **)&regions" mis-encoded as "(void **)(R)ions" by an
+    // HTML-entity mangling of "&reg"; restore the address-of expression.
+    err = reader(task, (vm_address_t)srg_ptr->hashed_regions, sizeof(region_t) * num_regions, (void **)&regions);
+    if (err) return err;
+
+    for (index = 0; index < num_regions; ++index) {
+        region = regions[index];
+        // Skip empty and deallocated hash-ring slots.
+        if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) {
+            range.address = (vm_address_t)SMALL_REGION_ADDRESS(region);
+            range.size = SMALL_REGION_SIZE;
+            if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
+                admin_range.address = range.address + SMALL_METADATA_START;
+                admin_range.size = SMALL_METADATA_SIZE;
+                recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
+            }
+            if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
+                ptr_range.address = range.address;
+                ptr_range.size = NUM_SMALL_BLOCKS * SMALL_QUANTUM;
+                recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
+            }
+            if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
+                err = reader(task, range.address, range.size, (void **)&mapped_region);
+                if (err)
+                    return err;
+
+                // The region's owning magazine may cache one "last free"
+                // block; that block must not be reported as in use.
+                mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(mapped_region);
+                magazine_t *small_mag_ptr;
+                err = reader(task, (vm_address_t)&(szone->small_magazines[mag_index]), sizeof(magazine_t),
+                             (void **)&small_mag_ptr);
+                if (err) return err;
+
+                void *mag_last_free = small_mag_ptr->mag_last_free;
+                if (mag_last_free) {
+                    // Pointer and msize are packed into one word (low bits).
+                    mag_last_free_ptr = (uintptr_t) mag_last_free & ~(SMALL_QUANTUM - 1);
+                    mag_last_free_msize = (uintptr_t) mag_last_free & (SMALL_QUANTUM - 1);
+                }
+
+                block_header = (msize_t *)(mapped_region + SMALL_METADATA_START + sizeof(region_trailer_t));
+                block_index = 0;
+                block_limit = NUM_SMALL_BLOCKS;
+                if (region == small_mag_ptr->mag_last_region)
+                    // Exclude the untouched tail of the most recent region.
+                    block_limit -= SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end);
+                while (block_index < block_limit) {
+                    msize_and_free = block_header[block_index];
+                    msize = msize_and_free & ~ SMALL_IS_FREE;
+                    if (! (msize_and_free & SMALL_IS_FREE) &&
+                        range.address + SMALL_BYTES_FOR_MSIZE(block_index) != mag_last_free_ptr) {
+                        // Block in use
+                        buffer[count].address = range.address + SMALL_BYTES_FOR_MSIZE(block_index);
+                        buffer[count].size = SMALL_BYTES_FOR_MSIZE(msize);
+                        count++;
+                        if (count >= MAX_RECORDER_BUFFER) {
+                            recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
+                            count = 0;
+                        }
+                    }
+                    // NOTE(review): a zero msize here would loop forever;
+                    // this relies on well-formed region metadata.
+                    block_index += msize;
+                }
+                if (count) {
+                    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
+                    count = 0;
+                }
+            }
+        }
+    }
+    return 0;
+}
+
+// Allocate a block of `msize` quanta from this magazine's free lists.
+// Search order: (1) the exact-size slot, (2) a bitmap scan for the smallest
+// larger slot, (3) the final catch-all slot holding coalesced oversize
+// blocks (splitting off the remainder), then (4) the untouched space at the
+// end of the magazine's most recent region.  Returns NULL if the magazine
+// cannot satisfy the request.  Caller must hold the magazine lock.
+static void *
+small_malloc_from_free_list(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize)
+{
+    free_list_t *ptr;
+    msize_t this_msize;
+    // Requests at or above num_small_slots quanta all map to the last slot.
+    grain_t slot = (msize <= szone->num_small_slots) ? msize - 1 : szone->num_small_slots - 1;
+    free_list_t **free_list = small_mag_ptr->mag_free_list;
+    free_list_t **the_slot = free_list + slot;
+    free_list_t *next;
+    free_list_t **limit;
+    unsigned bitmap;
+    msize_t leftover_msize;
+    free_list_t *leftover_ptr;
+
+    // Assumes we've locked the region
+    CHECK_MAGAZINE_PTR_LOCKED(szone, small_mag_ptr, __PRETTY_FUNCTION__);
+
+    // Look for an exact match by checking the freelist for this msize.
+    //
+    ptr = *the_slot;
+    if (ptr) {
+        // Pop the head; clear the slot's bitmap bit if the list empties.
+        next = free_list_unchecksum_ptr(szone, &ptr->next);
+        if (next) {
+            next->previous = ptr->previous;
+        } else {
+            BITMAPN_CLR(small_mag_ptr->mag_bitmap, slot);
+        }
+        *the_slot = next;
+        this_msize = msize;
+        goto return_small_alloc;
+    }
+
+    // Mask off the bits representing slots holding free blocks smaller than
+    // the size we need.  If there are no larger free blocks, try allocating
+    // from the free space at the end of the small region.
+    if (szone->is_largemem) {
+        // BITMAPN_CTZ implementation
+        unsigned idx = slot >> 5;
+        bitmap = 0;
+        unsigned mask = ~ ((1 << (slot & 31)) - 1);
+        for ( ; idx < SMALL_BITMAP_WORDS; ++idx ) {
+            bitmap = small_mag_ptr->mag_bitmap[idx] & mask;
+            if (bitmap != 0)
+                break;
+            mask = ~0U;    // later words are scanned in full
+        }
+        // Check for fallthrough: No bits set in bitmap
+        if ((bitmap == 0) && (idx == SMALL_BITMAP_WORDS))
+            goto try_small_from_end;
+
+        // Start looking at the first set bit, plus 32 bits for every word of
+        // zeroes or entries that were too small.
+        slot = BITMAP32_CTZ((&bitmap)) + (idx * 32);
+    } else {
+        // Single-word bitmap in the non-largemem configuration.
+        bitmap = small_mag_ptr->mag_bitmap[0] & ~ ((1 << slot) - 1);
+        if (!bitmap)
+            goto try_small_from_end;
+
+        slot = BITMAP32_CTZ((&bitmap));
+    }
+    // FIXME: Explain use of - 1 here, last slot has special meaning
+    limit = free_list + szone->num_small_slots - 1;
+    free_list += slot;
+
+    if (free_list < limit) {
+        // Found a non-terminal slot with a larger block; pop its head.
+        ptr = *free_list;
+        if (ptr) {
+
+            next = free_list_unchecksum_ptr(szone, &ptr->next);
+            *free_list = next;
+            if (next) {
+                next->previous = ptr->previous;
+            } else {
+                BITMAPN_CLR(small_mag_ptr->mag_bitmap, slot);
+            }
+            this_msize = SMALL_PTR_SIZE(ptr);
+            goto add_leftover_and_proceed;
+        }
+#if DEBUG_MALLOC
+        malloc_printf("in small_malloc_from_free_list(), mag_bitmap out of sync, slot=%d\n",slot);
+#endif
+    }
+
+    // We are now looking at the last slot, which contains blocks equal to, or
+    // due to coalescing of free blocks, larger than (num_small_slots - 1) * (small quantum size).
+    // If the last freelist is not empty, and the head contains a block that is
+    // larger than our request, then the remainder is put back on the free list.
+    //
+    ptr = *limit;
+    if (ptr) {
+        this_msize = SMALL_PTR_SIZE(ptr);
+        next = free_list_unchecksum_ptr(szone, &ptr->next);
+        if (this_msize - msize >= szone->num_small_slots) {
+            // the leftover will go back to the free list, so we optimize by
+            // modifying the free list rather than a pop and push of the head
+            leftover_msize = this_msize - msize;
+            leftover_ptr = (free_list_t *)((unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(msize));
+            *limit = leftover_ptr;
+            if (next) {
+                next->previous.u = free_list_checksum_ptr(szone, leftover_ptr);
+            }
+            // The leftover inherits ptr's list links verbatim.
+            leftover_ptr->previous = ptr->previous;
+            leftover_ptr->next = ptr->next;
+            small_meta_header_set_is_free(SMALL_META_HEADER_FOR_PTR(leftover_ptr),
+                                          SMALL_META_INDEX_FOR_PTR(leftover_ptr), leftover_msize);
+            // Store msize at the end of the block denoted by "leftover_ptr" (i.e. at a negative offset from follower)
+            SMALL_PREVIOUS_MSIZE(FOLLOWING_SMALL_PTR(leftover_ptr, leftover_msize)) = leftover_msize; // Access is safe
+#if DEBUG_MALLOC
+            if (LOG(szone,ptr)) {
+                malloc_printf("in small_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
+            }
+#endif
+            this_msize = msize;
+            goto return_small_alloc;
+        }
+        // Leftover too small to stay on the last slot: pop the whole block
+        // and let add_leftover_and_proceed re-file the remainder.
+        if (next) {
+            next->previous = ptr->previous;
+        }
+        *limit = next;
+        goto add_leftover_and_proceed;
+    }
+
+try_small_from_end:
+    // Let's see if we can use small_mag_ptr->mag_bytes_free_at_end
+    if (small_mag_ptr->mag_bytes_free_at_end >= SMALL_BYTES_FOR_MSIZE(msize)) {
+        ptr = (free_list_t *)(SMALL_REGION_END(small_mag_ptr->mag_last_region) -
+                              small_mag_ptr->mag_bytes_free_at_end);
+        small_mag_ptr->mag_bytes_free_at_end -= SMALL_BYTES_FOR_MSIZE(msize);
+        if (small_mag_ptr->mag_bytes_free_at_end) {
+            // let's mark this block as in use to serve as boundary
+            small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr),
+                                         SMALL_META_INDEX_FOR_PTR((unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(msize)),
+                                         SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end));
+        }
+        this_msize = msize;
+        goto return_small_alloc;
+    }
+    return NULL;
+
+add_leftover_and_proceed:
+    // We hold a block of this_msize >= msize; return the surplus tail to
+    // the free lists.
+    if (this_msize > msize) {
+        leftover_msize = this_msize - msize;
+        leftover_ptr = (free_list_t *)((unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(msize));
+#if DEBUG_MALLOC
+        if (LOG(szone,ptr)) {
+            malloc_printf("in small_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
+        }
+#endif
+        small_free_list_add_ptr(szone, small_mag_ptr, leftover_ptr, leftover_msize);
+        this_msize = msize;
+    }
+
+return_small_alloc:
+    small_mag_ptr->mag_num_objects++;
+    small_mag_ptr->mag_num_bytes_in_objects += SMALL_BYTES_FOR_MSIZE(this_msize);
+
+    // Update this region's bytes in use count
+    region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr));
+    size_t bytes_used = node->bytes_used + SMALL_BYTES_FOR_MSIZE(this_msize);
+    node->bytes_used = bytes_used;
+
+    // Emptiness discriminant
+    if (bytes_used < DENSITY_THRESHOLD(SMALL_REGION_PAYLOAD_BYTES)) {
+        /* After this allocation the region is still sparse, so it must have been even more so before
+           the allocation. That implies the region is already correctly marked. Do nothing. */
+    } else {
+        /* Region has crossed threshold from sparsity to density. Mark in not "suitable" on the
+           recirculation candidates list. */
+        node->recirc_suitable = FALSE;
+    }
+#if DEBUG_MALLOC
+    if (LOG(szone,ptr)) {
+        malloc_printf("in small_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
+    }
+#endif
+    small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), SMALL_META_INDEX_FOR_PTR(ptr), this_msize);
+    return ptr;
+}
+// Scope these helper macros (defined earlier in the file) to the code above.
+#undef DENSITY_THRESHOLD
+#undef K
+
+// Top-level small allocation path.  Tries, in order: the magazine's one-slot
+// "last free" cache, the magazine's free lists, a region recycled from the
+// depot, and finally a fresh region.  If `cleared_requested` the returned
+// memory is zeroed (fresh regions are skipped — they are already pristine).
+// Takes and releases the per-thread magazine's lock.
+static INLINE void *
+small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested)
+{
+    void *ptr;
+    mag_index_t mag_index = mag_get_thread_index(szone);
+    magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]);
+
+    SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr);
+
+#if SMALL_CACHE
+    ptr = (void *)small_mag_ptr->mag_last_free;
+
+    // mag_last_free packs the block's msize into the low bits of the
+    // pointer; an exact low-bits match means a same-size cached block.
+    if ((((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) == msize) {
+        // we have a winner
+        small_mag_ptr->mag_last_free = NULL;
+        SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+        CHECK(szone, __PRETTY_FUNCTION__);
+        ptr = (void *)((uintptr_t)ptr & ~ (SMALL_QUANTUM - 1));
+        if (cleared_requested) {
+            memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
+        }
+        return ptr;
+    }
+#endif /* SMALL_CACHE */
+
+    ptr = small_malloc_from_free_list(szone, small_mag_ptr, mag_index, msize);
+    if (ptr) {
+        SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+        CHECK(szone, __PRETTY_FUNCTION__);
+        if (cleared_requested) {
+            memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
+        }
+        return ptr;
+    }
+
+    // Free lists empty: try to pull a partially-used region back from the
+    // depot into this magazine, then retry the free lists.
+    if (small_get_region_from_depot(szone, small_mag_ptr, mag_index)) {
+        ptr = small_malloc_from_free_list(szone, small_mag_ptr, mag_index, msize);
+        if (ptr) {
+            SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+            CHECK(szone, __PRETTY_FUNCTION__);
+            if (cleared_requested) {
+                memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
+            }
+            return ptr;
+        }
+    }
+
+    ptr = small_malloc_from_region_no_lock(szone, small_mag_ptr, mag_index, msize);
+    // we don't clear because this freshly allocated space is pristine
+    SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+    CHECK(szone, __PRETTY_FUNCTION__);
+    return ptr;
+}
+
+// Cold error path for free_small(): report a double free after releasing
+// the magazine lock for ptr's region.  NOINLINE keeps it out of the
+// free_small() fast path.
+// NOTE(review): one call site in free_small() runs before the magazine lock
+// is taken, yet this unconditionally unlocks — confirm the lock primitive
+// tolerates that, or that the lock is held on all paths.
+static NOINLINE void
+free_small_botch(szone_t *szone, free_list_t *ptr)
+{
+    mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr));
+    magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]);
+    SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+    szone_error(szone, 1, "double free", ptr, NULL);
+}
+
+// Free a small block.  With SMALL_CACHE enabled, the freed block is parked
+// in the magazine's one-slot "last free" cache and the previously cached
+// block (if any) is the one actually returned to the free lists — a
+// deferred-free scheme that also catches immediate double frees.
+// `known_size`, when non-zero, lets the caller skip the metadata lookup.
+static INLINE void
+free_small(szone_t *szone, void *ptr, region_t small_region, size_t known_size)
+{
+    msize_t msize;
+    mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr));
+    magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]);
+
+    // ptr is known to be in small_region
+    if (known_size) {
+        // Round the byte count up to whole quanta.
+        msize = SMALL_MSIZE_FOR_BYTES(known_size + SMALL_QUANTUM - 1);
+    } else {
+        msize = SMALL_PTR_SIZE(ptr);
+        if (SMALL_PTR_IS_FREE(ptr)) {
+            free_small_botch(szone, ptr);
+            return;
+        }
+    }
+
+    SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr);
+
+#if SMALL_CACHE
+    // Depot does not participate in SMALL_CACHE since it can't be directly malloc()'d
+    if (DEPOT_MAGAZINE_INDEX != mag_index) {
+
+        void *ptr2 = small_mag_ptr->mag_last_free; // Might be NULL
+        region_t rgn2 = small_mag_ptr->mag_last_free_rgn;
+
+        /* check that we don't already have this pointer in the cache */
+        if (ptr == (void *)((uintptr_t)ptr2 & ~ (SMALL_QUANTUM - 1))) {
+            free_small_botch(szone, ptr);
+            return;
+        }
+
+        if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize)
+            memset(ptr, 0x55, SMALL_BYTES_FOR_MSIZE(msize));
+
+        // Park the newly freed block: pointer with msize packed in low bits.
+        small_mag_ptr->mag_last_free = (void *)(((uintptr_t)ptr) | msize);
+        small_mag_ptr->mag_last_free_rgn = small_region;
+
+        if (!ptr2) {
+            // Cache slot was empty — nothing further to release this time.
+            SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+            CHECK(szone, __PRETTY_FUNCTION__);
+            return;
+        }
+
+        // Evicted cache entry becomes the block we actually free below.
+        msize = (uintptr_t)ptr2 & (SMALL_QUANTUM - 1);
+        ptr = (void *)(((uintptr_t)ptr2) & ~(SMALL_QUANTUM - 1));
+        small_region = rgn2;
+    }
+#endif /* SMALL_CACHE */
+
+    // Now in the time it took to acquire the lock, the region may have migrated
+    // from one magazine to another. I.e. trailer->mag_index is volatile.
+    // In which case the magazine lock we obtained (namely magazines[mag_index].mag_lock)
+    // is stale. If so, keep on tryin' ...
+    region_trailer_t *trailer = REGION_TRAILER_FOR_SMALL_REGION(small_region);
+    mag_index_t refreshed_index;
+
+    while (mag_index != (refreshed_index = trailer->mag_index)) { // Note assignment
+
+        SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+
+        mag_index = refreshed_index;
+        small_mag_ptr = &(szone->small_magazines[mag_index]);
+        SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr);
+    }
+
+    small_free_no_lock(szone, small_mag_ptr, mag_index, small_region, ptr, msize);
+    SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+    CHECK(szone, __PRETTY_FUNCTION__);
+}
+
+// Debug helper: print, per magazine, a summary of every non-empty small
+// free-list slot as "size[count]".  The loop starts at mag_index -1,
+// which presumably addresses the depot magazine (matches the
+// DEPOT_MAGAZINE_INDEX usage elsewhere — confirm).
+static void
+print_small_free_list(szone_t *szone)
+{
+    free_list_t *ptr;
+    _SIMPLE_STRING b = _simple_salloc();
+    mag_index_t mag_index;
+
+    if (b) {
+        _simple_sappend(b, "small free sizes:\n");
+        for (mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) {
+            grain_t slot = 0;
+            _simple_sprintf(b,"\tMagazine %d: ", mag_index);
+            while (slot < szone->num_small_slots) {
+                ptr = szone->small_magazines[mag_index].mag_free_list[slot];
+                if (ptr) {
+                    // The last slot aggregates all sizes >= its threshold.
+                    _simple_sprintf(b, "%s%y[%d]; ", (slot == szone->num_small_slots-1) ? ">=" : "",
+                                    (slot + 1) * SMALL_QUANTUM, free_list_count(szone, ptr));
+                }
+                slot++;
+            }
+            _simple_sappend(b,"\n");
+        }
+        _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
+        _simple_sfree(b);
+    }
+}
+
+// Debug helper: walk a small region's blocks and print a summary —
+// allocations in use, bytes in use, untouched tail bytes, and the total
+// page span inside free blocks that could be (or, for the depot, has been)
+// returned to the OS.  With `verbose`, also prints a size histogram of
+// in-use blocks (sizes below 1024 quanta only — the bound of counts[]).
+static void
+print_small_region(szone_t *szone, boolean_t verbose, region_t region, size_t bytes_at_end)
+{
+    unsigned counts[1024];
+    unsigned in_use = 0;
+    uintptr_t start = (uintptr_t)SMALL_REGION_ADDRESS(region);
+    uintptr_t current = start;
+    uintptr_t limit = (uintptr_t)SMALL_REGION_END(region) - bytes_at_end;
+    msize_t msize_and_free;
+    msize_t msize;
+    unsigned ci;
+    _SIMPLE_STRING b;
+    uintptr_t pgTot = 0;
+
+    if (region == HASHRING_REGION_DEALLOCATED) {
+        if ((b = _simple_salloc()) != NULL) {
+            _simple_sprintf(b, "Small region [unknown address] was returned to the OS\n");
+            _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
+            _simple_sfree(b);
+        }
+        return;
+    }
+
+    memset(counts, 0, sizeof(counts));
+    while (current < limit) {
+        msize_and_free = *SMALL_METADATA_FOR_PTR(current);
+        msize = msize_and_free & ~ SMALL_IS_FREE;
+        if (!msize) {
+            malloc_printf("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize);
+            break;
+        }
+        if (!(msize_and_free & SMALL_IS_FREE)) {
+            // block in use
+            if (msize < 1024)
+                counts[msize]++;
+            in_use++;
+        } else {
+            // Free block: count the whole pages strictly inside it (past the
+            // free-list links at the front and the trailing msize word).
+            uintptr_t pgLo = round_page(current + sizeof(free_list_t) + sizeof(msize_t));
+            // FIX: was TINY_BYTES_FOR_MSIZE — a copy-paste from the tiny
+            // allocator's version of this routine.  This is the small
+            // allocator; every other line here uses the SMALL_* macros, and
+            // TINY's smaller quantum made pgHi (and thus pgTot) too small.
+            uintptr_t pgHi = trunc_page(current + SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
+
+            if (pgLo < pgHi) {
+                pgTot += (pgHi - pgLo);
+            }
+        }
+        current += SMALL_BYTES_FOR_MSIZE(msize);
+    }
+    if ((b = _simple_salloc()) != NULL) {
+        _simple_sprintf(b, "Small region [%p-%p, %y] \t", (void *)start, SMALL_REGION_END(region), (int)SMALL_REGION_SIZE);
+        _simple_sprintf(b, "Magazine=%d \t", MAGAZINE_INDEX_FOR_SMALL_REGION(region));
+        _simple_sprintf(b, "Allocations in use=%d \t Bytes in use=%ly \t", in_use, BYTES_USED_FOR_SMALL_REGION(region));
+        if (bytes_at_end)
+            _simple_sprintf(b, "Untouched=%ly ", bytes_at_end);
+        if (DEPOT_MAGAZINE_INDEX == MAGAZINE_INDEX_FOR_SMALL_REGION(region)) {
+            _simple_sprintf(b, "Advised MADV_FREE=%ly", pgTot);
+        } else {
+            _simple_sprintf(b, "Fragments subject to reclamation=%ly", pgTot);
+        }
+        if (verbose && in_use) {
+            _simple_sappend(b, "\n\tSizes in use: ");
+            for (ci = 0; ci < 1024; ci++)
+                if (counts[ci])
+                    _simple_sprintf(b, "%d[%d] ", SMALL_BYTES_FOR_MSIZE(ci), counts[ci]);
+        }
+        _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
+        _simple_sfree(b);
+    }
+}
+
+// Consistency check: for the given free-list slot, walk that slot's list in
+// every magazine and verify each entry is marked free, quantum-aligned,
+// belongs to a known small region, and has a back-link matching the node we
+// arrived from.  Returns 1 if all lists are consistent, 0 (after printing a
+// diagnostic and unlocking) on the first violation.  Locks each magazine
+// around its walk.
+static boolean_t
+small_free_list_check(szone_t *szone, grain_t slot)
+{
+    mag_index_t mag_index;
+
+    // Starts at -1, same magazine-iteration convention as
+    // print_small_free_list() above.
+    for (mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) {
+        magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]);
+        SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr);
+
+        unsigned count = 0;
+        free_list_t *ptr = szone->small_magazines[mag_index].mag_free_list[slot];
+        msize_t msize_and_free;
+        free_list_t *previous = NULL;
+
+        while (ptr) {
+            msize_and_free = *SMALL_METADATA_FOR_PTR(ptr);
+            if (!(msize_and_free & SMALL_IS_FREE)) {
+                malloc_printf("*** in-use ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
+                SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+                return 0;
+            }
+            if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) {
+                malloc_printf("*** unaligned ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
+                SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+                return 0;
+            }
+            if (!small_region_for_ptr_no_lock(szone, ptr)) {
+                malloc_printf("*** ptr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr);
+                SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+                return 0;
+            }
+            // The stored back-pointer is checksummed; decode before comparing.
+            if (free_list_unchecksum_ptr(szone, &ptr->previous) != previous) {
+                malloc_printf("*** previous incorrectly set slot=%d count=%d ptr=%p\n", slot, count, ptr);
+                SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+                return 0;
+            }
+            previous = ptr;
+            ptr = free_list_unchecksum_ptr(szone, &ptr->next);
+            count++;
+        }
+
+        SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+    }
+    return 1;
+}
+
+/*******************************************************************************
+ * Large allocator implementation
+ ******************************************************************************/
+#pragma mark large allocator
+
+#if DEBUG_MALLOC
+
+// Debug helper: dump every live entry of the large-allocation hash table as
+// "index: address(size); " on one line.
+static void
+large_debug_print(szone_t *szone)
+{
+    unsigned index;
+    large_entry_t *range;
+    _SIMPLE_STRING b = _simple_salloc();
+
+    if (b) {
+        for (index = 0, range = szone->large_entries; index < szone->num_large_entries; index++, range++)
+            if (range->address)    // address 0 marks an empty slot
+                _simple_sprintf(b, "%d: %p(%y); ", index, range->address, range->size);
+
+        _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
+        _simple_sfree(b);
+    }
+}
+#endif
+
+/*
+ * Scan the hash ring looking for an entry for the given pointer.
+ * Returns NULL if ptr has no entry (or the table is empty).
+ */
+static large_entry_t *
+large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr)
+{
+    // result only valid with lock held
+    unsigned table_size = szone->num_large_entries;
+    unsigned slot;
+    unsigned probes;
+    large_entry_t *candidate;
+
+    if (!table_size)
+        return NULL;
+
+    // Open addressing with linear probing; the home slot is the hash of
+    // ptr's page number.
+    slot = ((uintptr_t)ptr >> vm_page_shift) % table_size;
+
+    for (probes = 0; probes < table_size; ++probes) {
+        candidate = szone->large_entries + slot;
+        if (candidate->address == (vm_address_t)ptr)
+            return candidate;
+        if (0 == candidate->address)
+            return NULL; // an empty slot terminates the collision chain
+        if (++slot == table_size)
+            slot = 0; // wrap around the ring
+    }
+
+    return NULL; // probed the entire ring without finding ptr
+}
+
+// Insert `range` into the large-allocation hash table: linear-probe from
+// the home slot of range.address's page number and claim the first empty
+// slot.  The caller guarantees the table has room.
+static void
+large_entry_insert_no_lock(szone_t *szone, large_entry_t range)
+{
+    unsigned table_size = szone->num_large_entries;
+    unsigned slot = (((uintptr_t)(range.address)) >> vm_page_shift) % table_size;
+    unsigned probes;
+    large_entry_t *candidate;
+
+    // assert(szone->num_large_objects_in_use < szone->num_large_entries); /* must be called with room to spare */
+
+    for (probes = 0; probes < table_size; ++probes) {
+        candidate = szone->large_entries + slot;
+        if (0 == candidate->address) {
+            *candidate = range;
+            return; // end of chain
+        }
+        if (++slot == table_size)
+            slot = 0; // wrap around the ring
+    }
+
+    // assert(0); /* must not fallthrough! */
+}
+
+// FIXME: can't we simply swap the (now empty) entry with the last entry on the collision chain for this hash slot?
+// After *entry has been cleared, re-insert every entry in the collision run
+// that follows it.  Required by linear probing: the cleared slot would
+// otherwise falsely terminate lookups for entries hashed before it.
+static INLINE void
+large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry)
+{
+    unsigned num_large_entries = szone->num_large_entries;
+    unsigned hash_index = entry - szone->large_entries;
+    unsigned index = hash_index;
+    large_entry_t range;
+
+    // assert(entry->address == 0) /* caller must have cleared *entry */
+
+    do {
+        index++;
+        if (index == num_large_entries)
+            index = 0;
+        range = szone->large_entries[index];
+        if (0 == range.address)
+            return;    // reached the end of the collision run
+        // Vacate the slot, then let the normal insert find the entry's
+        // correct position (possibly the slot just cleared).
+        szone->large_entries[index].address = (vm_address_t)0;
+        szone->large_entries[index].size = 0;
+        szone->large_entries[index].did_madvise_reusable = FALSE;
+        large_entry_insert_no_lock(szone, range); // this will reinsert in the
+                                                  // proper place
+    } while (index != hash_index);
+
+    // assert(0); /* since entry->address == 0, must not fallthrough! */
+}
+
+// FIXME: num should probably be a size_t, since you can theoretically allocate
+// more than 2^32-1 large_threshold objects in 64 bit.
+// Allocate page-rounded storage for a hash table of `num` large entries.
+static INLINE large_entry_t *
+large_entries_alloc_no_lock(szone_t *szone, unsigned num)
+{
+    size_t size = num * sizeof(large_entry_t);
+
+    // Note that we allocate memory (via a system call) under a spin lock
+    // That is certainly evil, however it's very rare in the lifetime of a process
+    // The alternative would slow down the normal case
+    return allocate_pages(szone, round_page(size), 0, 0, VM_MEMORY_MALLOC_LARGE);
+}
+
+// Despite the name, this does not deallocate: it fills *range_to_deallocate
+// with the page-rounded span of the entries table so the CALLER can release
+// it after dropping the lock.
+static void
+large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate)
+{
+    size_t size = num * sizeof(large_entry_t);
+
+    range_to_deallocate->address = (vm_address_t)entries;
+    range_to_deallocate->size = round_page(size);
+}
+
+// Grow the large-entry hash table (roughly doubling it), rehash every live
+// entry into the new table, and report the old table's span through
+// *range_to_deallocate for the caller to free outside the lock.
+// Returns the new table, or NULL (state unchanged) on allocation failure.
+static large_entry_t *
+large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate)
+{
+    // sets range_to_deallocate
+    unsigned old_num_entries = szone->num_large_entries;
+    large_entry_t *old_entries = szone->large_entries;
+    // always an odd number for good hashing
+    unsigned new_num_entries = (old_num_entries) ? old_num_entries * 2 + 1 :
+        ((vm_page_size / sizeof(large_entry_t)) - 1);
+    large_entry_t *new_entries = large_entries_alloc_no_lock(szone, new_num_entries);
+    unsigned index = old_num_entries;
+    large_entry_t oldRange;
+
+    // if the allocation of new entries failed, bail
+    if (new_entries == NULL)
+        return NULL;
+
+    // Install the new table first so large_entry_insert_no_lock() below
+    // inserts into it.
+    szone->num_large_entries = new_num_entries;
+    szone->large_entries = new_entries;
+
+    /* rehash entries into the new list */
+    while (index--) {
+        oldRange = old_entries[index];
+        if (oldRange.address) {
+            large_entry_insert_no_lock(szone, oldRange);
+        }
+    }
+
+    if (old_entries) {
+        large_entries_free_no_lock(szone, old_entries, old_num_entries, range_to_deallocate);
+    } else {
+        // First-ever table: nothing to give back.
+        range_to_deallocate->address = (vm_address_t)0;
+        range_to_deallocate->size = 0;
+    }
+
+    return new_entries;
+}
+
+// frees the specific entry in the size table
+// returns a range to truly deallocate
+// Clears *entry, repairs the probe chain after it, and returns the VM range
+// the caller should unmap (widened by one page on each side when guard
+// pages are enabled, after making those pages writable again).
+static vm_range_t
+large_entry_free_no_lock(szone_t *szone, large_entry_t *entry)
+{
+    vm_range_t range;
+
+    range.address = entry->address;
+    range.size = entry->size;
+
+    if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
+        protect((void *)range.address, range.size, VM_PROT_READ | VM_PROT_WRITE, szone->debug_flags);
+        range.address -= vm_page_size;
+        range.size += 2 * vm_page_size;
+    }
+
+    entry->address = 0;
+    entry->size = 0;
+    entry->did_madvise_reusable = FALSE;
+    // Linear-probing invariant: entries after the cleared slot must be
+    // rehashed or later lookups would stop at the new hole.
+    large_entries_rehash_after_entry_no_lock(szone, entry);
+
+#if DEBUG_MALLOC
+    if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) {
+        malloc_printf("*** freed entry %p still in use; num_large_entries=%d\n",
+                      range.address, szone->num_large_entries);
+        large_debug_print(szone);
+        szone_sleep();
+    }
+#endif
+    return range;
+}
+
+// Enumerate a (possibly remote) task's large allocations for the zone
+// introspection API: reads the large-entry table via `reader`, reports the
+// table itself as an admin range and every live entry as an in-use/region
+// range, batching through a fixed-size buffer.  Returns 0 or the error
+// from `reader`.
+static NOINLINE kern_return_t
+large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address,
+                        unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder)
+{
+    unsigned index = 0;
+    vm_range_t buffer[MAX_RECORDER_BUFFER];
+    unsigned count = 0;
+    large_entry_t *entries;
+    kern_return_t err;
+    vm_range_t range;
+    large_entry_t entry;
+
+    err = reader(task, large_entries_address, sizeof(large_entry_t) * num_entries, (void **)&entries);
+    if (err)
+        return err;
+
+    index = num_entries;
+    if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
+        // The hash table itself is allocator bookkeeping.
+        range.address = large_entries_address;
+        range.size = round_page(num_entries * sizeof(large_entry_t));
+        recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &range, 1);
+    }
+    if (type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE)) {
+        while (index--) {
+            entry = entries[index];
+            if (entry.address) {    // address 0 marks an empty slot
+                range.address = entry.address;
+                range.size = entry.size;
+                buffer[count++] = range;
+                if (count >= MAX_RECORDER_BUFFER) {
+                    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE,
+                             buffer, count);
+                    count = 0;
+                }
+            }
+        }
+    }
+    if (count) {
+        // Flush the final partial batch.
+        recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE,
+                 buffer, count);
+    }
+    return 0;
+}
+
+// Allocate `num_pages` pages (minimum 1) as a "large" allocation.  With
+// LARGE_CACHE, first best-fit search the death-row ring of recently freed
+// large blocks (accepting at most 50% internal fragmentation); otherwise —
+// or on madvise-reuse failure — fall through to fresh pages from the VM
+// system.  `cleared_requested` zeroes recycled memory; fresh pages are
+// assumed already zeroed by the VM system (not re-cleared here).
+static void *
+large_malloc(szone_t *szone, size_t num_pages, unsigned char alignment,
+             boolean_t cleared_requested)
+{
+    void *addr;
+    vm_range_t range_to_deallocate;
+    size_t size;
+    large_entry_t large_entry;
+
+    if (!num_pages)
+        num_pages = 1; // minimal allocation size for this szone
+    size = (size_t)num_pages << vm_page_shift;
+    range_to_deallocate.size = 0;
+    range_to_deallocate.address = 0;
+
+#if LARGE_CACHE
+    if (size < LARGE_CACHE_SIZE_ENTRY_LIMIT) { // Look for a large_entry_t on the death-row cache?
+        SZONE_LOCK(szone);
+
+        int i, best = -1, idx = szone->large_entry_cache_newest, stop_idx = szone->large_entry_cache_oldest;
+        size_t best_size = SIZE_T_MAX;
+
+        while (1) { // Scan large_entry_cache for best fit, starting with most recent entry
+            size_t this_size = szone->large_entry_cache[idx].size;
+
+            if (size == this_size) { // size match!
+                best = idx;
+                best_size = this_size;
+                break;
+            }
+
+            if (size <= this_size && this_size < best_size) { // improved fit?
+                best = idx;
+                best_size = this_size;
+            }
+
+            if (idx == stop_idx) // exhausted live ring?
+                break;
+
+            if (idx)
+                idx--; // bump idx down
+            else
+                idx = LARGE_ENTRY_CACHE_SIZE - 1; // wrap idx
+        }
+
+        if (best > -1 && (best_size - size) < size) { //limit fragmentation to 50%
+            addr = (void *)szone->large_entry_cache[best].address;
+            boolean_t was_madvised_reusable = szone->large_entry_cache[best].did_madvise_reusable;
+
+            // Compact live ring to fill entry now vacated at large_entry_cache[best]
+            // while preserving time-order
+            if (szone->large_entry_cache_oldest < szone->large_entry_cache_newest) {
+
+                // Ring hasn't wrapped. Fill in from right.
+                for (i = best; i < szone->large_entry_cache_newest; ++i)
+                    szone->large_entry_cache[i] = szone->large_entry_cache[i + 1];
+
+                szone->large_entry_cache_newest--; // Pull in right endpoint.
+
+            } else if (szone->large_entry_cache_newest < szone->large_entry_cache_oldest) {
+
+                // Ring has wrapped. Arrange to fill in from the contiguous side.
+                if (best <= szone->large_entry_cache_newest) {
+                    // Fill from right.
+                    for (i = best; i < szone->large_entry_cache_newest; ++i)
+                        szone->large_entry_cache[i] = szone->large_entry_cache[i + 1];
+
+                    if (0 < szone->large_entry_cache_newest)
+                        szone->large_entry_cache_newest--;
+                    else
+                        szone->large_entry_cache_newest = LARGE_ENTRY_CACHE_SIZE - 1;
+                } else {
+                    // Fill from left.
+                    for ( i = best; i > szone->large_entry_cache_oldest; --i)
+                        szone->large_entry_cache[i] = szone->large_entry_cache[i - 1];
+
+                    if (szone->large_entry_cache_oldest < LARGE_ENTRY_CACHE_SIZE - 1)
+                        szone->large_entry_cache_oldest++;
+                    else
+                        szone->large_entry_cache_oldest = 0;
+                }
+
+            } else {
+                // By trichotomy, large_entry_cache_newest == large_entry_cache_oldest.
+                // That implies best == large_entry_cache_newest == large_entry_cache_oldest
+                // and the ring is now empty.
+                szone->large_entry_cache[best].address = 0;
+                szone->large_entry_cache[best].size = 0;
+                szone->large_entry_cache[best].did_madvise_reusable = FALSE;
+            }
+
+            // Keep the hash table under 25% load before inserting.
+            if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) {
+                // density of hash table too high; grow table
+                // we do that under lock to avoid a race
+                large_entry_t *entries = large_entries_grow_no_lock(szone, &range_to_deallocate);
+                if (entries == NULL) {
+                    SZONE_UNLOCK(szone);
+                    return NULL;
+                }
+            }
+
+            // Note: the recycled block keeps its cached size (best_size),
+            // which may exceed the requested size.
+            large_entry.address = (vm_address_t)addr;
+            large_entry.size = best_size;
+            large_entry.did_madvise_reusable = FALSE;
+            large_entry_insert_no_lock(szone, large_entry);
+
+            szone->num_large_objects_in_use ++;
+            szone->num_bytes_in_large_objects += best_size;
+            if (!was_madvised_reusable)
+                szone->large_entry_cache_hoard_bytes -= best_size;
+            SZONE_UNLOCK(szone);
+
+            if (range_to_deallocate.size) {
+                // we deallocate outside the lock
+                deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0);
+            }
+
+            // Perform the madvise() outside the lock.
+            // Typically the madvise() is successful and we'll quickly return from this routine.
+            // In the unusual case of failure, reacquire the lock to unwind.
+            if (was_madvised_reusable && -1 == madvise(addr, size, MADV_FREE_REUSE)) {
+                /* -1 return: VM map entry change makes this unfit for reuse. */
+#if DEBUG_MALLOC
+                szone_error(szone, 1, "large_malloc madvise(..., MADV_FREE_REUSE) failed", addr, NULL);
+#endif
+
+                SZONE_LOCK(szone);
+                szone->num_large_objects_in_use--;
+                szone->num_bytes_in_large_objects -= large_entry.size;
+
+                // Re-acquire "entry" after interval just above where we let go the lock.
+                large_entry_t *entry = large_entry_for_pointer_no_lock(szone, addr);
+                if (NULL == entry) {
+                    szone_error(szone, 1, "entry for pointer being discarded from death-row vanished", addr, NULL);
+                    SZONE_UNLOCK(szone);
+                } else {
+
+                    range_to_deallocate = large_entry_free_no_lock(szone, entry);
+                    SZONE_UNLOCK(szone);
+
+                    if (range_to_deallocate.size) {
+                        // we deallocate outside the lock
+                        deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0);
+                    }
+                }
+                /* Fall through to allocate_pages() afresh. */
+            } else {
+                if (cleared_requested) {
+                    memset(addr, 0, size);
+                }
+
+                return addr;
+            }
+        } else {
+            SZONE_UNLOCK(szone);
+        }
+    }
+
+    // Reset: may have been consumed by the unwind path above.
+    range_to_deallocate.size = 0;
+    range_to_deallocate.address = 0;
+#endif /* LARGE_CACHE */
+
+    addr = allocate_pages(szone, size, alignment, szone->debug_flags, VM_MEMORY_MALLOC_LARGE);
+    if (addr == NULL) {
+        return NULL;
+    }
+
+    SZONE_LOCK(szone);
+    if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) {
+        // density of hash table too high; grow table
+        // we do that under lock to avoid a race
+        large_entry_t *entries = large_entries_grow_no_lock(szone, &range_to_deallocate);
+        if (entries == NULL) {
+            SZONE_UNLOCK(szone);
+            return NULL;
+        }
+    }
+
+    large_entry.address = (vm_address_t)addr;
+    large_entry.size = size;
+    large_entry.did_madvise_reusable = FALSE;
+    large_entry_insert_no_lock(szone, large_entry);
+
+    szone->num_large_objects_in_use ++;
+    szone->num_bytes_in_large_objects += size;
+    SZONE_UNLOCK(szone);
+
+    if (range_to_deallocate.size) {
+        // we deallocate outside the lock
+        deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0);
+    }
+    return addr;
+}
+
+/*
+ * free_large: release a large allocation back to the zone.
+ *
+ * Caller guarantees ptr is page-aligned and not a tiny/small block.
+ * With LARGE_CACHE, entries below LARGE_CACHE_SIZE_ENTRY_LIMIT that the
+ * kernel accepts for MADV_CAN_REUSE are parked on the zone's death-row
+ * ring (large_entry_cache) for later reuse by large_malloc, possibly
+ * evicting (and deallocating) the oldest cached entry. Otherwise the
+ * entry is removed from the large-entry hash and its pages are returned
+ * to the VM system outside the zone lock. Errors (double free, unknown
+ * pointer) are reported via szone_error().
+ */
+static NOINLINE void
+free_large(szone_t *szone, void *ptr)
+{
+	// We have established ptr is page-aligned and neither tiny nor small
+	large_entry_t *entry;
+	vm_range_t vm_range_to_deallocate;
+
+	SZONE_LOCK(szone);
+	entry = large_entry_for_pointer_no_lock(szone, ptr);
+	if (entry) {
+#if LARGE_CACHE
+#ifndef MADV_CAN_REUSE
+#define MADV_CAN_REUSE 9 /* per Francois, for testing until xnu is resubmitted to B&I */
+#endif
+	if (entry->size < LARGE_CACHE_SIZE_ENTRY_LIMIT &&
+	    -1 != madvise((void *)(entry->address), entry->size, MADV_CAN_REUSE)) { // Put the large_entry_t on the death-row cache?
+	    int idx = szone->large_entry_cache_newest, stop_idx = szone->large_entry_cache_oldest;
+	    large_entry_t this_entry = *entry; // Make a local copy, "entry" is volatile when lock is let go.
+	    boolean_t reusable = TRUE;
+	    boolean_t should_madvise = szone->large_entry_cache_hoard_bytes + this_entry.size > szone->large_entry_cache_hoard_lmit;
+
+	    // Already freed?
+	    // [Note that repeated entries in death-row risk vending the same entry subsequently
+	    // to two different malloc() calls. By checking here the (illegal) double free
+	    // is accommodated, matching the behavior of the previous implementation.]
+	    while (1) { // Scan large_entry_cache starting with most recent entry
+		if (szone->large_entry_cache[idx].address == entry->address) {
+		    szone_error(szone, 1, "pointer being freed already on death-row", ptr, NULL);
+		    SZONE_UNLOCK(szone);
+		    return;
+		}
+
+		if (idx == stop_idx) // exhausted live ring?
+		    break;
+
+		if (idx)
+		    idx--; // bump idx down
+		else
+		    idx = LARGE_ENTRY_CACHE_SIZE - 1; // wrap idx
+	    }
+
+	    // Drop the zone lock for the (potentially slow) VM calls below;
+	    // "entry" must be re-looked-up afterwards.
+	    SZONE_UNLOCK(szone);
+
+	    if (szone->debug_flags & SCALABLE_MALLOC_PURGEABLE) { // Are we a purgable zone?
+		int state = VM_PURGABLE_NONVOLATILE; // restore to default condition
+
+		if (KERN_SUCCESS != vm_purgable_control(mach_task_self(), this_entry.address, VM_PURGABLE_SET_STATE, &state)) {
+		    malloc_printf("*** can't vm_purgable_control(..., VM_PURGABLE_SET_STATE) for large freed block at %p\n", this_entry.address);
+		    reusable = FALSE;
+		}
+	    }
+
+	    if (szone->large_legacy_reset_mprotect) { // Linked for Leopard?
+		// Accommodate Leopard apps that (illegally) mprotect() their own guard pages on large malloc'd allocations
+		kern_return_t err = vm_protect(mach_task_self(), (vm_address_t)(this_entry.address), this_entry.size,
+				    0, PROT_READ | PROT_WRITE);
+		if (err) {
+		    malloc_printf("*** can't reset protection for large freed block at %p\n", this_entry.address);
+		    reusable = FALSE;
+		}
+	    }
+
+	    // madvise(..., MADV_REUSABLE) death-row arrivals if hoarding would exceed large_entry_cache_hoard_lmit
+	    if (should_madvise) {
+		// Issue madvise to avoid paging out the dirtied free()'d pages in "entry"
+		MAGMALLOC_MADVFREEREGION((void *)szone, (void *)0, (void *)(this_entry.address), this_entry.size); // DTrace USDT Probe
+
+		if (-1 == madvise((void *)(this_entry.address), this_entry.size, MADV_FREE_REUSABLE)) {
+		    /* -1 return: VM map entry change makes this unfit for reuse. */
+#if DEBUG_MALLOC
+		    szone_error(szone, 1, "free_large madvise(..., MADV_FREE_REUSABLE) failed", (void *)this_entry.address, NULL);
+#endif
+		    reusable = FALSE;
+		}
+	    }
+
+	    SZONE_LOCK(szone);
+
+	    // Re-acquire "entry" after interval just above where we let go the lock.
+	    entry = large_entry_for_pointer_no_lock(szone, ptr);
+	    if (NULL == entry) {
+		szone_error(szone, 1, "entry for pointer being freed from death-row vanished", ptr, NULL);
+		SZONE_UNLOCK(szone);
+		return;
+	    }
+
+	    // Add "entry" to death-row ring
+	    if (reusable) {
+		int idx = szone->large_entry_cache_newest; // Most recently occupied
+		vm_address_t addr;
+		size_t adjsize;
+
+		if (szone->large_entry_cache_newest == szone->large_entry_cache_oldest &&
+		    0 == szone->large_entry_cache[idx].address) {
+		    // Ring is empty, idx is good as it stands
+		    addr = 0;
+		    adjsize = 0;
+		} else {
+		    // Extend the queue to the "right" by bumping up large_entry_cache_newest
+		    if (idx == LARGE_ENTRY_CACHE_SIZE - 1)
+			idx = 0; // Wrap index
+		    else
+			idx++; // Bump index
+
+		    if (idx == szone->large_entry_cache_oldest) { // Fully occupied
+			// Drop this entry from the cache and deallocate the VM
+			addr = szone->large_entry_cache[idx].address;
+			adjsize = szone->large_entry_cache[idx].size;
+			// Evicted entry that was never madvised still counts
+			// against the hoard total; subtract it back out.
+			if (!szone->large_entry_cache[idx].did_madvise_reusable)
+			    szone->large_entry_cache_hoard_bytes -= adjsize;
+		    } else {
+			// Using an unoccupied cache slot
+			addr = 0;
+			adjsize = 0;
+		    }
+		}
+
+		if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE))
+		    memset((void *)(entry->address), 0x55, entry->size);
+
+		entry->did_madvise_reusable = should_madvise; // Was madvise()'d above?
+		if (!should_madvise) // Entered on death-row without madvise() => up the hoard total
+		    szone->large_entry_cache_hoard_bytes += entry->size;
+
+		szone->large_entry_cache[idx] = *entry;
+		szone->large_entry_cache_newest = idx;
+
+		szone->num_large_objects_in_use--;
+		szone->num_bytes_in_large_objects -= entry->size;
+
+		(void)large_entry_free_no_lock(szone, entry);
+
+		if (0 == addr) {
+		    SZONE_UNLOCK(szone);
+		    return;
+		}
+
+		// Fall through to drop large_entry_cache_oldest from the cache,
+		// and then deallocate its pages.
+
+		// Trim the queue on the "left" by bumping up large_entry_cache_oldest
+		if (szone->large_entry_cache_oldest == LARGE_ENTRY_CACHE_SIZE - 1)
+		    szone->large_entry_cache_oldest = 0;
+		else
+		    szone->large_entry_cache_oldest++;
+
+		// we deallocate_pages, including guard pages, outside the lock
+		SZONE_UNLOCK(szone);
+		deallocate_pages(szone, (void *)addr, (size_t)adjsize, 0);
+		return;
+	    } else {
+		/* fall through to discard an allocation that is not reusable */
+	    }
+	}
+#endif /* LARGE_CACHE */
+
+	szone->num_large_objects_in_use--;
+	szone->num_bytes_in_large_objects -= entry->size;
+
+	vm_range_to_deallocate = large_entry_free_no_lock(szone, entry);
+    } else {
+#if DEBUG_MALLOC
+	large_debug_print(szone);
+#endif
+	szone_error(szone, 1, "pointer being freed was not allocated", ptr, NULL);
+	SZONE_UNLOCK(szone);
+	return;
+    }
+    SZONE_UNLOCK(szone); // we release the lock asap
+    CHECK(szone, __PRETTY_FUNCTION__);
+
+    // we deallocate_pages, including guard pages, outside the lock
+    if (vm_range_to_deallocate.address) {
+#if DEBUG_MALLOC
+	// FIXME: large_entry_for_pointer_no_lock() needs the lock held ...
+	if (large_entry_for_pointer_no_lock(szone, (void *)vm_range_to_deallocate.address)) {
+	    malloc_printf("*** invariant broken: %p still in use num_large_entries=%d\n",
+			  vm_range_to_deallocate.address, szone->num_large_entries);
+	    large_debug_print(szone);
+	    szone_sleep();
+	}
+#endif
+	deallocate_pages(szone, (void *)vm_range_to_deallocate.address, (size_t)vm_range_to_deallocate.size, 0);
+    }
+}
+
+/*
+ * large_try_realloc_in_place: attempt to grow a large allocation in place.
+ *
+ * Tries to vm_allocate() the pages immediately following [ptr, ptr+old_size)
+ * at a fixed address and, on success, extends the existing large_entry_t to
+ * cover new_size (rounded up to a page). Returns 1 on success, 0 if the
+ * adjoining address range is already in use by another large entry or the
+ * fixed-address allocation fails — caller then falls back to malloc+copy.
+ */
+static INLINE int
+large_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
+{
+    vm_address_t addr = (vm_address_t)ptr + old_size;
+    large_entry_t *large_entry;
+    kern_return_t err;
+
+    SZONE_LOCK(szone);
+    large_entry = large_entry_for_pointer_no_lock(szone, (void *)addr);
+    SZONE_UNLOCK(szone);
+
+    if (large_entry) { // check if "addr = ptr + old_size" is already spoken for
+	return 0; // large pointer already exists in table - extension is not going to work
+    }
+
+    new_size = round_page(new_size);
+    /*
+     * Ask for allocation at a specific address, and mark as realloc
+     * to request coalescing with previous realloc'ed extensions.
+     */
+    err = vm_allocate(mach_task_self(), &addr, new_size - old_size, VM_MAKE_TAG(VM_MEMORY_REALLOC));
+    if (err != KERN_SUCCESS) {
+	return 0;
+    }
+
+    SZONE_LOCK(szone);
+    /* extend existing large entry */
+    large_entry = large_entry_for_pointer_no_lock(szone, ptr);
+    if (!large_entry) {
+	szone_error(szone, 1, "large entry reallocated is not properly in table", ptr, NULL);
+	SZONE_UNLOCK(szone);
+	return 0; // Bail, leaking "addr"
+    }
+
+    large_entry->address = (vm_address_t)ptr;
+    large_entry->size = new_size;
+    szone->num_bytes_in_large_objects += new_size - old_size;
+    SZONE_UNLOCK(szone); // we release the lock asap
+
+    return 1;
+}
+
+/********************* Zone call backs ************************/
+/*
+ * Mark these NOINLINE to avoid bloating the purgeable zone call backs
+ */
+/*
+ * szone_free: malloc-zone "free" entry point.
+ *
+ * Classifies ptr by alignment and region lookup — tiny, then small, then
+ * large — and dispatches to the matching free routine. Misaligned or
+ * unrecognized pointers are reported via szone_error(). free(NULL) is a
+ * no-op, per the C standard.
+ */
+static NOINLINE void
+szone_free(szone_t *szone, void *ptr)
+{
+    region_t tiny_region;
+    region_t small_region;
+
+#if DEBUG_MALLOC
+    if (LOG(szone, ptr))
+	malloc_printf("in szone_free with %p\n", ptr);
+#endif
+    if (!ptr)
+	return;
+    /*
+     * Try to free to a tiny region.
+     */
+    if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) {
+	szone_error(szone, 1, "Non-aligned pointer being freed", ptr, NULL);
+	return;
+    }
+    if ((tiny_region = tiny_region_for_ptr_no_lock(szone, ptr)) != NULL) {
+	if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) {
+	    szone_error(szone, 1, "Pointer to metadata being freed", ptr, NULL);
+	    return;
+	}
+	free_tiny(szone, ptr, tiny_region, 0);
+	return;
+    }
+
+    /*
+     * Try to free to a small region.
+     */
+    if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) {
+	szone_error(szone, 1, "Non-aligned pointer being freed (2)", ptr, NULL);
+	return;
+    }
+    if ((small_region = small_region_for_ptr_no_lock(szone, ptr)) != NULL) {
+	if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) {
+	    szone_error(szone, 1, "Pointer to metadata being freed (2)", ptr, NULL);
+	    return;
+	}
+	free_small(szone, ptr, small_region, 0);
+	return;
+    }
+
+    /* check that it's a legal large allocation */
+    if ((uintptr_t)ptr & (vm_page_size - 1)) {
+	szone_error(szone, 1, "non-page-aligned, non-allocated pointer being freed", ptr, NULL);
+	return;
+    }
+    free_large(szone, ptr);
+}
+
+/*
+ * szone_free_definite_size: free when the caller already knows the block size.
+ *
+ * Same dispatch as szone_free(), but uses the supplied size to pick the
+ * tiny/small/large path directly instead of probing the region hashes —
+ * the region lookup is skipped and the region is derived from the pointer
+ * (TINY_REGION_FOR_PTR / SMALL_REGION_FOR_PTR). Caller must pass the same
+ * size class the block was allocated with.
+ */
+static NOINLINE void
+szone_free_definite_size(szone_t *szone, void *ptr, size_t size)
+{
+#if DEBUG_MALLOC
+    if (LOG(szone, ptr))
+	malloc_printf("in szone_free_definite_size with %p\n", ptr);
+
+    if (0 == size) {
+	szone_error(szone, 1, "pointer of size zero being freed", ptr, NULL);
+	return;
+    }
+
+#endif
+    if (!ptr)
+	return;
+
+    /*
+     * Try to free to a tiny region.
+     */
+    if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) {
+	szone_error(szone, 1, "Non-aligned pointer being freed", ptr, NULL);
+	return;
+    }
+    if (size <= (NUM_TINY_SLOTS - 1)*TINY_QUANTUM) {
+	if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) {
+	    szone_error(szone, 1, "Pointer to metadata being freed", ptr, NULL);
+	    return;
+	}
+	free_tiny(szone, ptr, TINY_REGION_FOR_PTR(ptr), size);
+	return;
+    }
+
+    /*
+     * Try to free to a small region.
+     */
+    if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) {
+	szone_error(szone, 1, "Non-aligned pointer being freed (2)", ptr, NULL);
+	return;
+    }
+    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) &&
+	(size <= szone->large_threshold)) {
+	if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) {
+	    szone_error(szone, 1, "Pointer to metadata being freed (2)", ptr, NULL);
+	    return;
+	}
+	free_small(szone, ptr, SMALL_REGION_FOR_PTR(ptr), size);
+	return;
+    }
+
+    /* check that it's a legal large allocation */
+    if ((uintptr_t)ptr & (vm_page_size - 1)) {
+	szone_error(szone, 1, "non-page-aligned, non-allocated pointer being freed", ptr, NULL);
+	return;
+    }
+    free_large(szone, ptr);
+}
+
+/*
+ * szone_malloc_should_clear: core allocation dispatcher.
+ *
+ * Routes the request to the tiny, small, or large allocator based on size
+ * and the zone's guard-page/large-threshold settings. cleared_requested
+ * asks the underlying allocator for zeroed memory (calloc semantics).
+ * Returns NULL on failure or overflow of the page-rounded size.
+ */
+static NOINLINE void *
+szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested)
+{
+    void *ptr;
+    msize_t msize;
+
+    if (size <= (NUM_TINY_SLOTS - 1)*TINY_QUANTUM) {
+	// think tiny
+	msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
+	if (!msize)
+	    msize = 1; // a zero-byte request still vends one quantum
+	ptr = tiny_malloc_should_clear(szone, msize, cleared_requested);
+    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) &&
+	       (size <= szone->large_threshold)) {
+	// think small
+	msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
+	if (! msize)
+	    msize = 1;
+	ptr = small_malloc_should_clear(szone, msize, cleared_requested);
+    } else {
+	// large
+	size_t num_pages = round_page(size) >> vm_page_shift;
+	if (num_pages == 0)	/* Overflowed */
+	    ptr = 0;
+	else
+	    ptr = large_malloc(szone, num_pages, 0, cleared_requested);
+    }
+#if DEBUG_MALLOC
+    if (LOG(szone, ptr))
+	malloc_printf("szone_malloc returned %p\n", ptr);
+#endif
+    /*
+     * If requested, scribble on allocated memory.
+     */
+    if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && ptr && !cleared_requested && size)
+	memset(ptr, 0xaa, size);
+
+    return ptr;
+}
+
+/* szone_malloc: malloc-zone "malloc" entry point — uncleared allocation. */
+static NOINLINE void *
+szone_malloc(szone_t *szone, size_t size) {
+    return szone_malloc_should_clear(szone, size, 0);
+}
+
+/*
+ * szone_calloc: malloc-zone "calloc" entry point.
+ *
+ * Computes num_items * size with an explicit overflow check (the wider
+ * multiply is only performed when either operand is large enough that
+ * the product could exceed size_t), then allocates zeroed memory.
+ * Returns NULL if the multiplication would overflow.
+ */
+static NOINLINE void *
+szone_calloc(szone_t *szone, size_t num_items, size_t size)
+{
+    size_t total_bytes = num_items * size; // may wrap; guarded below before use
+
+    // Check for overflow of integer multiplication
+    if (num_items > 1) {
+#if __LP64__ /* size_t is uint64_t */
+	if ((num_items | size) & 0xffffffff00000000ul) {
+	    // num_items or size equals or exceeds sqrt(2^64) == 2^32, appeal to wider arithmetic
+	    __uint128_t product = ((__uint128_t)num_items) * ((__uint128_t)size);
+	    if ((uint64_t)(product >> 64)) // compiles to test on upper register of register pair
+		return NULL;
+	}
+#else /* size_t is uint32_t */
+	if ((num_items | size) & 0xffff0000ul) {
+	    // num_items or size equals or exceeds sqrt(2^32) == 2^16, appeal to wider arithmetic
+	    uint64_t product = ((uint64_t)num_items) * ((uint64_t)size);
+	    if ((uint32_t)(product >> 32)) // compiles to test on upper register of register pair
+		return NULL;
+	}
+#endif
+    }
+
+    return szone_malloc_should_clear(szone, total_bytes, 1);
+}
+
+/*
+ * szone_valloc: malloc-zone "valloc" entry point — page-aligned allocation.
+ *
+ * Sub-large requests are satisfied via szone_memalign() with page
+ * alignment; anything above large_threshold goes straight to large_malloc,
+ * whose results are inherently page-aligned.
+ */
+static NOINLINE void *
+szone_valloc(szone_t *szone, size_t size)
+{
+    void *ptr;
+
+    if (size <= szone->large_threshold) {
+	ptr = szone_memalign(szone, vm_page_size, size);
+    } else {
+	size_t num_pages;
+
+	num_pages = round_page(size) >> vm_page_shift;
+	ptr = large_malloc(szone, num_pages, 0, 0);
+    }
+
+#if DEBUG_MALLOC
+    if (LOG(szone, ptr))
+	malloc_printf("szone_valloc returned %p\n", ptr);
+#endif
+    return ptr;
+}
+
+/* Isolate PIC-base load (for __is_threaded) here. */
+/*
+ * szone_size_try_large: look up ptr in the large-entry hash under the zone
+ * lock and return its recorded size, or 0 if the pointer is not a live
+ * large allocation of this zone.
+ */
+static NOINLINE size_t
+szone_size_try_large(szone_t *szone, const void *ptr)
+{
+    size_t		size = 0;
+    large_entry_t	*entry;
+
+    SZONE_LOCK(szone);
+    entry = large_entry_for_pointer_no_lock(szone, ptr);
+    if (entry) {
+	size = entry->size;
+    }
+    SZONE_UNLOCK(szone);
+#if DEBUG_MALLOC
+    if (LOG(szone, ptr)) {
+	malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)size);
+    }
+#endif
+    return size;
+}
+
+/*
+ * szone_size: malloc-zone "size" entry point.
+ *
+ * Returns the usable size of a block owned by this zone, or 0 when the
+ * pointer is NULL, misaligned for every size class, free, sitting in a
+ * magazine's last-free cache, or simply not owned by this zone. Checks
+ * tiny, then small, then large, mirroring the allocation dispatch.
+ */
+static NOINLINE size_t
+szone_size(szone_t *szone, const void *ptr)
+{
+    boolean_t		is_free;
+    msize_t		msize, msize_and_free;
+
+    if (!ptr)
+	return 0;
+#if DEBUG_MALLOC
+    if (LOG(szone, ptr)) {
+	malloc_printf("in szone_size for %p (szone=%p)\n", ptr, szone);
+    }
+#endif
+
+    /*
+     * Look for it in a tiny region.
+     */
+    if ((uintptr_t)ptr & (TINY_QUANTUM - 1))
+	return 0;
+    if (tiny_region_for_ptr_no_lock(szone, ptr)) {
+	if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS)
+	    return 0;
+	msize = get_tiny_meta_header(ptr, &is_free);
+	if (is_free)
+	    return 0;
+#if TINY_CACHE
+	{
+	    // A block parked in the magazine's single-entry "last free" cache
+	    // is logically free even though its metadata still says in-use.
+	    mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr));
+	    magazine_t	*tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
+
+	    if (msize < TINY_QUANTUM && ptr == (void *)((uintptr_t)(tiny_mag_ptr->mag_last_free) & ~ (TINY_QUANTUM - 1)))
+		return 0;
+	}
+#endif
+	return TINY_BYTES_FOR_MSIZE(msize);
+    }
+
+    /*
+     * Look for it in a small region.
+     */
+    if ((uintptr_t)ptr & (SMALL_QUANTUM - 1))
+	return 0;
+    if (small_region_for_ptr_no_lock(szone, ptr)) {
+	if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS)
+	    return 0;
+	msize_and_free = *SMALL_METADATA_FOR_PTR(ptr);
+	if (msize_and_free & SMALL_IS_FREE)
+	    return 0;
+#if SMALL_CACHE
+	{
+	    mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr));
+	    magazine_t	*small_mag_ptr = &(szone->small_magazines[mag_index]);
+
+	    if (ptr == (void *)((uintptr_t)(small_mag_ptr->mag_last_free) & ~ (SMALL_QUANTUM - 1)))
+		return 0;
+	}
+#endif
+	return SMALL_BYTES_FOR_MSIZE(msize_and_free);
+    }
+
+    /*
+     * If not page-aligned, it cannot have come from a large allocation.
+     */
+    if ((uintptr_t)ptr & (vm_page_size - 1))
+	return 0;
+
+    /*
+     * Look for it in a large entry.
+     */
+    return szone_size_try_large(szone, ptr);
+}
+
+/*
+ * szone_realloc: malloc-zone "realloc" entry point.
+ *
+ * NULL ptr degenerates to malloc. Shrinking is a no-op (the original block
+ * is returned unchanged). For growth, an in-place extension is attempted
+ * for the tiny, small, and large cases; otherwise a new block is allocated,
+ * the contents copied (via vm_copy for blocks past vm_copy_threshold), and
+ * the old block freed. Returns NULL (old block intact) on allocation
+ * failure, or if ptr was not allocated by this zone.
+ */
+static NOINLINE void *
+szone_realloc(szone_t *szone, void *ptr, size_t new_size)
+{
+    size_t	old_size;
+    void	*new_ptr;
+
+#if DEBUG_MALLOC
+    if (LOG(szone, ptr)) {
+	malloc_printf("in szone_realloc for %p, %d\n", ptr, (unsigned)new_size);
+    }
+#endif
+    if (!ptr) {
+	ptr = szone_malloc(szone, new_size);
+	return ptr;
+    }
+    old_size = szone_size(szone, ptr);
+    if (!old_size) {
+	szone_error(szone, 1, "pointer being reallocated was not allocated", ptr, NULL);
+	return NULL;
+    }
+    /* we never shrink an allocation */
+    if (old_size >= new_size)
+	return ptr;
+
+    /*
+     * If the new size suits the tiny allocator and the pointer being resized
+     * belongs to a tiny region, try to reallocate in-place.
+     */
+    if ((new_size + TINY_QUANTUM - 1) <= (NUM_TINY_SLOTS - 1) * TINY_QUANTUM) {
+	if (tiny_region_for_ptr_no_lock(szone, ptr) != NULL) {
+	    if (tiny_try_realloc_in_place(szone, ptr, old_size, new_size)) {
+		return ptr;
+	    }
+	}
+
+    /*
+     * If the new size suits the small allocator and the pointer being resized
+     * belongs to a small region, and we're not protecting the small allocations
+     * try to reallocate in-place.
+     */
+    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) &&
+	       ((new_size + SMALL_QUANTUM - 1) <= szone->large_threshold) &&
+	       (small_region_for_ptr_no_lock(szone, ptr) != NULL)) {
+	if (small_try_realloc_in_place(szone, ptr, old_size, new_size)) {
+	    return ptr;
+	}
+
+    /*
+     * If the allocation's a large allocation, try to reallocate in-place there.
+     */
+    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) &&
+	       !(szone->debug_flags & SCALABLE_MALLOC_PURGEABLE) &&
+	       (old_size > szone->large_threshold)) {
+	if (large_try_realloc_in_place(szone, ptr, old_size, new_size)) {
+	    return ptr;
+	}
+    }
+
+    /*
+     * Can't reallocate in place for whatever reason; allocate a new buffer and copy.
+     */
+    new_ptr = szone_malloc(szone, new_size);
+    if (new_ptr == NULL)
+	return NULL;
+
+    /*
+     * If the allocation's large enough, try to copy using VM.  If that fails, or
+     * if it's too small, just copy by hand.
+     */
+    if ((old_size < szone->vm_copy_threshold) ||
+	vm_copy(mach_task_self(), (vm_address_t)ptr, old_size, (vm_address_t)new_ptr))
+	memcpy(new_ptr, ptr, old_size);
+    szone_free(szone, ptr);
+
+#if DEBUG_MALLOC
+    if (LOG(szone, ptr)) {
+	malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size);
+    }
+#endif
+    return new_ptr;
+}
+
+/*
+ * szone_memalign: allocate "size" bytes aligned to "alignment".
+ *
+ * Caller (the posix_memalign path) guarantees alignment is a power of two
+ * >= sizeof(void *). Strategy: over-allocate a span of size+alignment-1 in
+ * the appropriate size class, then carve off the misaligned prefix ("pad")
+ * and the excess suffix ("waste") by rewriting block metadata and freeing
+ * them back to the free lists, leaving a properly aligned block that can
+ * later be passed to free() like any other. Large requests delegate the
+ * alignment to large_malloc directly. Returns NULL on overflow or
+ * allocation failure.
+ */
+static NOINLINE void *
+szone_memalign(szone_t *szone, size_t alignment, size_t size)
+{
+    if ((size + alignment) < size) // size_t arithmetic wrapped!
+	return NULL;
+
+    // alignment is guaranteed a power of 2 at least as large as sizeof(void *), hence non-zero.
+    // Since size + alignment didn't wrap, 0 <= size + alignment - 1 < size + alignment
+    size_t span = size + alignment - 1;
+
+    if (alignment <= TINY_QUANTUM) {
+	return szone_malloc(szone, size); // Trivially satisfied by tiny, small, or large
+
+    } else if (span <= (NUM_TINY_SLOTS - 1)*TINY_QUANTUM) {
+	msize_t mspan = TINY_MSIZE_FOR_BYTES(span + TINY_QUANTUM - 1);
+	void *p = szone_malloc(szone, span); // avoids inlining tiny_malloc_should_clear(szone, mspan, 0);
+
+	if (NULL == p)
+	    return NULL;
+
+	size_t offset = ((uintptr_t) p) & (alignment - 1);	// p % alignment
+	size_t pad = (0 == offset) ? 0 : alignment - offset;	// p + pad achieves desired alignment
+
+	msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
+	msize_t mpad = TINY_MSIZE_FOR_BYTES(pad + TINY_QUANTUM - 1);
+	msize_t mwaste = mspan - msize - mpad; // excess blocks
+
+	if (mpad > 0) {
+	    void *q = (void *)(((uintptr_t) p) + pad);
+
+	    // Mark q as a block header and in-use, thus creating two blocks.
+	    magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines,
+									REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)),
+									MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)));
+	    set_tiny_meta_header_in_use(q, msize);
+
+	    // set_tiny_meta_header_in_use() "reaffirms" the block_header on the *following* block, so
+	    // now set its in_use bit as well. But only if it's within the original allocation made above.
+	    if (mwaste > 0)
+		BITARRAY_SET(TINY_INUSE_FOR_HEADER(TINY_BLOCK_HEADER_FOR_PTR(q)), TINY_INDEX_FOR_PTR(q) + msize);
+	    SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+
+	    // Give up mpad blocks beginning at p to the tiny free list
+	    // region_t r = TINY_REGION_FOR_PTR(p);
+	    szone_free(szone, p); // avoids inlining free_tiny(szone, p, &r);
+
+	    p = q; // advance p to the desired alignment
+	}
+
+	if (mwaste > 0) {
+	    void *q = (void *)(((uintptr_t) p) + TINY_BYTES_FOR_MSIZE(msize));
+	    // Mark q as block header and in-use, thus creating two blocks.
+	    magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines,
+									REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)),
+									MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)));
+	    set_tiny_meta_header_in_use(q, mwaste);
+	    SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+
+	    // Give up mwaste blocks beginning at q to the tiny free list
+	    // region_t r = TINY_REGION_FOR_PTR(q);
+	    szone_free(szone, q); // avoids inlining free_tiny(szone, q, &r);
+	}
+
+	return p; // p has the desired size and alignment, and can later be free()'d
+
+    } else if ((NUM_TINY_SLOTS - 1)*TINY_QUANTUM < size && alignment <= SMALL_QUANTUM) {
+	return szone_malloc(szone, size); // Trivially satisfied by small or large
+
+    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (span <= szone->large_threshold)) {
+
+	if (size <= (NUM_TINY_SLOTS - 1)*TINY_QUANTUM) {
+	    size = (NUM_TINY_SLOTS - 1)*TINY_QUANTUM + TINY_QUANTUM; // ensure block allocated by small does not have a tiny-possible size
+	    span = size + alignment - 1;
+	}
+
+	msize_t mspan = SMALL_MSIZE_FOR_BYTES(span + SMALL_QUANTUM - 1);
+	void *p = szone_malloc(szone, span); // avoid inlining small_malloc_should_clear(szone, mspan, 0);
+
+	if (NULL == p)
+	    return NULL;
+
+	size_t offset = ((uintptr_t) p) & (alignment - 1);	// p % alignment
+	size_t pad = (0 == offset) ? 0 : alignment - offset;	// p + pad achieves desired alignment
+
+	msize_t msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
+	msize_t mpad = SMALL_MSIZE_FOR_BYTES(pad + SMALL_QUANTUM - 1);
+	msize_t mwaste = mspan - msize - mpad; // excess blocks
+
+	if (mpad > 0) {
+	    void *q = (void *)(((uintptr_t) p) + pad);
+
+	    // Mark q as block header and in-use, thus creating two blocks.
+	    magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines,
+									 REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p)),
+									 MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p)));
+	    small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(p), SMALL_META_INDEX_FOR_PTR(p), mpad);
+	    small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(q), SMALL_META_INDEX_FOR_PTR(q), msize + mwaste);
+	    SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+
+	    // Give up mpad blocks beginning at p to the small free list
+	    // region_t r = SMALL_REGION_FOR_PTR(p);
+	    szone_free(szone, p); // avoid inlining free_small(szone, p, &r);
+
+	    p = q; // advance p to the desired alignment
+	}
+	if (mwaste > 0) {
+	    void *q = (void *)(((uintptr_t) p) + SMALL_BYTES_FOR_MSIZE(msize));
+	    // Mark q as block header and in-use, thus creating two blocks.
+	    magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines,
+									 REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p)),
+									 MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p)));
+	    small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(p), SMALL_META_INDEX_FOR_PTR(p), msize);
+	    small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(q), SMALL_META_INDEX_FOR_PTR(q), mwaste);
+	    SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+
+	    // Give up mwaste blocks beginning at q to the small free list
+	    // region_t r = SMALL_REGION_FOR_PTR(q);
+	    szone_free(szone, q); // avoid inlining free_small(szone, q, &r);
+	}
+
+	return p; // p has the desired size and alignment, and can later be free()'d
+
+    } else if (szone->large_threshold < size && alignment <= vm_page_size) {
+	return szone_malloc(szone, size); // Trivially satisfied by large
+
+    } else {
+	// ensure block allocated by large does not have a small-possible size
+	size_t num_pages = round_page(MAX(szone->large_threshold + 1, size)) >> vm_page_shift;
+	void *p;
+
+	if (num_pages == 0)	/* Overflowed */
+	    p = NULL;
+	else
+	    p = large_malloc(szone, num_pages, MAX(vm_page_shift, __builtin_ctz(alignment)), 0);
+
+	return p;
+    }
+    /* NOTREACHED */
+}
+
+// given a size, returns the number of pointers allocated capable of holding
+// that size, up to the limit specified by the 'count' argument.  These pointers
+// are stored in the 'results' array, which must be allocated by the caller.
+// may return zero, since this function is only a best attempt at allocating
+// the pointers.  clients should be prepared to call malloc for any additional
+// blocks they need.
+//
+// Only implemented for tiny-sized blocks: allocations come straight off the
+// current thread's tiny magazine free list, under a single magazine lock.
+static NOINLINE unsigned
+szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count)
+{
+    msize_t	msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
+    unsigned	found = 0;
+    mag_index_t	mag_index = mag_get_thread_index(szone);
+    magazine_t	*tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
+
+    // only bother implementing this for tiny
+    if (size > (NUM_TINY_SLOTS - 1)*TINY_QUANTUM)
+	return 0;
+    // make sure to return objects at least one quantum in size
+    if (!msize)
+	msize = 1;
+
+    CHECK(szone, __PRETTY_FUNCTION__);
+
+    // We must lock the zone now, since tiny_malloc_from_free_list assumes that
+    // the caller has done so.
+    SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
+
+    // with the zone locked, allocate objects from the free list until all
+    // sufficiently large objects have been exhausted, or we have met our quota
+    // of objects to allocate.
+    while (found < count) {
+	void *ptr = tiny_malloc_from_free_list(szone, tiny_mag_ptr, mag_index, msize);
+	if (!ptr)
+	    break;
+
+	*results++ = ptr;
+	found++;
+    }
+    SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+    return found;
+}
+
+/* Try caching the tiny_region and checking if the next ptr hits there. */
+/*
+ * szone_batch_free: free an array of pointers in bulk.
+ *
+ * Fast path: consecutive tiny pointers from the same region are freed while
+ * holding that region's magazine lock just once (the lock is only dropped
+ * and retaken when the region changes). Anything that is not a plain
+ * in-use tiny block — metadata pointers, double frees, non-tiny pointers —
+ * causes a break out of the fast loop; the trailing loop then routes all
+ * remaining (non-NULLed) entries through the full szone_free() path.
+ * to_be_freed is clobbered (entries set to NULL as they are freed).
+ */
+static NOINLINE void
+szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count)
+{
+    unsigned	cc = 0;
+    void	*ptr;
+    region_t	tiny_region = NULL;
+    boolean_t	is_free;
+    msize_t	msize;
+    magazine_t	*tiny_mag_ptr = NULL;
+    mag_index_t mag_index = -1;
+
+    // frees all the pointers in to_be_freed
+    // note that to_be_freed may be overwritten during the process
+    if (!count)
+	return;
+
+    CHECK(szone, __PRETTY_FUNCTION__);
+    while (cc < count) {
+	ptr = to_be_freed[cc];
+	if (ptr) {
+	    if (NULL == tiny_region || tiny_region != TINY_REGION_FOR_PTR(ptr)) { // region same as last iteration?
+		if (tiny_mag_ptr) { // non-NULL iff magazine lock taken
+		    SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+		    tiny_mag_ptr = NULL;
+		}
+
+		tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
+
+		if (tiny_region) {
+		    tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines,
+								    REGION_TRAILER_FOR_TINY_REGION(tiny_region),
+								    MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region));
+		    mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region);
+		}
+	    }
+	    if (tiny_region) {
+		// this is a tiny pointer
+		if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS)
+		    break; // pointer to metadata; let the standard free deal with it
+		msize = get_tiny_meta_header(ptr, &is_free);
+		if (is_free)
+		    break; // a double free; let the standard free deal with it
+
+		tiny_free_no_lock(szone, tiny_mag_ptr, mag_index, tiny_region, ptr, msize);
+		to_be_freed[cc] = NULL;
+	    } else {
+		// No region in this zone claims ptr; let the standard free deal with it
+		break;
+	    }
+	}
+	cc++;
+    }
+
+    if (tiny_mag_ptr) {
+	SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+	tiny_mag_ptr = NULL;
+    }
+
+    CHECK(szone, __PRETTY_FUNCTION__);
+    // Slow path: anything not handled above goes through the general free.
+    while (count--) {
+	ptr = to_be_freed[count];
+	if (ptr)
+	    szone_free(szone, ptr);
+    }
+}
+
+// FIXME: Suppose one of the locks is held?
+/*
+ * szone_destroy: tear down the zone and return all of its VM.
+ *
+ * Deallocates, in order: every live large allocation, the large-entry
+ * hash itself, all tiny and small regions, any heap-allocated region
+ * hash rings (the initial in-struct rings are skipped), the per-CPU
+ * pthread key, the magazine arrays (including their guard pages), and
+ * finally the szone structure itself. Not safe to call concurrently
+ * with any other operation on the zone (see FIXME above).
+ */
+static void
+szone_destroy(szone_t *szone)
+{
+    size_t		index;
+    large_entry_t	*large;
+    vm_range_t		range_to_deallocate;
+
+    /* destroy large entries */
+    index = szone->num_large_entries;
+    while (index--) {
+	large = szone->large_entries + index;
+	if (large->address) {
+	    // we deallocate_pages, including guard pages
+	    deallocate_pages(szone, (void *)(large->address), large->size, szone->debug_flags);
+	}
+    }
+    large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate);
+    if (range_to_deallocate.size)
+	deallocate_pages(szone, (void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0);
+
+    /* destroy tiny regions */
+    for (index = 0; index < szone->tiny_region_generation->num_regions_allocated; ++index)
+	if ((HASHRING_OPEN_ENTRY != szone->tiny_region_generation->hashed_regions[index]) &&
+	    (HASHRING_REGION_DEALLOCATED != szone->tiny_region_generation->hashed_regions[index]))
+	    deallocate_pages(szone, szone->tiny_region_generation->hashed_regions[index], TINY_REGION_SIZE, 0);
+
+    /* destroy small regions */
+    for (index = 0; index < szone->small_region_generation->num_regions_allocated; ++index)
+	if ((HASHRING_OPEN_ENTRY != szone->small_region_generation->hashed_regions[index]) &&
+	    (HASHRING_REGION_DEALLOCATED != szone->small_region_generation->hashed_regions[index]))
+	    deallocate_pages(szone, szone->small_region_generation->hashed_regions[index], SMALL_REGION_SIZE, 0);
+
+    /* destroy region hash rings, if any */
+    if (szone->tiny_region_generation->hashed_regions != szone->initial_tiny_regions) {
+	size_t size = round_page(szone->tiny_region_generation->num_regions_allocated * sizeof(region_t));
+	deallocate_pages(szone, szone->tiny_region_generation->hashed_regions, size, 0);
+    }
+    if (szone->small_region_generation->hashed_regions != szone->initial_small_regions) {
+	size_t size = round_page(szone->small_region_generation->num_regions_allocated * sizeof(region_t));
+	deallocate_pages(szone, szone->small_region_generation->hashed_regions, size, 0);
+    }
+
+    /* Now destroy the separate szone region */
+    if (szone->cpu_id_key != (pthread_key_t) -1)
+	(void)pthread_key_delete(szone->cpu_id_key);
+    // Magazine arrays start at index -1 (the "depot"); free from there.
+    deallocate_pages(szone, (void *)&(szone->tiny_magazines[-1]), TINY_MAGAZINE_PAGED_SIZE, SCALABLE_MALLOC_ADD_GUARD_PAGES);
+    deallocate_pages(szone, (void *)&(szone->small_magazines[-1]), SMALL_MAGAZINE_PAGED_SIZE, SCALABLE_MALLOC_ADD_GUARD_PAGES);
+    deallocate_pages(szone, (void *)szone, SZONE_PAGED_SIZE, SCALABLE_MALLOC_ADD_GUARD_PAGES);
+}
+
+/*
+ * szone_good_size: malloc-zone "good size" entry point.
+ *
+ * Returns the actual allocation size the zone would use for a request of
+ * "size" bytes: rounded to the tiny quantum, the small quantum, or a whole
+ * page, depending on which allocator would service it. Returns (size_t)-1
+ * if page-rounding the size would overflow.
+ */
+static NOINLINE size_t
+szone_good_size(szone_t *szone, size_t size)
+{
+    msize_t msize;
+    int guard_small = (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL;
+
+    // Find a good size for this tiny allocation.
+    if (size <= (NUM_TINY_SLOTS - 1) * TINY_QUANTUM) {
+	msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
+	if (!msize)
+	    msize = 1;
+	return TINY_BYTES_FOR_MSIZE(msize);
+    }
+
+    // Find a good size for this small allocation.
+    if (!guard_small && (size <= szone->large_threshold)) {
+	msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
+	if (!msize)
+	    msize = 1;
+	return SMALL_BYTES_FOR_MSIZE(msize);
+    }
+
+    // Check for integer overflow on the size, since unlike the two cases above,
+    // there is no upper bound on allocation size at this point.
+    if (size > round_page(size))
+	return (size_t)(-1LL);
+
+#if DEBUG_MALLOC
+    // It is not acceptable to see a size of zero here, since that means we
+    // failed to catch a request for zero bytes in the tiny check, or the size
+    // overflowed to zero during some arithmetic.
+    if (size == 0)
+	malloc_printf("szone_good_size() invariant broken %y\n", size);
+#endif
+    return round_page(size);
+}
+
+// Global knobs for periodic zone consistency checking.
+// NOTE(review): exact semantics live in szone_check(), which is outside this
+// view — presumably checking begins after szone_check_start calls and then
+// runs on every szone_check_modulo-th call; confirm against that function.
+unsigned szone_check_counter = 0;
+unsigned szone_check_start = 0;
+unsigned szone_check_modulo = 1;
+
+static NOINLINE boolean_t
+szone_check_all(szone_t *szone, const char *function)
+{
+ size_t index;
+
+    /* check tiny regions - should check region count */
+ for (index = 0; index < szone->tiny_region_generation->num_regions_allocated; ++index) {
+ region_t tiny = szone->tiny_region_generation->hashed_regions[index];
+
+ if (HASHRING_REGION_DEALLOCATED == tiny)
+ continue;
+
+ if (tiny) {
+ magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines,
+ REGION_TRAILER_FOR_TINY_REGION(tiny), MAGAZINE_INDEX_FOR_TINY_REGION(tiny));
+
+ if (!tiny_check_region(szone, tiny)) {
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ szone->debug_flags &= ~ CHECK_REGIONS;
+ szone_error(szone, 1, "check: tiny region incorrect", NULL,
+ "*** tiny region %ld incorrect szone_check_all(%s) counter=%d\n",
+ index, function, szone_check_counter);
+ return 0;
+ }
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
+ }
+ }
+ /* check tiny free lists */
+ for (index = 0; index < NUM_TINY_SLOTS; ++index) {
+ if (!tiny_free_list_check(szone, index)) {
+ szone->debug_flags &= ~ CHECK_REGIONS;
+ szone_error(szone, 1, "check: tiny free list incorrect", NULL,
+ "*** tiny free list incorrect (slot=%ld) szone_check_all(%s) counter=%d\n",
+ index, function, szone_check_counter);
+ return 0;
+ }
+ }
+
+ /* check small regions - could check region count */
+ for (index = 0; index < szone->small_region_generation->num_regions_allocated; ++index) {
+ region_t small = szone->small_region_generation->hashed_regions[index];
+
+ if (HASHRING_REGION_DEALLOCATED == small)
+ continue;
+
+ if (small) {
+ magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines,
+ REGION_TRAILER_FOR_SMALL_REGION(small), MAGAZINE_INDEX_FOR_SMALL_REGION(small));
+
+ if (!small_check_region(szone, small)) {
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+ szone->debug_flags &= ~ CHECK_REGIONS;
+ szone_error(szone, 1, "check: small region incorrect", NULL,
+ "*** small region %ld incorrect szone_check_all(%s) counter=%d\n",
+ index, function, szone_check_counter);
+ return 0;
+ }
+ SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
+ }
+ }
+ /* check small free lists */
+ for (index = 0; index < szone->num_small_slots; ++index) {
+ if (!small_free_list_check(szone, index)) {
+ szone->debug_flags &= ~ CHECK_REGIONS;
+ szone_error(szone, 1, "check: small free list incorrect", NULL,
+ "*** small free list incorrect (slot=%ld) szone_check_all(%s) counter=%d\n",
+ index, function, szone_check_counter);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/*
+ * Rate-limited wrapper around szone_check_all(): counts every call,
+ * periodically logs liveness, and only performs the (expensive) full check
+ * once the start threshold is reached and the modulo divides the counter.
+ */
+static boolean_t
+szone_check(szone_t *szone)
+{
+    // Announce progress every 10000 calls so long runs show liveness.
+    if ((++szone_check_counter % 10000) == 0)
+        _malloc_printf(ASL_LEVEL_NOTICE, "at szone_check counter=%d\n", szone_check_counter);
+
+    // Too early, or not our turn per the modulo: report success untested.
+    if (szone_check_counter < szone_check_start || (szone_check_counter % szone_check_modulo))
+        return 1;
+
+    return szone_check_all(szone, "");
+}
+
+/*
+ * vm_range enumerator for the introspection API: maps the (possibly
+ * remote) szone into our address space via `reader`, then walks the tiny,
+ * small, and large subzones in turn, stopping at the first error.
+ */
+static kern_return_t
+szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address,
+                            memory_reader_t reader, vm_range_recorder_t recorder)
+{
+    szone_t *szone;
+    kern_return_t rc;
+
+    // Fall back to the in-process reader when the caller supplies none.
+    if (!reader)
+        reader = _szone_default_reader;
+
+    rc = reader(task, zone_address, sizeof(szone_t), (void **)&szone);
+    if (rc)
+        return rc;
+
+    rc = tiny_in_use_enumerator(task, context, type_mask, szone, reader, recorder);
+    if (rc)
+        return rc;
+
+    rc = small_in_use_enumerator(task, context, type_mask, szone, reader, recorder);
+    if (rc)
+        return rc;
+
+    return large_in_use_enumerator(task, context, type_mask,
+        (vm_address_t)szone->large_entries, szone->num_large_entries, reader, recorder);
+}
+
+// Following method is deprecated: use scalable_zone_statistics instead
+/*
+ * Fill info_to_fill with up to `count` of 13 legacy counters:
+ *   [0]/[1] total objects / bytes in use   [2] "touched" bytes ([3] - free-at-end)
+ *   [3] bytes allocated                    [4]/[5] tiny objects / bytes
+ *   [6]/[7] small objects / bytes          [8]/[9] large objects / bytes
+ *   [10]/[11] huge (deprecated, always 0)  [12] debug flags
+ * Values are narrowed to unsigned for this legacy interface — potential
+ * truncation on 64-bit; acceptable for the deprecated API (verify).
+ */
+void
+scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count)
+{
+    szone_t *szone = (void *)zone;
+    unsigned info[13];
+
+    // We do not lock to facilitate debug
+
+    size_t s = 0;
+    unsigned t = 0;
+    size_t u = 0;
+    mag_index_t mag_index;
+
+    // Index -1 is the depot magazine; include it along with the per-CPU ones.
+    for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) {
+        s += szone->tiny_magazines[mag_index].mag_bytes_free_at_end;
+        t += szone->tiny_magazines[mag_index].mag_num_objects;
+        u += szone->tiny_magazines[mag_index].mag_num_bytes_in_objects;
+    }
+
+    info[4] = t;
+    info[5] = u;
+
+    // s intentionally keeps accumulating: it totals free-at-end bytes
+    // across both tiny and small magazines.
+    for (t = 0, u = 0, mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) {
+        s += szone->small_magazines[mag_index].mag_bytes_free_at_end;
+        t += szone->small_magazines[mag_index].mag_num_objects;
+        u += szone->small_magazines[mag_index].mag_num_bytes_in_objects;
+    }
+
+    info[6] = t;
+    info[7] = u;
+
+    info[8] = szone->num_large_objects_in_use;
+    info[9] = szone->num_bytes_in_large_objects;
+
+    info[10] = 0; // DEPRECATED szone->num_huge_entries;
+    info[11] = 0; // DEPRECATED szone->num_bytes_in_huge_objects;
+
+    info[12] = szone->debug_flags;
+
+    info[0] = info[4] + info[6] + info[8] + info[10];
+    info[1] = info[5] + info[7] + info[9] + info[11];
+
+    // Allocated bytes: live regions of each flavor plus large/huge bytes.
+    info[3] = (szone->num_tiny_regions - szone->num_tiny_regions_dealloc) * TINY_REGION_SIZE +
+        (szone->num_small_regions - szone->num_small_regions_dealloc) * SMALL_REGION_SIZE + info[9] + info[11];
+
+    info[2] = info[3] - s;
+    memcpy(info_to_fill, info, sizeof(unsigned)*count);
+}
+
+// FIXME: consistent picture requires locking!
+/*
+ * Dump a human-readable summary of the zone: aggregate counters (via the
+ * deprecated scalable_zone_info), then every tiny and small region, and —
+ * when verbose — the free lists as well.
+ */
+static NOINLINE void
+szone_print(szone_t *szone, boolean_t verbose)
+{
+    unsigned info[13];
+    size_t index;
+    region_t region;
+
+    scalable_zone_info((void *)szone, info, 13);
+    _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
+        "Scalable zone %p: inUse=%d(%y) touched=%y allocated=%y flags=%d\n",
+        szone, info[0], info[1], info[2], info[3], info[12]);
+    _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
+        "\ttiny=%d(%y) small=%d(%y) large=%d(%y) huge=%d(%y)\n",
+        info[4], info[5], info[6], info[7], info[8], info[9], info[10], info[11]);
+    // tiny
+    _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
+        "%d tiny regions:\n", szone->num_tiny_regions);
+    if (szone->num_tiny_regions_dealloc)
+        _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
+            "[%d tiny regions have been vm_deallocate'd]\n", szone->num_tiny_regions_dealloc);
+    for (index = 0; index < szone->tiny_region_generation->num_regions_allocated; ++index) {
+        region = szone->tiny_region_generation->hashed_regions[index];
+        // Skip empty hash slots and deallocation tombstones.
+        if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) {
+            mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(region);
+            // Only the magazine's current region has trailing untouched bytes.
+            print_tiny_region(verbose, region, (region == szone->tiny_magazines[mag_index].mag_last_region) ?
+                szone->tiny_magazines[mag_index].mag_bytes_free_at_end : 0);
+        }
+    }
+    if (verbose)
+        print_tiny_free_list(szone);
+    // small
+    _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
+        "%d small regions:\n", szone->num_small_regions);
+    if (szone->num_small_regions_dealloc)
+        _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
+            "[%d small regions have been vm_deallocate'd]\n", szone->num_small_regions_dealloc);
+    for (index = 0; index < szone->small_region_generation->num_regions_allocated; ++index) {
+        region = szone->small_region_generation->hashed_regions[index];
+        if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) {
+            mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(region);
+            print_small_region(szone, verbose, region,
+                (region == szone->small_magazines[mag_index].mag_last_region) ?
+                szone->small_magazines[mag_index].mag_bytes_free_at_end : 0);
+        }
+    }
+    if (verbose)
+        print_small_free_list(szone);
+}
+
+/*
+ * Introspection hook: record the address whose malloc/free activity this
+ * zone should log.
+ */
+static void
+szone_log(malloc_zone_t *zone, void *log_address)
+{
+    ((szone_t *)zone)->log_address = log_address;
+}
+
+/*
+ * Acquire every lock in the zone, in a fixed order: each per-CPU tiny
+ * magazine, the tiny depot, each per-CPU small magazine, the small depot,
+ * then the szone lock itself.  NOTE(review): presumably used around
+ * fork() so no lock is held mid-operation in the child — confirm caller.
+ */
+static void
+szone_force_lock(szone_t *szone)
+{
+    mag_index_t i;
+
+    for (i = 0; i < szone->num_tiny_magazines; ++i) {
+        SZONE_MAGAZINE_PTR_LOCK(szone, (&(szone->tiny_magazines[i])));
+    }
+    SZONE_MAGAZINE_PTR_LOCK(szone, (&(szone->tiny_magazines[DEPOT_MAGAZINE_INDEX])));
+
+    for (i = 0; i < szone->num_small_magazines; ++i) {
+        SZONE_MAGAZINE_PTR_LOCK(szone, (&(szone->small_magazines[i])));
+    }
+    SZONE_MAGAZINE_PTR_LOCK(szone, (&(szone->small_magazines[DEPOT_MAGAZINE_INDEX])));
+
+    SZONE_LOCK(szone);
+}
+
+/*
+ * Release everything szone_force_lock() acquired.  Starting each loop at
+ * -1 covers the depot magazine (magazines[-1] is the depot) together with
+ * the per-CPU magazines; the szone lock is dropped first.
+ */
+static void
+szone_force_unlock(szone_t *szone)
+{
+    mag_index_t i;
+
+    SZONE_UNLOCK(szone);
+
+    for (i = -1; i < szone->num_small_magazines; ++i) {
+        SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->small_magazines[i])));
+    }
+
+    for (i = -1; i < szone->num_tiny_magazines; ++i) {
+        SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->tiny_magazines[i])));
+    }
+}
+
+/*
+ * Probe whether any of the zone's locks is currently held: try-lock each
+ * one in turn (szone lock, then small magazines including the depot at
+ * -1, then tiny magazines), immediately releasing any lock acquired.
+ * Returns 1 as soon as a try-lock fails, 0 if all locks were free.
+ */
+static boolean_t
+szone_locked(szone_t *szone)
+{
+    mag_index_t i;
+    int tookLock;
+
+    tookLock = SZONE_TRY_LOCK(szone);
+    if (tookLock == 0)
+        return 1;
+    SZONE_UNLOCK(szone);
+
+    for (i = -1; i < szone->num_small_magazines; ++i) {
+        tookLock = SZONE_MAGAZINE_PTR_TRY_LOCK(szone, (&(szone->small_magazines[i])));
+        if (tookLock == 0)
+            return 1;
+        SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->small_magazines[i])));
+    }
+
+    for (i = -1; i < szone->num_tiny_magazines; ++i) {
+        tookLock = SZONE_MAGAZINE_PTR_TRY_LOCK(szone, (&(szone->tiny_magazines[i])));
+        if (tookLock == 0)
+            return 1;
+        SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->tiny_magazines[i])));
+    }
+    return 0;
+}
+
+/*
+ * Per-subzone statistics: subzone 0 = tiny, 1 = small, 2 = large,
+ * 3 = huge (deprecated, reports zeros).  Returns 1 when `subzone` was
+ * recognized and `stats` filled, 0 otherwise.  Unlocked by design, like
+ * the other introspection entry points.
+ */
+boolean_t
+scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone)
+{
+    szone_t *szone = (szone_t *)zone;
+
+    switch (subzone) {
+        case 0:
+        {
+            size_t s = 0;
+            unsigned t = 0;
+            size_t u = 0;
+            mag_index_t mag_index;
+
+            // Index -1 is the depot magazine; include it with the per-CPU ones.
+            for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) {
+                s += szone->tiny_magazines[mag_index].mag_bytes_free_at_end;
+                t += szone->tiny_magazines[mag_index].mag_num_objects;
+                u += szone->tiny_magazines[mag_index].mag_num_bytes_in_objects;
+            }
+
+            stats->blocks_in_use = t;
+            stats->size_in_use = u;
+            stats->size_allocated = (szone->num_tiny_regions - szone->num_tiny_regions_dealloc) * TINY_REGION_SIZE;
+            // Untouched free-at-end bytes don't count against max_size_in_use.
+            stats->max_size_in_use = stats->size_allocated - s;
+            return 1;
+        }
+        case 1:
+        {
+            size_t s = 0;
+            unsigned t = 0;
+            size_t u = 0;
+            mag_index_t mag_index;
+
+            for (mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) {
+                s += szone->small_magazines[mag_index].mag_bytes_free_at_end;
+                t += szone->small_magazines[mag_index].mag_num_objects;
+                u += szone->small_magazines[mag_index].mag_num_bytes_in_objects;
+            }
+
+            stats->blocks_in_use = t;
+            stats->size_in_use = u;
+            stats->size_allocated = (szone->num_small_regions - szone->num_small_regions_dealloc) * SMALL_REGION_SIZE;
+            stats->max_size_in_use = stats->size_allocated - s;
+            return 1;
+        }
+        case 2:
+            // Large blocks are allocated exactly-sized, so all three byte
+            // counters coincide.
+            stats->blocks_in_use = szone->num_large_objects_in_use;
+            stats->size_in_use = szone->num_bytes_in_large_objects;
+            stats->max_size_in_use = stats->size_allocated = stats->size_in_use;
+            return 1;
+        case 3:
+            stats->blocks_in_use = 0; // DEPRECATED szone->num_huge_entries;
+            stats->size_in_use = 0; // DEPRECATED szone->num_bytes_in_huge_objects;
+            stats->max_size_in_use = stats->size_allocated = 0;
+            return 1;
+    }
+    return 0;
+}
+
+/*
+ * Aggregate statistics across all subzones (tiny + small + large; huge is
+ * deprecated and contributes zero).  Unlocked, like the rest of the
+ * introspection interface.
+ */
+static void
+szone_statistics(szone_t *szone, malloc_statistics_t *stats)
+{
+    size_t large;
+
+    size_t s = 0;
+    unsigned t = 0;
+    size_t u = 0;
+    mag_index_t mag_index;
+
+    // Index -1 is the depot magazine; both loops accumulate into the same
+    // s/t/u totals.
+    for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) {
+        s += szone->tiny_magazines[mag_index].mag_bytes_free_at_end;
+        t += szone->tiny_magazines[mag_index].mag_num_objects;
+        u += szone->tiny_magazines[mag_index].mag_num_bytes_in_objects;
+    }
+
+    for (mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) {
+        s += szone->small_magazines[mag_index].mag_bytes_free_at_end;
+        t += szone->small_magazines[mag_index].mag_num_objects;
+        u += szone->small_magazines[mag_index].mag_num_bytes_in_objects;
+    }
+
+    large = szone->num_bytes_in_large_objects + 0; // DEPRECATED szone->num_bytes_in_huge_objects;
+
+    stats->blocks_in_use = t + szone->num_large_objects_in_use + 0; // DEPRECATED szone->num_huge_entries;
+    stats->size_in_use = u + large;
+    stats->max_size_in_use = stats->size_allocated =
+        (szone->num_tiny_regions - szone->num_tiny_regions_dealloc) * TINY_REGION_SIZE +
+        (szone->num_small_regions - szone->num_small_regions_dealloc) * SMALL_REGION_SIZE + large;
+    // Now we account for the untouched areas
+    stats->max_size_in_use -= s;
+}
+
+/*
+ * Leopard-compatible malloc: Leopard and earlier handed back ZFOD
+ * (zero-filled) pages for large allocations, so route those through
+ * calloc — clearing always, ham-handedly touching every page.
+ */
+static void *
+legacy_zeroing_large_malloc(szone_t *szone, size_t size) {
+    if (size <= LARGE_THRESHOLD)
+        return szone_malloc(szone, size);
+    return szone_calloc(szone, 1, size);
+}
+
+/*
+ * Leopard-compatible valloc: Leopard and earlier returned a ZFOD range,
+ * so clear the whole allocation (ham-handedly touching every page).
+ */
+static void *
+legacy_zeroing_large_valloc(szone_t *szone, size_t size) {
+    void *p = szone_valloc(szone, size);
+
+    // BUG FIX: szone_valloc can fail; don't memset through NULL.
+    if (p)
+        memset(p, 0, size); // Clear to zero always, ham-handedly touching in each page
+    return p;
+}
+
+/*
+ * Retrofit a scalable zone with the Leopard-compatible zero-filling
+ * malloc/valloc entry points (used for Rosetta/PPC compatibility).
+ */
+void zeroify_scalable_zone(malloc_zone_t *zone)
+{
+    szone_t *szone = (szone_t *)zone;
+
+    if (NULL == szone)
+        return;
+
+    szone->basic_zone.malloc = (void *)legacy_zeroing_large_malloc;
+    szone->basic_zone.valloc = (void *)legacy_zeroing_large_valloc;
+}
+
+// Introspection vtable for scalable zones.  The casts erase the szone_t*
+// first parameter to match the malloc_zone_t*-based interface.
+static const struct malloc_introspection_t szone_introspect = {
+    (void *)szone_ptr_in_use_enumerator,
+    (void *)szone_good_size,
+    (void *)szone_check,
+    (void *)szone_print,
+    szone_log,
+    (void *)szone_force_lock,
+    (void *)szone_force_unlock,
+    (void *)szone_statistics,
+    (void *)szone_locked,
+}; // marked as const to spare the DATA section
+
+/*
+ * Create a new scalable (szone) malloc zone.
+ *
+ * The szone structure lives in its own guarded vm allocation so stray
+ * writes cannot reach the function pointers in basic_zone.  initial_size
+ * is unused; debug_flags seeds szone->debug_flags.  Returns NULL if any
+ * backing vm allocation fails (previously the szone and tiny-magazine
+ * pages leaked on those paths — fixed below).
+ */
+malloc_zone_t *
+create_scalable_zone(size_t initial_size, unsigned debug_flags)
+{
+    szone_t *szone;
+    uint64_t hw_memsize = 0;
+    size_t uint64_t_size = sizeof(hw_memsize);
+    int err;
+
+    /*
+     * Sanity-check our build-time assumptions about the size of a page.
+     * Since we have sized various things assuming the default page size,
+     * attempting to determine it dynamically is not useful.
+     */
+    if ((vm_page_size != _vm_page_size) || (vm_page_shift != _vm_page_shift)) {
+        malloc_printf("*** FATAL ERROR - machine page size does not match our assumptions.\n");
+        exit(-1);
+    }
+
+#if defined(__i386__) || defined(__x86_64__)
+    if (_COMM_PAGE_VERSION_REQD > (*((short *) _COMM_PAGE_VERSION))) { // _COMM_PAGE_CPU_NUMBER must be present at runtime
+        malloc_printf("*** ERROR - comm page version mismatch.\n");
+        exit(-1);
+    }
+#endif
+
+    /* get memory for the zone, which is now separate from any region.
+       add guard pages to prevent walking from any other vm allocations
+       to here and overwriting the function pointers in basic_zone. */
+    szone = allocate_pages(NULL, SZONE_PAGED_SIZE, 0, SCALABLE_MALLOC_ADD_GUARD_PAGES, VM_MEMORY_MALLOC);
+    if (!szone)
+        return NULL;
+
+    /* set up the szone structure */
+#if 0
+#warning CHECK_REGIONS enabled
+    debug_flags |= CHECK_REGIONS;
+#endif
+#if 0
+#warning LOG enabled
+    szone->log_address = ~0;
+#endif
+    // The two region-hash generations alternate as the hash grows.
+    szone->trg[0].nextgen = &(szone->trg[1]);
+    szone->trg[1].nextgen = &(szone->trg[0]);
+    szone->tiny_region_generation = &(szone->trg[0]);
+
+    szone->tiny_region_generation->hashed_regions = szone->initial_tiny_regions;
+    szone->tiny_region_generation->num_regions_allocated = INITIAL_NUM_REGIONS;
+    szone->tiny_region_generation->num_regions_allocated_shift = INITIAL_NUM_REGIONS_SHIFT;
+
+    szone->srg[0].nextgen = &(szone->srg[1]);
+    szone->srg[1].nextgen = &(szone->srg[0]);
+    szone->small_region_generation = &(szone->srg[0]);
+
+    szone->small_region_generation->hashed_regions = szone->initial_small_regions;
+    szone->small_region_generation->num_regions_allocated = INITIAL_NUM_REGIONS;
+    szone->small_region_generation->num_regions_allocated_shift = INITIAL_NUM_REGIONS_SHIFT;
+
+    /*
+     * Initialize variables that size the free list for SMALL allocations based
+     * upon the amount of memory in the system.  Switch to a larger number of
+     * free list entries at 1GB.
+     */
+    if (0 == sysctlbyname("hw.memsize", &hw_memsize, &uint64_t_size, 0, 0) &&
+        hw_memsize >= (1ULL << 30)) {
+        szone->is_largemem = 1;
+        szone->num_small_slots = NUM_SMALL_SLOTS_LARGEMEM;
+        szone->large_threshold = LARGE_THRESHOLD_LARGEMEM;
+        szone->vm_copy_threshold = VM_COPY_THRESHOLD_LARGEMEM;
+    } else {
+        szone->is_largemem = 0;
+        szone->num_small_slots = NUM_SMALL_SLOTS;
+        szone->large_threshold = LARGE_THRESHOLD;
+        szone->vm_copy_threshold = VM_COPY_THRESHOLD;
+    }
+#if LARGE_CACHE
+    szone->large_entry_cache_hoard_lmit = hw_memsize >> 10; // madvise(..., MADV_REUSABLE) death-row arrivals above this threshold [~0.1%]
+
+    /* <rdar://problem/6610904> Reset protection when returning a previous large allocation? */
+    int32_t libSystemVersion = NSVersionOfLinkTimeLibrary("System");
+    if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 112) /* CFSystemVersionSnowLeopard */)
+        szone->large_legacy_reset_mprotect = TRUE;
+    else
+        szone->large_legacy_reset_mprotect = FALSE;
+#endif
+
+    // Initialize the security token.
+#if __LP64__
+    szone->cookie = ((uintptr_t)arc4random() << 32) | (uintptr_t)arc4random();
+#else
+    szone->cookie = arc4random();
+#endif
+
+    szone->basic_zone.version = 6;
+    szone->basic_zone.size = (void *)szone_size;
+    szone->basic_zone.malloc = (void *)szone_malloc;
+    szone->basic_zone.calloc = (void *)szone_calloc;
+    szone->basic_zone.valloc = (void *)szone_valloc;
+    szone->basic_zone.free = (void *)szone_free;
+    szone->basic_zone.realloc = (void *)szone_realloc;
+    szone->basic_zone.destroy = (void *)szone_destroy;
+    szone->basic_zone.batch_malloc = (void *)szone_batch_malloc;
+    szone->basic_zone.batch_free = (void *)szone_batch_free;
+    szone->basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;
+    szone->basic_zone.memalign = (void *)szone_memalign;
+    szone->basic_zone.free_definite_size = (void *)szone_free_definite_size;
+    szone->debug_flags = debug_flags;
+    LOCK_INIT(szone->large_szone_lock);
+
+#if defined(__ppc__) || defined(__ppc64__)
+    /*
+     * In the interest of compatibility for PPC applications executing via Rosetta,
+     * arrange to zero-fill allocations as occurred by side effect in Leopard and earlier.
+     */
+    zeroify_scalable_zone((malloc_zone_t *)szone);
+#endif
+
+    if ((err = pthread_key_create(&(szone->cpu_id_key), NULL))) {
+        malloc_printf("*** ERROR -pthread_key_create failure err=%d.\n", err);
+        szone->cpu_id_key = (pthread_key_t) -1;
+    }
+
+    // Query the number of configured processors.
+    // Uniprocessor case gets just one tiny and one small magazine (whose index is zero).  This gives
+    // the same behavior as the original scalable malloc.  MP gets per-CPU magazines
+    // that scale (way) better.
+    int nproc = sysconf(_SC_NPROCESSORS_CONF);
+    szone->num_tiny_magazines = (nproc > 1) ? MIN(nproc, TINY_MAX_MAGAZINES) : 1;
+
+    // FIXME vm_allocate() based on number of configured CPUs
+    magazine_t *tiny_magazines = allocate_pages(NULL, TINY_MAGAZINE_PAGED_SIZE, 0,
+        SCALABLE_MALLOC_ADD_GUARD_PAGES, VM_MEMORY_MALLOC);
+    if (NULL == tiny_magazines) {
+        // BUG FIX: don't leak the szone allocation on failure.
+        deallocate_pages(szone, (void *)szone, SZONE_PAGED_SIZE, SCALABLE_MALLOC_ADD_GUARD_PAGES);
+        return NULL;
+    }
+
+    szone->tiny_magazines = &(tiny_magazines[1]); // szone->tiny_magazines[-1] is the Depot
+
+    // The magazines are indexed in [0 .. (num_tiny_magazines - 1)]
+    // Find the smallest power of 2 that exceeds (num_tiny_magazines - 1)
+    szone->num_tiny_magazines_mask_shift = 0;
+    int i = 1;
+    while( i <= (szone->num_tiny_magazines - 1) ) {
+        szone->num_tiny_magazines_mask_shift++;
+        i <<= 1;
+    }
+
+    // Now if i <= TINY_MAX_MAGAZINES we'll never access tiny_magazines[] out of bounds.
+    if (i > TINY_MAX_MAGAZINES) {
+        malloc_printf("*** FATAL ERROR - magazine mask exceeds allocated magazines.\n");
+        exit(-1);
+    }
+
+    // Reduce i by 1 to obtain a mask covering [0 .. (num_tiny_magazines - 1)]
+    szone->num_tiny_magazines_mask = i - 1; // A mask used for hashing to a magazine index (and a safety aid)
+
+    // Init the tiny_magazine locks
+    LOCK_INIT(szone->tiny_regions_lock);
+    LOCK_INIT(szone->tiny_magazines[DEPOT_MAGAZINE_INDEX].magazine_lock);
+    for (i = 0; i < szone->num_tiny_magazines; ++i) {
+        LOCK_INIT(szone->tiny_magazines[i].magazine_lock);
+    }
+
+    szone->num_small_magazines = (nproc > 1) ? MIN(nproc, SMALL_MAX_MAGAZINES) : 1;
+
+    // FIXME vm_allocate() based on number of configured CPUs
+    magazine_t *small_magazines = allocate_pages(NULL, SMALL_MAGAZINE_PAGED_SIZE, 0,
+        SCALABLE_MALLOC_ADD_GUARD_PAGES, VM_MEMORY_MALLOC);
+    if (NULL == small_magazines) {
+        // BUG FIX: unwind the earlier allocations on failure.
+        deallocate_pages(szone, (void *)&(szone->tiny_magazines[-1]), TINY_MAGAZINE_PAGED_SIZE, SCALABLE_MALLOC_ADD_GUARD_PAGES);
+        deallocate_pages(szone, (void *)szone, SZONE_PAGED_SIZE, SCALABLE_MALLOC_ADD_GUARD_PAGES);
+        return NULL;
+    }
+
+    szone->small_magazines = &(small_magazines[1]); // szone->small_magazines[-1] is the Depot
+
+    // The magazines are indexed in [0 .. (num_small_magazines - 1)]
+    // Find the smallest power of 2 that exceeds (num_small_magazines - 1)
+    szone->num_small_magazines_mask_shift = 0;
+    // BUG FIX: restart the power-of-2 search.  Previously i carried its
+    // final value from the tiny computation (a tiny-loop-counter value
+    // after the LOCK_INIT loop), so the small shift/mask could come out
+    // wrong and the fatal-exit guard below could trip spuriously.
+    i = 1;
+    while( i <= (szone->num_small_magazines - 1) ) {
+        szone->num_small_magazines_mask_shift++;
+        i <<= 1;
+    }
+
+    // Now if i <= SMALL_MAX_MAGAZINES we'll never access small_magazines[] out of bounds.
+    if (i > SMALL_MAX_MAGAZINES) {
+        malloc_printf("*** FATAL ERROR - magazine mask exceeds allocated magazines.\n");
+        exit(-1);
+    }
+
+    // Reduce i by 1 to obtain a mask covering [0 .. (num_small_magazines - 1)]
+    szone->num_small_magazines_mask = i - 1; // A mask used for hashing to a magazine index (and a safety aid)
+
+    // Init the small_magazine locks
+    LOCK_INIT(szone->small_regions_lock);
+    LOCK_INIT(szone->small_magazines[DEPOT_MAGAZINE_INDEX].magazine_lock);
+    for (i = 0; i < szone->num_small_magazines; ++i) {
+        LOCK_INIT(szone->small_magazines[i].magazine_lock);
+    }
+
+    CHECK(szone, __PRETTY_FUNCTION__);
+    return (malloc_zone_t *)szone;
+}
+
+//
+// purgeable zones have their own "large" allocation pool, but share "tiny" and "small"
+// heaps with a helper_zone identified in the call to create_purgeable_zone()
+//
+/*
+ * Size lookup for a purgeable zone: large blocks live here, everything
+ * smaller was handed out by the helper zone.
+ */
+static size_t
+purgeable_size(szone_t *szone, const void *ptr)
+{
+    size_t sz = szone_size_try_large(szone, ptr);
+
+    return sz ? sz : szone_size(szone->helper_zone, ptr);
+}
+
+/*
+ * Only large allocations are served from the purgeable zone itself; the
+ * rest are delegated to the helper zone's tiny/small heaps.
+ */
+static void *
+purgeable_malloc(szone_t *szone, size_t size) {
+    return (size <= szone->large_threshold)
+        ? szone_malloc(szone->helper_zone, size)
+        : szone_malloc(szone, size);
+}
+
+/*
+ * calloc for purgeable zones: guards against num_items*size overflow,
+ * then routes to the helper zone (small requests) or this zone (large).
+ */
+static void *
+purgeable_calloc(szone_t *szone, size_t num_items, size_t size)
+{
+    size_t total_bytes = num_items * size;
+
+    // Check for overflow of integer multiplication
+    if (num_items > 1) {
+#if __LP64__ /* size_t is uint64_t */
+        if ((num_items | size) & 0xffffffff00000000ul) {
+            // num_items or size equals or exceeds sqrt(2^64) == 2^32, appeal to wider arithmetic
+            __uint128_t product = ((__uint128_t)num_items) * ((__uint128_t)size);
+            if ((uint64_t)(product >> 64)) // compiles to test on upper register of register pair
+                return NULL;
+        }
+#else /* size_t is uint32_t */
+        if ((num_items | size) & 0xffff0000ul) {
+            // num_items or size equals or exceeds sqrt(2^32) == 2^16, appeal to wider arithmetic
+            uint64_t product = ((uint64_t)num_items) * ((uint64_t)size);
+            if ((uint32_t)(product >> 32)) // compiles to test on upper register of register pair
+                return NULL;
+        }
+#endif
+    }
+
+    // Route by the (now known to be non-overflowing) total size.
+    if (total_bytes <= szone->large_threshold)
+        return szone_calloc(szone->helper_zone, 1, total_bytes);
+    else
+        return szone_calloc(szone, 1, total_bytes);
+}
+
+/*
+ * valloc for purgeable zones: small requests go to the helper zone, large
+ * ones stay in this zone's large pool.
+ */
+static void *
+purgeable_valloc(szone_t *szone, size_t size)
+{
+    return (size <= szone->large_threshold)
+        ? szone_valloc(szone->helper_zone, size)
+        : szone_valloc(szone, size);
+}
+
+/*
+ * free for purgeable zones.  Ownership is determined by looking ptr up in
+ * this zone's large-entry table (under the zone lock): a hit means the
+ * block is ours, otherwise it came from the helper zone.
+ */
+static void
+purgeable_free(szone_t *szone, void *ptr)
+{
+    large_entry_t *entry;
+
+    SZONE_LOCK(szone);
+    entry = large_entry_for_pointer_no_lock(szone, ptr);
+    SZONE_UNLOCK(szone);
+    if (entry) {
+        return free_large(szone, ptr);
+    } else {
+        return szone_free(szone->helper_zone, ptr);
+    }
+}
+
+/*
+ * The caller-supplied size tells us which zone owns ptr: above the large
+ * threshold it was allocated here, otherwise by the helper zone.
+ */
+static void
+purgeable_free_definite_size(szone_t *szone, void *ptr, size_t size)
+{
+    if (size > szone->large_threshold)
+        szone_free_definite_size(szone, ptr, size);
+    else
+        szone_free_definite_size(szone->helper_zone, ptr, size);
+}
+
+/*
+ * realloc for purgeable zones, routed by the *new* size like
+ * purgeable_malloc.  NOTE(review): ptr's current owner is not checked
+ * (cf. purgeable_free), so this assumes szone_realloc copes with a
+ * pointer allocated by the other zone when new_size crosses
+ * large_threshold — verify.
+ */
+static void *
+purgeable_realloc(szone_t *szone, void *ptr, size_t new_size)
+{
+    if (new_size <= szone->large_threshold)
+        return szone_realloc(szone->helper_zone, ptr, new_size);
+    else
+        return szone_realloc(szone, ptr, new_size);
+}
+
+/*
+ * Tear down a purgeable zone: release every live large allocation, free
+ * the large-entry table, then the szone structure itself.  The helper
+ * zone is not touched — it is owned elsewhere.
+ */
+static void
+purgeable_destroy(szone_t *szone)
+{
+    /* destroy large entries */
+    size_t index = szone->num_large_entries;
+    large_entry_t *large;
+    vm_range_t range_to_deallocate;
+
+    while (index--) {
+        large = szone->large_entries + index;
+        if (large->address) {
+            // we deallocate_pages, including guard pages
+            deallocate_pages(szone, (void *)(large->address), large->size, szone->debug_flags);
+        }
+    }
+    large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate);
+    if (range_to_deallocate.size)
+        deallocate_pages(szone, (void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0);
+
+    /* Now destroy the separate szone region */
+    deallocate_pages(szone, (void *)szone, SZONE_PAGED_SIZE, SCALABLE_MALLOC_ADD_GUARD_PAGES);
+}
+
+// Batch allocations never involve large blocks, so delegate wholesale to
+// the helper zone.  Returns the number of blocks actually allocated.
+static unsigned
+purgeable_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count)
+{
+    return szone_batch_malloc(szone->helper_zone, size, results, count);
+}
+
+// Counterpart to purgeable_batch_malloc: the blocks came from the helper
+// zone, so return them there.
+static void
+purgeable_batch_free(szone_t *szone, void **to_be_freed, unsigned count)
+{
+    return szone_batch_free(szone->helper_zone, to_be_freed, count);
+}
+
+/*
+ * memalign for purgeable zones: small requests go to the helper zone,
+ * large ones are satisfied from this zone's large pool.
+ */
+static void *
+purgeable_memalign(szone_t *szone, size_t alignment, size_t size)
+{
+    return (size <= szone->large_threshold)
+        ? szone_memalign(szone->helper_zone, alignment, size)
+        : szone_memalign(szone, alignment, size);
+}
+
+/*
+ * Enumerator for purgeable zones.  Only the large entries are walked —
+ * tiny/small blocks belong to the helper zone and get reported there.
+ */
+static kern_return_t
+purgeable_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address,
+                                memory_reader_t reader, vm_range_recorder_t recorder)
+{
+    szone_t *szone;
+    kern_return_t rc;
+
+    // Use the in-process reader unless the caller provided one.
+    if (!reader)
+        reader = _szone_default_reader;
+
+    rc = reader(task, zone_address, sizeof(szone_t), (void **)&szone);
+    if (rc)
+        return rc;
+
+    return large_in_use_enumerator(task, context, type_mask,
+        (vm_address_t)szone->large_entries, szone->num_large_entries, reader, recorder);
+}
+
+/*
+ * good_size for purgeable zones: defer to whichever zone would serve the
+ * request (helper for small, this zone for large).
+ */
+static size_t
+purgeable_good_size(szone_t *szone, size_t size)
+{
+    return (size <= szone->large_threshold)
+        ? szone_good_size(szone->helper_zone, size)
+        : szone_good_size(szone, size);
+}
+
+// No consistency checking is implemented for purgeable zones; always
+// report success.
+static boolean_t
+purgeable_check(szone_t *szone)
+{
+    return 1;
+}
+
+/*
+ * Print a one-line summary of a purgeable zone.  Only the large pool
+ * exists here, so `verbose` has nothing extra to show.
+ */
+static void
+purgeable_print(szone_t *szone, boolean_t verbose)
+{
+    _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
+        "Scalable zone %p: inUse=%d(%y) flags=%d\n",
+        szone, szone->num_large_objects_in_use, szone->num_bytes_in_large_objects, szone->debug_flags);
+}
+
+/*
+ * Introspection hook: record the address whose malloc/free activity this
+ * zone should log.
+ */
+static void
+purgeable_log(malloc_zone_t *zone, void *log_address)
+{
+    ((szone_t *)zone)->log_address = log_address;
+}
+
+// Purgeable zones have only the single szone lock to take (the helper
+// zone is force-locked separately by its own introspection hook).
+static void
+purgeable_force_lock(szone_t *szone)
+{
+    SZONE_LOCK(szone);
+}
+
+// Counterpart to purgeable_force_lock: release the single szone lock.
+static void
+purgeable_force_unlock(szone_t *szone)
+{
+    SZONE_UNLOCK(szone);
+}
+
+/*
+ * Statistics for purgeable zones: only large blocks are tracked here, so
+ * every byte counter reports the same total.
+ */
+static void
+purgeable_statistics(szone_t *szone, malloc_statistics_t *stats)
+{
+    stats->blocks_in_use = szone->num_large_objects_in_use;
+    stats->size_in_use = szone->num_bytes_in_large_objects;
+    stats->max_size_in_use = stats->size_in_use;
+    stats->size_allocated = stats->size_in_use;
+}
+
+/*
+ * Report whether the zone lock is currently held: probe it with a
+ * try-lock and release immediately on success.
+ */
+static boolean_t
+purgeable_locked(szone_t *szone)
+{
+    if (0 == SZONE_TRY_LOCK(szone))
+        return 1;   // couldn't take it: someone holds the lock
+    SZONE_UNLOCK(szone);
+    return 0;
+}
+
+// Introspection vtable for purgeable zones.  The casts erase the szone_t*
+// first parameter to match the malloc_zone_t*-based interface.
+static const struct malloc_introspection_t purgeable_introspect = {
+    (void *)purgeable_ptr_in_use_enumerator,
+    (void *)purgeable_good_size,
+    (void *)purgeable_check,
+    (void *)purgeable_print,
+    purgeable_log,
+    (void *)purgeable_force_lock,
+    (void *)purgeable_force_unlock,
+    (void *)purgeable_statistics,
+    (void *)purgeable_locked,
+}; // marked as const to spare the DATA section
+
+/*
+ * Create a purgeable zone.  It owns only a "large" allocation pool;
+ * smaller requests are forwarded to `malloc_default_zone`, retained as
+ * szone->helper_zone.  initial_size is unused; debug_flags seeds the
+ * zone's flags (with SCALABLE_MALLOC_PURGEABLE forced on and guard pages
+ * forced off).  Returns NULL if the backing vm allocation fails.
+ */
+malloc_zone_t *
+create_purgeable_zone(size_t initial_size, malloc_zone_t *malloc_default_zone, unsigned debug_flags)
+{
+    szone_t *szone;
+
+    /* get memory for the zone, which is now separate from any region.
+       add guard pages to prevent walking from any other vm allocations
+       to here and overwriting the function pointers in basic_zone. */
+    szone = allocate_pages(NULL, SZONE_PAGED_SIZE, 0, SCALABLE_MALLOC_ADD_GUARD_PAGES, VM_MEMORY_MALLOC);
+    if (!szone)
+        return NULL;
+
+    /* set up the szone structure */
+#if 0
+#warning LOG enabled
+    szone->log_address = ~0;
+#endif
+
+    /* Purgeable zone does not participate in the adaptive "largemem" sizing. */
+    szone->is_largemem = 0;
+    szone->large_threshold = LARGE_THRESHOLD;
+    szone->vm_copy_threshold = VM_COPY_THRESHOLD;
+
+#if LARGE_CACHE
+    /* <rdar://problem/6610904> Reset protection when returning a previous large allocation? */
+    int32_t libSystemVersion = NSVersionOfLinkTimeLibrary("System");
+    if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 112) /* CFSystemVersionSnowLeopard */)
+        szone->large_legacy_reset_mprotect = TRUE;
+    else
+        szone->large_legacy_reset_mprotect = FALSE;
+#endif
+
+    szone->basic_zone.version = 6;
+    szone->basic_zone.size = (void *)purgeable_size;
+    szone->basic_zone.malloc = (void *)purgeable_malloc;
+    szone->basic_zone.calloc = (void *)purgeable_calloc;
+    szone->basic_zone.valloc = (void *)purgeable_valloc;
+    szone->basic_zone.free = (void *)purgeable_free;
+    szone->basic_zone.realloc = (void *)purgeable_realloc;
+    szone->basic_zone.destroy = (void *)purgeable_destroy;
+    szone->basic_zone.batch_malloc = (void *)purgeable_batch_malloc;
+    szone->basic_zone.batch_free = (void *)purgeable_batch_free;
+    szone->basic_zone.introspect = (struct malloc_introspection_t *)&purgeable_introspect;
+    szone->basic_zone.memalign = (void *)purgeable_memalign;
+    szone->basic_zone.free_definite_size = (void *)purgeable_free_definite_size;
+
+    szone->debug_flags = debug_flags | SCALABLE_MALLOC_PURGEABLE;
+
+    /* Purgeable zone does not support SCALABLE_MALLOC_ADD_GUARD_PAGES. */
+    if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
+        _malloc_printf(ASL_LEVEL_INFO, "purgeable zone does not support guard pages\n");
+        szone->debug_flags &= ~SCALABLE_MALLOC_ADD_GUARD_PAGES;
+    }
+
+    LOCK_INIT(szone->large_szone_lock);
+
+    szone->helper_zone = (struct szone_s *)malloc_default_zone;
+
+    CHECK(szone, __PRETTY_FUNCTION__);
+    return (malloc_zone_t *)szone;
+}
+
+/*
+ * For use by CheckFix: create a new zone whose behavior is, apart from
+ * the use of death-row and per-CPU magazines, that of Leopard.
+ */
+/*
+ * Leopard-compatible valloc for the CheckFix zone: allocate whole pages
+ * straight from the large allocator.  The final TRUE argument presumably
+ * requests cleared (ZFOD-equivalent) pages, matching Leopard — verify
+ * against large_malloc's parameter list.
+ */
+static NOINLINE void *
+legacy_valloc(szone_t *szone, size_t size)
+{
+    size_t pages = round_page(size) >> vm_page_shift;
+    void *ptr = large_malloc(szone, pages, 0, TRUE);
+
+#if DEBUG_MALLOC
+    if (LOG(szone, ptr))
+        malloc_printf("legacy_valloc returned %p\n", ptr);
+#endif
+    return ptr;
+}
+
+/*
+ * CheckFix compatibility zone: a standard scalable zone reconfigured to
+ * Leopard-era behavior — "largemem" tuning disabled, valloc returning
+ * ZFOD-style pages, and no free_definite_size fast path.
+ */
+malloc_zone_t *
+create_legacy_scalable_zone(size_t initial_size, unsigned debug_flags)
+{
+    malloc_zone_t *mzone = create_scalable_zone(initial_size, debug_flags);
+    szone_t *szone = (szone_t *)mzone;
+
+    if (!szone)
+        return NULL;
+
+    // Force the pre-largemem thresholds regardless of hw.memsize.
+    szone->is_largemem = 0;
+    szone->num_small_slots = NUM_SMALL_SLOTS;
+    szone->large_threshold = LARGE_THRESHOLD;
+    szone->vm_copy_threshold = VM_COPY_THRESHOLD;
+
+    szone->basic_zone.valloc = (void *)legacy_valloc;
+    szone->basic_zone.free_definite_size = NULL;
+
+    return mzone;
+}
+
+/********* Support code for emacs unexec ************/
+
+/* History of freezedry version numbers:
+ *
+ * 1) Old malloc (before the scalable malloc implementation in this file
+ * existed).
+ * 2) Original freezedrying code for scalable malloc. This code was apparently
+ * based on the old freezedrying code and was fundamentally flawed in its
+ * assumption that tracking allocated memory regions was adequate to fake
+ * operations on freezedried memory. This doesn't work, since scalable
+ * malloc does not store flags in front of large page-aligned allocations.
+ * 3) Original szone-based freezedrying code.
+ * 4) Fresher malloc with tiny zone
+ * 5) 32/64bit compatible malloc
+ * 6) Metadata within 1MB and 8MB region for tiny and small
+ *
+ * No version backward compatibility is provided, but the version number does
+ * make it possible for malloc_jumpstart() to return an error if the application
+ * was freezedried with an older version of malloc.
+ */
+#define MALLOC_FREEZEDRY_VERSION 6
+
+// Snapshot produced by malloc_freezedry(): a copy of every registered
+// szone at freeze time, plus the version for compatibility checking.
+typedef struct {
+    unsigned version;   // MALLOC_FREEZEDRY_VERSION at freeze time
+    unsigned nszones;   // number of entries in szones[]
+    szone_t *szones;    // copied szone structures (not referenced in place)
+} malloc_frozen;
+
+// Freezedried zones never allocate from their reconstituted regions;
+// plain malloc serves new requests (the zone argument is ignored).
+static void *
+frozen_malloc(szone_t *zone, size_t new_size)
+{
+    return malloc(new_size);
+}
+
+// As frozen_malloc: delegate to plain calloc, ignoring the zone.
+static void *
+frozen_calloc(szone_t *zone, size_t num_items, size_t size)
+{
+    return calloc(num_items, size);
+}
+
+// As frozen_malloc: delegate to plain valloc, ignoring the zone.
+static void *
+frozen_valloc(szone_t *zone, size_t new_size)
+{
+    return valloc(new_size);
+}
+
+/*
+ * realloc for freezedried zones.  Never grow in place: shrinking (or
+ * equal-size) requests keep the existing block; anything larger is
+ * copied into a fresh, normally-malloc'd block.
+ */
+static void *
+frozen_realloc(szone_t *zone, void *ptr, size_t new_size)
+{
+    size_t old_size = szone_size(zone, ptr);
+    void *new_ptr;
+
+    if (new_size <= old_size) {
+        return ptr;
+    }
+    new_ptr = malloc(new_size);
+    // BUG FIX: malloc can fail — report that instead of crashing in memcpy.
+    if (!new_ptr) {
+        return NULL;
+    }
+    if (old_size > 0) {
+        memcpy(new_ptr, ptr, old_size);
+    }
+    return new_ptr;
+}
+
+static void
+frozen_free(szone_t *zone, void *ptr)
+{
+}
+
+static void
+frozen_destroy(szone_t *zone)
+{
+}
+
+/********* Pseudo-private API for emacs unexec ************/
+
+/*
+ * malloc_freezedry() records all of the szones in use, so that they can be
+ * partially reconstituted by malloc_jumpstart(). Due to the differences
+ * between reconstituted memory regions and those created by the szone code,
+ * care is taken not to reallocate from the freezedried memory, except in the
+ * case of a non-growing realloc().
+ *
+ * Due to the flexibility provided by the zone registration mechanism, it is
+ * impossible to implement generic freezedrying for any zone type. This code
+ * only handles applications that use the szone allocator, so malloc_freezedry()
+ * returns 0 (error) if any non-szone zones are encountered.
+ */
+
+uintptr_t
+malloc_freezedry(void)
+{
+ extern unsigned malloc_num_zones;
+ extern malloc_zone_t **malloc_zones;
+ malloc_frozen *data;
+ unsigned i;
+
+ /* Allocate space in which to store the freezedry state. */
+ data = (malloc_frozen *) malloc(sizeof(malloc_frozen));
+
+ /* Set freezedry version number so that malloc_jumpstart() can check for compatibility. */
+ data->version = MALLOC_FREEZEDRY_VERSION;
+
+ /* Allocate the array of szone pointers. */
+ data->nszones = malloc_num_zones;
+ data->szones = (szone_t *) calloc(malloc_num_zones, sizeof(szone_t));
+
+ /*
+ * Fill in the array of szone structures. They are copied rather than
+ * referenced, since the originals are likely to be clobbered during malloc
+ * initialization.
+ */
+ for (i = 0; i < malloc_num_zones; i++) {
+ if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) {
+ /* Unknown zone type. */
+ free(data->szones);
+ free(data);
+ return 0;
+ }
+ memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t));
+ }
+
+ return((uintptr_t)data);
+}
+
+int
+malloc_jumpstart(uintptr_t cookie)
+{
+ malloc_frozen *data = (malloc_frozen *)cookie;
+ unsigned i;
+
+ if (data->version != MALLOC_FREEZEDRY_VERSION) {
+ /* Unsupported freezedry version. */
+ return 1;
+ }
+
+ for (i = 0; i < data->nszones; i++) {
+ /* Set function pointers. Even the functions that stay the same must be
+ * set, since there are no guarantees that they will be mapped to the
+ * same addresses. */
+ data->szones[i].basic_zone.size = (void *) szone_size;
+ data->szones[i].basic_zone.malloc = (void *) frozen_malloc;
+ data->szones[i].basic_zone.calloc = (void *) frozen_calloc;
+ data->szones[i].basic_zone.valloc = (void *) frozen_valloc;
+ data->szones[i].basic_zone.free = (void *) frozen_free;
+ data->szones[i].basic_zone.realloc = (void *) frozen_realloc;
+ data->szones[i].basic_zone.destroy = (void *) frozen_destroy;
+ data->szones[i].basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;
+
+ /* Register the freezedried zone. */
+ malloc_zone_register(&data->szones[i].basic_zone);
+ }
+
+ return 0;
+}
--- /dev/null
+provider magmalloc {
+ probe refreshIndex(void *, int, int);
+ probe depotRegion(void *, int, int);
+ probe recircRegion(void *, int, int);
+ probe allocRegion(void *, int);
+ probe deallocRegion(void *, void *);
+ probe madvfreeRegion(void *, void *, void *, int);
+ probe mallocErrorBreak();
+};
+
+#pragma D attributes Evolving/Evolving/ISA provider magmalloc provider
+#pragma D attributes Private/Private/Unknown provider magmalloc module
+#pragma D attributes Private/Private/Unknown provider magmalloc function
+#pragma D attributes Evolving/Evolving/ISA provider magmalloc name
+#pragma D attributes Evolving/Evolving/ISA provider magmalloc args
+
+++ /dev/null
-./makecontext.3
\ No newline at end of file
--- /dev/null
+.\" Copyright (c) 2002 Packet Design, LLC.
+.\" All rights reserved.
+.\"
+.\" Subject to the following obligations and disclaimer of warranty,
+.\" use and redistribution of this software, in source or object code
+.\" forms, with or without modifications are expressly permitted by
+.\" Packet Design; provided, however, that:
+.\"
+.\" (i) Any and all reproductions of the source or object code
+.\" must include the copyright notice above and the following
+.\" disclaimer of warranties; and
+.\" (ii) No rights are granted, in any manner or form, to use
+.\" Packet Design trademarks, including the mark "PACKET DESIGN"
+.\" on advertising, endorsements, or otherwise except as such
+.\" appears in the above copyright notice or in the software.
+.\"
+.\" THIS SOFTWARE IS BEING PROVIDED BY PACKET DESIGN "AS IS", AND
+.\" TO THE MAXIMUM EXTENT PERMITTED BY LAW, PACKET DESIGN MAKES NO
+.\" REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING
+.\" THIS SOFTWARE, INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED
+.\" WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+.\" OR NON-INFRINGEMENT. PACKET DESIGN DOES NOT WARRANT, GUARANTEE,
+.\" OR MAKE ANY REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS
+.\" OF THE USE OF THIS SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY,
+.\" RELIABILITY OR OTHERWISE. IN NO EVENT SHALL PACKET DESIGN BE
+.\" LIABLE FOR ANY DAMAGES RESULTING FROM OR ARISING OUT OF ANY USE
+.\" OF THIS SOFTWARE, INCLUDING WITHOUT LIMITATION, ANY DIRECT,
+.\" INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE, OR CONSEQUENTIAL
+.\" DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, LOSS OF
+.\" USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY THEORY OF
+.\" LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+.\" THE USE OF THIS SOFTWARE, EVEN IF PACKET DESIGN IS ADVISED OF
+.\" THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" $FreeBSD: src/lib/libc/gen/makecontext.3,v 1.4 2002/12/19 09:40:21 ru Exp $
+.\"
+.Dd September 10, 2002
+.Dt MAKECONTEXT 3
+.Os
+.Sh NAME
+.Nm makecontext , swapcontext
+.Nd modify and exchange user thread contexts
+.Sh LIBRARY
+.Lb libc
+.Sh SYNOPSIS
+.In ucontext.h
+.Ft void
+.Fo makecontext
+.Fa "ucontext_t *ucp"
+.Fa "void \*[lp]*func\*[rp]\*[lp]\*[rp]"
+.Fa "int argc" ...
+.Fc
+.Ft int
+.Fn swapcontext "ucontext_t *oucp" "const ucontext_t *ucp"
+.Sh DESCRIPTION
+The
+.Fn makecontext
+function
+modifies the user thread context pointed to by
+.Fa ucp ,
+which must have previously been initialized by a call to
+.Xr getcontext 3
+and had a stack allocated for it.
+The context is modified so that it will continue execution by invoking
+.Fn func
+with the arguments (of type
+.Ft int )
+provided.
+The
+.Fa argc
+argument
+must be equal to the number of additional arguments provided to
+.Fn makecontext
+and also equal to the number of arguments to
+.Fn func ,
+or else the behavior is undefined.
+.Pp
+The
+.Fa "ucp->uc_link"
+argument
+must be initialized before calling
+.Fn makecontext
+and determines the action to take when
+.Fn func
+returns:
+if equal to
+.Dv NULL ,
+the process exits;
+otherwise,
+.Fn setcontext "ucp->uc_link"
+is implicitly invoked.
+.Pp
+The
+.Fn swapcontext
+function
+saves the current thread context in
+.Fa "*oucp"
+and makes
+.Fa "*ucp"
+the currently active context.
+.Sh RETURN VALUES
+If successful,
+.Fn swapcontext
+returns zero;
+otherwise \-1 is returned and the global variable
+.Va errno
+is set appropriately.
+.Sh ERRORS
+The
+.Fn swapcontext
+function
+will fail if:
+.Bl -tag -width Er
+.It Bq Er ENOMEM
+There is not enough stack space in
+.Fa ucp
+to complete the operation.
+.El
+.Sh SEE ALSO
+.Xr setcontext 3 ,
+.Xr ucontext 3
.\"
.\" @APPLE_LICENSE_HEADER_END@
.\"
-.Dd May 23, 2006
+.Dd Aug 13, 2008
.Dt MALLOC 3
.Os
.Sh NAME
The
.Fn free
function deallocates the memory allocation pointed to by
-.Fa ptr .
+.Fa ptr .
+If
+.Fa ptr
+is a NULL pointer, no operation is performed.
.Sh RETURN VALUES
If successful,
.Fn calloc ,
If set, record all stacks in a manner that is compatible with the
.Nm malloc_history
program.
-.It Ev MallocPreScribble
-If set, fill memory that has been allocated with 0xaa bytes.
-This increases the likelihood that a program making assumptions about the
-contents of freshly allocated memory will fail.
+.It Ev MallocStackLoggingDirectory
+If set, records stack logs to the directory specified instead of saving them to the default location (/tmp).
.It Ev MallocScribble
-If set, fill memory that has been deallocated with 0x55 bytes.
+If set, fill memory that has been allocated with 0xaa bytes.
+This increases the likelihood that a program making assumptions about the contents of
+freshly allocated memory will fail.
+Also if set, fill memory that has been deallocated with 0x55 bytes.
This increases the likelihood that a program will fail due to accessing memory
that is no longer allocated.
.It Ev MallocCheckHeapStart <s>
, such as a calling
.Xr free 3
on a pointer previously freed.
+.It Ev MallocCorruptionAbort
+Similar to
+.Ev MallocErrorAbort
+but will not abort in out of memory conditions, making it more useful to catch
+only those errors which will cause memory corruption.
+MallocCorruptionAbort is always set on 64-bit processes.
.It Ev MallocHelp
If set, print a list of environment variables that are paid heed to by the
allocation-related functions, along with short descriptions.
.Xr leaks 1 ,
.Xr malloc_history 1 ,
.Xr abort 3 ,
-.Xr malloc_size 3
-.Pa /Developer/ADC Reference Library/releasenotes/DeveloperTools/MallocOptions.html
+.Xr malloc_size 3 ,
+.Xr malloc_zone_malloc 3 ,
+.Xr posix_memalign 3 ,
+.Xr libgmalloc 3
/*
- * Copyright (c) 1999, 2006, 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 1999, 2006-2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
*/
#include <pthread_internals.h>
+#include "magmallocProvider.h"
#import <stdlib.h>
#import <stdio.h>
#import <string.h>
#import <unistd.h>
-#import <objc/zone.h>
#import <malloc/malloc.h>
#import <fcntl.h>
#import <crt_externs.h>
#import <pthread_internals.h>
#import <limits.h>
#import <dlfcn.h>
+#import <mach/mach_vm.h>
+#import <mach/mach_init.h>
+#import <sys/mman.h>
#import "scalable_malloc.h"
#import "stack_logging.h"
#define USE_SLEEP_RATHER_THAN_ABORT 0
-#define INITIAL_ZONES 8 // After this number, we reallocate for new zones
-
typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);
__private_extern__ pthread_lock_t _malloc_lock = 0; // initialized in __libc_init
-static malloc_zone_t *initial_malloc_zones[INITIAL_ZONES] = {0};
-/* The following variables are exported for the benefit of performance tools */
+/* The following variables are exported for the benefit of performance tools
+ *
+ * It should always be safe to first read malloc_num_zones, then read
+ * malloc_zones without taking the lock, if only iteration is required
+ */
unsigned malloc_num_zones = 0;
-malloc_zone_t **malloc_zones = initial_malloc_zones;
+unsigned malloc_num_zones_allocated = 0;
+malloc_zone_t **malloc_zones = 0;
malloc_logger_t *malloc_logger = NULL;
unsigned malloc_debug_flags = 0;
static inline malloc_zone_t * find_registered_zone(const void *, size_t *) __attribute__((always_inline));
static inline malloc_zone_t *
find_registered_zone(const void *ptr, size_t *returned_size) {
- // Returns a zone which may contain ptr, or NULL.
- // Speed is critical for this function, so it is not guaranteed to return
- // the zone which contains ptr. For N zones, zones 1 through N - 1 are
- // checked to see if they contain ptr. If so, the zone containing ptr is
- // returned. Otherwise the last zone is returned, since it is the last zone
- // in which ptr may reside. Clients should call zone->size(ptr) on the
- // return value to determine whether or not ptr is an allocated object.
- // This behavior optimizes for the case where ptr is an allocated object,
- // and there is only one zone.
- unsigned index, limit = malloc_num_zones;
- if (limit == 0)
- return NULL;
-
+ // Returns a zone which contains ptr, else NULL
+ unsigned index;
malloc_zone_t **zones = malloc_zones;
- for (index = 0; index < limit - 1; ++index, ++zones) {
+
+ for (index = 0; index < malloc_num_zones; ++index, ++zones) {
malloc_zone_t *zone = *zones;
size_t size = zone->size(zone, ptr);
- if (size) {
+ if (size) { // Claimed by this zone?
if (returned_size) *returned_size = size;
return zone;
}
}
- return malloc_zones[index];
+ // Unclaimed by any zone.
+ if (returned_size) *returned_size = 0;
+ return NULL;
}
__private_extern__ __attribute__((noinline)) void
// that will be called after an error message appears. It does not make
// sense for developers to call this function, so it is marked
// __private_extern__ to prevent it from becoming API.
+ MAGMALLOC_MALLOCERRORBREAK(); // DTrace USDT probe
+}
+
+__private_extern__ boolean_t __stack_logging_locked();
+
+__private_extern__ __attribute__((noinline)) int
+malloc_gdb_po_unsafe(void) {
+ // In order to implement "po" other data formatters in gdb, the debugger
+ // calls functions that call malloc. The debugger will only run one thread
+ // of the program in this case, so if another thread is holding a zone lock,
+ // gdb may deadlock in this case.
+ //
+ // Iterate over the zones in malloc_zones, and call "trylock" on the zone
+ // lock. If trylock succeeds, unlock it, otherwise return "locked". Returns
+ // 0 == safe, 1 == locked/unsafe.
+
+ if (__stack_logging_locked())
+ return 1;
+
+ malloc_zone_t **zones = malloc_zones;
+ unsigned i, e = malloc_num_zones;
+
+ for (i = 0; i != e; ++i) {
+ malloc_zone_t *zone = zones[i];
+
+ // Version must be >= 5 to look at the new introspection field.
+ if (zone->version < 5)
+ continue;
+
+ if (zone->introspect->zone_locked && zone->introspect->zone_locked(zone))
+ return 1;
+ }
+ return 0;
}
/********* Creation and destruction ************/
static void set_flags_from_environment(void);
-// malloc_zone_register_while_locked may drop the lock temporarily
static void
malloc_zone_register_while_locked(malloc_zone_t *zone) {
- /* Note that given the sequencing it is always safe to first get the number of zones, then get malloc_zones without taking the lock, if all you need is to iterate through the list */
- if (malloc_num_zones >= INITIAL_ZONES) {
- malloc_zone_t **zones = malloc_zones;
- malloc_zone_t *pzone = malloc_zones[0];
- boolean_t copy = malloc_num_zones == INITIAL_ZONES;
- if (copy) zones = NULL; // to avoid realloc on something not allocated
- MALLOC_UNLOCK();
- zones = pzone->realloc(pzone, zones, (malloc_num_zones + 1) * sizeof(malloc_zone_t *)); // we leak initial_malloc_zones, not worth tracking it
- MALLOC_LOCK();
- if (copy) memcpy(zones, malloc_zones, malloc_num_zones * sizeof(malloc_zone_t *));
- malloc_zones = zones;
+ size_t protect_size;
+ unsigned i;
+
+ /* scan the list of zones, to see if this zone is already registered. If
+ * so, print an error message and return. */
+ for (i = 0; i != malloc_num_zones; ++i)
+ if (zone == malloc_zones[i]) {
+ _malloc_printf(ASL_LEVEL_ERR, "Attempted to register zone more than once: %p\n", zone);
+ return;
+ }
+
+ if (malloc_num_zones == malloc_num_zones_allocated) {
+ size_t malloc_zones_size = malloc_num_zones * sizeof(malloc_zone_t *);
+ size_t alloc_size = malloc_zones_size + vm_page_size;
+
+ malloc_zone_t **new_zones = mmap(0, alloc_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, VM_MAKE_TAG(VM_MEMORY_MALLOC), 0);
+
+ /* If there were previously allocated malloc zones, we need to copy them
+ * out of the previous array and into the new zones array */
+ if (malloc_zones)
+ memcpy(new_zones, malloc_zones, malloc_zones_size);
+
+ /* Update the malloc_zones pointer, which we leak if it was previously
+ * allocated, and the number of zones allocated */
+ protect_size = alloc_size;
+ malloc_zones = new_zones;
+ malloc_num_zones_allocated = alloc_size / sizeof(malloc_zone_t *);
+ } else {
+ /* If we don't need to reallocate zones, we need to briefly change the
+ * page protection the malloc zones to allow writes */
+ protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
+ vm_protect(mach_task_self(), (uintptr_t)malloc_zones, protect_size, 0, VM_PROT_READ | VM_PROT_WRITE);
}
- malloc_zones[malloc_num_zones] = zone;
- malloc_num_zones++; // note that we do this after setting malloc_num_zones, so enumerations without taking the lock are safe
- // _malloc_printf(ASL_LEVEL_INFO, "Registered %p malloc_zones at address %p is %p [%d zones]\n", zone, &malloc_zones, malloc_zones, malloc_num_zones);
+ malloc_zones[malloc_num_zones++] = zone;
+
+ /* Finally, now that the zone is registered, disallow write access to the
+ * malloc_zones array */
+ vm_protect(mach_task_self(), (uintptr_t)malloc_zones, protect_size, 0, VM_PROT_READ);
+ //_malloc_printf(ASL_LEVEL_INFO, "Registered malloc_zone %p in malloc_zones %p [%u zones, %u bytes]\n", zone, malloc_zones, malloc_num_zones, protect_size);
}
static void
set_flags_from_environment(); // will only set flags up to two times
n = malloc_num_zones;
zone = create_scalable_zone(0, malloc_debug_flags);
- //malloc_zone_register_while_locked may drop the lock temporarily
malloc_zone_register_while_locked(zone);
malloc_set_zone_name(zone, "DefaultMallocZone");
if (n != 0) { // make the default first, for efficiency
- malloc_zone_t *hold = malloc_zones[0];
- if(hold->zone_name && strcmp(hold->zone_name, "DefaultMallocZone") == 0) {
- free((void *)hold->zone_name);
- hold->zone_name = NULL;
- }
- malloc_zones[0] = malloc_zones[n];
- malloc_zones[n] = hold;
+ unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
+ malloc_zone_t *hold = malloc_zones[0];
+ if(hold->zone_name && strcmp(hold->zone_name, "DefaultMallocZone") == 0) {
+ free((void *)hold->zone_name);
+ hold->zone_name = NULL;
+ }
+ vm_protect(mach_task_self(), (uintptr_t)malloc_zones, protect_size, 0, VM_PROT_READ | VM_PROT_WRITE);
+ malloc_zones[0] = malloc_zones[n];
+ malloc_zones[n] = hold;
+ vm_protect(mach_task_self(), (uintptr_t)malloc_zones, protect_size, 0, VM_PROT_READ);
}
// _malloc_printf(ASL_LEVEL_INFO, "%d registered zones\n", malloc_num_zones);
// _malloc_printf(ASL_LEVEL_INFO, "malloc_zones is at %p; malloc_num_zones is at %p\n", (unsigned)&malloc_zones, (unsigned)&malloc_num_zones);
return inline_malloc_default_zone();
}
+malloc_zone_t *
+malloc_default_purgeable_zone(void) {
+ static malloc_zone_t *dpz;
+
+ if (!dpz) {
+ malloc_zone_t *tmp = create_purgeable_zone(0, malloc_default_zone(), malloc_debug_flags);
+ malloc_zone_register(tmp);
+ malloc_set_zone_name(tmp, "DefaultPurgeableMallocZone");
+ if (!__sync_bool_compare_and_swap(&dpz, NULL, tmp))
+ malloc_destroy_zone(tmp);
+ }
+ return dpz;
+}
+
// For debugging, allow stack logging to both memory and disk to compare their results.
static void
stack_logging_log_stack_debug(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip)
close(malloc_debug_file);
malloc_debug_file = STDERR_FILENO;
}
+#if __LP64__
+ malloc_debug_flags = SCALABLE_MALLOC_ABORT_ON_CORRUPTION; // Set always on 64-bit processes
+#else
malloc_debug_flags = 0;
+#endif
stack_logging_enable_logging = 0;
stack_logging_dontcompact = 0;
malloc_logger = NULL;
malloc_logger = (void *)val;
_malloc_printf(ASL_LEVEL_INFO, "recording stacks using recorder %p\n", malloc_logger);
} else if (strcmp(flag,"memory") == 0) {
- malloc_logger = stack_logging_log_stack;
+ malloc_logger = (malloc_logger_t *)stack_logging_log_stack;
_malloc_printf(ASL_LEVEL_INFO, "recording malloc stacks in memory using standard recorder\n");
} else if (strcmp(flag,"both") == 0) {
malloc_logger = stack_logging_log_stack_debug;
}
}
if (getenv("MallocScribble")) {
- malloc_debug_flags |= SCALABLE_MALLOC_DO_SCRIBBLE;
- _malloc_printf(ASL_LEVEL_INFO, "enabling scribbling to detect mods to free blocks\n");
+ malloc_debug_flags |= SCALABLE_MALLOC_DO_SCRIBBLE;
+ _malloc_printf(ASL_LEVEL_INFO, "enabling scribbling to detect mods to free blocks\n");
}
if (getenv("MallocErrorAbort")) {
- malloc_debug_flags |= SCALABLE_MALLOC_ABORT_ON_ERROR;
- _malloc_printf(ASL_LEVEL_INFO, "enabling abort() on bad malloc or free\n");
+ malloc_debug_flags |= SCALABLE_MALLOC_ABORT_ON_ERROR;
+ _malloc_printf(ASL_LEVEL_INFO, "enabling abort() on bad malloc or free\n");
+ }
+#if __LP64__
+ /* initialization above forces SCALABLE_MALLOC_ABORT_ON_CORRUPTION of 64-bit processes */
+#else
+ if (getenv("MallocCorruptionAbort")) { // Set from an environment variable in 32-bit processes
+ malloc_debug_flags |= SCALABLE_MALLOC_ABORT_ON_CORRUPTION;
}
+#endif
flag = getenv("MallocCheckHeapStart");
if (flag) {
malloc_check_start = strtoul(flag, NULL, 0);
"- MallocDoNotProtectPostlude to disable protection (when previous flag set)\n"
"- MallocStackLogging to record all stacks. Tools like leaks can then be applied\n"
"- MallocStackLoggingNoCompact to record all stacks. Needed for malloc_history\n"
+ "- MallocStackLoggingDirectory to set location of stack logs, which can grow large; default is /tmp\n"
"- MallocScribble to detect writing on free blocks and missing initializers:\n"
" 0x55 is written upon free and 0xaa is written on allocation\n"
"- MallocCheckHeapStart <n> to start checking the heap after <n> operations\n"
"- MallocCheckHeapEach <s> to repeat the checking of the heap after <s> operations\n"
"- MallocCheckHeapSleep <t> to sleep <t> seconds on heap corruption\n"
"- MallocCheckHeapAbort <b> to abort on heap corruption if <b> is non-zero\n"
- "- MallocErrorAbort to abort on a bad malloc or free\n"
+ "- MallocCorruptionAbort to abort on malloc errors, but not on out of memory for 32-bit processes\n"
+ " MallocCorruptionAbort is always set on 64-bit processes\n"
+ "- MallocErrorAbort to abort on any malloc error, including out of memory\n"
"- MallocHelp - this help!\n");
}
}
return NULL;
}
if (malloc_def_zone_state < 2) _malloc_initialize();
- zone = create_scalable_zone(start_size, malloc_debug_flags);
+ zone = create_scalable_zone(start_size, flags | malloc_debug_flags);
malloc_zone_register(zone);
return zone;
}
+/*
+ * For use by CheckFix: establish a new default zone whose behavior is, apart from
+ * the use of death-row and per-CPU magazines, that of Leopard.
+ */
+void
+malloc_create_legacy_default_zone(void)
+{
+ malloc_zone_t *zone;
+ int i;
+
+ if (malloc_def_zone_state < 2) _malloc_initialize();
+ zone = create_legacy_scalable_zone(0, malloc_debug_flags);
+
+ MALLOC_LOCK();
+ malloc_zone_register_while_locked(zone);
+
+ //
+ // Establish the legacy scalable zone just created as the default zone.
+ //
+ malloc_zone_t *hold = malloc_zones[0];
+ if(hold->zone_name && strcmp(hold->zone_name, "DefaultMallocZone") == 0) {
+ free((void *)hold->zone_name);
+ hold->zone_name = NULL;
+ }
+ malloc_set_zone_name(zone, "DefaultMallocZone");
+
+ unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
+ vm_protect(mach_task_self(), (uintptr_t)malloc_zones, protect_size, 0, VM_PROT_READ | VM_PROT_WRITE);
+
+	// assert(zone == malloc_zones[malloc_num_zones - 1]);
+ for (i = malloc_num_zones - 1; i > 0; --i) {
+ malloc_zones[i] = malloc_zones[i - 1];
+ }
+ malloc_zones[0] = zone;
+
+ vm_protect(mach_task_self(), (uintptr_t)malloc_zones, protect_size, 0, VM_PROT_READ);
+ MALLOC_UNLOCK();
+}
+
void
malloc_destroy_zone(malloc_zone_t *zone) {
malloc_zone_unregister(zone);
/********* Block creation and manipulation ************/
+extern const char *__crashreporter_info__;
+
static void
internal_check(void) {
static vm_address_t *frames = NULL;
static unsigned num_frames;
if (malloc_zone_check(NULL)) {
- _malloc_printf(ASL_LEVEL_NOTICE, "MallocCheckHeap: PASSED check at %dth operation\n", malloc_check_counter-1);
- if (!frames) vm_allocate(mach_task_self(), (void *)&frames, vm_page_size, 1);
- thread_stack_pcs(frames, vm_page_size/sizeof(vm_address_t) - 1, &num_frames);
+ if (!frames) vm_allocate(mach_task_self(), (void *)&frames, vm_page_size, 1);
+ thread_stack_pcs(frames, vm_page_size/sizeof(vm_address_t) - 1, &num_frames);
} else {
+ _SIMPLE_STRING b = _simple_salloc();
+ if (b)
+ _simple_sprintf(b, "*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
+ else
+ _malloc_printf(MALLOC_PRINTF_NOLOG, "*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
malloc_printf("*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
if (frames) {
unsigned index = 1;
- _SIMPLE_STRING b = _simple_salloc();
if (b) {
_simple_sappend(b, "Stack for last operation where the malloc check succeeded: ");
while (index < num_frames) _simple_sprintf(b, "%p ", frames[index++]);
malloc_printf("%s\n(Use 'atos' for a symbolic stack)\n", _simple_string(b));
- _simple_sfree(b);
} else {
/*
* Should only get here if vm_allocate() can't get a single page of
unsigned recomm_start = (malloc_check_counter > malloc_check_each+1) ? malloc_check_counter-1-malloc_check_each : 1;
malloc_printf("*** Recommend using 'setenv MallocCheckHeapStart %d; setenv MallocCheckHeapEach %d' to narrow down failure\n", recomm_start, recomm_each);
}
- if (malloc_check_abort)
+ if (malloc_check_abort) {
+ __crashreporter_info__ = b ? _simple_string(b) : "*** MallocCheckHeap: FAILED check";
abort();
+ } else if (b)
+ _simple_sfree(b);
if (malloc_check_sleep > 0) {
_malloc_printf(ASL_LEVEL_NOTICE, "*** Sleeping for %d seconds to leave time to attach\n",
malloc_check_sleep);
zone->free(zone, ptr);
}
+static void
+malloc_zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
+ if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0);
+ if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
+ internal_check();
+ }
+ zone->free_definite_size(zone, ptr, size);
+}
+
malloc_zone_t *
malloc_zone_from_ptr(const void *ptr) {
- malloc_zone_t *zone;
if (!ptr)
return NULL;
- zone = find_registered_zone(ptr, NULL);
- if (zone && zone->size(zone, ptr))
- return zone;
- return NULL;
+ else
+ return find_registered_zone(ptr, NULL);
+}
+
+void *
+malloc_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
+ void *ptr;
+ if (zone->version < 5) // Version must be >= 5 to look at the new memalign field.
+ return NULL;
+ if (!(zone->memalign))
+ return NULL;
+ if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
+ internal_check();
+ }
+ if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
+ return NULL;
+ }
+ if (alignment < sizeof( void *) || // excludes 0 == alignment
+ 0 != (alignment & (alignment - 1))) { // relies on sizeof(void *) being a power of two.
+ return NULL;
+ }
+ ptr = zone->memalign(zone, alignment, size);
+ if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
+ return ptr;
}
/********* Functions for zone implementors ************/
void
malloc_zone_unregister(malloc_zone_t *z) {
unsigned index;
+
+ if (malloc_num_zones == 0)
+ return;
+
MALLOC_LOCK();
- index = malloc_num_zones;
- while (index--) {
- malloc_zone_t *zone = malloc_zones[index];
- if (zone == z) {
- malloc_zones[index] = malloc_zones[--malloc_num_zones];
- MALLOC_UNLOCK();
- return;
- }
+ for (index = 0; index < malloc_num_zones; ++index) {
+ if (z != malloc_zones[index])
+ continue;
+
+ // Modify the page to be allow write access, so that we can update the
+ // malloc_zones array.
+ size_t protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
+ vm_protect(mach_task_self(), (uintptr_t)malloc_zones, protect_size, 0, VM_PROT_READ | VM_PROT_WRITE);
+
+ // If we found a match, swap it with the entry on the back of the list
+ // and null out the back of the list.
+ malloc_zones[index] = malloc_zones[malloc_num_zones - 1];
+ malloc_zones[malloc_num_zones - 1] = NULL;
+ --malloc_num_zones;
+
+ vm_protect(mach_task_self(), (uintptr_t)malloc_zones, protect_size, 0, VM_PROT_READ);
+ MALLOC_UNLOCK();
+ return;
}
MALLOC_UNLOCK();
malloc_printf("*** malloc_zone_unregister() failed for %p\n", z);
void
free(void *ptr) {
malloc_zone_t *zone;
- if (!ptr) return;
- zone = find_registered_zone(ptr, NULL);
- if (zone)
+ size_t size;
+ if (!ptr)
+ return;
+ zone = find_registered_zone(ptr, &size);
+ if (!zone) {
+ malloc_printf("*** error for object %p: pointer being freed was not allocated\n"
+ "*** set a breakpoint in malloc_error_break to debug\n", ptr);
+ malloc_error_break();
+ if ((malloc_debug_flags & (SCALABLE_MALLOC_ABORT_ON_CORRUPTION|SCALABLE_MALLOC_ABORT_ON_ERROR)))
+ abort();
+ } else if (zone->version >= 6 && zone->free_definite_size)
+ malloc_zone_free_definite_size(zone, ptr, size);
+ else
malloc_zone_free(zone, ptr);
}
retval = malloc_zone_malloc(inline_malloc_default_zone(), new_size);
} else {
zone = find_registered_zone(old_ptr, &old_size);
- if (zone && (old_size == 0))
- old_size = zone->size(zone, old_ptr);
- if (zone && (old_size >= new_size))
+ if (zone && old_size >= new_size)
return old_ptr;
- /*
- * if old_size is still 0 here, it means that either zone was NULL or
- * the call to zone->size() returned 0, indicating the pointer is not
- * not in that zone. In this case, just use the default zone.
- */
- if (old_size == 0)
+
+ if (!zone)
zone = inline_malloc_default_zone();
+
retval = malloc_zone_realloc(zone, old_ptr, new_size);
}
if (retval == NULL) {
size_t
malloc_size(const void *ptr) {
size_t size = 0;
- if (!ptr) return size;
- malloc_zone_t *zone = find_registered_zone(ptr, &size);
- /*
- * If we found a zone, and size is 0 then we need to check to see if that
- * zone contains ptr. If size is nonzero, then we know zone contains ptr.
- */
- if (zone && (size == 0))
- size = zone->size(zone, ptr);
+
+ if (!ptr)
+ return size;
+
+ (void)find_registered_zone(ptr, &size);
return size;
}
return zone->introspect->good_size(zone, size);
}
+/*
+ * The posix_memalign() function shall allocate size bytes aligned on a boundary specified by alignment,
+ * and shall return a pointer to the allocated memory in memptr.
+ * The value of alignment shall be a multiple of sizeof( void *), that is also a power of two.
+ * Upon successful completion, the value pointed to by memptr shall be a multiple of alignment.
+ *
+ * Upon successful completion, posix_memalign() shall return zero; otherwise,
+ * an error number shall be returned to indicate the error.
+ *
+ * The posix_memalign() function shall fail if:
+ * EINVAL
+ * The value of the alignment parameter is not a power of two multiple of sizeof( void *).
+ * ENOMEM
+ * There is insufficient memory available with the requested alignment.
+ */
+
+int
+posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+ void *retval;
+
+ /* POSIX is silent on NULL == memptr !?! */
+
+ retval = malloc_zone_memalign(inline_malloc_default_zone(), alignment, size);
+ if (retval == NULL) {
+ // To avoid testing the alignment constraints redundantly, we'll rely on the
+ // test made in malloc_zone_memalign to vet each request. Only if that test fails
+ // and returns NULL, do we arrive here to detect the bogus alignment and give the
+ // required EINVAL return.
+ if (alignment < sizeof( void *) || // excludes 0 == alignment
+ 0 != (alignment & (alignment - 1))) { // relies on sizeof(void *) being a power of two.
+ return EINVAL;
+ }
+ return ENOMEM;
+ } else {
+ *memptr = retval; // Set iff allocation succeeded
+ return 0;
+ }
+}
+
+static malloc_zone_t *
+find_registered_purgeable_zone(void *ptr) {
+ if (!ptr)
+ return NULL;
+
+ /*
+ * Look for a zone which contains ptr. If that zone does not have the purgeable malloc flag
+ * set, or the allocation is too small, do nothing. Otherwise, set the allocation volatile.
+ * FIXME: for performance reasons, we should probably keep a separate list of purgeable zones
+ * and only search those.
+ */
+ size_t size = 0;
+ malloc_zone_t *zone = find_registered_zone(ptr, &size);
+
+ /* FIXME: would really like a zone->introspect->flags->purgeable check, but haven't determined
+ * binary compatibility impact of changing the introspect struct yet. */
+ if (!zone)
+ return NULL;
+
+ /* Check to make sure pointer is page aligned and size is multiple of page size */
+ if ((size < vm_page_size) || ((size % vm_page_size) != 0))
+ return NULL;
+
+ return zone;
+}
+
+void
+malloc_make_purgeable(void *ptr) {
+ malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
+ if (!zone)
+ return;
+
+ int state = VM_PURGABLE_VOLATILE;
+ vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);
+ return;
+}
+
+/* Returns true if ptr is valid. Ignore the return value from vm_purgeable_control and only report
+ * state. */
+int
+malloc_make_nonpurgeable(void *ptr) {
+ malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
+ if (!zone)
+ return 0;
+
+ int state = VM_PURGABLE_NONVOLATILE;
+ vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);
+
+ if (state == VM_PURGABLE_EMPTY)
+ return EFAULT;
+
+ return 0;
+}
+
/********* Batch methods ************/
unsigned
// printf("malloc_get_all_zones succesfully found %d zones\n", num_zones);
err = reader(task, zones_address, sizeof(malloc_zone_t *) * num_zones, (void **)addresses);
if (err) {
- malloc_printf("*** malloc_get_all_zones: error reading zones at %p\n", (unsigned)&zones_address);
+ malloc_printf("*** malloc_get_all_zones: error reading zones at %p\n", &zones_address);
return err;
}
// printf("malloc_get_all_zones succesfully read %d zones\n", num_zones);
static void
DefaultMallocError(int x) {
- malloc_printf("*** error %d\n", x);
#if USE_SLEEP_RATHER_THAN_ABORT
+ malloc_printf("*** error %d\n", x);
sleep(3600);
#else
+ _SIMPLE_STRING b = _simple_salloc();
+ if (b) {
+ _simple_sprintf(b, "*** error %d", x);
+ malloc_printf("%s\n", _simple_string(b));
+ __crashreporter_info__ = _simple_string(b);
+ } else {
+ _malloc_printf(MALLOC_PRINTF_NOLOG, "*** error %d", x);
+ __crashreporter_info__ = "*** DefaultMallocError called";
+ }
abort();
#endif
}
return DefaultMallocError;
}
+/* Stack logging fork-handling prototypes */
+extern void __stack_logging_fork_prepare();
+extern void __stack_logging_fork_parent();
+extern void __stack_logging_fork_child();
+
void
_malloc_fork_prepare() {
/* Prepare the malloc module for a fork by insuring that no thread is in a malloc critical section */
malloc_zone_t *zone = malloc_zones[index++];
zone->introspect->force_lock(zone);
}
+ __stack_logging_fork_prepare();
}
void
_malloc_fork_parent() {
/* Called in the parent process after a fork() to resume normal operation. */
unsigned index = 0;
+ __stack_logging_fork_parent();
MALLOC_UNLOCK();
while (index < malloc_num_zones) {
malloc_zone_t *zone = malloc_zones[index++];
_malloc_fork_child() {
/* Called in the child process after a fork() to resume normal operation. In the MTASK case we also have to change memory inheritance so that the child does not share memory with the parent. */
unsigned index = 0;
+ __stack_logging_fork_child();
MALLOC_UNLOCK();
while (index < malloc_num_zones) {
malloc_zone_t *zone = malloc_zones[index++];
--- /dev/null
+.\" Copyright (c) 2008 Apple, Inc. All rights reserved.
+.\"
+.\" @APPLE_LICENSE_HEADER_START@
+.\"
+.\" The contents of this file constitute Original Code as defined in and
+.\" are subject to the Apple Public Source License Version 1.1 (the
+.\" "License"). You may not use this file except in compliance with the
+.\" License. Please obtain a copy of the License at
+.\" http://www.apple.com/publicsource and read it before using this file.
+.\"
+.\" This Original Code and all software distributed under the License are
+.\" distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+.\" EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+.\" INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+.\" FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+.\" License for the specific language governing rights and limitations
+.\" under the License.
+.\"
+.\" @APPLE_LICENSE_HEADER_END@
+.\"
+.Dd Aug 13, 2008
+.Dt MALLOC_ZONE_MALLOC 3
+.Os
+.Sh NAME
+.Nm malloc_create_zone ,
+.Nm malloc_destroy_zone ,
+.Nm malloc_default_zone ,
+.Nm malloc_zone_from_ptr ,
+.Nm malloc_zone_malloc ,
+.Nm malloc_zone_calloc ,
+.Nm malloc_zone_valloc ,
+.Nm malloc_zone_realloc ,
+.Nm malloc_zone_memalign ,
+.Nm malloc_zone_free
+.Nd zone-based memory allocation
+.Sh SYNOPSIS
+.In malloc/malloc.h
+.Ft malloc_zone_t *
+.Fo malloc_create_zone
+.Fa "vm_size_t start_size"
+.Fa "unsigned flags"
+.Fc
+.Ft void
+.Fo malloc_destroy_zone
+.Fa "malloc_zone_t *zone"
+.Fc
+.Ft malloc_zone_t *
+.Fo malloc_default_zone
+.Fa void
+.Fc
+.Ft malloc_zone_t *
+.Fo malloc_zone_from_ptr
+.Fa "const void *ptr"
+.Fc
+.Ft void *
+.Fo malloc_zone_malloc
+.Fa "malloc_zone_t *zone"
+.Fa "size_t size"
+.Fc
+.Ft void *
+.Fo malloc_zone_calloc
+.Fa "malloc_zone_t *zone"
+.Fa "size_t num_items"
+.Fa "size_t size"
+.Fc
+.Ft void *
+.Fo malloc_zone_valloc
+.Fa "malloc_zone_t *zone"
+.Fa "size_t size"
+.Fc
+.Ft void *
+.Fo malloc_zone_realloc
+.Fa "malloc_zone_t *zone"
+.Fa "void *ptr"
+.Fa "size_t size"
+.Fc
+.Ft void *
+.Fo malloc_zone_memalign
+.Fa "malloc_zone_t *zone"
+.Fa "size_t alignment"
+.Fa "size_t size"
+.Fc
+.Ft void
+.Fo malloc_zone_free
+.Fa "malloc_zone_t *zone"
+.Fa "void *ptr"
+.Fc
+.Sh DESCRIPTION
+The
+.Fn malloc_create_zone
+function creates a malloc zone, advising an initial allocation of
+.Fa start_size
+bytes, and specifying
+.Fa flags
+that alter the standard behavior of the zone.
+The returned malloc zone can be used to provide custom allocation and
+deallocation behavior, and to retrieve additional information about the
+allocations in that zone.
+.Pp
+The
+.Fn malloc_destroy_zone
+function deallocates all memory associated with objects in
+.Fa zone
+as well as
+.Fa zone
+itself.
+.Pp
+The
+.Fn malloc_default_zone
+function returns the default system malloc zone, used by
+.Xr malloc 3 ,
+and
+.Xr free 3 .
+.Pp
+The
+.Fn malloc_zone_from_ptr
+function returns a pointer to the malloc zone which contains
+.Fa ptr
+or NULL, if the pointer does not point to an allocated object in any current
+malloc zone.
+.Pp
+The
+.Fn malloc_zone_malloc ,
+.Fn malloc_zone_calloc ,
+.Fn malloc_zone_valloc ,
+.Fn malloc_zone_realloc ,
+.Fn malloc_zone_memalign ,
+and
+.Fn malloc_zone_free
+perform the same task on
+.Fa zone
+as their non-prefixed variants,
+.Xr malloc 3 ,
+.Xr calloc 3 ,
+.Xr valloc 3 ,
+.Xr realloc 3 ,
+.Xr posix_memalign 3 ,
+and
+.Xr free 3
+perform on the default system malloc zone.
+.Sh RETURN VALUES
+The
+.Fn malloc_create_zone ,
+.Fn malloc_default_zone ,
+and
+.Fn malloc_zone_from_ptr
+functions return a pointer to a malloc_zone_t structure, or NULL if there was
+an error.
+.Pp
+The
+.Fn malloc_zone_malloc ,
+.Fn malloc_zone_calloc ,
+.Fn malloc_zone_valloc ,
+.Fn malloc_zone_realloc ,
+and
+.Fn malloc_zone_memalign
+functions return a pointer to allocated memory. If there is an error, they
+return a NULL pointer. They are not required to set
+.Va errno .
+.Sh SEE ALSO
+.Xr malloc 3 ,
+.Xr posix_memalign 3
extern semaphore_t clock_sem;
#ifdef VARIANT_CANCELABLE
extern void _pthread_testcancel(pthread_t thread, int isconforming);
-extern int __semwait_signal(int cond_sem, int mutex_sem, int timeout, int relative, time_t tv_sec, __int32_t tv_nsec);
+extern int __semwait_signal(int cond_sem, int mutex_sem, int timeout, int relative, __int64_t tv_sec, __int32_t tv_nsec);
#define SEMWAIT_SIGNAL __semwait_signal
#else /* !VARIANT_CANCELABLE */
-extern int __semwait_signal_nocancel(int cond_sem, int mutex_sem, int timeout, int relative, time_t tv_sec, __int32_t tv_nsec);
+extern int __semwait_signal_nocancel(int cond_sem, int mutex_sem, int timeout, int relative, __int64_t tv_sec, __int32_t tv_nsec);
#define SEMWAIT_SIGNAL __semwait_signal_nocancel
#endif /* VARIANT_CANCELABLE */
return -1;
}
}
- ret = SEMWAIT_SIGNAL(clock_sem, MACH_PORT_NULL, 1, 1, requested_time->tv_sec, requested_time->tv_nsec);
+ ret = SEMWAIT_SIGNAL(clock_sem, MACH_PORT_NULL, 1, 1, (int64_t)requested_time->tv_sec, (int32_t)requested_time->tv_nsec);
if (ret < 0) {
if (errno == ETIMEDOUT) {
return 0;
#include <ftw.h>
#include <limits.h>
#include <fcntl.h>
+#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
maxlen = n;
}
if (read(fd, (char *)&buf, sizeof(buf)) != sizeof(buf) ||
- (N_BADMAG(buf) && *((long *)&buf) != MH_MAGIC &&
- NXSwapBigLongToHost(*((long *)&buf)) != FAT_MAGIC)) {
+ (N_BADMAG(buf) && *((uint32_t *)&buf) != MH_MAGIC &&
+ OSSwapBigToHostInt32(*((uint32_t *)&buf)) != FAT_MAGIC)) {
return (-1);
}
/* Deal with fat file if necessary */
- if (NXSwapBigLongToHost(*((long *)&buf)) == FAT_MAGIC) {
+ if (OSSwapBigToHostInt32(*((uint32_t *)&buf)) == FAT_MAGIC) {
struct host_basic_info hbi;
struct fat_header fh;
struct fat_arch *fat_archs, *fap;
}
/* Convert fat_narchs to host byte order */
- fh.nfat_arch = NXSwapBigLongToHost(fh.nfat_arch);
+ fh.nfat_arch = OSSwapBigToHostInt32(fh.nfat_arch);
/* Read in the fat archs */
fat_archs = (struct fat_arch *)malloc(fh.nfat_arch *
*/
for (i = 0; i < fh.nfat_arch; i++) {
fat_archs[i].cputype =
- NXSwapBigLongToHost(fat_archs[i].cputype);
+ OSSwapBigToHostInt32(fat_archs[i].cputype);
fat_archs[i].cpusubtype =
- NXSwapBigLongToHost(fat_archs[i].cpusubtype);
+ OSSwapBigToHostInt32(fat_archs[i].cpusubtype);
fat_archs[i].offset =
- NXSwapBigLongToHost(fat_archs[i].offset);
+ OSSwapBigToHostInt32(fat_archs[i].offset);
fat_archs[i].size =
- NXSwapBigLongToHost(fat_archs[i].size);
+ OSSwapBigToHostInt32(fat_archs[i].size);
fat_archs[i].align =
- NXSwapBigLongToHost(fat_archs[i].align);
+ OSSwapBigToHostInt32(fat_archs[i].align);
}
#if CPUSUBTYPE_SUPPORT
}
}
- if (*((long *)&buf) == MH_MAGIC) {
+ if (*((uint32_t *)&buf) == MH_MAGIC) {
struct mach_header mh;
struct load_command *load_commands, *lcp;
struct symtab_command *stp;
stp = NULL;
lcp = load_commands;
for (i = 0; i < mh.ncmds; i++) {
- if (lcp->cmdsize % sizeof(long) != 0 ||
+ if (lcp->cmdsize % sizeof(uint32_t) != 0 ||
lcp->cmdsize <= 0 ||
(char *)lcp + lcp->cmdsize >
(char *)load_commands + mh.sizeofcmds) {
lseek(fd, sa, SEEK_SET);
while (n) {
- long savpos;
+ off_t savpos;
m = sizeof (space);
if (n < m)
* SUCH DAMAGE.
*/
+#ifdef VARIANT_DARWINEXTSN
+#define _DARWIN_UNLIMITED_STREAMS
+#endif /* VARIANT_DARWINEXTSN */
+
#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)popen.c 8.3 (Berkeley) 5/3/95";
#endif /* LIBC_SCCS and not lint */
#include <string.h>
#include <paths.h>
#include <pthread.h>
+#include <spawn.h>
#include "un-namespace.h"
#include "libc_private.h"
#define environ (*_NSGetEnviron())
/* 3516149 - store file descriptor and use that to close to prevent blocking */
-static struct pid {
+struct pid {
struct pid *next;
FILE *fp;
int fd;
pid_t pid;
-} *pidlist;
-static pthread_mutex_t pidlist_mutex = PTHREAD_MUTEX_INITIALIZER;
+};
+#define pidlist __popen_pidlist
+#define pidlist_mutex __popen_pidlist_mutex
+#ifndef BUILDING_VARIANT
+__private_extern__ struct pid *pidlist = NULL;
+__private_extern__ pthread_mutex_t pidlist_mutex = PTHREAD_MUTEX_INITIALIZER;
+#else /* BUILDING_VARIANT */
+extern struct pid *pidlist;
+extern pthread_mutex_t pidlist_mutex;
+#endif /* !BUILDING_VARIANT */
#define THREAD_LOCK() if (__isthreaded) _pthread_mutex_lock(&pidlist_mutex)
#define THREAD_UNLOCK() if (__isthreaded) _pthread_mutex_unlock(&pidlist_mutex)
{
struct pid *cur;
FILE *iop;
- int pdes[2], pid, twoway;
+ int pdes[2], pid, twoway, other;
char *argv[4];
struct pid *p;
+ posix_spawn_file_actions_t file_actions;
+ int err;
if (type == NULL) {
errno = EINVAL;
if ((*type != 'r' && *type != 'w') || type[1]) {
errno = EINVAL;
return (NULL);
+ }
+ if (pipe(pdes) < 0)
+ return (NULL);
+ }
+
+ /* fdopen can now fail */
+ if (*type == 'r') {
+ iop = fdopen(pdes[0], type);
+ other = pdes[1];
+ } else {
+ iop = fdopen(pdes[1], type);
+ other = pdes[0];
}
- if (pipe(pdes) < 0)
+ if (iop == NULL) {
+ (void)_close(pdes[0]);
+ (void)_close(pdes[1]);
return (NULL);
}
if ((cur = malloc(sizeof(struct pid))) == NULL) {
- (void)_close(pdes[0]);
- (void)_close(pdes[1]);
+ (void)fclose(iop);
+ (void)_close(other);
+ return (NULL);
+ }
+
+ if ((err = posix_spawn_file_actions_init(&file_actions)) != 0) {
+ (void)fclose(iop);
+ (void)_close(other);
+ free(cur);
+ errno = err;
return (NULL);
}
+ if (*type == 'r') {
+		/*
+		 * Reading: the child must not inherit the read end of
+		 * the pipe, and the write end becomes the child's
+		 * stdout (and stdin too, for a bidirectional pipe).
+		 * These are queued as posix_spawn file actions rather
+		 * than performed in a forked child.
+		 */
+ (void)posix_spawn_file_actions_addclose(&file_actions, pdes[0]);
+ if (pdes[1] != STDOUT_FILENO) {
+ (void)posix_spawn_file_actions_adddup2(&file_actions, pdes[1], STDOUT_FILENO);
+ (void)posix_spawn_file_actions_addclose(&file_actions, pdes[1]);
+ if (twoway)
+ (void)posix_spawn_file_actions_adddup2(&file_actions, STDOUT_FILENO, STDIN_FILENO);
+ } else if (twoway && (pdes[1] != STDIN_FILENO))
+ (void)posix_spawn_file_actions_adddup2(&file_actions, pdes[1], STDIN_FILENO);
+ } else {
+ if (pdes[0] != STDIN_FILENO) {
+ (void)posix_spawn_file_actions_adddup2(&file_actions, pdes[0], STDIN_FILENO);
+ (void)posix_spawn_file_actions_addclose(&file_actions, pdes[0]);
+ }
+ (void)posix_spawn_file_actions_addclose(&file_actions, pdes[1]);
+ }
+ for (p = pidlist; p; p = p->next) {
+ (void)posix_spawn_file_actions_addclose(&file_actions, p->fd);
+ }
argv[0] = "sh";
argv[1] = "-c";
argv[2] = (char *)command;
argv[3] = NULL;
- THREAD_LOCK();
- switch (pid = fork()) {
- case -1: /* Error. */
- THREAD_UNLOCK();
- (void)_close(pdes[0]);
- (void)_close(pdes[1]);
+ err = posix_spawn(&pid, _PATH_BSHELL, &file_actions, NULL, argv, environ);
+ posix_spawn_file_actions_destroy(&file_actions);
+
+ if (err == ENOMEM || err == EAGAIN) { /* as if fork failed */
+ (void)fclose(iop);
+ (void)_close(other);
free(cur);
+ errno = err;
return (NULL);
- /* NOTREACHED */
- case 0: /* Child. */
- if (*type == 'r') {
- /*
- * The _dup2() to STDIN_FILENO is repeated to avoid
- * writing to pdes[1], which might corrupt the
- * parent's copy. This isn't good enough in
- * general, since the _exit() is no return, so
- * the compiler is free to corrupt all the local
- * variables.
- */
- (void)_close(pdes[0]);
- if (pdes[1] != STDOUT_FILENO) {
- (void)_dup2(pdes[1], STDOUT_FILENO);
- (void)_close(pdes[1]);
- if (twoway)
- (void)_dup2(STDOUT_FILENO, STDIN_FILENO);
- } else if (twoway && (pdes[1] != STDIN_FILENO))
- (void)_dup2(pdes[1], STDIN_FILENO);
- } else {
- if (pdes[0] != STDIN_FILENO) {
- (void)_dup2(pdes[0], STDIN_FILENO);
- (void)_close(pdes[0]);
- }
- (void)_close(pdes[1]);
- }
- for (p = pidlist; p; p = p->next) {
- (void)_close(p->fd);
- }
- _execve(_PATH_BSHELL, argv, environ);
- _exit(127);
- /* NOTREACHED */
+ } else if (err != 0) { /* couldn't exec the shell */
+ pid = -1;
}
- THREAD_UNLOCK();
- /* Parent; assume fdopen can't fail. */
if (*type == 'r') {
- iop = fdopen(pdes[0], type);
cur->fd = pdes[0];
(void)_close(pdes[1]);
} else {
- iop = fdopen(pdes[1], type);
cur->fd = pdes[1];
(void)_close(pdes[0]);
}
return (iop);
}
+#ifndef BUILDING_VARIANT
/*
* pclose --
* Pclose returns -1 if stream is not associated with a `popened' command,
(void)fclose(iop);
+ if (cur->pid < 0) {
+ free(cur);
+ return W_EXITCODE(127, 0);
+ }
do {
pid = _wait4(cur->pid, &pstat, 0, (struct rusage *)0);
} while (pid == -1 && errno == EINTR);
return (pid == -1 ? -1 : pstat);
}
+#endif /* !BUILDING_VARIANT */
--- /dev/null
+.\" Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+.\"
+.\" @APPLE_LICENSE_HEADER_START@
+.\"
+.\" The contents of this file constitute Original Code as defined in and
+.\" are subject to the Apple Public Source License Version 1.1 (the
+.\" "License"). You may not use this file except in compliance with the
+.\" License. Please obtain a copy of the License at
+.\" http://www.apple.com/publicsource and read it before using this file.
+.\"
+.\" This Original Code and all software distributed under the License are
+.\" distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+.\" EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+.\" INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+.\" FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+.\" License for the specific language governing rights and limitations
+.\" under the License.
+.\"
+.\" @APPLE_LICENSE_HEADER_END@
+.\"
+.Dd April 9, 2008
+.Dt POSIX_MEMALIGN 3
+.Os
+.Sh NAME
+.Nm posix_memalign
+.Nd aligned memory allocation
+.Sh SYNOPSIS
+.In stdlib.h
+.Ft int
+.Fo posix_memalign
+.Fa "void **memptr"
+.Fa "size_t alignment"
+.Fa "size_t size"
+.Fc
+.Sh DESCRIPTION
+The
+.Fn posix_memalign
+function allocates
+.Fa size
+bytes of memory such that the allocation's base address is an exact multiple of
+.Fa alignment ,
+and returns the allocation in the value pointed to by
+.Fa memptr .
+.Pp
+The requested
+.Fa alignment
+must be a power of 2 at least as large as
+.Fn sizeof "void *" .
+.Pp
+Memory that is allocated via
+.Fn posix_memalign
+can be used as an argument in subsequent calls to
+.Xr realloc 3 ,
+.Xr reallocf 3 ,
+and
+.Xr free 3 .
+(Note however, that the allocation returned by
+.Xr realloc 3
+or
+.Xr reallocf 3
+is not guaranteed to preserve the original
+.Fa alignment ) .
+.Sh NOTES
+.Fn posix_memalign
+should be used judiciously as the algorithm that realizes the
+.Fa alignment
+constraint can incur significant memory overhead.
+.Sh RETURN VALUES
+The
+.Fn posix_memalign
+function returns the value 0 if successful; otherwise it returns an error value.
+.Sh ERRORS
+The
+.Fn posix_memalign
+function will fail if:
+.Bl -tag -width Er
+.It Bq Er EINVAL
+The
+.Fa alignment
+parameter is not a power of 2 at least as large as
+.Fn sizeof "void *" .
+.It Bq Er ENOMEM
+Memory allocation error.
+.El
+.Sh SEE ALSO
+.Xr free 3 ,
+.Xr malloc 3 ,
+.Xr realloc 3 ,
+.Xr reallocf 3 ,
+.Xr valloc 3 ,
+.Xr malloc_zone_memalign 3
+.Sh STANDARDS
+The
+.Fn posix_memalign
+function conforms to
+.St -p1003.1-2001 .
#define SCALABLE_MALLOC_DO_SCRIBBLE (1 << 3)
// write 0x55 onto free blocks
#define SCALABLE_MALLOC_ABORT_ON_ERROR (1 << 4)
- // call abort() on a malloc or free error, such as a double free
-
+ // call abort() on any malloc error, such as double free or out of memory.
+#define SCALABLE_MALLOC_PURGEABLE (1 << 5)
+ // allocate objects such that they may be used with VM purgability APIs
+#define SCALABLE_MALLOC_ABORT_ON_CORRUPTION (1 << 6)
+ // call abort() on malloc errors, but not on out of memory.
+
extern malloc_zone_t *create_scalable_zone(size_t initial_size, unsigned debug_flags);
/* Create a new zone that scales for small objects or large objects */
+extern malloc_zone_t *create_purgeable_zone(size_t initial_size, malloc_zone_t *malloc_default_zone, unsigned debug_flags);
+ /* Create a new zone that supports malloc_make{un}purgeable() discipline. */
+
+extern malloc_zone_t *create_legacy_scalable_zone(size_t initial_size, unsigned debug_flags);
+ /*
+ * For use by CheckFix: create a new zone whose behavior is, apart from
+ * the use of death-row and per-CPU magazines, that of Leopard.
+ */
+
/***** Private API for debug and performance tools ********/
extern boolean_t scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone);
+++ /dev/null
-./scandir.c
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 1983, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)scandir.c 8.3 (Berkeley) 1/2/94";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/gen/scandir.c,v 1.7 2002/02/01 01:32:19 obrien Exp $");
+
+/*
+ * Scan the directory dirname calling select to make a list of selected
+ * directory entries then sort using qsort and compare routine dcomp.
+ * Returns the number of entries and a pointer to a list of pointers to
+ * struct dirent (through namelist). Returns -1 if there were any errors.
+ */
+
+#include "namespace.h"
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <dirent.h>
+#include <stdlib.h>
+#include <string.h>
+#include "un-namespace.h"
+
+/*
+ * The _GENERIC_DIRSIZ macro gives the minimum record length which will hold the directory
+ * entry. This requires the amount of space in struct dirent without the
+ * d_name field, plus enough space for the name and a terminating nul byte
+ * (dp->d_namlen + 1), rounded up to a 4 byte boundary.
+ */
+
+int
+scandir(dirname, namelist, select, dcomp)
+	const char *dirname;
+	struct dirent ***namelist;
+	int (*select)(struct dirent *);
+	int (*dcomp)(const void *, const void *);
+{
+	struct dirent *d, *p, **names = NULL;
+	size_t nitems = 0;
+	struct stat stb;
+	long arraysz;
+	DIR *dirp;
+
+	if ((dirp = opendir(dirname)) == NULL)
+		return(-1);
+	/* Size the initial array from the directory file itself (below). */
+	if (_fstat(dirp->dd_fd, &stb) < 0)
+		goto fail;
+
+	/*
+	 * estimate the array size by taking the size of the directory file
+	 * and dividing it by a multiple of the minimum size entry.
+	 */
+	arraysz = (stb.st_size / 24);
+	names = (struct dirent **)malloc(arraysz * sizeof(struct dirent *));
+	if (names == NULL)
+		goto fail;
+
+	while ((d = readdir(dirp)) != NULL) {
+		if (select != NULL && !(*select)(d))
+			continue;	/* just selected names */
+		/*
+		 * Make a minimum size copy of the data
+		 * (only the fixed fields plus the name, via _GENERIC_DIRSIZ).
+		 */
+		p = (struct dirent *)malloc(_GENERIC_DIRSIZ(d));
+		if (p == NULL)
+			goto fail;
+		p->d_fileno = d->d_fileno;
+		p->d_type = d->d_type;
+		p->d_reclen = d->d_reclen;
+		p->d_namlen = d->d_namlen;
+		bcopy(d->d_name, p->d_name, p->d_namlen + 1);
+		/*
+		 * Check to make sure the array has space left and
+		 * realloc the maximum size.
+		 */
+		if (nitems >= arraysz) {
+			const int inc = 10;	/* increase by this much */
+			struct dirent **names2;
+
+			names2 = (struct dirent **)realloc((char *)names,
+			    (arraysz + inc) * sizeof(struct dirent *));
+			if (names2 == NULL) {
+				free(p);
+				goto fail;
+			}
+			names = names2;
+			arraysz += inc;
+		}
+		names[nitems++] = p;
+	}
+	closedir(dirp);
+	if (nitems && dcomp != NULL)
+		qsort(names, nitems, sizeof(struct dirent *), dcomp);
+	*namelist = names;
+	return(nitems);
+
+fail:
+	/* Unwind: release every copied entry, the array, and the directory. */
+	while (nitems > 0)
+		free(names[--nitems]);
+	free(names);
+	closedir(dirp);
+	return -1;
+}
+
+/*
+ * Alphabetic order comparison routine for those who want it.
+ */
+int
+alphasort(d1, d2)
+	const void *d1;
+	const void *d2;
+{
+	/* qsort(3) hands us pointers into the names array, so each argument
+	 * is really a struct dirent ** -- hence the double dereference. */
+	return(strcmp((*(struct dirent **)d1)->d_name,
+	    (*(struct dirent **)d2)->d_name));
+}
+++ /dev/null
-./scandir.3
\ No newline at end of file
--- /dev/null
+.\" Copyright (c) 1983, 1991, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. All advertising materials mentioning features or use of this software
+.\" must display the following acknowledgement:
+.\" This product includes software developed by the University of
+.\" California, Berkeley and its contributors.
+.\" 4. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" @(#)scandir.3 8.1 (Berkeley) 6/4/93
+.\" $FreeBSD: src/lib/libc/gen/scandir.3,v 1.8 2002/12/19 09:40:21 ru Exp $
+.\"
+.Dd May 20, 2008
+.Dt SCANDIR 3
+.Os
+.Sh NAME
+.Nm scandir ,
+#ifdef UNIFDEF_BLOCKS
+.Nm scandir_b ,
+#endif
+.Nm alphasort
+.Nd scan a directory
+.Sh SYNOPSIS
+.In sys/types.h
+.In dirent.h
+.Ft int
+.Fn scandir "const char *dirname" "struct dirent ***namelist" "int \\*(lp*select\\*(rp\\*(lpstruct dirent *\\*(rp" "int \\*(lp*compar\\*(rp\\*(lpconst void *, const void *\\*(rp"
+.Ft int
+.Fn alphasort "const void *d1" "const void *d2"
+#ifdef UNIFDEF_BLOCKS
+.Ft int
+.Fn scandir_b "const char *dirname" "struct dirent ***namelist" "int \\*(lp^select\\*(rp\\*(lpstruct dirent *\\*(rp" "int \\*(lp^compar\\*(rp\\*(lpconst void *, const void *\\*(rp"
+#endif
+.Sh DESCRIPTION
+The
+.Fn scandir
+function
+reads the directory
+.Fa dirname
+and builds an array of pointers to directory
+entries using
+.Xr malloc 3 .
+It returns the number of entries in the array.
+A pointer to the array of directory entries is stored in the location
+referenced by
+.Fa namelist .
+.Pp
+The
+.Fa select
+argument is a pointer to a user supplied subroutine which is called by
+.Fn scandir
+to select which entries are to be included in the array.
+The select routine is passed a
+pointer to a directory entry and should return a non-zero
+value if the directory entry is to be included in the array.
+If
+.Fa select
+is null, then all the directory entries will be included.
+.Pp
+The
+.Fa compar
+argument is a pointer to a user supplied subroutine which is passed to
+.Xr qsort 3
+to sort the completed array.
+If this pointer is null, the array is not sorted.
+Note that from within the
+.Fa compar
+subroutine, the two arguments are of type
+.Ft const struct dirent ** ,
+so that a double-dereference is needed to access the fields in the
+.Ft dirent
+structure.
+.Pp
+The
+.Fn alphasort
+function
+is a routine which can be used for the
+.Fa compar
+argument to sort the array alphabetically.
+.Pp
+The memory allocated for the array can be deallocated with
+.Xr free 3 ,
+by freeing each pointer in the array and then the array itself.
+#ifdef UNIFDEF_BLOCKS
+.Pp
+The
+.Fn scandir_b
+function works the same way as the
+.Fn scandir
+function, except that
+.Fa select
+and
+.Fa compar
+are blocks instead of subroutines.
+#endif
+.Sh DIAGNOSTICS
+Returns \-1 if the directory cannot be opened for reading or if
+.Xr malloc 3
+cannot allocate enough memory to hold all the data structures.
+.Sh SEE ALSO
+.Xr directory 3 ,
+.Xr malloc 3 ,
+.Xr qsort 3 ,
+.Xr dir 5
+.Sh HISTORY
+The
+.Fn scandir
+and
+.Fn alphasort
+functions appeared in
+.Bx 4.2 .
+#ifdef UNIFDEF_BLOCKS
+The
+.Fn scandir_b
+function appeared in Mac OS X 10.6.
+#endif
--- /dev/null
+/*
+ * Copyright (c) 1983, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)scandir.c 8.3 (Berkeley) 1/2/94";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/gen/scandir.c,v 1.7 2002/02/01 01:32:19 obrien Exp $");
+
+/*
+ * Scan the directory dirname calling select to make a list of selected
+ * directory entries then sort using qsort and compare routine dcomp.
+ * Returns the number of entries and a pointer to a list of pointers to
+ * struct dirent (through namelist). Returns -1 if there were any errors.
+ */
+
+#include "namespace.h"
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <dirent.h>
+#include <stdlib.h>
+#include <string.h>
+#include "un-namespace.h"
+
+/*
+ * The _GENERIC_DIRSIZ macro gives the minimum record length which will hold the directory
+ * entry. This requires the amount of space in struct dirent without the
+ * d_name field, plus enough space for the name and a terminating nul byte
+ * (dp->d_namlen + 1), rounded up to a 4 byte boundary.
+ */
+
+/* Blocks variant of scandir(3): select and dcomp are blocks rather than
+ * function pointers, and sorting is done with qsort_b(3). */
+int
+scandir_b(dirname, namelist, select, dcomp)
+	const char *dirname;
+	struct dirent ***namelist;
+	int (^select)(struct dirent *);
+	int (^dcomp)(const void *, const void *);
+{
+	struct dirent *d, *p, **names = NULL;
+	size_t nitems = 0;
+	struct stat stb;
+	long arraysz;
+	DIR *dirp;
+
+	if ((dirp = opendir(dirname)) == NULL)
+		return(-1);
+	/* Size the initial array from the directory file itself (below). */
+	if (_fstat(dirp->dd_fd, &stb) < 0)
+		goto fail;
+
+	/*
+	 * estimate the array size by taking the size of the directory file
+	 * and dividing it by a multiple of the minimum size entry.
+	 */
+	arraysz = (stb.st_size / 24);
+	names = (struct dirent **)malloc(arraysz * sizeof(struct dirent *));
+	if (names == NULL)
+		goto fail;
+
+	while ((d = readdir(dirp)) != NULL) {
+		if (select != NULL && !select(d))
+			continue;	/* just selected names */
+		/*
+		 * Make a minimum size copy of the data
+		 * (only the fixed fields plus the name, via _GENERIC_DIRSIZ).
+		 */
+		p = (struct dirent *)malloc(_GENERIC_DIRSIZ(d));
+		if (p == NULL)
+			goto fail;
+		p->d_fileno = d->d_fileno;
+		p->d_type = d->d_type;
+		p->d_reclen = d->d_reclen;
+		p->d_namlen = d->d_namlen;
+		bcopy(d->d_name, p->d_name, p->d_namlen + 1);
+		/*
+		 * Check to make sure the array has space left and
+		 * realloc the maximum size.
+		 */
+		if (nitems >= arraysz) {
+			const int inc = 10;	/* increase by this much */
+			struct dirent **names2;
+
+			names2 = (struct dirent **)realloc((char *)names,
+			    (arraysz + inc) * sizeof(struct dirent *));
+			if (names2 == NULL) {
+				free(p);
+				goto fail;
+			}
+			names = names2;
+			arraysz += inc;
+		}
+		names[nitems++] = p;
+	}
+	closedir(dirp);
+	if (nitems && dcomp != NULL)
+		qsort_b(names, nitems, sizeof(struct dirent *), dcomp);
+	*namelist = names;
+	return(nitems);
+
+fail:
+	/* Unwind: release every copied entry, the array, and the directory. */
+	while (nitems > 0)
+		free(names[--nitems]);
+	free(names);
+	closedir(dirp);
+	return -1;
+}
+++ /dev/null
-./setmode.c
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 1989, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Dave Borman at Cray Research, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)setmode.c 8.2 (Berkeley) 3/25/94";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/gen/setmode.c,v 1.9 2003/02/23 00:24:03 mikeh Exp $");
+
+#include "namespace.h"
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <ctype.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#ifdef SETMODE_DEBUG
+#include <stdio.h>
+#endif
+#include "un-namespace.h"
+
+#define	SET_LEN	6		/* initial # of bitcmd struct to malloc */
+#define	SET_LEN_INCR	4	/* # of bitcmd structs to add as needed */
+
+/*
+ * One instruction of a compiled mode "program": cmd is the operation
+ * ('+', '-', 'X', 'u', 'g', 'o', or '\0' terminator), cmd2 refines the
+ * u/g/o copy commands, and bits is the permission mask operated on.
+ */
+typedef struct bitcmd {
+	char	cmd;
+	char	cmd2;
+	mode_t	bits;
+} BITCMD;
+
+/* cmd2 flags: whether a u/g/o copy clears/sets, and which classes it hits. */
+#define	CMD2_CLR	0x01
+#define	CMD2_SET	0x02
+#define	CMD2_GBITS	0x04
+#define	CMD2_OBITS	0x08
+#define	CMD2_UBITS	0x10
+
+/* Keep the internal helper out of the exported namespace. */
+#define compress_mode _sm_compress_mode
+
+static BITCMD	*addcmd(BITCMD *, int, int, int, u_int);
+__private_extern__ void	compress_mode(BITCMD *);
+#ifdef SETMODE_DEBUG
+static void	dumpmode(BITCMD *);
+#endif
+#ifndef BUILDING_VARIANT
+/*
+ * Given the old mode and an array of bitcmd structures, apply the operations
+ * described in the bitcmd structures to the old mode, and return the new mode.
+ * Note that there is no '=' command; a strict assignment is just a '-' (clear
+ * bits) followed by a '+' (set bits).
+ */
+mode_t
+getmode(bbox, omode)
+	const void *bbox;
+	mode_t omode;
+{
+	const BITCMD *set;
+	mode_t clrval, newmode, value;
+
+	set = (const BITCMD *)bbox;
+	newmode = omode;
+	/* The program is '\0'-terminated; the default case returns. */
+	for (value = 0;; set++)
+		switch(set->cmd) {
+		/*
+		 * When copying the user, group or other bits around, we "know"
+		 * where the bits are in the mode so that we can do shifts to
+		 * copy them around. If we don't use shifts, it gets real
+		 * grundgy with lots of single bit checks and bit sets.
+		 */
+		case 'u':
+			value = (newmode & S_IRWXU) >> 6;
+			goto common;
+
+		case 'g':
+			value = (newmode & S_IRWXG) >> 3;
+			goto common;
+
+		case 'o':
+			value = newmode & S_IRWXO;
+			/* value now holds the copied rwx triplet (low 3 bits);
+			 * apply it to each class selected in cmd2. */
+common:			if (set->cmd2 & CMD2_CLR) {
+				clrval =
+				    (set->cmd2 & CMD2_SET) ? S_IRWXO : value;
+				if (set->cmd2 & CMD2_UBITS)
+					newmode &= ~((clrval<<6) & set->bits);
+				if (set->cmd2 & CMD2_GBITS)
+					newmode &= ~((clrval<<3) & set->bits);
+				if (set->cmd2 & CMD2_OBITS)
+					newmode &= ~(clrval & set->bits);
+			}
+			if (set->cmd2 & CMD2_SET) {
+				if (set->cmd2 & CMD2_UBITS)
+					newmode |= (value<<6) & set->bits;
+				if (set->cmd2 & CMD2_GBITS)
+					newmode |= (value<<3) & set->bits;
+				if (set->cmd2 & CMD2_OBITS)
+					newmode |= value & set->bits;
+			}
+			break;
+
+		case '+':
+			newmode |= set->bits;
+			break;
+
+		case '-':
+			newmode &= ~set->bits;
+			break;
+
+		case 'X':
+			/* Conditional execute: add x bits only if the file is
+			 * a directory or already has an execute bit set. */
+			if (omode & (S_IFDIR|S_IXUSR|S_IXGRP|S_IXOTH))
+				newmode |= set->bits;
+			break;
+
+		case '\0':
+		default:
+#ifdef SETMODE_DEBUG
+			(void)printf("getmode:%04o -> %04o\n", omode, newmode);
+#endif
+			return (newmode);
+		}
+}
+#endif /* BUILDING_VARIANT */
+
+/*
+ * ADDCMD() appends one command to the set array via addcmd(), first
+ * growing the array by SET_LEN_INCR entries when full.  endset is kept
+ * two entries shy of the true end so there is always room for the two
+ * entries an '=' expands to, plus the '\0' terminator.
+ */
+#define	ADDCMD(a, b, c, d)						\
+	if (set >= endset) {						\
+		BITCMD *newset;						\
+		setlen += SET_LEN_INCR;					\
+		newset = realloc(saveset, sizeof(BITCMD) * setlen);	\
+		if (!newset) {						\
+			if (saveset)					\
+				free(saveset);				\
+			saveset = NULL;					\
+			return (NULL);					\
+		}							\
+		set = newset + (set - saveset);				\
+		saveset = newset;					\
+		endset = newset + (setlen - 2);				\
+	}								\
+	set = addcmd(set, (a), (b), (c), (d))
+
+/* VARIANT_LEGACY excludes the sticky bit from the standard set. */
+#ifndef VARIANT_LEGACY
+#define	STANDARD_BITS	(S_ISUID|S_ISGID|S_IRWXU|S_IRWXG|S_IRWXO|S_ISTXT)
+#else /* VARIANT_LEGACY */
+#define	STANDARD_BITS	(S_ISUID|S_ISGID|S_IRWXU|S_IRWXG|S_IRWXO)
+#endif /* !VARIANT_LEGACY */
+
+/*
+ * setmode(): compile the mode string p (an octal number or symbolic
+ * clauses such as "u+s,og-rwx") into a malloc'd BITCMD program to be
+ * applied later by getmode().  Returns NULL on syntax error or
+ * allocation failure.
+ */
+void *
+setmode(p)
+	const char *p;
+{
+	int perm, who;
+	char op, *ep;
+	BITCMD *set, *saveset, *endset;
+	sigset_t sigset, sigoset;
+	mode_t mask;
+	int equalopdone=0, permXbits, setlen;
+	long perml;
+
+	if (!*p)
+		return (NULL);
+
+	/*
+	 * Get a copy of the mask for the permissions that are mask relative.
+	 * Flip the bits, we want what's not set. Since it's possible that
+	 * the caller is opening files inside a signal handler, protect them
+	 * as best we can.
+	 */
+	sigfillset(&sigset);
+	(void)_sigprocmask(SIG_BLOCK, &sigset, &sigoset);
+	(void)umask(mask = umask(0));
+	mask = ~mask;
+	(void)_sigprocmask(SIG_SETMASK, &sigoset, NULL);
+
+	setlen = SET_LEN + 2;
+
+	if ((set = malloc((u_int)(sizeof(BITCMD) * setlen))) == NULL)
+		return (NULL);
+	saveset = set;
+	endset = set + (setlen - 2);
+
+	/*
+	 * If an absolute number, get it and return; disallow non-octal digits
+	 * or illegal bits.
+	 */
+	if (isdigit((unsigned char)*p)) {
+		perml = strtol(p, &ep, 8);
+#ifndef VARIANT_LEGACY
+		if (*ep || perml < 0 || perml & ~STANDARD_BITS)
+#else /* VARIANT_LEGACY */
+		if (*ep || perml < 0 || perml & ~(STANDARD_BITS|S_ISTXT))
+#endif /* !VARIANT_LEGACY */
+		{
+			free(saveset);
+			return (NULL);
+		}
+		perm = (mode_t)perml;
+		/* Absolute mode: one clear-then-set pair (see addcmd '='). */
+#ifndef VARIANT_LEGACY
+		ADDCMD('=', STANDARD_BITS, perm, mask);
+#else /* VARIANT_LEGACY */
+		ADDCMD('=', (STANDARD_BITS|S_ISTXT), perm, mask);
+#endif /* !VARIANT_LEGACY */
+		set->cmd = 0;
+		return (saveset);
+	}
+
+	/*
+	 * Build list of structures to set/clear/copy bits as described by
+	 * each clause of the symbolic mode.
+	 */
+	for (;;) {
+		/* First, find out which bits might be modified. */
+		for (who = 0;; ++p) {
+			switch (*p) {
+			case 'a':
+				who |= STANDARD_BITS;
+				break;
+			case 'u':
+				who |= S_ISUID|S_IRWXU;
+				break;
+			case 'g':
+				who |= S_ISGID|S_IRWXG;
+				break;
+			case 'o':
+				who |= S_IRWXO;
+				break;
+			default:
+				goto getop;
+			}
+		}
+
+		/* Next comes the operator: one of '+', '-' or '='. */
+getop:		if ((op = *p++) != '+' && op != '-' && op != '=') {
+			free(saveset);
+			return (NULL);
+		}
+		if (op == '=')
+			equalopdone = 0;
+
+#ifdef VARIANT_LEGACY
+		who &= ~S_ISTXT;
+#endif /* VARIANT_LEGACY */
+		/* Then the permission characters the operator applies to. */
+		for (perm = 0, permXbits = 0;; ++p) {
+			switch (*p) {
+			case 'r':
+				perm |= S_IRUSR|S_IRGRP|S_IROTH;
+				break;
+			case 's':
+				/* If only "other" bits ignore set-id. */
+				if (!who || who & ~S_IRWXO)
+					perm |= S_ISUID|S_ISGID;
+				break;
+			case 't':
+				/* If only "other" bits ignore sticky. */
+				if (!who || who & ~S_IRWXO) {
+#ifdef VARIANT_LEGACY
+					who |= S_ISTXT;
+#endif /* VARIANT_LEGACY */
+					perm |= S_ISTXT;
+				}
+				break;
+			case 'w':
+				perm |= S_IWUSR|S_IWGRP|S_IWOTH;
+				break;
+			case 'X':
+				permXbits = S_IXUSR|S_IXGRP|S_IXOTH;
+				break;
+			case 'x':
+				perm |= S_IXUSR|S_IXGRP|S_IXOTH;
+				break;
+			case 'u':
+			case 'g':
+			case 'o':
+				/*
+				 * When ever we hit 'u', 'g', or 'o', we have
+				 * to flush out any partial mode that we have,
+				 * and then do the copying of the mode bits.
+				 */
+				if (perm) {
+					ADDCMD(op, who, perm, mask);
+					perm = 0;
+				}
+				if (op == '=')
+					equalopdone = 1;
+				if (op == '+' && permXbits) {
+					ADDCMD('X', who, permXbits, mask);
+					permXbits = 0;
+				}
+				ADDCMD(*p, who, op, mask);
+				break;
+
+			default:
+				/*
+				 * Add any permissions that we haven't already
+				 * done.
+				 */
+				if (perm || (op == '=' && !equalopdone)) {
+					if (op == '=')
+						equalopdone = 1;
+					ADDCMD(op, who, perm, mask);
+					perm = 0;
+				}
+				if (permXbits) {
+					ADDCMD('X', who, permXbits, mask);
+					permXbits = 0;
+				}
+				goto apply;
+			}
+		}
+
+		/* Clauses are separated by commas; anything else loops back
+		 * for another operator on the same "who" set. */
+apply:		if (!*p)
+			break;
+		if (*p != ',')
+			goto getop;
+		++p;
+	}
+	/* Terminate the program, then compact consecutive +/-/X commands. */
+	set->cmd = 0;
+#ifdef SETMODE_DEBUG
+	(void)printf("Before compress_mode()\n");
+	dumpmode(saveset);
+#endif
+	compress_mode(saveset);
+#ifdef SETMODE_DEBUG
+	(void)printf("After compress_mode()\n");
+	dumpmode(saveset);
+#endif
+	return (saveset);
+}
+
+/*
+ * addcmd(): encode one operation into *set.  An '=' expands to two
+ * entries: a '-' clearing the affected bits followed by a '+' setting
+ * the requested ones.  For 'u'/'g'/'o' copy commands, oparg carries the
+ * original operator and cmd2 records the action and affected classes.
+ * Returns a pointer just past the last entry written.
+ */
+static BITCMD *
+addcmd(set, op, who, oparg, mask)
+	BITCMD *set;
+	int oparg, who;
+	int op;
+	u_int mask;
+{
+	switch (op) {
+	case '=':
+		set->cmd = '-';
+		set->bits = who ? who : STANDARD_BITS;
+		set++;
+
+		op = '+';
+		/* FALLTHROUGH */
+	case '+':
+	case '-':
+	case 'X':
+		set->cmd = op;
+		/* without an explicit "who", honor the caller's umask */
+		set->bits = (who ? who : mask) & oparg;
+		break;
+
+	case 'u':
+	case 'g':
+	case 'o':
+		set->cmd = op;
+		if (who) {
+			set->cmd2 = ((who & S_IRUSR) ? CMD2_UBITS : 0) |
+				    ((who & S_IRGRP) ? CMD2_GBITS : 0) |
+				    ((who & S_IROTH) ? CMD2_OBITS : 0);
+			set->bits = (mode_t)~0;
+		} else {
+			set->cmd2 = CMD2_UBITS | CMD2_GBITS | CMD2_OBITS;
+			set->bits = mask;
+		}
+
+		if (oparg == '+')
+			set->cmd2 |= CMD2_SET;
+		else if (oparg == '-')
+			set->cmd2 |= CMD2_CLR;
+		else if (oparg == '=')
+			set->cmd2 |= CMD2_SET|CMD2_CLR;
+		break;
+	}
+	return (set + 1);
+}
+
+#ifdef SETMODE_DEBUG
+/* Debug aid: print every command of a compiled mode program. */
+static void
+dumpmode(set)
+	BITCMD *set;
+{
+	for (; set->cmd; ++set)
+		(void)printf("cmd: '%c' bits %04o%s%s%s%s%s%s\n",
+		    set->cmd, set->bits, set->cmd2 ? " cmd2:" : "",
+		    set->cmd2 & CMD2_CLR ? " CLR" : "",
+		    set->cmd2 & CMD2_SET ? " SET" : "",
+		    set->cmd2 & CMD2_UBITS ? " UBITS" : "",
+		    set->cmd2 & CMD2_GBITS ? " GBITS" : "",
+		    set->cmd2 & CMD2_OBITS ? " OBITS" : "");
+}
+#endif
+
+#ifndef BUILDING_VARIANT
+/*
+ * Given an array of bitcmd structures, compress by compacting consecutive
+ * '+', '-' and 'X' commands into at most 3 commands, one of each. The 'u',
+ * 'g' and 'o' commands continue to be separate. They could probably be
+ * compacted, but it's not worth the effort.
+ */
+__private_extern__ void
+compress_mode(set)
+	BITCMD *set;
+{
+	BITCMD *nset;
+	int setbits, clrbits, Xbits, op;
+
+	/* Compaction is done in place: set writes, nset reads ahead. */
+	for (nset = set;;) {
+		/* Copy over any 'u', 'g' and 'o' commands. */
+		while ((op = nset->cmd) != '+' && op != '-' && op != 'X') {
+			*set++ = *nset++;
+			if (!op)
+				return;
+		}
+
+		/* Fold a run of '+'/'-'/'X' commands into three masks;
+		 * later commands override earlier ones bit by bit. */
+		for (setbits = clrbits = Xbits = 0;; nset++) {
+			if ((op = nset->cmd) == '-') {
+				clrbits |= nset->bits;
+				setbits &= ~nset->bits;
+				Xbits &= ~nset->bits;
+			} else if (op == '+') {
+				setbits |= nset->bits;
+				clrbits &= ~nset->bits;
+				Xbits &= ~nset->bits;
+			} else if (op == 'X')
+				Xbits |= nset->bits & ~setbits;
+			else
+				break;
+		}
+		/* Rewrite the run as at most one of each command. */
+		if (clrbits) {
+			set->cmd = '-';
+			set->cmd2 = 0;
+			set->bits = clrbits;
+			set++;
+		}
+		if (setbits) {
+			set->cmd = '+';
+			set->cmd2 = 0;
+			set->bits = setbits;
+			set++;
+		}
+		if (Xbits) {
+			set->cmd = 'X';
+			set->cmd2 = 0;
+			set->bits = Xbits;
+			set++;
+		}
+	}
+}
+#endif /* BUILDING_VARIANT */
mib[1] = KERN_PROCNAME;
/* ignore errors as this is not a hard error */
- sysctl(mib, 2, NULL, NULL, &buf[0], 2*MAXCOMLEN);
+ sysctl(mib, 2, NULL, NULL, &buf[0], strlen(buf));
}
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2000, 2002-2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
extern void spin_lock(int *);
extern void spin_unlock(int *);
extern void thread_stack_pcs(vm_address_t *, unsigned, unsigned *);
+extern const char *__crashreporter_info__;
static inline void *allocate_pages(unsigned) __attribute__((always_inline));
static inline void *allocate_pages(unsigned bytes) {
if (vm_allocate(mach_task_self(), (vm_address_t *)&address, bytes,
VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL)| TRUE)) {
malloc_printf("*** out of memory while stack logging\n");
+ __crashreporter_info__ = "*** out of memory while stack logging\n";
abort();
}
return (void *)address;
arg1 *= arg2; arg2 = arg3; arg3 = 0; type &= ~stack_logging_flag_calloc;
}
if (type & stack_logging_flag_object) {
- unsigned *class = (unsigned *)arg1;
+ unsigned *class = (unsigned *)(long)arg1;
arg1 = arg2 + class[5]; // corresponds to the instance_size field
arg2 = 0; arg3 = 0; type = stack_logging_type_alloc;
}
if (stack_logging_type_alloc) {
if (!result) return;
stack_logging_log_stack(stack_logging_type_alloc, 0, 0, 0, result, num_hot_to_skip+1);
- stack_logging_log_stack(stack_logging_type_alloc, arg1, 0, 0, *((int *)result), num_hot_to_skip+1);
+ stack_logging_log_stack(stack_logging_type_alloc, arg1, 0, 0, *((int *)(long)result), num_hot_to_skip+1);
return;
}
if (stack_logging_type_dealloc) {
if (!arg1) return;
- stack_logging_log_stack(stack_logging_type_dealloc, *((int *)arg1), 0, 0, 0, num_hot_to_skip+1);
+ stack_logging_log_stack(stack_logging_type_dealloc, *((int *)(long)arg1), 0, 0, 0, num_hot_to_skip+1);
stack_logging_log_stack(stack_logging_type_dealloc, arg1, 0, 0, 0, num_hot_to_skip+1);
return;
}
if (type == stack_logging_flag_set_handle_size) {
if (!arg1) return;
	// Thanks to a horrible hack, arg3 contains the previous handle value
- if (arg3 == *((int *)arg1)) return;
+ if (arg3 == *((int *)(long)arg1)) return;
stack_logging_log_stack(stack_logging_type_dealloc, arg3, 0, 0, 0, num_hot_to_skip+1);
- stack_logging_log_stack(stack_logging_type_alloc, arg2, 0, 0, *((int *)arg1), num_hot_to_skip+1);
+ stack_logging_log_stack(stack_logging_type_alloc, arg2, 0, 0, *((int *)(long)arg1), num_hot_to_skip+1);
return;
}
if (type == (stack_logging_type_dealloc|stack_logging_type_alloc)) {
rec->address = STACK_LOGGING_DISGUISE(arg1); // we disguise the address
}
// printf("Before getting samples 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
- thread_stack_pcs(stack_entries, MAX_NUM_PC - 1, &count);
+ thread_stack_pcs((vm_address_t *)stack_entries, MAX_NUM_PC - 1, &count);
// We put at the bottom of the stack a marker that denotes the thread (+1 for good measure...)
- stack_entries[count++] = (int)pthread_self() + 1;
+ stack_entries[count++] = (int)(long)pthread_self() + 1;
/* now let's unique the sample */
// printf("Uniquing 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
rec->uniqued_stack = stack_logging_get_unique_stack(&stack_logging_the_record_list->uniquing_table, &stack_logging_the_record_list->uniquing_table_num_pages, stack_entries, count, num_hot_to_skip+2); // we additionally skip the warmest 2 entries that are an artefact of the code
}
index++;
}
- fprintf(stderr, "*** stack_logging: no record found for 0x%x\n", address);
+ fprintf(stderr, "*** stack_logging: no record found for 0x%lx\n", (long)address);
return 0;
}
/*
- * Copyright (c) 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <sys/stat.h>
#include <sys/mman.h>
#include <pthread.h>
+#include <paths.h>
#include <errno.h>
#include "stack_logging.h"
#include "malloc_printf.h"
-#include "_simple.h" // as included by malloc.c, this defines ASL_LEVEL_INFO
+#include "_simple.h" // as included by malloc.c, this defines ASL_LEVEL_INFO
#pragma mark -
#pragma mark Defines
#define ASL_LEVEL_INFO stderr
#endif
-#define STACK_LOGGING_THREAD_HASH_SIZE 2048 // must be an even power of two
#define STACK_LOGGING_MAX_STACK_SIZE 512
#define STACK_LOGGING_BLOCK_WRITING_SIZE 8192
-#define STACK_LOGGING_NUMBER_RECENT_BACKTRACES 50
-#define STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY 100
-#define STACK_LOGGING_MAX_THREAD_COLLISIONS 3
-#define STACK_LOGGING_MIN_SAME_FRAMES 3
#define STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED 3
-#define STACK_LOGGING_REMOTE_CACHE_DEFAULT_COLLISION_ALLOWANCE 5
-#define STACK_LOGGING_REMOTE_CACHE_DEFAULT_NODE_CAPACITY 1 << 14 // <2mb for 32->32, ~3mb for (32->64 || 64->32), ~4mb for 64->64
-#define STACK_LOGGING_REMOTE_CACHE_COLLISION_GROWTH_RATE 3
-#define STACK_LOGGING_REMOTE_LINKS_PER_BLOCK (1 << 20) // this sets a maximum number of malloc/frees that can be read in to: 1^30;
- // this means if the .index file is >24gb, remote access will start to fail.
- // note: at this point, the .stack file will probably be ~56gb on top of that and
- // it'll also be using around 20 gb of memory in the analyzing process...
- // all of these are 64-bit stats; the 32-bit analyzing process limits are lower.
- // in short, if you want to analyze a process making > 1 billion malloc/frees
- // (after compaction), bump this number slightly.
+
+#define BACKTRACE_UNIQUING_DEBUG 0
+
+// The expansion factor controls the shifting up of table size. A factor of 1 will double the size upon expanding,
+// 2 will quadruple the size, etc. Maintaining a 66% fill in an ideal table requires the collision allowance to
+// increase by 3 for every quadrupling of the table size (although this increases the constant applied to insertion
+// performance O(c*n))
+#define EXPAND_FACTOR 2
+#define COLLISION_GROWTH_RATE 3
+
+// For a uniquing table, the useful node size is slots := floor(table_byte_size / (2 * sizeof(mach_vm_address_t)))
+// Some useful numbers for the initial max collision value (desiring 66% fill):
+// 16K-23K slots -> 16 collisions
+// 24K-31K slots -> 17 collisions
+// 32K-47K slots -> 18 collisions
+// 48K-79K slots -> 19 collisions
+// 80K-96K slots -> 20 collisions
+#define INITIAL_MAX_COLLIDE 19
+#define DEFAULT_UNIQUING_PAGE_SIZE 256
#pragma mark -
#pragma mark Macros
#pragma mark -
#pragma mark Types
-#pragma mark - stack_logging_backtrace_event
-typedef struct {
- int16_t offset_delta; // may want to expand this one; should always be < 0.
- uint16_t num_identical_frames;
- uint16_t num_new_hot_frames; // count of backtrace[]
-} stack_logging_backtrace_event;
-
-#pragma mark - stack_logging_index_event
typedef struct {
uintptr_t argument;
uintptr_t address;
uint64_t offset_and_flags; // top 8 bits are actually the flags!
} stack_logging_index_event;
-#pragma mark - stack_logging_index_event32
typedef struct {
uint32_t argument;
uint32_t address;
uint64_t offset_and_flags; // top 8 bits are actually the flags!
} stack_logging_index_event32;
-#pragma mark - stack_logging_index_event64
typedef struct {
uint64_t argument;
uint64_t address;
uint64_t offset_and_flags; // top 8 bits are actually the flags!
} stack_logging_index_event64;
-#pragma mark - thread_backtrace_history
-// for management of previous backtraces (by thread):
+#pragma pack(push,4)
typedef struct {
- vm_address_t thread;
- uint32_t hash_pos;
- uint64_t logging_index;
- int64_t logging_offset;
- uint32_t full_backtrace_countdown;
- uint32_t backtrace_length;
- uintptr_t *backtrace;
-} thread_backtrace_history;
-
-#pragma mark - stack_buffer_shared_memory
+ uint64_t numPages; // number of pages of the table
+ uint64_t numNodes;
+ uint64_t tableSize;
+ uint64_t untouchableNodes;
+ mach_vm_address_t table_address;
+ int32_t max_collide;
+ // 'table_address' is just an always 64-bit version of the pointer-sized 'table' field to remotely read;
+ // it's important that the offset of 'table_address' in the struct does not change between 32 and 64-bit.
+#if BACKTRACE_UNIQUING_DEBUG
+ uint64_t nodesFull;
+ uint64_t backtracesContained;
+#endif
+ mach_vm_address_t *table; // allocated using vm_allocate()
+} backtrace_uniquing_table;
+#pragma pack(pop)
+
+// for storing/looking up allocations that haven't yet been written to disk; consistent size across 32/64-bit processes.
// It's important that these fields don't change alignment due to the architecture because they may be accessed from an
// analyzing process with a different arch - hence the pragmas.
#pragma pack(push,4)
typedef struct {
- uint64_t start_index_offset;
- uint64_t start_stack_offset;
- uint32_t next_free_index_buffer_offset;
- uint32_t next_free_stack_buffer_offset;
- char index_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
- char stack_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
+ uint64_t start_index_offset;
+ uint32_t next_free_index_buffer_offset;
+ mach_vm_address_t uniquing_table_address;
+ char index_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
+ backtrace_uniquing_table *uniquing_table;
} stack_buffer_shared_memory;
#pragma pack(pop)
-#pragma mark - index_ll_node
-// linked-list node in table for allocations of a single address
-typedef struct index_ll_node {
- struct index_ll_node *next;
- uint64_t index_file_offset;
-} index_ll_node;
-
-#pragma mark - remote_index_node32
-// 32-bit target process address slot in table
-typedef struct {
- uint32_t address;
- index_ll_node *linked_list;
- index_ll_node *last_link;
-} remote_index_node32;
-
-#pragma mark - remote_index_node64
-// 64-bit target process variant
+// target process address -> record table (for __mach_stack_logging_get_frames)
typedef struct {
uint64_t address;
- index_ll_node *linked_list;
- index_ll_node *last_link;
-} remote_index_node64;
+ uint64_t index_file_offset;
+} remote_index_node;
-#pragma mark - remote_index_cache
// for caching index information client-side:
typedef struct {
size_t cache_size;
size_t cache_node_capacity;
uint32_t collision_allowance;
- uint64_t cache_node_count; // Debug only.
- uint64_t cache_llnode_count; // Debug only.
- size_t in_use_node_size; // sizeof(remote_index_node32) || sizeof(remote_index_node64)
- void *table_memory; // this can be malloced; it's on the client side.
- remote_index_node32 *casted_table32; // represents table memory as 32-bit.
- remote_index_node64 *casted_table64; // ditto, 64-bit
+ remote_index_node *table_memory; // this can be malloced; it's on the client side.
stack_buffer_shared_memory *shmem; // shared memory
stack_buffer_shared_memory snapshot; // memory snapshot of the remote process' shared memory
uint32_t last_pre_written_index_size;
uint64_t last_index_file_offset;
- index_ll_node *blocks[1024];
- uint32_t current_block;
- uint32_t next_block_index;
+ backtrace_uniquing_table uniquing_table; // snapshot of the remote process' uniquing table
} remote_index_cache;
-#pragma mark - remote_task_file_streams
// for reading stack history information from remote processes:
typedef struct {
task_t remote_task;
int32_t task_is_64_bit;
int32_t in_use_count;
FILE *index_file_stream;
- FILE *stack_file_stream;
remote_index_cache *cache;
} remote_task_file_streams;
#pragma mark -
-#pragma mark Constants
-
-static stack_buffer_shared_memory *pre_write_buffers;
-static char *pre_write_backtrace_event_buffer = NULL;
-static char *pre_write_index_buffer = NULL;
+#pragma mark Constants/Globals
static OSSpinLock stack_logging_lock = OS_SPINLOCK_INIT;
-static uint64_t current_logging_index = 0;
-static int64_t total_offset = 0;
+
+// support for multi-threaded forks
+extern void __stack_logging_fork_prepare();
+extern void __stack_logging_fork_parent();
+extern void __stack_logging_fork_child();
+
+// support for gdb and others checking for stack_logging locks
+__private_extern__ boolean_t __stack_logging_locked();
// single-thread access variables
-static vm_address_t stack_buffer[STACK_LOGGING_NUMBER_RECENT_BACKTRACES][STACK_LOGGING_MAX_STACK_SIZE];
-static thread_backtrace_history thread_buffer[STACK_LOGGING_THREAD_HASH_SIZE];
-static int32_t current_stack_buffer = 0;
+static stack_buffer_shared_memory *pre_write_buffers;
+static vm_address_t *stack_buffer;
static uintptr_t last_logged_malloc_address = 0;
-static uint32_t last_logged_backtrace_offset_diff = 0;
-static thread_backtrace_history compaction_saved_differencing_history;
-
-// Constants to define stack logging directory and path names.
-// Files will get written to /tmp/stack-logs.<pid>.<progname>.XXXXXX/stack-logs.{index,stacks}
-// The directory is securely created with mkdtemp() and the files inside it just have static names for simplicity.
-static const char *temporary_directory = "/tmp";
-static const char *stack_logging_directory_base_name = "stack-logs.";
-static const char *index_file_name = "stack-logs.index";
-static const char *stack_file_name = "stack-logs.stacks";
-
-static char stack_logs_directory[PATH_MAX];
+
+// Constants to define stack logging file path names.
+// Files will get written as /tmp/stack-logs.<pid>.<progname>.XXXXXX.index
+// unless the base directory is specified otherwise with MallocStackLoggingDirectory.
+// In this case, a file /tmp/stack-logs.<pid>.<progname>.XXXXXX.link will also be created.
+static const char *stack_log_file_base_name = "stack-logs.";
+static const char *stack_log_file_suffix = ".index";
+static const char *stack_log_link_suffix = ".link";
+
+static char stack_log_location[PATH_MAX];
+static char stack_log_reference_file[PATH_MAX];
static char index_file_path[PATH_MAX];
-static char stack_file_path[PATH_MAX];
static int index_file_descriptor = -1;
-static int stack_file_descriptor = -1;
// for accessing remote log files
static remote_task_file_streams remote_fds[STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED];
static OSSpinLock remote_fd_list_lock = OS_SPINLOCK_INIT;
// activation variables
-
static int logging_use_compaction = 1; // set this to zero to always disable compaction.
// We set malloc_logger to NULL to disable logging, if we encounter errors
extern malloc_logger_t *malloc_logger;
#pragma mark -
-#pragma mark Disk Stack Logging
+#pragma mark In-Memory Backtrace Uniquing
-static void delete_log_files(void); // pre-declare
+/*
+ * allocate_pages(): vm-allocate memSize bytes, tagged for analysis tools.
+ * On failure the error is reported through malloc_printf() and NULL is
+ * returned (allocatedMem is still 0); callers must check the result.
+ */
+static __attribute__((always_inline))
+inline void*
+allocate_pages(uint64_t memSize)
+{
+	mach_vm_address_t allocatedMem = 0ull;
+	if (mach_vm_allocate(mach_task_self(), &allocatedMem, memSize, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL)) != KERN_SUCCESS) {
+		/* fixed typo in error message: "exhaused" -> "exhausted" */
+		malloc_printf("allocate_pages(): virtual memory exhausted!\n");
+	}
+	return (void*)(uintptr_t)allocatedMem;
+}
-static void
-append_int(char * filename, pid_t pid)
+/* deallocate_pages(): release a region obtained from allocate_pages();
+ * returns the mach_vm_deallocate() kern_return_t status. */
+static __attribute__((always_inline))
+inline int
+deallocate_pages(void* memPointer, uint64_t memSize)
+{
+	return mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)memPointer, memSize);
+}
+
+/*
+ * __create_uniquing_table(): allocate and zero an in-memory backtrace
+ * uniquing table of DEFAULT_UNIQUING_PAGE_SIZE pages.  Returns NULL only
+ * if the descriptor itself cannot be allocated.
+ */
+static backtrace_uniquing_table*
+__create_uniquing_table(void)
 {
-	unsigned int value;
-	size_t len;
-	unsigned int i;
-	unsigned int count;
+	backtrace_uniquing_table *uniquing_table = (backtrace_uniquing_table*)allocate_pages((uint64_t)round_page(sizeof(backtrace_uniquing_table)));
+	if (!uniquing_table) return NULL;
+	bzero(uniquing_table, sizeof(backtrace_uniquing_table));
+	uniquing_table->numPages = DEFAULT_UNIQUING_PAGE_SIZE;
+	uniquing_table->tableSize = uniquing_table->numPages * vm_page_size;
+	// each node is two mach_vm_address_t slots: (PC, parent index)
+	uniquing_table->numNodes = ((uniquing_table->tableSize / (sizeof(mach_vm_address_t) * 2)) >> 1) << 1; // make sure it's even.
+	uniquing_table->table = (mach_vm_address_t*)(uintptr_t)allocate_pages(uniquing_table->tableSize);
+	uniquing_table->table_address = (uintptr_t)uniquing_table->table;
+	uniquing_table->max_collide = INITIAL_MAX_COLLIDE;
+	uniquing_table->untouchableNodes = 0;
+
+#if BACKTRACE_UNIQUING_DEBUG
+	malloc_printf("create_uniquing_table(): creating. size: %lldKB == %lldMB, numnodes: %lld (%lld untouchable)\n", uniquing_table->tableSize >> 10, uniquing_table->tableSize >> 20, uniquing_table->numNodes, uniquing_table->untouchableNodes);
+	malloc_printf("create_uniquing_table(): table: %p; end: %p\n", uniquing_table->table, (void*)((uintptr_t)uniquing_table->table + (uintptr_t)uniquing_table->tableSize));
+#endif
+	return uniquing_table;
+}
+
+/*
+ * __expand_uniquing_table(): grow the table by EXPAND_FACTOR (4x), copy
+ * the old contents to the front of the new region, and mark the old node
+ * range untouchable so existing uniqued indices stay valid.
+ */
+static void
+__expand_uniquing_table(backtrace_uniquing_table *uniquing_table)
+{
+	mach_vm_address_t *oldTable = uniquing_table->table;
+	uint64_t oldsize = uniquing_table->tableSize;
+	uint64_t oldnumnodes = uniquing_table->numNodes;
-	len = strlen(filename);
+	uniquing_table->numPages = uniquing_table->numPages << EXPAND_FACTOR;
+	uniquing_table->tableSize = uniquing_table->numPages * vm_page_size;
+	uniquing_table->numNodes = ((uniquing_table->tableSize / (sizeof(mach_vm_address_t) * 2)) >> 1) << 1; // make sure it's even.
+	mach_vm_address_t *newTable = (mach_vm_address_t*)(uintptr_t)allocate_pages(uniquing_table->tableSize);
-	count = 0;
-	value = pid;
+	uniquing_table->table = newTable;
+	uniquing_table->table_address = (uintptr_t)uniquing_table->table;
+	// allow more probing in the larger table before forcing another expand
+	uniquing_table->max_collide = uniquing_table->max_collide + COLLISION_GROWTH_RATE;
+
+	if (mach_vm_copy(mach_task_self(), (mach_vm_address_t)(uintptr_t)oldTable, oldsize, (mach_vm_address_t)(uintptr_t)newTable) != KERN_SUCCESS) {
+		malloc_printf("expandUniquingTable(): VMCopyFailed\n");
+	}
+	uniquing_table->untouchableNodes = oldnumnodes;
+
+#if BACKTRACE_UNIQUING_DEBUG
+	malloc_printf("expandUniquingTable(): expanded from nodes full: %lld of: %lld (~%2d%%); to nodes: %lld (inactive = %lld); unique bts: %lld\n",
+				  uniquing_table->nodesFull, oldnumnodes, (int)(((uniquing_table->nodesFull * 100.0) / (double)oldnumnodes) + 0.5),
+				  uniquing_table->numNodes, uniquing_table->untouchableNodes, uniquing_table->backtracesContained);
+	malloc_printf("expandUniquingTable(): allocate: %p; end: %p\n", newTable, (void*)((uintptr_t)newTable + (uintptr_t)(uniquing_table->tableSize)));
+	malloc_printf("expandUniquingTable(): deallocate: %p; end: %p\n", oldTable, (void*)((uintptr_t)oldTable + (uintptr_t)oldsize));
+#endif
+
+	if (deallocate_pages(oldTable, oldsize) != KERN_SUCCESS) {
+		malloc_printf("expandUniquingTable(): mach_vm_deallocate failed. [%p]\n", uniquing_table->table);
+	}
+}
+
+/*
+ * __enter_frames_in_table(): record the backtrace frames[0..count-1] in
+ * the uniquing table.  Each node stores a (PC, parent-index) pair, so a
+ * backtrace becomes a chain of nodes shared with other backtraces that
+ * have the same tail.  On success returns 1 and sets *foundIndex to the
+ * index of the last-entered frame's node; returns 0 when a slot cannot
+ * be found within max_collide probes (caller is expected to expand the
+ * table and retry).
+ */
+static int
+__enter_frames_in_table(backtrace_uniquing_table *uniquing_table, uint64_t *foundIndex, mach_vm_address_t *frames, int32_t count)
+{
+	mach_vm_address_t thisPC;
+	uint64_t hash, uParent = (uint64_t)(-1ll), modulus = (uniquing_table->numNodes-uniquing_table->untouchableNodes-1);
+	int32_t collisions, lcopy = count, returnVal = 1;
+	uint64_t hash_multiplier = ((uniquing_table->numNodes - uniquing_table->untouchableNodes)/(uniquing_table->max_collide*2+1));
+	mach_vm_address_t *node;
+	// walk frames from index count-1 down to 0; uParent links each node
+	// to the previously entered one, (-1) marks the chain end
+	while (--lcopy >= 0) {
+		thisPC = frames[lcopy];
+
+		// hash = initialHash(uniquing_table, uParent, thisPC);
+		// hash only into the region above untouchableNodes so frozen
+		// entries from before a table expansion are never overwritten
+		hash = uniquing_table->untouchableNodes + (((uParent << 4) ^ (thisPC >> 2)) % modulus);
+		collisions = uniquing_table->max_collide;
+
+		while (collisions--) {
+			node = uniquing_table->table + (hash * 2);
+
+			if (*node == 0 && node[1] == 0) {
+				// blank; store this entry!
+				// Note that we need to test for both head[0] and head[1] as (0, -1) is a valid entry
+				node[0] = thisPC;
+				node[1] = uParent;
+				uParent = hash;
+#if BACKTRACE_UNIQUING_DEBUG
+				uniquing_table->nodesFull++;
+				if (lcopy == 0) {
+					uniquing_table->backtracesContained++;
+				}
+#endif
+				break;
+			}
+			if (*node == thisPC && node[1] == uParent) {
+				// hit! retrieve index and go.
+				uParent = hash;
+				break;
+			}
+
+			// quadratic-style re-probe on collision
+			hash += collisions * hash_multiplier + 1;
+
+			if (hash >= uniquing_table->numNodes) {
+				hash -= (uniquing_table->numNodes - uniquing_table->untouchableNodes); // wrap around.
+			}
+		}
+
+		if (collisions < 0) {
+			returnVal = 0;
+			break;
+		}
+	}
+
+	if (returnVal) *foundIndex = uParent;
+
+	return returnVal;
+}
+
+/*
+ * __unwind_stack_from_table_index(): rebuild a backtrace from a uniqued
+ * node index by following parent links until the (-1) terminator,
+ * writing at most max_frames PCs into out_frames_buffer.
+ * *out_frames_count receives the number recovered (0 if index_pos is
+ * out of range).
+ */
+static void
+__unwind_stack_from_table_index(backtrace_uniquing_table *uniquing_table, uint64_t index_pos, mach_vm_address_t *out_frames_buffer, uint32_t *out_frames_count, uint32_t max_frames)
+{
+	mach_vm_address_t *node = uniquing_table->table + (index_pos * 2);
+	uint32_t foundFrames = 0;
+	if (index_pos < uniquing_table->numNodes) {
+		while (foundFrames < max_frames) {
+			out_frames_buffer[foundFrames++] = node[0];
+			if (node[1] == (mach_vm_address_t)(-1ll)) break;
+			node = uniquing_table->table + (node[1] * 2);
+		}
+	}
+
+	*out_frames_count = foundFrames;
+}
+
+#pragma mark -
+#pragma mark Disk Stack Logging
+
+static void delete_log_files(void); // pre-declare
+static int delete_logging_file(char *log_location);
+
+/*
+ * append_int(): append the decimal representation of pid to filename in
+ * place.  If the result would need maxLength bytes or more, the string
+ * is left unmodified.
+ */
+static void
+append_int(char * filename, pid_t pid, size_t maxLength)
+{
+	size_t len = strlen(filename);
+
+	// count the decimal digits of pid (0 digits for pid <= 0)
+	uint32_t count = 0;
+	pid_t value = pid;
 	while (value > 0) {
 		value /= 10;
-		count ++;
+		count++;
 	}
-	filename[len + count] = 0;
+	if (len + count >= maxLength) return; // don't modify the string if it would violate maxLength
+
+	filename[len + count] = '\0';
 	value = pid;
+	// fill digits backwards, least-significant first
-	for(i = 0 ; i < count ; i ++) {
+	uint32_t i;
+	for (i = 0 ; i < count ; i ++) {
 		filename[len + count - 1 - i] = '0' + value % 10;
 		value /= 10;
 	}
 }
-// If successful, returns path to directory that was created. Otherwise returns NULL.
+// If successful, returns path to log file that was created. Otherwise returns NULL.
static char *
-create_log_files(void)
+create_log_file(void)
{
pid_t pid = getpid();
const char *progname = getprogname();
- char path_name[PATH_MAX];
- char *created_directory = NULL;
+ char *created_log_location = NULL;
// WARNING! use of snprintf can induce malloc() calls
- strlcpy(stack_logs_directory, temporary_directory, PATH_MAX);
- strlcat(stack_logs_directory, "/", PATH_MAX);
- strlcat(stack_logs_directory, stack_logging_directory_base_name, PATH_MAX);
- append_int(stack_logs_directory, pid);
+ bool use_alternate_location = false;
+ char *evn_log_directory = getenv("MallocStackLoggingDirectory");
+ if (evn_log_directory && *evn_log_directory) {
+ use_alternate_location = true;
+ strlcpy(stack_log_location, evn_log_directory, (size_t)PATH_MAX);
+ size_t evn_log_len = strlen(stack_log_location);
+ // add the '/' only if it's not already there.
+ if (evn_log_directory[evn_log_len-1] != '/') {
+ strlcat(stack_log_location, "/", (size_t)PATH_MAX);
+ }
+ } else {
+ strlcpy(stack_log_location, _PATH_TMP, (size_t)PATH_MAX);
+ }
+
+ strlcat(stack_log_location, stack_log_file_base_name, (size_t)PATH_MAX);
+ append_int(stack_log_location, pid, (size_t)PATH_MAX);
if (progname && progname[0] != '\0') {
- strlcat(stack_logs_directory, ".", PATH_MAX);
- strlcat(stack_logs_directory, progname, PATH_MAX);
+ strlcat(stack_log_location, ".", (size_t)PATH_MAX);
+ strlcat(stack_log_location, progname, (size_t)PATH_MAX);
}
- strlcat(stack_logs_directory, ".XXXXXX", PATH_MAX);
-
- // Securely make temporary directory for the log files, then create the files.
- if (mkdtemp(stack_logs_directory) == stack_logs_directory) {
- strlcpy(path_name, stack_logs_directory, PATH_MAX);
- strlcat(path_name, "/", PATH_MAX);
- strlcat(path_name, index_file_name, PATH_MAX);
- strlcpy(index_file_path, path_name, PATH_MAX);
- index_file_descriptor = open(path_name, O_WRONLY | O_TRUNC | O_CREAT, 0600);
-
- strlcpy(path_name, stack_logs_directory, PATH_MAX);
- strlcat(path_name, "/", PATH_MAX);
- strlcat(path_name, stack_file_name, PATH_MAX);
- strlcpy(stack_file_path, path_name, PATH_MAX);
- stack_file_descriptor = open(path_name, O_WRONLY | O_TRUNC | O_CREAT, 0600);
-
- if (index_file_descriptor == -1 || stack_file_descriptor == -1) {
- _malloc_printf(ASL_LEVEL_INFO, "unable to create stack log files in directory %s\n", stack_logs_directory);
- delete_log_files();
- created_directory = NULL;
- } else {
- _malloc_printf(ASL_LEVEL_INFO, "stack logs being written into %s\n", stack_logs_directory);
- created_directory = stack_logs_directory;
+ if (!use_alternate_location) strlcat(stack_log_location, ".XXXXXX", (size_t)PATH_MAX);
+ strlcat(stack_log_location, stack_log_file_suffix, (size_t)PATH_MAX);
+
+ // in the case where the user has specified an alternate location, drop a reference file
+ // in /tmp with the suffix 'stack_log_link_suffix' (".link") and save the path of the
+ // stack logging file there.
+ if (use_alternate_location) {
+ strlcpy(stack_log_reference_file, _PATH_TMP, (size_t)PATH_MAX);
+ strlcat(stack_log_reference_file, stack_log_file_base_name, (size_t)PATH_MAX);
+ append_int(stack_log_reference_file, pid, (size_t)PATH_MAX);
+ if (progname && progname[0] != '\0') {
+ strlcat(stack_log_reference_file, ".", (size_t)PATH_MAX);
+ strlcat(stack_log_reference_file, progname, (size_t)PATH_MAX);
+ }
+ strlcat(stack_log_reference_file, ".XXXXXX", (size_t)PATH_MAX);
+ strlcat(stack_log_reference_file, stack_log_link_suffix, (size_t)PATH_MAX);
+
+ int link_file_descriptor = mkstemps(stack_log_reference_file, (int)strlen(stack_log_link_suffix));
+ if (link_file_descriptor == -1) {
+ _malloc_printf(ASL_LEVEL_INFO, "unable to create stack reference file at %s\n", stack_log_location);
+ return NULL;
+ }
+ ssize_t written = write(link_file_descriptor, stack_log_location, strlen(stack_log_location));
+ if (written < (ssize_t)strlen(stack_log_location)) {
+ _malloc_printf(ASL_LEVEL_INFO, "unable to write to stack reference file at %s\n", stack_log_location);
+ return NULL;
}
+ const char *description_string = "\n(This is a reference file to the stack logs at the path above.)\n";
+ write(link_file_descriptor, description_string, strlen(description_string));
+ close(link_file_descriptor);
+ }
+
+ // Securely create the log file.
+ if ((index_file_descriptor = mkstemps(stack_log_location, (int)strlen(stack_log_file_suffix))) != -1) {
+ _malloc_printf(ASL_LEVEL_INFO, "stack logs being written into %s\n", stack_log_location);
+ created_log_location = stack_log_location;
} else {
- _malloc_printf(ASL_LEVEL_INFO, "unable to create stack log directory %s\n", stack_logs_directory);
- created_directory = NULL;
+ _malloc_printf(ASL_LEVEL_INFO, "unable to create stack logs at %s\n", stack_log_location);
+ if (use_alternate_location) delete_logging_file(stack_log_reference_file);
+ stack_log_reference_file[0] = '\0';
+ stack_log_location[0] = '\0';
+ created_log_location = NULL;
}
- return created_directory;
+ return created_log_location;
+}
+
+// Check to see if the log file is actually a reference to another location
+static int
+log_file_is_reference(char *log_location, char *out_reference_loc_buffer, size_t max_reference_path_size)
+{
+ if (log_location == NULL || log_location[0] == '\0') return 0;
+
+ size_t log_len = strlen(log_location);
+ size_t link_suffix_len = strlen(stack_log_link_suffix);
+ if (log_len < link_suffix_len || strncmp(log_location+log_len-link_suffix_len, stack_log_link_suffix, link_suffix_len) != 0) {
+ // not a reference file.
+ return 0;
+ }
+
+ if (!out_reference_loc_buffer || max_reference_path_size == 0) return 1;
+
+ FILE *reference_file = fopen(log_location, "r");
+ if (reference_file == NULL) {
+ // if unable to open the file, it may be because another user created it; no need to warn.
+ out_reference_loc_buffer[0] = '\0';
+ return 1;
+ }
+
+ char *ret = fgets(out_reference_loc_buffer, (int)max_reference_path_size, reference_file);
+ if (!ret) {
+ out_reference_loc_buffer[0] = '\0';
+ _malloc_printf(ASL_LEVEL_INFO, "unable to read from stack logging reference file at %s\n", log_location);
+ return 1;
+ } else {
+ size_t read_line_len = strlen(out_reference_loc_buffer);
+ if (read_line_len >= 1 && out_reference_loc_buffer[read_line_len-1] == '\n') {
+ out_reference_loc_buffer[read_line_len-1] = '\0';
+ }
+ }
+
+ fclose(reference_file);
+
+ return 1;
}
// This function may be called from either the target process when exiting, or from either the target process or
// a stack log analysis process, when reaping orphaned stack log files.
// Returns -1 if the files exist and they couldn't be removed, returns 0 otherwise.
static int
-delete_log_files_in_directory(char *logdir)
+delete_logging_file(char *log_location)
{
- char path_name[PATH_MAX];
- int unlink_count = 0;
- int failure_count = 0;
- struct stat statbuf;
-
- if (logdir == NULL || logdir[0] == '\0') return 0;
-
- strlcpy(path_name, logdir, PATH_MAX);
- strlcat(path_name, "/", PATH_MAX);
- strlcat(path_name, index_file_name, PATH_MAX);
- if (unlink(path_name) == 0) {
- unlink_count++;
- } else if (stat(path_name, &statbuf) == 0) {
- failure_count++;
- }
+ if (log_location == NULL || log_location[0] == '\0') return 0;
- strlcpy(path_name, logdir, PATH_MAX);
- strlcat(path_name, "/", PATH_MAX);
- strlcat(path_name, stack_file_name, PATH_MAX);
- if (unlink(path_name) == 0) {
- unlink_count++;
- } else if (stat(path_name, &statbuf) == 0) {
- failure_count++;
+ struct stat statbuf;
+ if (unlink(log_location) != 0 && stat(log_location, &statbuf) == 0) {
+ return -1;
}
-
- if (rmdir(logdir) == -1) failure_count++;
-
- return (failure_count > 0) ? -1 : 0;
+ return 0;
}
// This function will be called from atexit() in the target process.
static void
delete_log_files(void)
{
- if (stack_logs_directory == NULL || stack_logs_directory[0] == '\0') return;
-
- if (delete_log_files_in_directory(stack_logs_directory) == 0) {
- _malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", stack_logs_directory);
- stack_file_path[0] = '\0';
- index_file_path[0] = '\0';
- } else {
- _malloc_printf(ASL_LEVEL_INFO, "unable to delete stack logs from %s\n", stack_logs_directory);
+ if (stack_log_location && stack_log_location[0]) {
+ if (delete_logging_file(stack_log_location) == 0) {
+ _malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", stack_log_location);
+ index_file_path[0] = '\0';
+ } else {
+ _malloc_printf(ASL_LEVEL_INFO, "unable to delete stack logs from %s\n", stack_log_location);
+ }
+ }
+ if (stack_log_reference_file && stack_log_reference_file[0]) {
+ delete_logging_file(stack_log_reference_file);
}
}
size_t size = sizeof(struct kinfo_proc);
int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
- sysctl(mib, 4, kpt, &size, NULL, 0); // size is either 1 or 0 entries when we ask for a single pid
+ sysctl(mib, 4, kpt, &size, NULL, (size_t)0); // size is either 1 or 0 entries when we ask for a single pid
return (size==sizeof(struct kinfo_proc));
}
{
DIR *dp;
struct dirent *entry;
- int prefix_length;
char prefix_name[PATH_MAX];
char pathname[PATH_MAX];
pid_t current_pid = getpid();
- if ((dp = opendir(temporary_directory)) == NULL) {
+ if ((dp = opendir(_PATH_TMP)) == NULL) {
return;
}
- strlcpy(prefix_name, stack_logging_directory_base_name, PATH_MAX);
- prefix_length = strlen(prefix_name);
+ strlcpy(prefix_name, stack_log_file_base_name, (size_t)PATH_MAX);
+ size_t prefix_length = strlen(prefix_name);
while ( (entry = readdir(dp)) != NULL ) {
- if ( entry->d_type == DT_DIR && ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) ) {
+ if ( entry->d_type != DT_DIR && entry->d_type != DT_LNK && ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) ) {
long pid = strtol(&entry->d_name[prefix_length], (char **)NULL, 10);
- if ( (! is_process_running(pid)) || (remove_for_this_pid && pid == current_pid) ) {
- strlcpy(pathname, temporary_directory, PATH_MAX);
- strlcat(pathname, "/", PATH_MAX);
- strlcat(pathname, entry->d_name, PATH_MAX);
- if (delete_log_files_in_directory(pathname) == 0) {
+ if ( (! is_process_running((pid_t)pid)) || (remove_for_this_pid && (pid_t)pid == current_pid) ) {
+ strlcpy(pathname, _PATH_TMP, (size_t)PATH_MAX);
+ strlcat(pathname, entry->d_name, (size_t)PATH_MAX);
+ char reference_file_buffer[PATH_MAX];
+ bool pathname_is_ref_file = false;
+ if (log_file_is_reference(pathname, reference_file_buffer, (size_t)PATH_MAX) && *reference_file_buffer) {
+ pathname_is_ref_file = true;
+ if (delete_logging_file(reference_file_buffer) == 0) {
+ if (remove_for_this_pid && pid == current_pid) {
+ _malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", reference_file_buffer);
+ } else {
+ _malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, reference_file_buffer);
+ }
+ }
+ }
+ if (delete_logging_file(pathname) == 0) {
if (remove_for_this_pid && pid == current_pid) {
- _malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", pathname);
+ if (!pathname_is_ref_file) _malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", pathname);
} else {
- _malloc_printf(ASL_LEVEL_INFO, "process %d no longer exists, stack logs deleted from %s\n", pid, pathname);
+ if (!pathname_is_ref_file) _malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, pathname);
}
+ char shmem_name_string[PATH_MAX];
+ strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
+ append_int(shmem_name_string, (pid_t)pid, (size_t)PATH_MAX);
+ if (pid != current_pid) shm_unlink(shmem_name_string);
}
}
}
if (fd == index_file_descriptor) {
file_to_reopen = index_file_path;
fd_to_reset = &index_file_descriptor;
- }
- else if (fd == stack_file_descriptor) {
- file_to_reopen = stack_file_path;
- fd_to_reset = &stack_file_descriptor;
} else {
// We don't know about this file. Return (and abort()).
- _malloc_printf(ASL_LEVEL_INFO, "Unknown file descriptor (it's neither the index file, nor the stacks file)\n");
+ _malloc_printf(ASL_LEVEL_INFO, "Unknown file descriptor; expecting stack logging index file\n");
return -1;
}
int fds_to_close[3] = { 0 };
while (fd < 3) {
if (fd == -1) {
- _malloc_printf(ASL_LEVEL_INFO, "unable to re-open stack log file %s\n", file_to_reopen);
+ _malloc_printf(ASL_LEVEL_INFO, "unable to re-open stack logging file %s\n", file_to_reopen);
delete_log_files();
return -1;
}
char * p;
if (index_file_descriptor == -1) {
- if (create_log_files() == NULL) {
+ if (create_log_file() == NULL) {
return;
}
}
// Write the events before the index so that hopefully the events will be on disk if the index refers to them.
- p = pre_write_backtrace_event_buffer;
- remaining = (size_t)pre_write_buffers->next_free_stack_buffer_offset;
- while (remaining > 0) {
- written = robust_write(stack_file_descriptor, p, remaining);
- if (written == -1) {
- _malloc_printf(ASL_LEVEL_INFO, "Unable to write to stack logging file %s (%s)\n", stack_file_path, strerror(errno));
- disable_stack_logging();
- return;
- }
- p += written;
- remaining -= written;
- }
- p = pre_write_index_buffer;
+ p = pre_write_buffers->index_buffer;
remaining = (size_t)pre_write_buffers->next_free_index_buffer_offset;
while (remaining > 0) {
written = robust_write(index_file_descriptor, p, remaining);
remaining -= written;
}
- pre_write_buffers->start_stack_offset += pre_write_buffers->next_free_stack_buffer_offset;
pre_write_buffers->start_index_offset += pre_write_buffers->next_free_index_buffer_offset;
- pre_write_buffers->next_free_index_buffer_offset = pre_write_buffers->next_free_stack_buffer_offset = 0;
+ pre_write_buffers->next_free_index_buffer_offset = 0;
}
static void
// these buffers to get logs for even the most recent allocations. The remote process will need to pause this process to assure that
// the contents of these buffers don't change while being inspected.
char shmem_name_string[PATH_MAX];
- strlcpy(shmem_name_string, stack_logging_directory_base_name, (size_t)PATH_MAX);
- append_int(shmem_name_string, getpid());
+ strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
+ append_int(shmem_name_string, getpid(), (size_t)PATH_MAX);
int shmid = shm_open(shmem_name_string, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
if (shmid < 0) {
}
// Store and use the buffer offsets in shared memory so that they can be accessed remotely
- pre_write_buffers->start_index_offset = pre_write_buffers->start_stack_offset = 0ull;
- pre_write_buffers->next_free_index_buffer_offset = pre_write_buffers->next_free_stack_buffer_offset = 0;
- pre_write_backtrace_event_buffer = pre_write_buffers->stack_buffer;
- pre_write_index_buffer = pre_write_buffers->index_buffer;
+ pre_write_buffers->start_index_offset = 0ull;
+ pre_write_buffers->next_free_index_buffer_offset = 0;
+
+ // create the backtrace uniquing table
+ pre_write_buffers->uniquing_table = __create_uniquing_table();
+ pre_write_buffers->uniquing_table_address = (mach_vm_address_t)(uintptr_t)pre_write_buffers->uniquing_table;
+ if (!pre_write_buffers->uniquing_table) {
+ _malloc_printf(ASL_LEVEL_INFO, "error while allocating stack uniquing table\n");
+ disable_stack_logging();
+ return;
+ }
+
+ stack_buffer = (vm_address_t*)allocate_pages((uint64_t)round_page(sizeof(vm_address_t) * STACK_LOGGING_MAX_STACK_SIZE));
+ if (!stack_buffer) {
+ _malloc_printf(ASL_LEVEL_INFO, "error while allocating stack trace buffer\n");
+ disable_stack_logging();
+ return;
+ }
// malloc() can be called by the following, so these need to be done outside the stack_logging_lock but after the buffers have been set up.
atexit(delete_log_files); // atexit() can call malloc()
reap_orphaned_log_files(true); // this calls opendir() which calls malloc()
- // this call to flush data ensures that the log files (while possibly empty) exist; analyzing processes will rely on this assumption.
- flush_data();
+ // this call ensures that the log files exist; analyzing processes will rely on this assumption.
+ if (create_log_file() == NULL) {
+ disable_stack_logging();
+ return;
+ }
}
}
// lock and enter
OSSpinLockLock(&stack_logging_lock);
+ if (!stack_logging_enable_logging) {
+ OSSpinLockUnlock(&stack_logging_lock);
+ return;
+ }
+
// compaction
if (last_logged_malloc_address && (type_flags & stack_logging_type_dealloc) && STACK_LOGGING_DISGUISE(ptr_arg) == last_logged_malloc_address) {
// *waves hand* the last allocation never occurred
pre_write_buffers->next_free_index_buffer_offset -= (uint32_t)sizeof(stack_logging_index_event);
- pre_write_buffers->next_free_stack_buffer_offset -= last_logged_backtrace_offset_diff;
- total_offset -= (int64_t)last_logged_backtrace_offset_diff;
last_logged_malloc_address = 0ul;
- // not going to subtract from the current_stack_buffer or current_logging_index indecies;
- // there is no intention to restore the previously held stack. the differencing history
- // must be reset to its previous value, though.
- thread_buffer[compaction_saved_differencing_history.hash_pos] = compaction_saved_differencing_history;
-
OSSpinLockUnlock(&stack_logging_lock);
return;
}
-
- // locate previous backtrace for this thread
- short difference = 1;
-
- uint32_t collisions = STACK_LOGGING_MAX_THREAD_COLLISIONS;
- uint32_t hashed_thread = self_thread & (STACK_LOGGING_THREAD_HASH_SIZE-1);
- while (thread_buffer[hashed_thread].thread && thread_buffer[hashed_thread].thread != self_thread) {
- if (--collisions == 0) {
- difference = 0;
- break;
- }
- hashed_thread++;
- }
-
+
// gather stack
uint32_t count;
- thread_stack_pcs(stack_buffer[current_stack_buffer], STACK_LOGGING_MAX_STACK_SIZE, &count);
- stack_buffer[current_stack_buffer][count++] = self_thread + 1; // stuffing thread # in the coldest slot. Add 1 to match what the old stack logging did.
+ thread_stack_pcs(stack_buffer, STACK_LOGGING_MAX_STACK_SIZE-1, &count); // only gather up to STACK_LOGGING_MAX_STACK_SIZE-1 since we append thread id
+ stack_buffer[count++] = self_thread + 1; // stuffing thread # in the coldest slot. Add 1 to match what the old stack logging did.
num_hot_to_skip += 2;
if (count <= num_hot_to_skip) {
// Oops! Didn't get a valid backtrace from thread_stack_pcs().
OSSpinLockUnlock(&stack_logging_lock);
return;
- }
-
- // easy access variables
- thread_backtrace_history *historical = &thread_buffer[hashed_thread];
- vm_address_t *frames = stack_buffer[current_stack_buffer];
-
- // increment as necessary
- current_logging_index++;
- current_stack_buffer++;
- if (current_stack_buffer == STACK_LOGGING_NUMBER_RECENT_BACKTRACES) current_stack_buffer = 0;
-
- // difference (if possible)
- if (historical->logging_index + STACK_LOGGING_NUMBER_RECENT_BACKTRACES <= current_logging_index) difference = 0;
- else if (historical->full_backtrace_countdown == 0) difference = 0;
-
- uint32_t sameness = 0;
- if (difference) {
- uint32_t old_count = historical->backtrace_length;
- int32_t new_count = (int32_t)count;
- while (old_count-- && new_count-- > (int32_t)num_hot_to_skip) {
- if (historical->backtrace[old_count] == frames[new_count]) sameness++;
- else break;
- }
-
- if (sameness < STACK_LOGGING_MIN_SAME_FRAMES) { // failure; pretend nothing was the same
- difference = 0;
- }
}
-
- // create events for byte storage
+
+ // unique stack in memory
count -= num_hot_to_skip;
- stack_logging_backtrace_event current_event;
- current_event.num_identical_frames = (difference ? sameness : 0);
- current_event.num_new_hot_frames = (difference ? count - sameness : count);
- current_event.offset_delta = (difference ? historical->logging_offset - total_offset : 0);
- int64_t this_offset_change = sizeof(stack_logging_backtrace_event) + (current_event.num_new_hot_frames * sizeof(uintptr_t));
+#if __LP64__
+ mach_vm_address_t *frames = (mach_vm_address_t*)stack_buffer + num_hot_to_skip;
+#else
+ mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
+ uint32_t i;
+ for (i = 0; i < count; i++) {
+ frames[i] = stack_buffer[i+num_hot_to_skip];
+ }
+#endif
+
+ uint64_t uniqueStackIdentifier = (uint64_t)(-1ll);
+ while (!__enter_frames_in_table(pre_write_buffers->uniquing_table, &uniqueStackIdentifier, frames, (int32_t)count)) {
+ __expand_uniquing_table(pre_write_buffers->uniquing_table);
+ }
stack_logging_index_event current_index;
if (type_flags & stack_logging_type_alloc) {
current_index.argument = size;
if (logging_use_compaction) {
last_logged_malloc_address = current_index.address; // disguised
- last_logged_backtrace_offset_diff = (uint32_t)this_offset_change;
- compaction_saved_differencing_history = *historical;
}
} else {
current_index.address = STACK_LOGGING_DISGUISE(ptr_arg);
current_index.argument = 0ul;
last_logged_malloc_address = 0ul;
}
- current_index.offset_and_flags = STACK_LOGGING_OFFSET_AND_FLAGS(total_offset, type_flags);
-
- // prepare for differencing next time
- historical->backtrace = (uintptr_t*)(frames + num_hot_to_skip);
- historical->backtrace_length = count;
- if (difference) historical->full_backtrace_countdown--;
- else historical->full_backtrace_countdown = STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY;
- historical->logging_index = current_logging_index;
- historical->logging_offset = total_offset;
- historical->thread = self_thread;
- historical->hash_pos = hashed_thread;
-
+ current_index.offset_and_flags = STACK_LOGGING_OFFSET_AND_FLAGS(uniqueStackIdentifier, type_flags);
+
+// the following line is a good debugging tool for logging each allocation event as it happens.
+// malloc_printf("{0x%lx, %lld}\n", STACK_LOGGING_DISGUISE(current_index.address), uniqueStackIdentifier);
+
// flush the data buffer to disk if necessary
- if (pre_write_buffers->next_free_stack_buffer_offset + this_offset_change >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
- flush_data();
- } else if (pre_write_buffers->next_free_index_buffer_offset + sizeof(stack_logging_index_event) >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
+ if (pre_write_buffers->next_free_index_buffer_offset + sizeof(stack_logging_index_event) >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
flush_data();
}
// store bytes in buffers
- memcpy(pre_write_index_buffer+pre_write_buffers->next_free_index_buffer_offset, ¤t_index, sizeof(stack_logging_index_event));
- memcpy(pre_write_backtrace_event_buffer+pre_write_buffers->next_free_stack_buffer_offset, ¤t_event, sizeof(stack_logging_backtrace_event));
- memcpy(pre_write_backtrace_event_buffer+pre_write_buffers->next_free_stack_buffer_offset+sizeof(stack_logging_backtrace_event), frames+num_hot_to_skip, (size_t)this_offset_change - sizeof(stack_logging_backtrace_event));
+ memcpy(pre_write_buffers->index_buffer+pre_write_buffers->next_free_index_buffer_offset, ¤t_index, sizeof(stack_logging_index_event));
pre_write_buffers->next_free_index_buffer_offset += (uint32_t)sizeof(stack_logging_index_event);
- pre_write_buffers->next_free_stack_buffer_offset += (uint32_t)this_offset_change;
- total_offset += this_offset_change;
OSSpinLockUnlock(&stack_logging_lock);
}
+void
+__stack_logging_fork_prepare() {
+ OSSpinLockLock(&stack_logging_lock);
+}
+
+void
+__stack_logging_fork_parent() {
+ OSSpinLockUnlock(&stack_logging_lock);
+}
+
+void
+__stack_logging_fork_child() {
+ malloc_logger = NULL;
+ stack_logging_enable_logging = 0;
+ OSSpinLockUnlock(&stack_logging_lock);
+}
+
+boolean_t
+__stack_logging_locked()
+{
+ bool acquired_lock = OSSpinLockTry(&stack_logging_lock);
+ if (acquired_lock) OSSpinLockUnlock(&stack_logging_lock);
+ return (acquired_lock ? false : true);
+}
+
#pragma mark -
#pragma mark Remote Stack Log Access
extern kern_return_t __mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *num_frames);
// Gets the last allocation record about address
-if !address, will load both index and stack logs and iterate through (expensive)
+if !address, will load index and iterate through (expensive)
else will load just index, search for stack, and then use third function here to retrieve. (also expensive)
extern kern_return_t __mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context);
// Applies enumerator to all records involving address sending context as enumerator's second parameter; if !address, applies enumerator to all records
#pragma mark - caching
-static inline size_t hash_index_32(uint32_t address, size_t max_pos) __attribute__((always_inline));
-static inline size_t hash_index_32(uint32_t address, size_t max_pos) {
-// return (((OSSwapInt32(address >> 2) << 3) & 0x96AAAA98) ^ (address >> 2)) % (max_pos-1);
- return (address >> 2) % (max_pos-1); // simplicity rules.
+__attribute__((always_inline)) static inline size_t
+hash_index(uint64_t address, size_t max_pos) {
+ return (size_t)((address >> 2) % (max_pos-1)); // simplicity rules.
}
-static inline size_t hash_index_64(uint64_t address, size_t max_pos) __attribute__((always_inline));
-static inline size_t hash_index_64(uint64_t address, size_t max_pos) {
-// return (size_t)((((OSSwapInt64(address >> 3) << 2) & 0x54AA0A0AAA54ull) ^ (address >> 3)) % (max_pos - 1));
- return (size_t)((address >> 3) % (max_pos-1)); // simplicity rules.
+__attribute__((always_inline)) static inline size_t
+hash_multiplier(size_t capacity, uint32_t allowed_collisions) {
+ return (capacity/(allowed_collisions*2+1));
}
-static void
-transfer_node_ll32(remote_index_cache *cache, remote_index_node32 *old_node)
-{
- uint32_t collisions = 0;
- size_t pos = hash_index_32(old_node->address, cache->cache_node_capacity);
- do {
- if (cache->casted_table32[pos].address == old_node->address) { // hit like this shouldn't happen.
- fprintf(stderr, "impossible collision! two address==address lists! (transfer_node_ll32)\n");
- break;
- } else if (cache->casted_table32[pos].address == 0) { // empty
- cache->casted_table32[pos] = *old_node;
- break;
- } else {
- pos++;
- if (pos >= cache->cache_node_capacity) pos = 0;
- }
- collisions++;
- } while (collisions <= cache->collision_allowance);
-
- if (collisions > cache->collision_allowance) {
- fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node_ll32)\n", sizeof(void*)*8);
- }
+__attribute__((always_inline)) static inline size_t
+next_hash(size_t hash, size_t multiplier, size_t capacity, uint32_t collisions) {
+ hash += multiplier * collisions;
+ if (hash >= capacity) hash -= capacity;
+ return hash;
}
static void
-transfer_node_ll64(remote_index_cache *cache, remote_index_node64 *old_node)
+transfer_node(remote_index_cache *cache, remote_index_node *old_node)
{
uint32_t collisions = 0;
- size_t pos = hash_index_64(old_node->address, cache->cache_node_capacity);
+ size_t pos = hash_index(old_node->address, cache->cache_node_capacity);
+ size_t multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
do {
- if (cache->casted_table64[pos].address == old_node->address) { // hit!
- fprintf(stderr, "impossible collision! two address==address lists! (transfer_node_ll64)\n");
+ if (cache->table_memory[pos].address == old_node->address) { // hit like this shouldn't happen.
+ fprintf(stderr, "impossible collision! two address==address lists! (transfer_node)\n");
break;
- } else if (cache->casted_table64[pos].address == 0) { // empty
- cache->casted_table64[pos] = *old_node;
+ } else if (cache->table_memory[pos].address == 0) { // empty
+ cache->table_memory[pos] = *old_node;
break;
} else {
- pos++;
- if (pos >= cache->cache_node_capacity) pos = 0;
+ collisions++;
+ pos = next_hash(pos, multiplier, cache->cache_node_capacity, collisions);
}
- collisions++;
} while (collisions <= cache->collision_allowance);
if (collisions > cache->collision_allowance) {
- fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node_ll64)\n", sizeof(void*)*8);
+ fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node)\n", sizeof(void*)*8);
}
}
{
// keep old stats
size_t old_node_capacity = cache->cache_node_capacity;
- uint64_t old_node_count = cache->cache_node_count;
- uint64_t old_llnode_count = cache->cache_llnode_count;
- void *old_table = cache->table_memory;
+ remote_index_node *old_table = cache->table_memory;
// double size
- cache->cache_size <<= 1;
- cache->cache_node_capacity <<= 1;
- cache->collision_allowance += STACK_LOGGING_REMOTE_CACHE_COLLISION_GROWTH_RATE;
- cache->table_memory = (void*)calloc(cache->cache_node_capacity, cache->in_use_node_size);
- if (cache->casted_table32) cache->casted_table32 = cache->table_memory;
- else cache->casted_table64 = cache->table_memory;
-
+ cache->cache_size <<= 2;
+ cache->cache_node_capacity <<= 2;
+ cache->collision_allowance += 3;
+ cache->table_memory = (void*)calloc(cache->cache_node_capacity, sizeof(remote_index_node));
+
// repopulate (expensive!)
size_t i;
- if (cache->casted_table32) { // if target is 32-bit
- remote_index_node32 *casted_old_table = (remote_index_node32*)old_table;
- for (i = 0; i < old_node_capacity; i++) {
- if (casted_old_table[i].address) {
- transfer_node_ll32(cache, &casted_old_table[i]);
- }
+ for (i = 0; i < old_node_capacity; i++) {
+ if (old_table[i].address) {
+ transfer_node(cache, &old_table[i]);
}
- } else {
- remote_index_node64 *casted_old_table = (remote_index_node64*)old_table;
- for (i = 0; i < old_node_capacity; i++) {
- if (casted_old_table[i].address) {
- transfer_node_ll64(cache, &casted_old_table[i]);
- }
- }
- }
-
- cache->cache_node_count = old_node_count;
- cache->cache_llnode_count = old_llnode_count;
+ }
free(old_table);
// printf("cache expanded to %0.2f mb (eff: %3.0f%%, capacity: %lu, nodes: %llu, llnodes: %llu)\n", ((float)(cache->cache_size))/(1 << 20), ((float)(cache->cache_node_count)*100.0)/((float)(cache->cache_node_capacity)), cache->cache_node_capacity, cache->cache_node_count, cache->cache_llnode_count);
}
static void
-insert_node32(remote_index_cache *cache, uint32_t address, uint64_t index_file_offset)
+insert_node(remote_index_cache *cache, uint64_t address, uint64_t index_file_offset)
{
uint32_t collisions = 0;
- size_t pos = hash_index_32(address, cache->cache_node_capacity);
-
- if (cache->next_block_index >= STACK_LOGGING_REMOTE_LINKS_PER_BLOCK) {
- cache->next_block_index = 0;
- cache->current_block++;
- cache->blocks[cache->current_block] = (index_ll_node*)malloc(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node));
-/* printf("node buffer added. total nodes: %ul (%u buffers, %0.2f mb)\n", STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*(cache->current_block+1),
- cache->current_block+1, ((float)(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node)*(cache->current_block+1)))/(1 << 20));
-*/
- }
- index_ll_node *new_node = &cache->blocks[cache->current_block][cache->next_block_index++];
- new_node->index_file_offset = index_file_offset;
- new_node->next = NULL;
-
- bool inserted = false;
- while (!inserted) {
- if (cache->casted_table32[pos].address == address) { // hit!
- cache->casted_table32[pos].last_link->next = new_node; // insert at end
- cache->casted_table32[pos].last_link = new_node;
- inserted = true;
- break;
- } else if (cache->casted_table32[pos].address == 0) { // empty
- cache->casted_table32[pos].address = address;
- cache->casted_table32[pos].linked_list = new_node;
- cache->casted_table32[pos].last_link = new_node;
- cache->cache_node_count++;
- inserted = true;
- break;
- } else {
- pos++;
- if (pos >= cache->cache_node_capacity) pos = 0;
- }
- collisions++;
- if (collisions > cache->collision_allowance) {
- expand_cache(cache);
- pos = hash_index_32(address, cache->cache_node_capacity);
- collisions = 0;
- }
- }
-
- cache->cache_llnode_count++;
-
-}
+ size_t pos = hash_index(address, cache->cache_node_capacity);
+ size_t multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
-static void
-insert_node64(remote_index_cache *cache, uint64_t address, uint64_t index_file_offset)
-{
- uint32_t collisions = 0;
- size_t pos = hash_index_64(address, cache->cache_node_capacity);
-
- if (cache->next_block_index >= STACK_LOGGING_REMOTE_LINKS_PER_BLOCK) {
- cache->next_block_index = 0;
- cache->current_block++;
- cache->blocks[cache->current_block] = (index_ll_node*)malloc(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node));
- }
- index_ll_node *new_node = &cache->blocks[cache->current_block][cache->next_block_index++];
- new_node->index_file_offset = index_file_offset;
- new_node->next = NULL;
-
bool inserted = false;
while (!inserted) {
- if (cache->casted_table64[pos].address == address) { // hit!
- cache->casted_table64[pos].last_link->next = new_node; // insert at end
- cache->casted_table64[pos].last_link = new_node;
- inserted = true;
- break;
- } else if (cache->casted_table64[pos].address == 0) { // empty
- cache->casted_table64[pos].address = address;
- cache->casted_table64[pos].linked_list = new_node;
- cache->casted_table64[pos].last_link = new_node;
+ if (cache->table_memory[pos].address == 0ull || cache->table_memory[pos].address == address) { // hit or empty
+ cache->table_memory[pos].address = address;
+ cache->table_memory[pos].index_file_offset = index_file_offset;
inserted = true;
break;
- } else {
- pos++;
- if (pos >= cache->cache_node_capacity) pos = 0;
}
+
collisions++;
+ pos = next_hash(pos, multiplier, cache->cache_node_capacity, collisions);
+
if (collisions > cache->collision_allowance) {
expand_cache(cache);
- pos = hash_index_64(address, cache->cache_node_capacity);
+ pos = hash_index(address, cache->cache_node_capacity);
+ multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
collisions = 0;
}
}
// create from scratch if necessary.
if (!cache) {
descriptors->cache = cache = (remote_index_cache*)calloc((size_t)1, sizeof(remote_index_cache));
- cache->cache_node_capacity = STACK_LOGGING_REMOTE_CACHE_DEFAULT_NODE_CAPACITY;
- cache->collision_allowance = STACK_LOGGING_REMOTE_CACHE_DEFAULT_COLLISION_ALLOWANCE;
- cache->cache_node_count = cache->cache_llnode_count = 0;
+ cache->cache_node_capacity = 1 << 14;
+ cache->collision_allowance = 17;
cache->last_index_file_offset = 0;
- cache->next_block_index = 0;
- cache->current_block = 0;
- cache->blocks[0] = (index_ll_node*)malloc(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node));
- cache->in_use_node_size = (descriptors->task_is_64_bit ? sizeof(remote_index_node64) : sizeof(remote_index_node32));
- cache->cache_size = cache->cache_node_capacity*cache->in_use_node_size;
- cache->table_memory = (void*)calloc(cache->cache_node_capacity, cache->in_use_node_size);
- if (descriptors->task_is_64_bit) cache->casted_table64 = (remote_index_node64*)(cache->table_memory);
- else cache->casted_table32 = (remote_index_node32*)(cache->table_memory);
+ cache->cache_size = cache->cache_node_capacity*sizeof(remote_index_node);
+ cache->table_memory = (void*)calloc(cache->cache_node_capacity, sizeof(remote_index_node));
// now map in the shared memory, if possible
char shmem_name_string[PATH_MAX];
- strlcpy(shmem_name_string, stack_logging_directory_base_name, (size_t)PATH_MAX);
- append_int(shmem_name_string, descriptors->remote_pid);
+ strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
+ append_int(shmem_name_string, descriptors->remote_pid, (size_t)PATH_MAX);
int shmid = shm_open(shmem_name_string, O_RDWR, S_IRUSR | S_IWUSR);
if (shmid >= 0) {
}
// if a snapshot is necessary, memcpy from remote frozen process' memory
- // note: there were two ways to do this Ð spin lock or suspend. suspend allows us to
+ // note: there were two ways to do this – spin lock or suspend. suspend allows us to
// analyze processes even if they were artificially suspended. with a lock, there'd be
// worry that the target was suspended with the lock taken.
if (update_snapshot) {
memcpy(&cache->snapshot, cache->shmem, sizeof(stack_buffer_shared_memory));
+ // also need to update our version of the remote uniquing table
+ vm_address_t local_uniquing_address = 0ul;
+ mach_msg_type_number_t local_uniquing_size = 0;
+ mach_vm_size_t desired_size = round_page(sizeof(backtrace_uniquing_table));
+ kern_return_t err;
+ if ((err = mach_vm_read(descriptors->remote_task, cache->shmem->uniquing_table_address, desired_size, &local_uniquing_address, &local_uniquing_size)) != KERN_SUCCESS
+ || local_uniquing_size != desired_size) {
+ fprintf(stderr, "error while attempting to mach_vm_read remote stack uniquing table (%d): %s\n", err, mach_error_string(err));
+ } else {
+ // the mach_vm_read was successful, so acquire the uniquing table
+
+ // need to re-read the table, so deallocate the current memory
+ if (cache->uniquing_table.table) mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)(cache->uniquing_table.table), cache->uniquing_table.tableSize);
+
+ // the following line gathers the uniquing table structure data, but the actual table memory is invalid since it's a pointer from the
+ // remote process. this pointer will be mapped shared in a few lines.
+ cache->uniquing_table = *((backtrace_uniquing_table*)local_uniquing_address);
+
+ vm_address_t local_table_address = 0ul;
+ mach_msg_type_number_t local_table_size = 0;
+
+ err = mach_vm_read(descriptors->remote_task, cache->uniquing_table.table_address, cache->uniquing_table.tableSize, &local_table_address, &local_table_size);
+ if (err == KERN_SUCCESS) cache->uniquing_table.table = (mach_vm_address_t*)local_table_address;
+ else cache->uniquing_table.table = NULL;
+
+ mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)local_uniquing_address, (mach_vm_size_t)local_uniquing_size);
+ }
}
// resume
}
off_t current_index_position = cache->last_index_file_offset;
do {
- number_slots = MIN(delta_indecies - read_this_update, number_slots);
+ number_slots = (size_t)MIN(delta_indecies - read_this_update, number_slots);
read_count = fread(bufferSpace, read_size, number_slots, the_index);
if (descriptors->task_is_64_bit) {
for (i = 0; i < read_count; i++) {
- insert_node64(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)current_index_position);
+ insert_node(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)current_index_position);
read_this_update++;
current_index_position += read_size;
}
} else {
for (i = 0; i < read_count; i++) {
- insert_node32(cache, STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)current_index_position);
+ insert_node(cache, (mach_vm_address_t)STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)current_index_position);
read_this_update++;
current_index_position += read_size;
}
off_t current_index_position = cache->snapshot.start_index_offset;
if (descriptors->task_is_64_bit) {
for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
- insert_node64(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
+ insert_node(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
}
} else {
for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
- insert_node32(cache, STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
+ insert_node(cache, (mach_vm_address_t)STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
}
}
}
-
}
static void
destroy_cache_for_file_streams(remote_task_file_streams *descriptors)
{
- uint32_t i;
- for (i = 0; i <= descriptors->cache->current_block; i++) {
- free(descriptors->cache->blocks[i]); // clears the linked list nodes.
- }
if (descriptors->cache->shmem) {
munmap(descriptors->cache->shmem, sizeof(stack_buffer_shared_memory));
}
{
DIR *dp;
struct dirent *entry;
- int prefix_length;
char prefix_name[PATH_MAX];
char pathname[PATH_MAX];
reap_orphaned_log_files(false); // reap any left-over log files (for non-existant processes, but not for this analysis process)
- if ((dp = opendir(temporary_directory)) == NULL) {
+ if ((dp = opendir(_PATH_TMP)) == NULL) {
return;
}
// It's OK to use snprintf in this routine since it should only be called by the clients
// of stack logging, and thus calls to malloc are OK.
- snprintf(prefix_name, PATH_MAX, "%s%d.", stack_logging_directory_base_name, pid); // make sure to use "%s%d." rather than just "%s%d" to match the whole pid
- prefix_length = strlen(prefix_name);
+ snprintf(prefix_name, (size_t)PATH_MAX, "%s%d.", stack_log_file_base_name, pid); // make sure to use "%s%d." rather than just "%s%d" to match the whole pid
+ size_t prefix_length = strlen(prefix_name);
while ( (entry = readdir(dp)) != NULL ) {
if ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) {
- snprintf(pathname, PATH_MAX, "%s/%s/%s", temporary_directory, entry->d_name, index_file_name);
- this_task_streams->index_file_stream = fopen(pathname, "r");
-
- snprintf(pathname, PATH_MAX, "%s/%s/%s", temporary_directory, entry->d_name, stack_file_name);
- this_task_streams->stack_file_stream = fopen(pathname, "r");
-
+ snprintf(pathname, (size_t)PATH_MAX, "%s%s", _PATH_TMP, entry->d_name);
+ char reference_file[PATH_MAX];
+ if (log_file_is_reference(pathname, reference_file, (size_t)PATH_MAX)) {
+ this_task_streams->index_file_stream = fopen(reference_file, "r");
+ } else {
+ this_task_streams->index_file_stream = fopen(pathname, "r");
+ }
+
break;
}
}
static remote_task_file_streams*
retain_file_streams_for_task(task_t task)
{
+ if (task == MACH_PORT_NULL) return NULL;
+
OSSpinLockLock(&remote_fd_list_lock);
// see if they're already in use
}
}
fclose(remote_fds[next_remote_task_fd].index_file_stream);
- fclose(remote_fds[next_remote_task_fd].stack_file_stream);
destroy_cache_for_file_streams(&remote_fds[next_remote_task_fd]);
}
open_log_files(pid, this_task_streams);
// check if opens failed
- if (this_task_streams->index_file_stream == NULL || this_task_streams->stack_file_stream == NULL) {
+ if (this_task_streams->index_file_stream == NULL) {
if (this_task_streams->index_file_stream) fclose(this_task_streams->index_file_stream);
- if (this_task_streams->stack_file_stream) fclose(this_task_streams->stack_file_stream);
OSSpinLockUnlock(&remote_fd_list_lock);
return NULL;
}
update_cache_for_file_streams(remote_fd);
uint32_t collisions = 0;
+ size_t hash = hash_index(address, remote_fd->cache->cache_node_capacity);
+ size_t multiplier = hash_multiplier(remote_fd->cache->cache_node_capacity, remote_fd->cache->collision_allowance);
uint64_t located_file_position = 0;
- bool found = false;
- size_t hash = 0;
- if (remote_fd->task_is_64_bit) {
- hash = hash_index_64(address, remote_fd->cache->cache_node_capacity);
- do {
- if (remote_fd->cache->casted_table64[hash].address == address) { // hit!
- located_file_position = remote_fd->cache->casted_table64[hash].last_link->index_file_offset;
- found = true;
- break;
- } else if (remote_fd->cache->casted_table64[hash].address == 0) { // failure!
- break;
- }
- hash++;
- if (hash >= remote_fd->cache->cache_node_capacity) hash = 0;
- } while (collisions <= remote_fd->cache->collision_allowance);
- } else {
- hash = hash_index_32((uint32_t)address, remote_fd->cache->cache_node_capacity);
- do {
- if (remote_fd->cache->casted_table32[hash].address == (uint32_t)address) { // hit!
- located_file_position = remote_fd->cache->casted_table32[hash].last_link->index_file_offset;
- found = true;
- break;
- } else if (remote_fd->cache->casted_table32[hash].address == 0) { // failure!
- break;
- }
- hash++;
- if (hash >= remote_fd->cache->cache_node_capacity) hash = 0;
- } while (collisions <= remote_fd->cache->collision_allowance);
- }
+
+ bool found = false;
+ do {
+ if (remote_fd->cache->table_memory[hash].address == address) { // hit!
+ located_file_position = remote_fd->cache->table_memory[hash].index_file_offset;
+ found = true;
+ break;
+ } else if (remote_fd->cache->table_memory[hash].address == 0ull) { // failure!
+ break;
+ }
+
+ collisions++;
+ hash = next_hash(hash, multiplier, remote_fd->cache->cache_node_capacity, collisions);
+
+ } while (collisions <= remote_fd->cache->collision_allowance);
if (found) {
// prepare for the read; target process could be 32 or 64 bit.
mach_stack_logging_record_t pass_record;
kern_return_t err = KERN_SUCCESS;
- if (reading_all_addresses) { // just stupidly read the index file from disk
-
- // update (read index file once and only once)
- update_cache_for_file_streams(remote_fd);
+ // update (read index file once and only once)
+ update_cache_for_file_streams(remote_fd);
+
+ FILE *the_index = (remote_fd->index_file_stream);
- FILE *the_index = (remote_fd->index_file_stream);
-
- // prepare for the read; target process could be 32 or 64 bit.
- char bufferSpace[2048]; // 2 kb
- stack_logging_index_event32 *target_32_index = (stack_logging_index_event32*)bufferSpace;
- stack_logging_index_event64 *target_64_index = (stack_logging_index_event64*)bufferSpace;
- uint32_t target_addr_32 = (uint32_t)STACK_LOGGING_DISGUISE((uint32_t)address);
- uint64_t target_addr_64 = STACK_LOGGING_DISGUISE((uint64_t)address);
- size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
- size_t number_slots = (size_t)(2048/read_size);
- uint64_t total_slots = remote_fd->cache->last_index_file_offset / read_size;
+ // prepare for the read; target process could be 32 or 64 bit.
+ char bufferSpace[2048]; // 2 kb
+ stack_logging_index_event32 *target_32_index = (stack_logging_index_event32*)bufferSpace;
+ stack_logging_index_event64 *target_64_index = (stack_logging_index_event64*)bufferSpace;
+ uint32_t target_addr_32 = (uint32_t)STACK_LOGGING_DISGUISE((uint32_t)address);
+ uint64_t target_addr_64 = STACK_LOGGING_DISGUISE((uint64_t)address);
+ size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
+ size_t number_slots = (size_t)(2048/read_size);
+ uint64_t total_slots = remote_fd->cache->last_index_file_offset / read_size;
+
+ // perform the search
+ size_t read_count = 0;
+ int64_t current_file_offset = 0;
+ uint32_t i;
+ do {
+ // at this point, we need to read index events; read them from the file until it's necessary to grab them from the shared memory snapshot
+ // and crop file reading to the point where we last scanned
+ number_slots = (size_t)MIN(number_slots, total_slots);
- // perform the search
- size_t read_count = 0;
- int64_t current_file_offset = 0;
- uint32_t i;
- do {
- // at this point, we need to read index events; read them from the file until it's necessary to grab them from the shared memory snapshot
- // and crop file reading to the point where we last scanned
- number_slots = (size_t)MIN(number_slots, total_slots);
-
- // if out of file to read (as of the time we entered this function), try to use shared memory snapshot
- if (number_slots == 0) {
- if (remote_fd->cache->shmem && remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset > (uint64_t)current_file_offset) {
- // use shared memory
- target_32_index = (stack_logging_index_event32*)remote_fd->cache->snapshot.index_buffer;
- target_64_index = (stack_logging_index_event64*)remote_fd->cache->snapshot.index_buffer;
- read_count = (uint32_t)(remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset - current_file_offset) / read_size;
- current_file_offset += read_count * read_size;
- } else {
- break;
- }
+ // if out of file to read (as of the time we entered this function), try to use shared memory snapshot
+ if (number_slots == 0) {
+ if (remote_fd->cache->shmem && remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset > (uint64_t)current_file_offset) {
+ // use shared memory
+ target_32_index = (stack_logging_index_event32*)remote_fd->cache->snapshot.index_buffer;
+ target_64_index = (stack_logging_index_event64*)remote_fd->cache->snapshot.index_buffer;
+ read_count = (uint32_t)(remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset - current_file_offset) / read_size;
+ current_file_offset += read_count * read_size;
} else {
- // get and save index (enumerator could modify)
- fseeko(the_index, current_file_offset, SEEK_SET);
- read_count = fread(bufferSpace, read_size, number_slots, the_index);
- current_file_offset = ftello(the_index);
- total_slots -= read_count;
- }
-
- if (remote_fd->task_is_64_bit) {
- for (i = 0; i < read_count; i++) {
- if (reading_all_addresses || target_64_index[i].address == target_addr_64) {
- pass_record.address = STACK_LOGGING_DISGUISE(target_64_index[i].address);
- pass_record.argument = target_64_index[i].argument;
- pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_64_index[i].offset_and_flags);
- pass_record.type_flags = STACK_LOGGING_FLAGS(target_64_index[i].offset_and_flags);
- enumerator(pass_record, context);
- }
- }
- } else {
- for (i = 0; i < read_count; i++) {
- if (reading_all_addresses || target_32_index[i].address == target_addr_32) {
- pass_record.address = STACK_LOGGING_DISGUISE(target_32_index[i].address);
- pass_record.argument = target_32_index[i].argument;
- pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_32_index[i].offset_and_flags);
- pass_record.type_flags = STACK_LOGGING_FLAGS(target_32_index[i].offset_and_flags);
- enumerator(pass_record, context);
- }
- }
+ break;
}
- } while (read_count);
-
- } else { // searching for a single address' history
-
- // update (read index file once and only once)
- update_cache_for_file_streams(remote_fd);
-
- // get linked-list of events
- uint32_t collisions = 0;
- uint64_t located_file_position = 0;
- size_t hash = 0;
- index_ll_node *index_position_linked_list = NULL;
- if (remote_fd->task_is_64_bit) {
- hash = hash_index_64(address, remote_fd->cache->cache_node_capacity);
- do {
- if (remote_fd->cache->casted_table64[hash].address == address) { // hit!
- index_position_linked_list = remote_fd->cache->casted_table64[hash].linked_list;
- break;
- } else if (remote_fd->cache->casted_table64[hash].address == 0) { // failure!
- break;
- }
- hash++;
- if (hash >= remote_fd->cache->cache_node_capacity) hash = 0;
- } while (collisions <= remote_fd->cache->collision_allowance);
} else {
- hash = hash_index_32((uint32_t)address, remote_fd->cache->cache_node_capacity);
- do {
- if (remote_fd->cache->casted_table32[hash].address == (uint32_t)address) { // hit!
- index_position_linked_list = remote_fd->cache->casted_table32[hash].linked_list;
- break;
- } else if (remote_fd->cache->casted_table32[hash].address == 0) { // failure!
- break;
- }
- hash++;
- if (hash >= remote_fd->cache->cache_node_capacity) hash = 0;
- } while (collisions <= remote_fd->cache->collision_allowance);
+ // get and save index (enumerator could modify)
+ fseeko(the_index, current_file_offset, SEEK_SET);
+ read_count = fread(bufferSpace, read_size, number_slots, the_index);
+ current_file_offset = ftello(the_index);
+ total_slots -= read_count;
}
- // if we got something, run it
- char bufferSpace[128];
- size_t read_count = 0;
- stack_logging_index_event32 *target_32_index = (stack_logging_index_event32*)bufferSpace;
- stack_logging_index_event64 *target_64_index = (stack_logging_index_event64*)bufferSpace;
- size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
- while (index_position_linked_list) {
- located_file_position = index_position_linked_list->index_file_offset;
-
- if (located_file_position >= remote_fd->cache->snapshot.start_index_offset) {
- if (remote_fd->cache->shmem && located_file_position >= remote_fd->cache->snapshot.start_index_offset && remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset > (uint64_t)located_file_position) {
- // use shared memory
- target_32_index = (stack_logging_index_event32*)(remote_fd->cache->snapshot.index_buffer + located_file_position - remote_fd->cache->snapshot.start_index_offset);
- target_64_index = (stack_logging_index_event64*)target_32_index;
- read_count = 1;
- } else {
- err = KERN_FAILURE;
- break;
- }
- } else {
- fseeko(remote_fd->index_file_stream, (off_t)located_file_position, SEEK_SET);
- read_count = fread(bufferSpace, read_size, (size_t)1, remote_fd->index_file_stream);
- if (!read_count) {
- err = KERN_FAILURE;
- break;
+ if (remote_fd->task_is_64_bit) {
+ for (i = 0; i < read_count; i++) {
+ if (reading_all_addresses || target_64_index[i].address == target_addr_64) {
+ pass_record.address = STACK_LOGGING_DISGUISE(target_64_index[i].address);
+ pass_record.argument = target_64_index[i].argument;
+ pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_64_index[i].offset_and_flags);
+ pass_record.type_flags = STACK_LOGGING_FLAGS(target_64_index[i].offset_and_flags);
+ enumerator(pass_record, context);
}
}
- if (remote_fd->task_is_64_bit) {
- pass_record.address = STACK_LOGGING_DISGUISE(target_64_index[0].address);
- pass_record.argument = target_64_index[0].argument;
- pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_64_index[0].offset_and_flags);
- pass_record.type_flags = STACK_LOGGING_FLAGS(target_64_index[0].offset_and_flags);
- enumerator(pass_record, context);
- } else {
- pass_record.address = STACK_LOGGING_DISGUISE(target_32_index[0].address);
- pass_record.argument = target_32_index[0].argument;
- pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_32_index[0].offset_and_flags);
- pass_record.type_flags = STACK_LOGGING_FLAGS(target_32_index[0].offset_and_flags);
- enumerator(pass_record, context);
+ } else {
+ for (i = 0; i < read_count; i++) {
+ if (reading_all_addresses || target_32_index[i].address == target_addr_32) {
+ pass_record.address = STACK_LOGGING_DISGUISE(target_32_index[i].address);
+ pass_record.argument = target_32_index[i].argument;
+ pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_32_index[i].offset_and_flags);
+ pass_record.type_flags = STACK_LOGGING_FLAGS(target_32_index[i].offset_and_flags);
+ enumerator(pass_record, context);
+ }
}
- index_position_linked_list = index_position_linked_list->next;
}
-
- }
-
+ } while (read_count);
+
release_file_streams_for_task(task);
return err;
}
__mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
{
remote_task_file_streams *remote_fd = retain_file_streams_for_task(task);
- if (remote_fd == NULL) {
- return KERN_FAILURE;
- }
-
- // prepare for initial read
- FILE *stack_fd;
- stack_fd = (remote_fd->stack_file_stream);
- char bytes_buffer[16];
- stack_logging_backtrace_event *target_stack_event = (stack_logging_backtrace_event*)bytes_buffer;
- size_t read_size = sizeof(stack_logging_backtrace_event);
- size_t read_count = 0;
- off_t reading_offset = (off_t)stack_identifier;
-
- // get a temporary spot for the backtrace frames to go and reference the stack space such that the reference
- // can be later pointed at the shared memory snapshot and data read from there.
- uint64_t temp_frames_buffer[STACK_LOGGING_MAX_STACK_SIZE];
- uint64_t *big_frames = (uint64_t*)temp_frames_buffer;
- uint32_t *small_frames = (uint32_t*)temp_frames_buffer;
- size_t target_frame_size = (remote_fd->task_is_64_bit ? sizeof(uint64_t) : sizeof(uint32_t));
- char *snapshot_backtrace_location = NULL;
-
- int done = 0;
- int32_t total_frames = -1;
- int32_t hot_frames_read = 0;
- size_t new_hot_frames = 0;
- int32_t number_needed_hot_frames_in_event;
- size_t number_hot_frames_to_skip;
- int32_t i;
- bool skip_file_read;
-
- while (!done) {
-
- // not in cache; read record Ð from disk if possible, shared memory snapshot if necessary.
- if (remote_fd->cache->shmem && reading_offset >= (off_t)(remote_fd->cache->snapshot.start_stack_offset)) {
- // must read from shared memory; the record isn't on disk yet
- snapshot_backtrace_location = (remote_fd->cache->snapshot.stack_buffer + (reading_offset - remote_fd->cache->snapshot.start_stack_offset));
- *target_stack_event = *(stack_logging_backtrace_event*)snapshot_backtrace_location;
- big_frames = (uint64_t*)(snapshot_backtrace_location + sizeof(stack_logging_backtrace_event));
- small_frames = (uint32_t*)big_frames;
- skip_file_read = true;
- } else {
- // the record's on disk
- i = fseeko(stack_fd, reading_offset, SEEK_SET);
- if (i != 0) break; // unable to seek to the target position
- read_count = fread(target_stack_event, read_size, (size_t)1, stack_fd);
- if (read_count == 0) break;
-
- big_frames = (uint64_t*)temp_frames_buffer;
- small_frames = (uint32_t*)temp_frames_buffer;
- skip_file_read = false;
- }
-
- if (total_frames < 0) {
- total_frames = target_stack_event->num_new_hot_frames + target_stack_event->num_identical_frames;
- if (total_frames > (int32_t)max_stack_frames) break; // don't know what to do with this; we'll just KERN_FAILURE.
- }
-
- // do the math to find how many frames to apply from previous event
- new_hot_frames = target_stack_event->num_new_hot_frames;
- number_needed_hot_frames_in_event = total_frames - hot_frames_read - target_stack_event->num_identical_frames;
- number_hot_frames_to_skip = new_hot_frames - number_needed_hot_frames_in_event;
-
- // read and apply the important frames of this one
- if (number_needed_hot_frames_in_event > 0) {
- if (!skip_file_read) {
- read_count = fread(temp_frames_buffer, target_frame_size, new_hot_frames, stack_fd);
- if (read_count < new_hot_frames) break;
- }
-
- if (remote_fd->task_is_64_bit) {
- for (i = 0; i < number_needed_hot_frames_in_event; i++) {
- stack_frames_buffer[hot_frames_read++] = big_frames[i+number_hot_frames_to_skip];
- }
- } else {
- for (i = 0; i < number_needed_hot_frames_in_event; i++) {
- stack_frames_buffer[hot_frames_read++] = small_frames[i+number_hot_frames_to_skip];
- }
- }
- }
-
- reading_offset += target_stack_event->offset_delta;
-
- if (hot_frames_read == total_frames) done = 1;
- else if (target_stack_event->offset_delta == 0) {
- fprintf(stderr, "incomplete stack record (identifier: 0x%qx)\n", reading_offset);
- break;
- }
- }
+ if (remote_fd == NULL) return KERN_FAILURE;
+
+ __unwind_stack_from_table_index(&remote_fd->cache->uniquing_table, stack_identifier, stack_frames_buffer, count, max_stack_frames);
release_file_streams_for_task(task);
- if (done) {
- *count = hot_frames_read;
- return KERN_SUCCESS;
- } else {
- return KERN_FAILURE;
- }
+ if (*count) return KERN_SUCCESS;
+ else return KERN_FAILURE;
}
#include <sys/wait.h>
+int
main()
{
int status;
int i;
+ size_t total_globals = 0ul;
fprintf(stderr, "master test process is %d\n", getpid());
- fprintf(stderr, "sizeof stack_buffer: %d\n", sizeof(stack_buffer));
- fprintf(stderr, "sizeof thread_buffer: %d\n", sizeof(thread_buffer));
- fprintf(stderr, "sizeof stack_logs_directory: %d\n", sizeof(stack_logs_directory));
- fprintf(stderr, "sizeof remote_fds: %d\n", sizeof(remote_fds));
- fprintf(stderr, "address of pre_write_backtrace_event_buffer: %p\n", &pre_write_backtrace_event_buffer);
- fprintf(stderr, "address of logging_use_compaction: %p\n", &logging_use_compaction);
- // fprintf(stderr, "size of all global data: %d\n", (logging_use_compaction) - (pre_write_backtrace_event_buffer) + sizeof(logging_use_compaction));
-
- create_log_files();
+ fprintf(stderr, "sizeof pre_write_buffers: %lu\n", sizeof(pre_write_buffers)); total_globals += sizeof(pre_write_buffers);
+ fprintf(stderr, "sizeof stack_buffer: %lu\n", sizeof(stack_buffer)); total_globals += sizeof(stack_buffer);
+ fprintf(stderr, "sizeof last_logged_malloc_address: %lu\n", sizeof(last_logged_malloc_address)); total_globals += sizeof(last_logged_malloc_address);
+ fprintf(stderr, "sizeof stack_log_file_base_name: %lu\n", sizeof(stack_log_file_base_name)); total_globals += sizeof(stack_log_file_base_name);
+ fprintf(stderr, "sizeof stack_log_file_suffix: %lu\n", sizeof(stack_log_file_suffix)); total_globals += sizeof(stack_log_file_suffix);
+ fprintf(stderr, "sizeof stack_log_link_suffix: %lu\n", sizeof(stack_log_link_suffix)); total_globals += sizeof(stack_log_link_suffix);
+ fprintf(stderr, "sizeof stack_log_location: %lu\n", sizeof(stack_log_location)); total_globals += sizeof(stack_log_location);
+ fprintf(stderr, "sizeof stack_log_reference_file: %lu\n", sizeof(stack_log_reference_file)); total_globals += sizeof(stack_log_reference_file);
+ fprintf(stderr, "sizeof index_file_path: %lu\n", sizeof(index_file_path)); total_globals += sizeof(index_file_path);
+ fprintf(stderr, "sizeof index_file_descriptor: %lu\n", sizeof(index_file_descriptor)); total_globals += sizeof(index_file_descriptor);
+ fprintf(stderr, "sizeof remote_fds: %lu\n", sizeof(remote_fds)); total_globals += sizeof(remote_fds);
+ fprintf(stderr, "sizeof next_remote_task_fd: %lu\n", sizeof(next_remote_task_fd)); total_globals += sizeof(next_remote_task_fd);
+ fprintf(stderr, "sizeof remote_task_fd_count: %lu\n", sizeof(remote_task_fd_count)); total_globals += sizeof(remote_task_fd_count);
+ fprintf(stderr, "sizeof remote_fd_list_lock: %lu\n", sizeof(remote_fd_list_lock)); total_globals += sizeof(remote_fd_list_lock);
+ fprintf(stderr, "sizeof logging_use_compaction: %lu\n", sizeof(logging_use_compaction)); total_globals += sizeof(logging_use_compaction);
+
+ fprintf(stderr, "size of all global data: %lu\n", total_globals);
+
+ create_log_file();
// create a few child processes and exit them cleanly so their logs should get cleaned up
fprintf(stderr, "\ncreating child processes and exiting cleanly\n");
for (i = 0; i < 3; i++) {
if (fork() == 0) {
fprintf(stderr, "\nin child processes %d\n", getpid());
- create_log_files();
+ create_log_file();
fprintf(stderr, "exiting child processes %d\n", getpid());
exit(1);
}
for (i = 0; i < 3; i++) {
if (fork() == 0) {
fprintf(stderr, "\nin child processes %d\n", getpid());
- create_log_files();
+ create_log_file();
fprintf(stderr, "exiting child processes %d\n", getpid());
_exit(1);
}
// this should reap any remaining logs
fprintf(stderr, "\nexiting master test process %d\n", getpid());
delete_log_files();
+ return 0;
}
#endif
.\" @(#)sysctl.3 8.4 (Berkeley) 5/9/95
.\" $FreeBSD: src/lib/libc/gen/sysctl.3,v 1.63 2004/07/02 23:52:10 ru Exp $
.\"
-.Dd January 23, 2001
+.Dd October 21, 2008
.Dt SYSCTL 3
.Os
.Sh NAME
.It "HW_NCPU integer no"
.It "HW_BYTEORDER integer no"
.It "HW_PHYSMEM integer no"
+.It "HW_MEMSIZE integer no"
.It "HW_USERMEM integer no"
.It "HW_PAGESIZE integer no"
.It "HW_FLOATINGPOINT integer no"
.It Li HW_BYTEORDER
The byteorder (4,321, or 1,234).
.It Li HW_PHYSMEM
-The bytes of physical memory.
+The bytes of physical memory represented by a 32-bit integer (for backward compatibility). Use HW_MEMSIZE instead.
+.It Li HW_MEMSIZE
+The bytes of physical memory represented by a 64-bit integer.
.It Li HW_USERMEM
The bytes of non-kernel memory.
.It Li HW_PAGESIZE
.Bl -column "Second level nameXXXXXX" "struct loadavgXXX" -offset indent
.It Sy "Second level name Type Changeable"
.It "VM_LOADAVG struct loadavg no"
-.It "VM_METER struct vmtotal no"
.It "VM_PAGEOUT_ALGORITHM integer yes"
.It "VM_SWAPPING_ENABLED integer maybe"
.It "VM_V_CACHE_MAX integer yes"
Return the load average history.
The returned data consists of a
.Va struct loadavg .
-.It Li VM_METER
-Return the system wide virtual memory statistics.
-The returned data consists of a
-.Va struct vmtotal .
.It Li VM_PAGEOUT_ALGORITHM
0 if the statistics-based page management algorithm is in use
or 1 if the near-LRU algorithm is in use.
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999-2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights
+ * Reserved. This file contains Original Code and/or Modifications of
+ * Original Code as defined in and that are subject to the Apple Public
+ * Source License Version 1.0 (the 'License'). You may not use this file
+ * except in compliance with the License. Please obtain a copy of the
+ * License at http://www.apple.com/publicsource and read it before using
+ * this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License."
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <sys/uio.h>
#include <sys/un.h>
#include <netdb.h>
-
+#include <mach/mach.h>
+#include <servers/bootstrap.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <notify.h>
#include <asl.h>
#include <asl_private.h>
+#include <asl_ipc.h>
#ifdef __STDC__
#include <stdarg.h>
#define INTERNALLOG LOG_ERR|LOG_CONS|LOG_PERROR|LOG_PID
#ifdef BUILDING_VARIANT
-__private_extern__ int _sl_LogFile; /* fd for log */
-__private_extern__ int _sl_connected; /* have done connect */
__private_extern__ int _sl_LogStat; /* status bits, set by openlog() */
__private_extern__ const char *_sl_LogTag; /* string to tag the entry with */
__private_extern__ int _sl_LogFacility; /* default facility code */
-__private_extern__ int _sl_LogMask; /* mask of priorities to be logged */
+__private_extern__ int _sl_LogMask; /* local mask of priorities to be logged */
+__private_extern__ int _sl_MasterLogMask; /* master (remote control) mask of priorities to be logged */
+__private_extern__ int _sl_ProcLogMask; /* process-specific (remote control) mask of priorities to be logged */
+__private_extern__ int _sl_RCToken; /* for remote control change notification */
__private_extern__ int _sl_NotifyToken; /* for remote control of priority filter */
__private_extern__ int _sl_NotifyMaster; /* for remote control of priority filter */
+__private_extern__ int _sl_pid; /* pid */
#else /* !BUILDING_VARIANT */
-__private_extern__ int _sl_LogFile = -1; /* fd for log */
-__private_extern__ int _sl_connected = 0; /* have done connect */
-__private_extern__ int _sl_LogStat = 0; /* status bits, set by openlog() */
+__private_extern__ int _sl_LogStat = 0; /* status bits, set by openlog() */
__private_extern__ const char *_sl_LogTag = NULL; /* string to tag the entry with */
__private_extern__ int _sl_LogFacility = LOG_USER; /* default facility code */
-__private_extern__ int _sl_LogMask = 0xff; /* mask of priorities to be logged */
-__private_extern__ int _sl_NotifyToken = -1; /* for remote control of max logged priority */
-__private_extern__ int _sl_NotifyMaster = -1; /* for remote control of max logged priority */
+__private_extern__ int _sl_LogMask = 0xff; /* mask of priorities to be logged */
+__private_extern__ int _sl_MasterLogMask = 0; /* master mask of priorities to be logged */
+__private_extern__ int _sl_ProcLogMask = 0; /* process-specific mask of priorities to be logged */
+__private_extern__ int _sl_RCToken = -1; /* for remote control change notification */
+__private_extern__ int _sl_NotifyToken = -1; /* for remote control of max logged priority */
+__private_extern__ int _sl_NotifyMaster = -1; /* for remote control of max logged priority */
+__private_extern__ int _sl_pid = -1; /* pid */
#endif /* BUILDING_VARIANT */
__private_extern__ void _sl_init_notify();
+#define ASL_SERVICE_NAME "com.apple.system.logger"
+static mach_port_t asl_server_port = MACH_PORT_NULL;
+
#define NOTIFY_SYSTEM_MASTER "com.apple.system.syslog.master"
#define NOTIFY_PREFIX_SYSTEM "com.apple.system.syslog"
#define NOTIFY_PREFIX_USER "user.syslog"
void
vsyslog(int pri, const char *fmt, va_list ap)
{
- int status, i, saved_errno, filter, rc_filter;
+ int status, i, saved_errno, filter, check, rc_filter;
time_t tick;
struct timeval tval;
- pid_t pid;
- uint32_t elen, count;
+ uint32_t elen, count, outlen;
char *p, *str, *expanded, *err_str, hname[MAXHOSTNAMELEN+1];
+ const char *val;
uint64_t cval;
int fd, mask, level, facility;
aslmsg msg;
+ kern_return_t kstatus;
+ caddr_t out;
saved_errno = errno;
+ if (_sl_pid == -1) _sl_pid = getpid();
+
/* Check for invalid bits. */
if (pri & ~(LOG_PRIMASK | LOG_FACMASK))
{
if (facility == 0) facility = _sl_LogFacility;
- /* Get remote-control priority filter */
- filter = _sl_LogMask;
- rc_filter = 0;
-
_sl_init_notify();
- if (_sl_NotifyToken >= 0)
+ /* initialize or re-check process-specific and master filters */
+ if (_sl_RCToken >= 0)
{
- if (notify_get_state(_sl_NotifyToken, &cval) == NOTIFY_STATUS_OK)
+ check = 0;
+ status = notify_check(_sl_RCToken, &check);
+ if ((status == NOTIFY_STATUS_OK) && (check != 0))
{
- if (cval != 0)
+ if (_sl_NotifyMaster >= 0)
{
- filter = cval;
- rc_filter = 1;
+ cval = 0;
+ if (notify_get_state(_sl_NotifyMaster, &cval) == NOTIFY_STATUS_OK) _sl_MasterLogMask = cval;
}
- }
- }
- if ((rc_filter == 0) && (_sl_NotifyMaster >= 0))
- {
- if (notify_get_state(_sl_NotifyMaster, &cval) == NOTIFY_STATUS_OK)
- {
- if (cval != 0)
+ if (_sl_NotifyToken >= 0)
{
- filter = cval;
+ cval = 0;
+ if (notify_get_state(_sl_NotifyToken, &cval) == NOTIFY_STATUS_OK) _sl_ProcLogMask = cval;
}
}
}
+ filter = _sl_LogMask;
+ rc_filter = 0;
+
+ /* master filter overrides local filter */
+ if (_sl_MasterLogMask != 0)
+ {
+ filter = _sl_MasterLogMask;
+ rc_filter = 1;
+ }
+
+ /* process-specific filter overrides local and master */
+ if (_sl_ProcLogMask != 0)
+ {
+ filter = _sl_ProcLogMask;
+ rc_filter = 1;
+ }
+
mask = LOG_MASK(level);
if ((mask & filter) == 0) return;
free(str);
}
}
-
- pid = getpid();
+
str = NULL;
- asprintf(&str, "%u", pid);
+ asprintf(&str, "%u", _sl_pid);
if (str != NULL)
{
asl_set(msg, ASL_KEY_PID, str);
if (_sl_LogStat & LOG_PERROR)
{
p = NULL;
- if (_sl_LogStat & LOG_PID) asprintf(&p, "%s[%u]: %s", (_sl_LogTag == NULL) ? "???" : _sl_LogTag, pid, str);
+ if (_sl_LogStat & LOG_PID) asprintf(&p, "%s[%u]: %s", (_sl_LogTag == NULL) ? "???" : _sl_LogTag, _sl_pid, str);
else asprintf(&p, "%s: %s", (_sl_LogTag == NULL) ? "???" : _sl_LogTag, str);
if (p != NULL)
free(str);
}
- /* Get connected, output the message to the local logger. */
+ /* Set "ASLOption store" if remote control is active */
+ if (rc_filter != 0)
+ {
+ val = asl_get(msg, ASL_KEY_OPTION);
+ if (val == NULL)
+ {
+ asl_set(msg, ASL_KEY_OPTION, ASL_OPT_STORE);
+ }
+ else
+ {
+ str = NULL;
+ asprintf(&str, "%s %s", ASL_OPT_STORE, val);
+ if (str != NULL)
+ {
+ asl_set(msg, ASL_KEY_OPTION, str);
+ free(str);
+ str = NULL;
+ }
+ }
+ }
+
+ /* send a mach message to syslogd */
str = asl_format_message(msg, ASL_MSG_FMT_RAW, ASL_TIME_FMT_SEC, ASL_ENCODE_ASL, &count);
if (str != NULL)
{
- p = NULL;
- asprintf(&p, "%10u %s", count, str);
- free(str);
-
- if (p != NULL)
+ outlen = count + 11;
+ kstatus = vm_allocate(mach_task_self(), (vm_address_t *)&out, outlen + 1, TRUE);
+ if (kstatus == KERN_SUCCESS)
{
- count += 12;
- if (_sl_connected == 0) openlog(_sl_LogTag, _sl_LogStat | LOG_NDELAY, 0);
+ memset(out, 0, outlen + 1);
+ snprintf((char *)out, outlen, "%10u %s", count, str);
- status = send(_sl_LogFile, p, count, 0);
- if (status< 0)
- {
- closelog();
- openlog(_sl_LogTag, _sl_LogStat | LOG_NDELAY, 0);
- status = send(_sl_LogFile, p, count, 0);
- }
+ status = 0;
+ if (asl_server_port == MACH_PORT_NULL) kstatus = bootstrap_look_up(bootstrap_port, ASL_SERVICE_NAME, &asl_server_port);
+
+ if (kstatus == KERN_SUCCESS) kstatus = _asl_server_message(asl_server_port, (caddr_t)out, outlen + 1);
+ else vm_deallocate(mach_task_self(), (vm_address_t)out, outlen + 1);
- if (status >= 0)
+ if (kstatus == KERN_SUCCESS)
{
- free(p);
+ free(str);
asl_free(msg);
return;
}
-
- free(p);
}
+
+ free(str);
}
/*
iov.iov_len = count - 1;
iov.iov_base = p;
writev(fd, &iov, 1);
-
+
free(p);
}
#ifndef BUILDING_VARIANT
-static struct sockaddr_un SyslogAddr; /* AF_UNIX address of local logger */
+__private_extern__ void
+_syslog_fork_child()
+{
+ _sl_RCToken = -1;
+ _sl_NotifyToken = -1;
+ _sl_NotifyMaster = -1;
+
+ asl_server_port = MACH_PORT_NULL;
+
+ _sl_pid = getpid();
+}
__private_extern__ void
_sl_init_notify()
{
int status;
char *notify_name;
- const char *prefix;
+ uint32_t euid;
if (_sl_LogStat & LOG_NO_NOTIFY)
{
+ _sl_RCToken = -2;
_sl_NotifyMaster = -2;
_sl_NotifyToken = -2;
return;
}
+ if (_sl_RCToken == -1)
+ {
+ status = notify_register_check(NOTIFY_RC, &_sl_RCToken);
+ if (status != NOTIFY_STATUS_OK) _sl_RCToken = -2;
+ }
+
if (_sl_NotifyMaster == -1)
{
status = notify_register_plain(NOTIFY_SYSTEM_MASTER, &_sl_NotifyMaster);
{
_sl_NotifyToken = -2;
+ euid = geteuid();
notify_name = NULL;
- prefix = NOTIFY_PREFIX_USER;
- if (getuid() == 0) prefix = NOTIFY_PREFIX_SYSTEM;
- asprintf(¬ify_name, "%s.%d", prefix, getpid());
+ if (euid == 0) asprintf(¬ify_name, "%s.%d", NOTIFY_PREFIX_SYSTEM, getpid());
+ else asprintf(¬ify_name, "user.uid.%d.syslog.%d", euid, getpid());
if (notify_name != NULL)
{
}
void
-openlog(ident, logstat, logfac)
- const char *ident;
- int logstat, logfac;
+openlog(const char *ident, int logstat, int logfac)
{
+ kern_return_t kstatus;
+
if (ident != NULL) _sl_LogTag = ident;
_sl_LogStat = logstat;
if (logfac != 0 && (logfac &~ LOG_FACMASK) == 0) _sl_LogFacility = logfac;
- if (_sl_LogFile == -1)
- {
- SyslogAddr.sun_family = AF_UNIX;
- (void)strncpy(SyslogAddr.sun_path, _PATH_LOG, sizeof(SyslogAddr.sun_path));
- if (_sl_LogStat & LOG_NDELAY)
- {
- if ((_sl_LogFile = socket(AF_UNIX, SOCK_DGRAM, 0)) == -1) return;
- (void)fcntl(_sl_LogFile, F_SETFD, 1);
- }
- }
-
- if ((_sl_LogFile != -1) && (_sl_connected == 0))
+ if (asl_server_port == MACH_PORT_NULL)
{
- if (connect(_sl_LogFile, (struct sockaddr *)&SyslogAddr, sizeof(SyslogAddr)) == -1)
- {
- (void)close(_sl_LogFile);
- _sl_LogFile = -1;
- }
- else
- {
- _sl_connected = 1;
- }
+ kstatus = bootstrap_look_up(bootstrap_port, ASL_SERVICE_NAME, &asl_server_port);
}
+ _sl_pid = getpid();
_sl_init_notify();
}
void
closelog()
{
- if (_sl_LogFile >= 0) {
- (void)close(_sl_LogFile);
- _sl_LogFile = -1;
- }
- _sl_connected = 0;
+ if (asl_server_port != MACH_PORT_NULL) mach_port_deallocate(mach_task_self(), asl_server_port);
+ asl_server_port = MACH_PORT_NULL;
+
+ if (_sl_NotifyToken != -1) notify_cancel(_sl_NotifyToken);
+ _sl_NotifyToken = -1;
+
+ if (_sl_NotifyMaster != -1) notify_cancel(_sl_NotifyMaster);
+ _sl_NotifyMaster = -1;
}
/* setlogmask -- set the log mask level */
int
-setlogmask(pmask)
- int pmask;
+setlogmask(int pmask)
{
int omask;
static pthread_mutex_t ttyname_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_key_t ttyname_key;
static int ttyname_init = 0;
-extern int __pthread_tsd_first;
char *
ttyname(int fd)
_pthread_mutex_lock(&ttyname_lock);
if (ttyname_init == 0) {
/* __PTK_LIBC_TTYNAME_KEY */
- ttyname_key = __pthread_tsd_first+1;
+ ttyname_key = __LIBC_PTHREAD_KEY_TTYNAME;
if (pthread_key_init_np(ttyname_key, free)) {
int save = errno;
_pthread_mutex_unlock(&ttyname_lock);
*
* @APPLE_LICENSE_HEADER_END@
*/
+#include <pthread.h>
#ifdef UTMP_COMPAT
#define UTMP_COMPAT_UTMP0 0x01
#define LASTLOG_FACILITY "com.apple.system.lastlog"
#define UTMPX_FACILITY "com.apple.system.utmpx"
+#define UTMPX_LOCK if (__is_threaded) pthread_mutex_lock(&utmpx_mutex)
+#define UTMPX_UNLOCK if (__is_threaded) pthread_mutex_unlock(&utmpx_mutex)
+
extern int utfile_system; /* are we using _PATH_UTMPX? */
+extern int __is_threaded;
+extern pthread_mutex_t utmpx_mutex;
#ifdef __LP64__
#define __need_struct_timeval32
};
#endif /* __LP64__ */
+void _endutxent(void);
+void _setutxent(void);
struct utmpx *_pututxline(const struct utmpx *);
#ifdef __LP64__
void _utmpx32_64(const struct utmpx32 *, struct utmpx *);
static struct utmpx ut;
static char utfile[MAXPATHLEN] = _PATH_UTMPX;
__private_extern__ int utfile_system = 1; /* are we using _PATH_UTMPX? */
+__private_extern__ pthread_mutex_t utmpx_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct utmpx *_getutxid(const struct utmpx *);
__private_extern__ const char _utmpx_vers[] = "utmpx-1.00";
-void
-setutxent()
+__private_extern__ void
+_setutxent()
{
(void)memset(&ut, 0, sizeof(ut));
void
-endutxent()
+setutxent()
+{
+ UTMPX_LOCK;
+ _setutxent();
+ UTMPX_UNLOCK;
+}
+
+
+__private_extern__ void
+_endutxent()
{
(void)memset(&ut, 0, sizeof(ut));
}
-struct utmpx *
-getutxent()
+void
+endutxent()
+{
+ UTMPX_LOCK;
+ _endutxent();
+ UTMPX_UNLOCK;
+}
+
+
+static struct utmpx *
+_getutxent()
{
#ifdef __LP64__
struct utmpx32 ut32;
else
readonly = 1;
}
-
+
+ fcntl(fileno(fp), F_SETFD, 1); /* set close-on-exec flag */
/* get file size in order to check if new file */
if (fstat(fileno(fp), &st) == -1)
return NULL;
}
+
+struct utmpx *
+getutxent()
+{
+ struct utmpx *ret;
+ UTMPX_LOCK;
+ ret = _getutxent();
+ UTMPX_UNLOCK;
+ return ret;
+}
+
struct utmpx *
getutxid(const struct utmpx *utx)
{
struct utmpx temp;
const struct utmpx *ux;
+ struct utmpx *ret;
_DIAGASSERT(utx != NULL);
if (utx->ut_type == EMPTY)
return NULL;
+ UTMPX_LOCK;
/* make a copy as needed, and auto-fill if requested */
ux = _utmpx_working_copy(utx, &temp, 1);
- if (!ux)
+ if (!ux) {
+ UTMPX_UNLOCK;
return NULL;
+ }
- return _getutxid(ux);
+ ret = _getutxid(ux);
+ UTMPX_UNLOCK;
+ return ret;
}
default:
return NULL;
}
- } while (getutxent() != NULL);
+ } while (_getutxent() != NULL);
return NULL;
}
_DIAGASSERT(utx != NULL);
+ UTMPX_LOCK;
do {
switch (ut.ut_type) {
case EMPTY:
case LOGIN_PROCESS:
case USER_PROCESS:
if (strncmp(ut.ut_line, utx->ut_line,
- sizeof(ut.ut_line)) == 0)
+ sizeof(ut.ut_line)) == 0) {
+ UTMPX_UNLOCK;
return &ut;
+ }
break;
default:
break;
}
- } while (getutxent() != NULL);
+ } while (_getutxent() != NULL);
+ UTMPX_UNLOCK;
return NULL;
}
return NULL;
}
+ UTMPX_LOCK;
if ((ux = _pututxline(utx)) != NULL && utfile_system) {
_utmpx_asl(ux); /* the equivalent of wtmpx and lastlogx */
#ifdef UTMP_COMPAT
_write_utmp_compat(ux);
#endif /* UTMP_COMPAT */
}
+ UTMPX_UNLOCK;
return ux;
}
#ifdef __LP64__
struct utmpx32 ut32;
#endif /* __LP64__ */
- int gotlock = 0;
+ struct flock fl;
+#define gotlock (fl.l_start >= 0)
+ fl.l_start = -1; /* also means we haven't locked */
if (utfile_system)
if ((fp != NULL && readonly) || (fp == NULL && geteuid() != 0)) {
errno = EPERM;
}
if (fp == NULL) {
- (void)getutxent();
+ (void)_getutxent();
if (fp == NULL || readonly) {
errno = EPERM;
return NULL;
return NULL;
if ((x = _getutxid(ux)) == NULL) {
- setutxent();
+ _setutxent();
if ((x = _getutxid(ux)) == NULL) {
/*
* utx->ut_type has any original mask bits, while
errno = EINVAL;
return NULL;
}
- if (lockf(fileno(fp), F_LOCK, (off_t)0) == -1)
+ /*
+ * Replace lockf() with fcntl() and a fixed start
+ * value. We should already be at EOF.
+ */
+ if ((fl.l_start = lseek(fileno(fp), 0, SEEK_CUR)) < 0)
+ return NULL;
+ fl.l_len = 0;
+ fl.l_whence = SEEK_SET;
+ fl.l_type = F_WRLCK;
+ if (fcntl(fileno(fp), F_SETLKW, &fl) == -1)
return NULL;
- gotlock++;
if (fseeko(fp, (off_t)0, SEEK_END) == -1)
goto fail;
}
fail:
if (gotlock) {
int save = errno;
- if (lockf(fileno(fp), F_ULOCK, (off_t)0) == -1)
+ fl.l_type = F_UNLCK;
+ if (fcntl(fileno(fp), F_SETLK, &fl) == -1)
return NULL;
errno = save;
}
{
size_t len;
+ UTMPX_LOCK;
if (fname == NULL) {
strcpy(utfile, _PATH_UTMPX);
utfile_system = 1;
- endutxent();
+ _endutxent();
+ UTMPX_UNLOCK;
return 1;
}
len = strlen(fname);
- if (len >= sizeof(utfile))
+ if (len >= sizeof(utfile)) {
+ UTMPX_UNLOCK;
return 0;
+ }
/* must end in x! */
- if (fname[len - 1] != 'x')
+ if (fname[len - 1] != 'x') {
+ UTMPX_UNLOCK;
return 0;
+ }
(void)strlcpy(utfile, fname, sizeof(utfile));
- endutxent();
+ _endutxent();
utfile_system = 0;
+ UTMPX_UNLOCK;
return 1;
}
.Xr system 3
.Sh BUGS
This version of
-.Fn workexp
+.Fn wordexp
ignores the value of the
.Fa flags
argument.
/*
- * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2005, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <unistd.h>
#include <paths.h>
#include <strings.h>
+#include <spawn.h>
#include <sys/errno.h>
// For _NSGetEnviron() -- which gives us a pointer to environ
extern int errno;
pthread_once_t re_init_c = PTHREAD_ONCE_INIT;
-static regex_t re_cmd, re_goodchars, re_subcmd_syntax_err_kludge;
+static regex_t re_cmd, re_goodchars, re_subcmd_syntax_err_kludge, re_quoted_string;
-/* Similar to popen, but catures stderr for you. Doesn't interoperate
+/* Similar to popen, but captures stderr for you. Doesn't interoperate
with pclose. Call wait4 on your own */
pid_t popen_oe(char *cmd, FILE **out, FILE **err) {
int out_pipe[2], err_pipe[2];
char *argv[4];
pid_t pid;
+ posix_spawn_file_actions_t file_actions;
+ int errrtn;
+ if ((errrtn = posix_spawn_file_actions_init(&file_actions)) != 0) {
+ errno = errrtn;
+ return 0;
+ }
if (pipe(out_pipe) < 0) {
+ posix_spawn_file_actions_destroy(&file_actions);
return 0;
}
if (pipe(err_pipe) < 0) {
+ posix_spawn_file_actions_destroy(&file_actions);
close(out_pipe[0]);
close(out_pipe[1]);
return 0;
}
+ if (out_pipe[1] != STDOUT_FILENO) {
+ posix_spawn_file_actions_adddup2(&file_actions, out_pipe[1], STDOUT_FILENO);
+ posix_spawn_file_actions_addclose(&file_actions, out_pipe[1]);
+ }
+ posix_spawn_file_actions_addclose(&file_actions, out_pipe[0]);
+ if (err_pipe[1] != STDERR_FILENO) {
+ posix_spawn_file_actions_adddup2(&file_actions, err_pipe[1], STDERR_FILENO);
+ posix_spawn_file_actions_addclose(&file_actions, err_pipe[1]);
+ }
+ posix_spawn_file_actions_addclose(&file_actions, err_pipe[0]);
+
argv[0] = "sh";
argv[1] = "-c";
argv[2] = cmd;
argv[3] = NULL;
- switch(pid = vfork()) {
- case -1:
- close(out_pipe[0]);
- close(out_pipe[1]);
- close(err_pipe[0]);
- close(err_pipe[1]);
- return 0;
- case 0:
- if (out_pipe[1] != STDOUT_FILENO) {
- dup2(out_pipe[1], STDOUT_FILENO);
- close(out_pipe[1]);
- }
- close(out_pipe[0]);
- if (err_pipe[1] != STDERR_FILENO) {
- dup2(err_pipe[1], STDERR_FILENO);
- close(err_pipe[1]);
- }
- close(err_pipe[0]);
- execve(_PATH_BSHELL, argv, *_NSGetEnviron());
- _exit(127);
- default:
- *out = fdopen(out_pipe[0], "r");
- assert(*out);
- close(out_pipe[1]);
- *err = fdopen(err_pipe[0], "r");
- assert(*err);
- close(err_pipe[1]);
-
- return pid;
+ errrtn = posix_spawn(&pid, _PATH_BSHELL, &file_actions, NULL, argv, *_NSGetEnviron());
+ posix_spawn_file_actions_destroy(&file_actions);
+
+ if (errrtn != 0) {
+ close(out_pipe[0]);
+ close(out_pipe[1]);
+ close(err_pipe[0]);
+ close(err_pipe[1]);
+ errno = errrtn;
+ return 0;
}
+
+ *out = fdopen(out_pipe[0], "r");
+ assert(*out);
+ close(out_pipe[1]);
+ *err = fdopen(err_pipe[0], "r");
+ assert(*err);
+ close(err_pipe[1]);
+
+ return pid;
}
void re_init(void) {
- int rc = regcomp(&re_cmd, "(^|[^\\])(`|\\$\\()", REG_EXTENDED|REG_NOSUB);
+ int rc = regcomp(&re_cmd, "(^|[^\\])(`|\\$\\([^(])", REG_EXTENDED|REG_NOSUB);
/* XXX I'm not sure the { } stuff is correct,
it may be overly restrictave */
- char *rx = "^([^\\\"'|&;<>(){}]"
+ char *rx = "^([^\\\"'|&;<>(){}\n]"
"|\\\\."
- "|'([^']|\\\\')*'"
- "|\"([^\"]|\\\\\")*\""
- "|`([^`]|\\\\`)*`"
- "|\\$(([^)]|\\\\))*\\)" /* can't do nesting in a regex */
+ "|'(\\\\\\\\|\\\\'|[^'])*'"
+ "|\"(\\\\\\\\|\\\\\"|[^\"])*\""
+ "|`(\\\\\\\\|\\\\`|[^`])*`"
+ "|\\$\\(\\(([^)]|\\\\)*\\)\\)" /* can't do nesting in a regex */
+ "|\\$\\(([^)]|\\\\)*\\)" /* can't do nesting in a regex */
"|\\$\\{[^}]*\\}"
/* XXX: { } ? */
")*$";
rc = regcomp(&re_subcmd_syntax_err_kludge,
"command substitution.*syntax error", REG_EXTENDED|REG_NOSUB);
+
+ rc = regcomp(&re_quoted_string,
+ "(^|[^\\])'(\\\\\\\\|\\\\'|[^'])*'", REG_EXTENDED|REG_NOSUB);
}
/* Returns zero if it can't realloc */
return 0;
}
+static int
+cmd_search(const char *str) {
+ regoff_t first = 0;
+ regoff_t last = strlen(str);
+ regmatch_t m = {first, last};
+ int flags;
+
+ if (last == 0) return REG_NOMATCH; /* empty string */
+
+ flags = REG_STARTEND;
+ while(regexec(&re_quoted_string, str, 1, &m, flags) == 0) {
+ /*
+ * We have matched a single quoted string, from m.rm_so to m.rm_eo.
+ * So the (non-quote string) from first to m.rm_so needs to be
+ * checked for command substitution. Then we use REG_STARTEND to
+ * look for any other single quote strings after this one.
+ */
+ regmatch_t head = {first, m.rm_so};
+ if (regexec(&re_cmd, str, 1, &head, flags) == 0) {
+ return 0; /* found a command substitution */
+ }
+ flags = REG_NOTBOL | REG_STARTEND;
+ m.rm_so = first = m.rm_eo;
+ m.rm_eo = last;
+ }
+ /* Check the remaining string */
+ flags = REG_STARTEND;
+ if (m.rm_so > 0) flags |= REG_NOTBOL;
+ return regexec(&re_cmd, str, 1, &m, flags);
+}
+
/* XXX this is _not_ designed to be fast */
-/* wordexp is also rife with security "chalenges", unless you pass it
+/* wordexp is also rife with security "challenges", unless you pass it
WRDE_NOCMD it *must* support subshell expansion, and even if you
don't beause it has to support so much of the standard shell (all
the odd little variable expansion options for example) it is hard
about 20 chars */
size_t cbuf_l = 1024;
char *cbuf = NULL;
- /* Put a NUL byte between eaach word, and at the end */
+ /* Put a NUL byte between each word, and at the end */
char *cmd = "/usr/bin/perl -e 'print join(chr(0), @ARGV), chr(0)' -- ";
size_t wordv_l = 0, wordv_i = 0;
int rc;
pthread_once(&re_init_c, re_init);
if (flags & WRDE_NOCMD) {
- /* Thi attmpts to match any backticks or $(...)'s, but there may be
+ /* This attempts to match any backticks or $(...)'s, but there may be
other ways to do subshell expansion that the standard doesn't
- cover, but I don't know of any -- failures here aare a potential
+ cover, but I don't know of any -- failures here are a potential
security risk */
- rc = regexec(&re_cmd, words, 0, NULL, 0);
+ rc = cmd_search(words);
if (rc != REG_NOMATCH) {
/* Technically ==0 is WRDE_CMDSUB, and != REG_NOMATCH is
"some internal error", but failing to catch those here
}
bzero(pwe->we_wordv + wordv_i, pwe->we_offs * sizeof(char *));
wordv_i = wend;
+ } else {
+ pwe->we_offs = 0;
}
}
}
int i = 0, e = pwe->we_wordc + pwe->we_offs;
- for(i = 0; i < e; i++) {
+ for(i = pwe->we_offs; i < e; i++) {
free(pwe->we_wordv[i]);
}
free(pwe->we_wordv);
.if $(MACHINE_ARCH) != arm
GFLAGS += -force_cpusubtype_ALL
.endif
+.if ${GCC_42} != YES
+GFLAGS += -no-cpp-precomp
+.endif
.ifdef SDKROOT
GFLAGS += -isysroot '${SDKROOT}'
.endif # SDKROOT
# mcount cannot be compiled with profiling
gmon.po:
- ${CC} -arch ${CCARCH} ${GFLAGS} \
+ ${MYCC} -arch ${CCARCH} ${GFLAGS} \
-I${.CURDIR}/include \
-I${SYMROOT}/include \
-I${INCLUDEDIR} \
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2003, 2004, 2007, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
write(fd, &image_count, sizeof(uint32_t));
image_count++;
for(i = 1; i < image_count; i++){
- image_header = _dyld_get_image_header(i);
+ image_header = (intptr_t)_dyld_get_image_header(i);
write(fd, &image_header, sizeof(intptr_t));
image_name = _dyld_get_image_name(i);
write(fd, image_name, strlen(image_name) + 1);
/*
- * Copyright (c) 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
}
if (stacksize == 0) { /* main thread doesn't have pthread stack size */
- rlim_t rlim;
+ struct rlimit rlim;
if (0 == getrlimit(RLIMIT_STACK, &rlim))
- stacksize = rlim;
+ stacksize = rlim.rlim_cur;
}
uctx->uc_stack.ss_size = stacksize;
/*
- * Copyright (c) 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2007, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
/* Set context to next one in link */
/* XXX - what to do for error, abort? */
setcontext((const ucontext_t *)ucp->uc_link);
- abort(); /* should never get here */
+ LIBC_ABORT("setcontext failed"); /* should never get here */
}
}
void
-makecontext(ucontext_t *ucp, void (*start)(void), int argc, ...)
+makecontext(ucontext_t *ucp, void (*start)(), int argc, ...)
{
va_list ap;
char *stack_top;
+++ /dev/null
-.PATH: ${.CURDIR}/i386/mach
-
-MDSRCS += mach_absolute_time.s
+++ /dev/null
-/*
- * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-#include <machine/cpu_capabilities.h>
-
-
- .text
- .align 2
- .globl _mach_absolute_time
-_mach_absolute_time:
- movl $(_COMM_PAGE_NANOTIME), %eax
- jmpl *%eax
MDSRCS += \
init_cpu_capabilities.c \
get_cpu_capabilities.s \
+ pthread_mutex_lock.s \
pthread_set_self.s \
pthread_self.s \
pthread_getspecific.s \
--- /dev/null
+/*
+ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <machine/cpu_capabilities.h>
+
+
+ .text
+ .align 2
+ .globl __commpage_pthread_mutex_lock
+__commpage_pthread_mutex_lock:
+ movl $(_COMM_PAGE_MUTEX_LOCK), %eax
+ jmpl *%eax
# Long double is 80 bits
-GDTOA_FBSDSRCS+= gdtoa_strtopx.c machdep_ldisx.c
+GDTOA_FBSDSRCS+= gdtoa-strtopx.c machdep_ldisx.c
strncpy.s \
strncmp.s \
memcmp.s \
- bcmp.s \
memset.s
+
+SUPPRESSSRCS += bcmp.c
+++ /dev/null
-/*
- * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-/*
- * bcmp() is implemented in memcmp.s, as it is equivalent to memcmp() in OSX.
- * (The two symbols, bcmp and memcmp, have the same value.)
- * This empty file is here to prevent the Free BSD machine independent version
- * from building.
- */
LEAF(_bzero,0)
movl $(_COMM_PAGE_BZERO), %eax
jmpl *%eax
+
+X_LEAF(___bzero, _bzero)
+++ /dev/null
-/*
- * Copyright (c) 1999-2005 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved
- *
- * File: SYS.h
- *
- * Definition of the user side of the UNIX system call interface
- * for i386.
- *
- * HISTORY
- * 12-3-92 Bruce Martin (Bruce_Martin@next.com)
- * Created.
- */
-
-/*
- * Headers
- */
-#include <sys/syscall.h>
-#include <architecture/i386/asm_help.h>
-#include <mach/i386/syscall_sw.h>
-
-/*
- * We have two entry points. int's is used for syscalls which need to preserve
- * %ecx across the call, or return a 64-bit value in %eax:%edx. sysenter is used
- * for the majority of syscalls which just return a value in %eax.
- */
-
-#define UNIX_SYSCALL_SYSENTER SYSENTER_PAD call __sysenter_trap
-
-/*
- * This is the same as UNIX_SYSCALL, but it can call an alternate error
- * return function. It's generic to support potential future callers.
- */
-#define UNIX_SYSCALL_ERR(name, nargs,error_ret) \
- .globl error_ret ;\
-LEAF(_##name, 0) ;\
- movl $ SYS_##name, %eax ;\
- UNIX_SYSCALL_SYSENTER ;\
- jnb 2f ;\
- BRANCH_EXTERN(error_ret) ;\
-2:
-
-#define UNIX_SYSCALL(name, nargs) \
- .globl cerror ;\
-LEAF(_##name, 0) ;\
- movl $ SYS_##name, %eax ;\
- UNIX_SYSCALL_SYSENTER ;\
- jnb 2f ;\
- BRANCH_EXTERN(cerror) ;\
-2:
-
-#define UNIX_SYSCALL_INT(name, nargs) \
- .globl cerror ;\
-LEAF(_##name, 0) ;\
- movl $ SYS_##name, %eax ;\
- UNIX_SYSCALL_TRAP ;\
- jnb 2f ;\
- BRANCH_EXTERN(cerror) ;\
-2:
-
-#define UNIX_SYSCALL_NONAME(name, nargs) \
- .globl cerror ;\
- movl $ SYS_##name, %eax ;\
- UNIX_SYSCALL_SYSENTER ;\
- jnb 2f ;\
- BRANCH_EXTERN(cerror) ;\
-2:
-
-#define UNIX_SYSCALL_INT_NONAME(name, nargs) \
- .globl cerror ;\
- movl $ SYS_##name, %eax ;\
- UNIX_SYSCALL_TRAP ;\
- jnb 2f ;\
- BRANCH_EXTERN(cerror) ;\
-2:
-
-#define PSEUDO(pseudo, name, nargs) \
-LEAF(_##pseudo, 0) ;\
- UNIX_SYSCALL_NONAME(name, nargs)
-
-#define PSEUDO_ERR(pseudo, name, nargs, error_ret) \
- .globl error_ret ;\
-LEAF(_##pseudo, 0) ;\
- movl $ SYS_##name, %eax ;\
- UNIX_SYSCALL_SYSENTER ;\
- jnb 2f ;\
- BRANCH_EXTERN(error_ret) ;\
-2:
*/
/* Copyright 1998 Apple Computer, Inc. */
-#include <SYS.h>
+#include <architecture/i386/asm_help.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
*/
#include <architecture/i386/asm_help.h>
-#include <SYS.h>
// The FP control word is actually two bytes, but there's no harm in
// using four bytes for it and keeping the struct aligned.
.include "${.CURDIR}/include/arpa/Makefile.inc"
.include "${.CURDIR}/include/libkern/Makefile.inc"
.include "${.CURDIR}/include/protocols/Makefile.inc"
-.include "${.CURDIR}/include/machine/Makefile.inc"
.include "${.CURDIR}/include/malloc/Makefile.inc"
.ifdef FEATURE_LEGACY_NXZONE_APIS
.include "${.CURDIR}/include/objc/Makefile.inc"
INC_INSTHDRS := ${INC_INSTHDRS:S/^/${.CURDIR}\/include\//}
INSTHDRS += ${INC_INSTHDRS}
+LOCALHDRS += ${.CURDIR}/include/spawn_private.h
+
.include "Makefile.nbsd_begin"
NBSDHDRS = utmpx.h
.include "Makefile.nbsd_end"
/*
- * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999-2000, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
NSDocumentationDirectory = 8, // documentation (Library/Documentation)
NSDocumentDirectory = 9, // documents (Documents)
NSCoreServiceDirectory = 10, // location of core services (System/Library/CoreServices)
+ NSAutosavedInformationDirectory = 11, // location of user's directory for use with autosaving (~/Documents/Autosaved)
NSDesktopDirectory = 12, // location of user's Desktop (Desktop)
NSCachesDirectory = 13, // location of discardable cache files (Library/Caches)
NSApplicationSupportDirectory = 14, // location of application support files (plug-ins, etc) (Library/Application Support)
NSDownloadsDirectory = 15, // location of user's Downloads directory (Downloads)
+ NSInputMethodsDirectory = 16, // input methods (Library/Input Methods)
+ NSMoviesDirectory = 17, // location of user's Movies directory (~/Movies)
+ NSMusicDirectory = 18, // location of user's Music directory (~/Music)
+ NSPicturesDirectory = 19, // location of user's Pictures directory (~/Pictures)
+ NSPrinterDescriptionDirectory = 20, // location of system's PPDs directory (Library/Printers/PPDs)
+ NSSharedPublicDirectory = 21, // location of user's Public sharing directory (~/Public)
+ NSPreferencePanesDirectory = 22, // location of the PreferencePanes directory for use with System Preferences (Library/PreferencePanes)
NSAllApplicationsDirectory = 100, // all directories where applications can occur (Applications, Applications/Utilities, Developer/Applications, ...)
NSAllLibrariesDirectory = 101 // all directories where resources can occur (Library, Developer)
} NSSearchPathDirectory;
/*
- * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004, 2008, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <sys/_types.h>
+#if __GNUC__ > 2 || __GNUC__ == 2 && __GNUC_MINOR__ >= 7
+#define __strfmonlike(fmtarg, firstvararg) \
+ __attribute__((__format__ (__strfmon__, fmtarg, firstvararg)))
+#define __strftimelike(fmtarg) \
+ __attribute__((__format__ (__strftime__, fmtarg, 0)))
+#else
+#define __strfmonlike(fmtarg, firstvararg)
+#define __strftimelike(fmtarg)
+#endif
+
typedef int __darwin_nl_item;
typedef int __darwin_wctrans_t;
#ifdef __LP64__
#endif
#define __DARWIN_WEOF ((__darwin_wint_t)-1)
+#ifndef _FORTIFY_SOURCE
+# if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && ((__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__-0) < 1050)
+# define _FORTIFY_SOURCE 0
+# else
+# define _FORTIFY_SOURCE 2 /* on by default */
+# endif
+#endif
+
#endif /* __TYPES_H_ */
+/* $NetBSD: tftp.h,v 1.8 2003/08/07 09:44:12 agc Exp $ */
+
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
+ * 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* @(#)tftp.h 8.1 (Berkeley) 6/2/93
*/
-#ifndef _TFTP_H_
-#define _TFTP_H_
+#ifndef _ARPA_TFTP_H_
+#define _ARPA_TFTP_H_
/*
* Trivial File Transfer Protocol (IEN-133)
*/
-#define SEGSIZE 512 /* data segment size */
+#define SEGSIZE 512 /* data segment size */
+#define MAXSEGSIZE 65464 /* maximum negotiated data segment size */
+
+#define PKTSIZE SEGSIZE + 4
+#define MAXPKTSIZE MAXSEGSIZE + 4
/*
* Packet types.
#define DATA 03 /* data packet */
#define ACK 04 /* acknowledgement */
#define ERROR 05 /* error code */
+#define OACK 06 /* option acknowledgement */
struct tftphdr {
- unsigned short th_opcode; /* packet type */
+ short th_opcode; /* packet type */
union {
- unsigned short tu_block; /* block # */
- unsigned short tu_code; /* error code */
+ unsigned short tu_block; /* block # */
+ short tu_code; /* error code */
char tu_stuff[1]; /* request packet stuff */
} th_u;
char th_data[1]; /* data or error string */
#define EBADID 5 /* unknown transfer ID */
#define EEXISTS 6 /* file already exists */
#define ENOUSER 7 /* no such user */
+#define EOPTNEG 8 /* option negotiation failed */
-#endif /* !_TFTP_H_ */
+#endif /* _ARPA_TFTP_H_ */
/*
- * Copyright (c) 2004 - 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2004 - 2007 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
typedef struct __aslmsg *aslmsg;
typedef struct __aslresponse *aslresponse;
+/*! @header
+ * These routines provide an interface to the Apple System Log facility.
+ * The API allows client applications to create flexible, structured messages
+ * and send them to the syslogd server. Messages received by the server are
+ * saved in a data store, subject to input filtering constraints.
+ * This API also permits clients to create queries and search the message
+ * data store for matching messages.
+ */
+
/*
- * Log levels of the message
+ * NOTE FOR HeaderDoc
+ *
+ * These are added to allow headerdoc2html to process
+ * the prototypes of asl_log and asl_vlog correctly.
+ * The "-p" option to headerdoc2html is required.
+ */
+#ifndef __DARWIN_LDBL_COMPAT2
+/*! @parseOnly */
+#define __DARWIN_LDBL_COMPAT2(a)
+#endif
+#ifndef __printflike
+/*! @parseOnly */
+#define __printflike(a,b)
+#endif
+
+/*! @defineblock Log Message Priority Levels
+ * Log levels of the message.
*/
#define ASL_LEVEL_EMERG 0
#define ASL_LEVEL_ALERT 1
#define ASL_LEVEL_NOTICE 5
#define ASL_LEVEL_INFO 6
#define ASL_LEVEL_DEBUG 7
+/*! @/defineblock */
-/*
- * Corresponding level strings
+/*! @defineblock Log Message Priority Level Strings
+ * Strings corresponding to log levels.
*/
#define ASL_STRING_EMERG "Emergency"
#define ASL_STRING_ALERT "Alert"
#define ASL_STRING_NOTICE "Notice"
#define ASL_STRING_INFO "Info"
#define ASL_STRING_DEBUG "Debug"
+/*! @/defineblock */
-/*
- * Attribute value comparison operations
+/*! @defineblock Attribute Matching
+ * Attribute value comparison operations.
*/
#define ASL_QUERY_OP_CASEFOLD 0x0010
#define ASL_QUERY_OP_PREFIX 0x0020
#define ASL_QUERY_OP_LESS_EQUAL 0x0005
#define ASL_QUERY_OP_NOT_EQUAL 0x0006
#define ASL_QUERY_OP_TRUE 0x0007
+/*! @/defineblock */
-/*
- * Attributes of all messages.
- * The following attributes are attached to log messages,
- * and are preserved in the order listed.
- * Additional attributes may be added as desired, and are
- * appended in the order that they are defined.
+/*! @defineblock Message Attributes
+ *
+ * These attributes are known by ASL, and are generally
+ * associated with all log messages.
+ * Additional attributes may be added as desired.
*/
#define ASL_KEY_TIME "Time" /* Timestamp. Set automatically */
#define ASL_KEY_TIME_NSEC "TimeNanoSec" /* Nanosecond time. */
#define ASL_KEY_SESSION "Session" /* Session (set by the launchd). */
#define ASL_KEY_REF_PID "RefPID" /* Reference PID for messages proxied by launchd */
#define ASL_KEY_REF_PROC "RefProc" /* Reference process for messages proxied by launchd */
+/*! @/defineblock */
-/*
- * Message Types
+/*! @defineblock aslmsg Types
+ * Message type argument passed to asl_new().
*/
#define ASL_TYPE_MSG 0
#define ASL_TYPE_QUERY 1
+/*! @/defineblock */
-/* Macros to create bitmasks for filter settings - see asl_set_filter */
-#define ASL_FILTER_MASK(level) (1 << (level))
-#define ASL_FILTER_MASK_UPTO(level) ((1 << ((level) + 1)) - 1)
-
-/* Individual filter masks */
+/*! @defineblock Filter Masks
+ * Used in client-side filtering, which determines which
+ * messages are sent by the client to the syslogd server.
+ */
#define ASL_FILTER_MASK_EMERG 0x01
#define ASL_FILTER_MASK_ALERT 0x02
#define ASL_FILTER_MASK_CRIT 0x04
#define ASL_FILTER_MASK_NOTICE 0x20
#define ASL_FILTER_MASK_INFO 0x40
#define ASL_FILTER_MASK_DEBUG 0x80
+/*! @/defineblock */
-/* Options to asl_open */
+/*! @defineblock Filter Mask Macros
+ * Macros to create bitmasks for filter settings - see asl_set_filter().
+ */
+#define ASL_FILTER_MASK(level) (1 << (level))
+#define ASL_FILTER_MASK_UPTO(level) ((1 << ((level) + 1)) - 1)
+/*! @/defineblock */
+
+/*! @defineblock Client Creation Options
+ * Options for asl_open().
+ */
#define ASL_OPT_STDERR 0x00000001
#define ASL_OPT_NO_DELAY 0x00000002
#define ASL_OPT_NO_REMOTE 0x00000004
+/*! @/defineblock */
__BEGIN_DECLS
-/*
- * asl_open: initialize a syslog connection
+/*!
+ * Initialize a connection to the ASL server.
+ *
* This call is optional in most cases. The library will perform any
* necessary initializations on the fly. A call to asl_open() is required
* if optional settings must be made before messages are sent to the server.
* messages are not sent to the server by default.
*
* Options (defined above) may be set using the opts parameter. They are:
+ *
* ASL_OPT_STDERR - adds stderr as an output file descriptor
+ *
* ASL_OPT_NO_DELAY - connects to the server immediately
+ *
* ASL_OPT_NO_REMOTE - disables the remote-control mechanism for adjusting
* filter levers for processes using e.g. syslog -c ...
+ *
+ * @param ident
+ * (input) Sender name
+ * @param facility
+ * (input) Facility name
+ * @param opts
+ * (input) Options (see asl_open Options)
+ * @result Returns an ASL client handle
*/
aslclient asl_open(const char *ident, const char *facility, uint32_t opts);
-/*
- * Shuts down the current connection to the server.
+/*!
+ * Shuts down a connection to the server.
+ *
+ * @param asl
+ * (input) An ASL client handle
*/
void asl_close(aslclient asl);
-/*
- * asl_add_file: write log messages to the given file descriptor
+/*!
+ * Write log messages to the given file descriptor.
+ *
* Log messages will be written to this file as well as to the server.
- */
+ *
+ * @param asl
+ * (input) An ASL client handle
+ * @param fd
+ * (input) A file descriptor
+ * @result Returns 0 on success, non-zero on failure
+*/
int asl_add_log_file(aslclient asl, int fd);
-/*
- * asl_remove_file: stop writing log messages to the given file descriptor
+/*!
+ * Stop writing log messages to the given file descriptor.
* The file descripter is not closed by this routine.
+ *
+ * @param asl
+ * (input) An ASL client handle
+ * @param fd
+ * (input) A file descriptor
+ * @result Returns 0 on success, non-zero on failure
*/
int asl_remove_log_file(aslclient asl, int fd);
-/*
- * Set a filter for messages being sent to the server
+/*!
+ * Set a filter for messages being sent to the server.
* The filter is a bitmask representing priorities. The ASL_FILTER_MASK
* macro may be used to convert a priority level into a bitmask for that
* level. The ASL_FILTER_MASK_UPTO macro creates a bitmask for all
* sent to any file descripters added with asl_add_log_file().
* The default setting is ASL_FILTER_MASK_UPTO(ASL_LEVEL_NOTICE).
* Returns the previous filter value.
+ *
+ * @param asl
+ * (input) An ASL client handle
+ * @param f
+ * (input) A filter value
+ * @result Returns the previous filter value
*/
int asl_set_filter(aslclient asl, int f);
/*
- * asl_key: examine attribute keys
- * returns the key of the nth attribute in a message (beginning at zero)
- * returns NULL if the message has fewer attributes
+ * Examine attribute keys.
+ *
+ * @param msg
+ * (input) An ASL message
+ * @param n
+ * (input) An index value
+ * @result Returns the key of the nth attribute in a message (beginning at zero),
+ * or NULL if n is greater than the largest message index.
*/
const char *asl_key(aslmsg msg, uint32_t n);
-/*
- * asl_new: create a new log message.
+/*!
+ * Create a new log message or query message.
+ *
+ * @param type
+ * (input) Message type (see aslmsg Types)
+ * @result Returns a newly allocated asmsg of the specified type
*/
aslmsg asl_new(uint32_t type);
-/*
- * asl_set: set attributes of a message
- * msg: an aslmsg
- * key: attribute key
- * value: attribute value
- * returns 0 for success, non-zero for failure
+/*!
+ * Set or re-set a message attribute.
+ *
+ * @param msg
+ * (input) An aslmsg
+ * @param key
+ * (input) Attribute key
+ * @param value
+ * (input) Attribute value
+ * @result returns 0 for success, non-zero for failure
*/
int asl_set(aslmsg msg, const char *key, const char *value);
-/*
- * asl_unset: remove attributes of a message
- * msg: an aslmsg
- * key: attribute key
+/*!
+ * Remove a message attribute.
+ *
+ * @param msg
+ * (input) An aslmsg
+ * @param key
+ * (input) Attribute key
* returns 0 for success, non-zero for failure
*/
int asl_unset(aslmsg msg, const char *key);
-/*
- * asl_get: get attribute values from a message
- * msg: an aslmsg
- * key: attribute key
- * returns the attribute value
- * returns NULL if the message does not contain the key
+/*!
+ * Get the value of a message attribute.
+ *
+ * @param msg
+ * (input) An aslmsg
+ * @param key
+ * (input) Attribute key
+ * @result Returns the attribute value, or NULL if the message does not contain the key
*/
const char *asl_get(aslmsg msg, const char *key);
-/*
- * asl_log: log a message with a particular log level
- * msg: an aslmsg
- * msg may be NULL, in which case a new message will be
- * created and sent using default attributes.
- * level: the log level
- * format: A formating string followed by a list of arguments, like printf()
- * returns 0 for success, non-zero for failure
+/*!
+ * Log a message with a particular log level.
+ *
+ * @param asl
+ * (input) An ASL client handle
+ * @param msg
+ * (input) An aslmsg (default attributes will be supplied if msg is NULL)
+ * @param level
+ * (input) Log level (ASL_LEVEL_DEBUG to ASL_LEVEL_EMERG)
+ * @param format
+ * (input) A printf() - style format string followed by a list of arguments
+ * @result Returns 0 for success, non-zero for failure
*/
#ifdef __DARWIN_LDBL_COMPAT2
int asl_log(aslclient asl, aslmsg msg, int level, const char *format, ...) __DARWIN_LDBL_COMPAT2(asl_log) __printflike(4, 5);
int asl_log(aslclient asl, aslmsg msg, int level, const char *format, ...) __printflike(4, 5);
#endif
-/*
- * asl_vlog: Similar to asl_log, but taking a va_list instead of a list of
- * arguments.
- * msg: an aslmsg
- * msg may be NULL, in which case a new message will be
- * created and sent using default attributes.
- * level: the log level of the associated message
- * format: A formating string followed by a list of arguments, like vprintf()
- * returns 0 for success, non-zero for failure
+/*!
+ * Log a message with a particular log level.
+ * Similar to asl_log, but takes a va_list argument.
+ *
+ * @param asl
+ * (input) An ASL client handle
+ * @param msg
+ * (input) An aslmsg (default attributes will be supplied if msg is NULL)
+ * @param level
+ * (input) Log level (ASL_LEVEL_DEBUG to ASL_LEVEL_EMERG)
+ * @param format
+ * (input) A printf() - style format string followed by a list of arguments
+ * @param ap
+ * (input) A va_list containing the values for the format string
+ * @result Returns 0 for success, non-zero for failure
*/
#ifdef __DARWIN_LDBL_COMPAT2
int asl_vlog(aslclient asl, aslmsg msg, int level, const char *format, va_list ap) __DARWIN_LDBL_COMPAT2(asl_vlog) __printflike(4, 0);
int asl_vlog(aslclient asl, aslmsg msg, int level, const char *format, va_list ap) __printflike(4, 0);
#endif
-/*
- * asl_send: send a message
+/*!
+ * Log a message.
+ *
* This routine may be used instead of asl_log() or asl_vlog() if asl_set()
* has been used to set all of a message's attributes.
- * msg: an aslmsg
- * returns 0 for success, non-zero for failure
+ *
+ * @param asl
+ * (input) An ASL client handle
+ * @param msg
+ * (input) An aslmsg
+ * @result Returns 0 for success, non-zero for failure
*/
int asl_send(aslclient asl, aslmsg msg);
-/*
- * asl_free: free a message
- * msg: an aslmsg to free
+/*!
+ * Free a message. Frees all the attribute keys and values.
+ *
+ * @param msg
+ * (input) An aslmsg to free
*/
void asl_free(aslmsg msg);
-/*
- * asl_set_query: set arbitrary parameters of a query
- * Similar to als_set, but allows richer query operations.
+/*!
+ * Set arbitrary parameters of a query.
+ * This is similar to asl_set, but allows richer query operations.
* See ASL_QUERY_OP_* above.
- * msg: an aslmsg
- * key: attribute key
- * value: attribute value
- * op: an operation from the set above.
- * returns 0 for success, non-zero for failure
+ *
+ * @param msg
+ * (input) An aslmsg
+ * @param key
+ * (input) Attribute key
+ * @param value
+ * (input) Attribute value
+ * @param op
+ * (input) An operation (ASL_QUERY_OP_*)
+ * @result Returns 0 for success, non-zero for failure
*/
int asl_set_query(aslmsg msg, const char *key, const char *value, uint32_t op);
-/*
- * asl_search: Search for messages matching the criteria described
- * by the aslmsg . The caller should set the attributes to match using
- * asl_set_query() or asl_set(). The operation ASL_QUERY_OP_EQUAL is
- * used for attributes set with asl_set().
- * a: an aslmsg
- * returns: a set of messages that can be iterated over using aslresp_next(),
- * and the values can be retrieved using aslresp_get.
+/*!
+ * Search for messages matching the criteria described by the aslmsg.
+ * The caller should set the attributes to match using asl_set_query() or asl_set().
+ * The operatoin ASL_QUERY_OP_EQUAL is used for attributes set with asl_set().
+ *
+ * @param msg
+ * (input) An aslmsg to match
+ * @result Returns a set of messages accessable using aslresponse_next(),
*/
aslresponse asl_search(aslclient asl, aslmsg msg);
-/*
- * aslresponse_next: Iterate over responses returned from asl_search()
- * r: a response returned from asl_search();
- * returns: The next log message (an aslmsg) or NULL on failure
+/*!
+ * Iterate over responses returned from asl_search().
+ *
+ * @param r
+ * (input) An aslresponse returned by asl_search()
+ * @result Returns the next message (an aslmsg) in the response, or NULL when there are no more messages
*/
aslmsg aslresponse_next(aslresponse r);
-/*
- * aslresponse_free: Free a response returned from asl_search()
- * r: a response returned from asl_search()
+/*!
+ * Free a response returned from asl_search().
+ * @param r
+ * (input) An aslresponse returned by asl_search()
*/
void aslresponse_free(aslresponse r);
/*
- * Copyright (c) 2000, 2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000, 2005, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
__DARWIN_CTYPE_static_inline int
__maskrune(__darwin_ct_rune_t _c, unsigned long _f)
{
- return _CurrentRuneLocale->__runetype[_c && 0xff] & _f;
+ return _DefaultRuneLocale.__runetype[_c & 0xff] & _f;
}
//Begin-Libc
#elif defined(__LIBC__)
__isctype(__darwin_ct_rune_t _c, unsigned long _f)
{
#ifdef USE_ASCII
- return !!(_DefaultRuneLocale.__runetype[_c & 0xff] & _f);
+ return !!(__maskrune(_c, _f));
#else /* USE_ASCII */
return (_c < 0 || _c >= _CACHED_RUNES) ? 0 :
!!(_DefaultRuneLocale.__runetype[_c] & _f);
__DARWIN_CTYPE_static_inline __darwin_ct_rune_t
__toupper(__darwin_ct_rune_t _c)
{
- return _CurrentRuneLocale->__mapupper[_c & 0xff];
+ return _DefaultRuneLocale.__mapupper[_c & 0xff];
}
__DARWIN_CTYPE_static_inline __darwin_ct_rune_t
__tolower(__darwin_ct_rune_t _c)
{
- return _CurrentRuneLocale->__maplower[_c & 0xff];
+ return _DefaultRuneLocale.__maplower[_c & 0xff];
}
//Begin-Libc
#elif defined(__LIBC__)
__current_locale()->__lc_ctype->_CurrentRuneLocale.__maplower[_c];
}
//End-Libc
-#else /* USE_ASCII */
+#else /* !USE_ASCII */
__BEGIN_DECLS
__darwin_ct_rune_t __toupper(__darwin_ct_rune_t);
__darwin_ct_rune_t __tolower(__darwin_ct_rune_t);
/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000, 2002-2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#define _DIRENT_H_
/*
- * The kernel defines the format of directory entries returned by
- * the getdirentries(2) system call.
+ * The kernel defines the format of directory entries
*/
#include <_types.h>
#include <sys/dirent.h>
+#include <Availability.h>
struct _telldir; /* forward reference */
typedef struct {
int __dd_fd; /* file descriptor associated with directory */
long __dd_loc; /* offset in current buffer */
- long __dd_size; /* amount of data returned by getdirentries */
+ long __dd_size; /* amount of data returned */
char *__dd_buf; /* data buffer */
int __dd_len; /* size of data buffer */
- long __dd_seek; /* magic cookie returned by getdirentries */
+ long __dd_seek; /* magic cookie returned */
long __dd_rewind; /* magic cookie for rewinding */
int __dd_flags; /* flags for readdir */
__darwin_pthread_mutex_t __dd_lock; /* for thread locking */
#endif /* !LIBC_ALIAS_CLOSEDIR */
//End-Libc
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-int getdirentries(int, char *, int, long *);
+int getdirentries(int, char *, int, long *)
+//Begin-Libc
+#ifndef __LIBC__
+//End-Libc
+#if __DARWIN_64_BIT_INO_T
+/*
+ * getdirentries() doesn't work when 64-bit inodes is in effect, so we
+ * generate a link error.
+ */
+ __asm("_getdirentries_is_not_available_when_64_bit_inodes_are_in_effect")
+#else /* !__DARWIN_64_BIT_INO_T */
+ __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0,__MAC_10_6,__IPHONE_2_0,__IPHONE_2_0)
+#endif /* __DARWIN_64_BIT_INO_T */
+//Begin-Libc
+#endif /* !__LIBC__ */
+//End-Libc
+;
#endif /* not POSIX */
//Begin-Libc
#ifndef LIBC_ALIAS_OPENDIR
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
int scandir(const char *, struct dirent ***,
int (*)(struct dirent *), int (*)(const void *, const void *)) __DARWIN_INODE64(scandir);
+#ifdef __BLOCKS__
+int scandir_b(const char *, struct dirent ***,
+ int (^)(struct dirent *), int (^)(const void *, const void *)) __DARWIN_INODE64(scandir_b);
+#endif /* __BLOCKS__ */
#endif /* not POSIX */
//Begin-Libc
#ifndef LIBC_ALIAS_SEEKDIR
/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000, 2003, 2004, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
void vwarnx(const char *, __darwin_va_list) __DARWIN_LDBL_COMPAT(vwarnx);
void err_set_file(void *);
void err_set_exit(void (*)(int));
+#ifdef __BLOCKS__
+void err_set_exit_b(void (^)(int));
+#endif /* __BLOCKS__ */
__END_DECLS
/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000, 2003-2006, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
int fts_rfd; /* fd for root */
int fts_pathlen; /* sizeof(path) */
int fts_nitems; /* elements in the sort array */
- int (*fts_compar)(); /* compare function */
+#ifdef __BLOCKS__
+ union {
+#endif /* __BLOCKS__ */
+ int (*fts_compar)(); /* compare function */
+#ifdef __BLOCKS__
+ int (^fts_compar_b)(); /* compare block */
+ };
+#endif /* __BLOCKS__ */
#define FTS_COMFOLLOW 0x001 /* follow command line symlinks */
#define FTS_LOGICAL 0x002 /* logical walk */
#define FTS_NAMEONLY 0x100 /* (private) child names only */
#define FTS_STOP 0x200 /* (private) unrecoverable error */
+#ifdef __BLOCKS__
+#define FTS_BLOCK_COMPAR 0x80000000 /* fts_compar is a block */
+#endif /* __BLOCKS__ */
int fts_options; /* fts_open options, global flags */
} FTS;
int (*)(const FTSENT **, const FTSENT **)) LIBC_INODE64(fts_open);
#endif /* !LIBC_ALIAS_FTS_OPEN */
//End-Libc
+#ifdef __BLOCKS__
+//Begin-Libc
+#ifndef LIBC_ALIAS_FTS_OPEN_B
+//End-Libc
+FTS *fts_open_b(char * const *, int,
+ int (^)(const FTSENT **, const FTSENT **)) __DARWIN_INODE64(fts_open_b);
+//Begin-Libc
+#else /* LIBC_ALIAS_FTS_OPEN */
+FTS *fts_open_b(char * const *, int,
+ int (^)(const FTSENT **, const FTSENT **)) LIBC_INODE64(fts_open_b);
+#endif /* !LIBC_ALIAS_FTS_OPEN */
+//End-Libc
+#endif /* __BLOCKS__ */
//Begin-Libc
#ifndef LIBC_ALIAS_FTS_READ
//End-Libc
int gl_flags; /* Copy of flags parameter to glob. */
char **gl_pathv; /* List of paths matching pattern. */
/* Copy of errfunc parameter to glob. */
- int (*gl_errfunc)(const char *, int);
+#ifdef __BLOCKS__
+ union {
+#endif /* __BLOCKS__ */
+ int (*gl_errfunc)(const char *, int);
+#ifdef __BLOCKS__
+ int (^gl_errblk)(const char *, int);
+ };
+#endif /* __BLOCKS__ */
/*
* Alternate filesystem access methods for glob; replacement
#define GLOB_QUOTE 0x0400 /* Quote special chars with \. */
#define GLOB_TILDE 0x0800 /* Expand tilde names from the passwd file. */
#define GLOB_LIMIT 0x1000 /* limit number of returned paths */
+#ifdef __BLOCKS__
+#define _GLOB_ERR_BLOCK 0x80000000 /* (internal) error callback is a block */
+#endif /* __BLOCKS__ */
/* source compatibility, these are the old names */
#define GLOB_MAXPATH GLOB_LIMIT
glob_t * __restrict) LIBC_INODE64(glob);
#endif /* !LIBC_ALIAS_GLOB */
//End-Libc
+#ifdef __BLOCKS__
+//Begin-Libc
+#ifndef LIBC_ALIAS_GLOB_B
+//End-Libc
+int glob_b(const char * __restrict, int, int (^)(const char *, int),
+ glob_t * __restrict) __DARWIN_INODE64(glob_b);
+//Begin-Libc
+#else /* LIBC_ALIAS_GLOB_B */
+int glob_b(const char * __restrict, int, int (^)(const char *, int),
+ glob_t * __restrict) LIBC_INODE64(glob_b);
+#endif /* !LIBC_ALIAS_GLOB_B */
+//End-Libc
+#endif /* __BLOCKS__ */
void globfree(glob_t *);
__END_DECLS
int32_t OSAtomicAdd32( int32_t __theAmount, volatile int32_t *__theValue );
int32_t OSAtomicAdd32Barrier( int32_t __theAmount, volatile int32_t *__theValue );
-inline static
+__inline static
int32_t OSAtomicIncrement32( volatile int32_t *__theValue )
{ return OSAtomicAdd32( 1, __theValue); }
-inline static
+__inline static
int32_t OSAtomicIncrement32Barrier( volatile int32_t *__theValue )
{ return OSAtomicAdd32Barrier( 1, __theValue); }
-inline static
+__inline static
int32_t OSAtomicDecrement32( volatile int32_t *__theValue )
{ return OSAtomicAdd32( -1, __theValue); }
-inline static
+__inline static
int32_t OSAtomicDecrement32Barrier( volatile int32_t *__theValue )
{ return OSAtomicAdd32Barrier( -1, __theValue); }
-#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__)
+#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)
int64_t OSAtomicAdd64( int64_t __theAmount, volatile int64_t *__theValue );
int64_t OSAtomicAdd64Barrier( int64_t __theAmount, volatile int64_t *__theValue );
-inline static
+__inline static
int64_t OSAtomicIncrement64( volatile int64_t *__theValue )
{ return OSAtomicAdd64( 1, __theValue); }
-inline static
+__inline static
int64_t OSAtomicIncrement64Barrier( volatile int64_t *__theValue )
{ return OSAtomicAdd64Barrier( 1, __theValue); }
-inline static
+__inline static
int64_t OSAtomicDecrement64( volatile int64_t *__theValue )
{ return OSAtomicAdd64( -1, __theValue); }
-inline static
+__inline static
int64_t OSAtomicDecrement64Barrier( volatile int64_t *__theValue )
{ return OSAtomicAdd64Barrier( -1, __theValue); }
-#endif /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) */
+#endif /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__) */
/* Boolean functions (and, or, xor.) These come in four versions for each operation:
bool OSAtomicCompareAndSwapLong( long __oldValue, long __newValue, volatile long *__theValue );
bool OSAtomicCompareAndSwapLongBarrier( long __oldValue, long __newValue, volatile long *__theValue );
-#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__)
+#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)
bool OSAtomicCompareAndSwap64( int64_t __oldValue, int64_t __newValue, volatile int64_t *__theValue );
bool OSAtomicCompareAndSwap64Barrier( int64_t __oldValue, int64_t __newValue, volatile int64_t *__theValue );
-#endif /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) */
+#endif /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__) */
/* Test and set. They return the original value of the bit, and operate on bit (0x80>>(n&7))
+++ /dev/null
-MACHINE_INSTHDRS += limits.h
-MACHINE_INSTHDRS := ${MACHINE_INSTHDRS:S/^/${.CURDIR}\/include\/machine\//}
+++ /dev/null
-/* This is the `system' limits.h, independent of any particular
- compiler. GCC provides its own limits.h which can be found in
- /usr/lib/gcc, although it is not very informative.
- This file is public domain. */
-#if defined (__ppc__) || defined (__ppc64__)
-#include <ppc/limits.h>
-#elif defined (__i386__) || defined(__x86_64__)
-#include <i386/limits.h>
-#elif defined (__arm__)
-#include <arm/limits.h>
-#else
-#error architecture not supported
-#endif
struct malloc_introspection_t *introspect;
unsigned version;
+
+ /* aligned memory allocation. The callback may be NULL. */
+ void *(*memalign)(struct _malloc_zone_t *zone, size_t alignment, size_t size);
+
+ /* free a pointer known to be in zone and known to have the given size. The callback may be NULL. */
+ void (*free_definite_size)(struct _malloc_zone_t *zone, void *ptr, size_t size);
} malloc_zone_t;
/********* Creation and destruction ************/
/* The initial zone */
extern malloc_zone_t *malloc_create_zone(vm_size_t start_size, unsigned flags);
- /* Create a new zone */
+ /* Creates a new zone with default behavior and registers it */
extern void malloc_destroy_zone(malloc_zone_t *zone);
/* Destroys zone and everything it allocated */
extern size_t malloc_good_size(size_t size);
/* Returns number of bytes greater than or equal to size that can be allocated without padding */
+extern void *malloc_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size);
+ /*
+ * Allocates a new pointer of size size whose address is an exact multiple of alignment.
+ * alignment must be a power of two and at least as large as sizeof(void *).
+ * zone must be non-NULL.
+ */
+
/********* Batch methods ************/
extern unsigned malloc_zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results, unsigned num_requested);
extern void malloc_zone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num);
/* frees all the pointers in to_be_freed; note that to_be_freed may be overwritten during the process; This function will always free even if the zone has no batch callback */
+/********* Functions for libcache ************/
+
+extern malloc_zone_t *malloc_default_purgeable_zone(void);
+ /* Returns a pointer to the default purgeable_zone. */
+
+extern void malloc_make_purgeable(void *ptr);
+ /* Make an allocation from the purgeable zone purgeable if possible. */
+
+extern int malloc_make_nonpurgeable(void *ptr);
+ /* Makes an allocation from the purgeable zone nonpurgeable.
+ * Returns zero if the contents were not purged since the last
+ * call to malloc_make_purgeable, else returns non-zero. */
+
/********* Functions for zone implementors ************/
extern void malloc_zone_register(malloc_zone_t *zone);
- /* Registers a freshly created zone;
- Should typically be called after a zone has been created */
+ /* Registers a custom malloc zone; Should typically be called after a
+ * malloc_zone_t has been filled in with custom methods by a client. See
+ * malloc_create_zone for creating additional malloc zones with the
+ * default allocation and free behavior. */
extern void malloc_zone_unregister(malloc_zone_t *zone);
/* De-registers a zone
#define MALLOC_PTR_IN_USE_RANGE_TYPE 1 /* for allocated pointers */
#define MALLOC_PTR_REGION_RANGE_TYPE 2 /* for region containing pointers */
#define MALLOC_ADMIN_REGION_RANGE_TYPE 4 /* for region used internally */
+#define MALLOC_ZONE_SPECIFIC_FLAGS 0xff00 /* bits reserved for zone-specific purposes */
typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
/* given a task and context, "records" the specified addresses */
void (*force_lock)(malloc_zone_t *zone); /* Forces locking zone */
void (*force_unlock)(malloc_zone_t *zone); /* Forces unlocking zone */
void (*statistics)(malloc_zone_t *zone, malloc_statistics_t *stats); /* Fills statistics */
+ boolean_t (*zone_locked)(malloc_zone_t *zone); /* Are any zone locks held */
} malloc_introspection_t;
extern void malloc_printf(const char *format, ...);
long c_volume; /* dump volume number */
#endif /* __LP64__ */
daddr_t c_tapea; /* logical block of this record */
- unsigned int c_inumber; /* number of inode */
+ unsigned int c_inumber; /* number of inode; truncation can occur for 64-bit ino_t */
#ifdef __LP64__
int c_magic; /* magic number (see above) */
int c_checksum; /* record checksum */
/*
- * Copyright (c) 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#define _SECURE__COMMON_H_
#undef _USE_FORTIFY_LEVEL
-#ifdef _FORTIFY_SOURCE && _FORTIFY_SOURCE > 0
-#if _FORTIFY_SOURCE > 1
-#define _USE_FORTIFY_LEVEL 2
+#if defined(_FORTIFY_SOURCE) && _FORTIFY_SOURCE > 0
+# if _FORTIFY_SOURCE > 1
+# define _USE_FORTIFY_LEVEL 2
+# else
+# define _USE_FORTIFY_LEVEL 1
+# endif
#else
-#define _USE_FORTIFY_LEVEL 1
-#endif
-#else
-#define _USE_FORTIFY_LEVEL 0
+# define _USE_FORTIFY_LEVEL 0
#endif
#define __darwin_obsz0(object) __builtin_object_size (object, 0)
__builtin___snprintf_chk (str, len, 0, __darwin_obsz(str), __VA_ARGS__)
extern int __vsprintf_chk (char * __restrict, int, size_t,
- const char * __restrict, va_list arg)
+ const char * __restrict, va_list)
__DARWIN_LDBL_COMPAT (__vsprintf_chk);
#define vsprintf(str, format, ap) \
__builtin___vsprintf_chk (str, 0, __darwin_obsz(str), format, ap)
extern int __vsnprintf_chk (char * __restrict, size_t, int, size_t,
- const char * __restrict, va_list arg)
+ const char * __restrict, va_list)
__DARWIN_LDBL_COMPAT (__vsnprintf_chk);
#define vsnprintf(str, len, format, ap) \
#ifndef _SECURE__STRING_H_
#define _SECURE__STRING_H_
+#include <sys/cdefs.h>
#include <secure/_common.h>
#if _USE_FORTIFY_LEVEL > 0
#undef memmove
#undef memset
#undef strcpy
+#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
#undef stpcpy
+#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */
#undef strncpy
#undef strcat
#undef strncat
? __builtin___memcpy_chk (dest, src, len, __darwin_obsz0 (dest)) \
: __inline_memcpy_chk (dest, src, len))
-static inline void *
+static __inline void *
__inline_memcpy_chk (void *__dest, const void *__src, size_t __len)
{
return __builtin___memcpy_chk (__dest, __src, __len, __darwin_obsz0(__dest));
? __builtin___memmove_chk (dest, src, len, __darwin_obsz0 (dest)) \
: __inline_memmove_chk (dest, src, len))
-static inline void *
+static __inline void *
__inline_memmove_chk (void *__dest, const void *__src, size_t __len)
{
return __builtin___memmove_chk (__dest, __src, __len, __darwin_obsz0(__dest));
? __builtin___memset_chk (dest, val, len, __darwin_obsz0 (dest)) \
: __inline_memset_chk (dest, val, len))
-static inline void *
+static __inline void *
__inline_memset_chk (void *__dest, int __val, size_t __len)
{
return __builtin___memset_chk (__dest, __val, __len, __darwin_obsz0(__dest));
? __builtin___strcpy_chk (dest, src, __darwin_obsz (dest)) \
: __inline_strcpy_chk (dest, src))
-static inline char *
+static __inline char *
__inline_strcpy_chk (char *__restrict __dest, const char *__restrict __src)
{
return __builtin___strcpy_chk (__dest, __src, __darwin_obsz(__dest));
}
+#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
#define stpcpy(dest, src) \
((__darwin_obsz0 (dest) != (size_t) -1) \
? __builtin___stpcpy_chk (dest, src, __darwin_obsz (dest)) \
: __inline_stpcpy_chk (dest, src))
-static inline char *
+static __inline char *
__inline_stpcpy_chk (char *__dest, const char *__src)
{
return __builtin___stpcpy_chk (__dest, __src, __darwin_obsz(__dest));
}
+#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */
#define strncpy(dest, src, len) \
((__darwin_obsz0 (dest) != (size_t) -1) \
? __builtin___strncpy_chk (dest, src, len, __darwin_obsz (dest)) \
: __inline_strncpy_chk (dest, src, len))
-static inline char *
+static __inline char *
__inline_strncpy_chk (char *__restrict __dest, const char *__restrict __src,
size_t __len)
{
? __builtin___strcat_chk (dest, src, __darwin_obsz (dest)) \
: __inline_strcat_chk (dest, src))
-static inline char *
+static __inline char *
__inline_strcat_chk (char *__restrict __dest, const char *__restrict __src)
{
return __builtin___strcat_chk (__dest, __src, __darwin_obsz(__dest));
#define strncat(dest, src, len) \
((__darwin_obsz0 (dest) != (size_t) -1) \
- ? __builtin___strcat_chk (dest, src, __darwin_obsz (dest)) \
+ ? __builtin___strncat_chk (dest, src, len, __darwin_obsz (dest)) \
: __inline_strncat_chk (dest, src, len))
-static inline char *
+static __inline char *
__inline_strncat_chk (char *__restrict __dest, const char *__restrict __src,
size_t __len)
{
/*
- * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2006, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
int posix_spawnattr_getbinpref_np(const posix_spawnattr_t * __restrict,
size_t, cpu_type_t *__restrict, size_t *__restrict);
+int posix_spawnattr_setauditsessionport_np(posix_spawnattr_t *__restrict,
+ mach_port_t);
int posix_spawnattr_setbinpref_np(posix_spawnattr_t * __restrict,
size_t, cpu_type_t *__restrict, size_t *__restrict);
-int posix_spawnattr_setspecialport_np(posix_spawnattr_t *__restrict,
- mach_port_t, int);
int posix_spawnattr_setexceptionports_np(posix_spawnattr_t *__restrict,
exception_mask_t, mach_port_t,
exception_behavior_t, thread_state_flavor_t);
-
+int posix_spawnattr_setspecialport_np(posix_spawnattr_t *__restrict,
+ mach_port_t, int);
__END_DECLS
#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */
--- /dev/null
+/*
+ * Copyright (c) 2006, 2008 Apple, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _SPAWN_PRIVATE_H_
+#define _SPAWN_PRIVATE_H_
+
+#include <spawn.h>
+
+int posix_spawnattr_getpcontrol_np(const posix_spawnattr_t * __restrict, int * __restrict);
+int posix_spawnattr_setpcontrol_np(posix_spawnattr_t *, const int);
+
+#endif /* !defined _SPAWN_PRIVATE_H_ */
#ifndef __cplusplus
-#define false 0
-#define true 1
-
#define bool _Bool
#if __STDC_VERSION__ < 199901L && __GNUC__ < 3
typedef int _Bool;
#endif
+#define false (bool)0
+#define true (bool)1
+
#endif /* !__cplusplus */
#endif /* !_STDBOOL_H_ */
/*
- * Copyright (c) 2000, 2005, 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000, 2005, 2007, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
int fgetc(FILE *);
int fgetpos(FILE * __restrict, fpos_t *);
char *fgets(char * __restrict, int, FILE *);
-FILE *fopen(const char * __restrict, const char * __restrict);
+#if defined(__DARWIN_10_6_AND_LATER) && (defined(_DARWIN_UNLIMITED_STREAMS) || defined(_DARWIN_C_SOURCE))
+FILE *fopen(const char * __restrict, const char * __restrict) __DARWIN_EXTSN(fopen);
+#else /* < 10.6 || !_DARWIN_UNLIMITED_STREAMS && !_DARWIN_C_SOURCE */
+//Begin-Libc
+#ifndef LIBC_ALIAS_FOPEN
+//End-Libc
+FILE *fopen(const char * __restrict, const char * __restrict) __DARWIN_10_6_AND_LATER_ALIAS(__DARWIN_ALIAS(fopen));
+//Begin-Libc
+#else /* LIBC_ALIAS_FOPEN */
+FILE *fopen(const char * __restrict, const char * __restrict) LIBC_ALIAS(fopen);
+#endif /* !LIBC_ALIAS_FOPEN */
+//End-Libc
+#endif /* >= 10.6 && (_DARWIN_UNLIMITED_STREAMS || _DARWIN_C_SOURCE) */
int fprintf(FILE * __restrict, const char * __restrict, ...) __DARWIN_LDBL_COMPAT(fprintf);
int fputc(int, FILE *);
//Begin-Libc
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
char *ctermid_r(char *);
#endif /* not POSIX */
-FILE *fdopen(int, const char *);
+#if defined(__DARWIN_10_6_AND_LATER) && (defined(_DARWIN_UNLIMITED_STREAMS) || defined(_DARWIN_C_SOURCE))
+FILE *fdopen(int, const char *) __DARWIN_EXTSN(fdopen);
+#else /* < 10.6 || !_DARWIN_UNLIMITED_STREAMS && !_DARWIN_C_SOURCE */
+//Begin-Libc
+#ifndef LIBC_ALIAS_FDOPEN
+//End-Libc
+FILE *fdopen(int, const char *) __DARWIN_10_6_AND_LATER_ALIAS(__DARWIN_ALIAS(fdopen));
+//Begin-Libc
+#else /* LIBC_ALIAS_FDOPEN */
+FILE *fdopen(int, const char *) LIBC_ALIAS(fdopen);
+#endif /* !LIBC_ALIAS_FDOPEN */
+//End-Libc
+#endif /* >= 10.6 && (_DARWIN_UNLIMITED_STREAMS || _DARWIN_C_SOURCE) */
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
char *fgetln(FILE *, size_t *);
#endif /* not POSIX */
int getw(FILE *);
#endif /* not POSIX */
int pclose(FILE *);
-FILE *popen(const char *, const char *);
+#if defined(__DARWIN_10_6_AND_LATER) && (defined(_DARWIN_UNLIMITED_STREAMS) || defined(_DARWIN_C_SOURCE))
+FILE *popen(const char *, const char *) __DARWIN_EXTSN(popen);
+#else /* < 10.6 || !_DARWIN_UNLIMITED_STREAMS && !_DARWIN_C_SOURCE */
+//Begin-Libc
+#ifndef LIBC_ALIAS_POPEN
+//End-Libc
+FILE *popen(const char *, const char *) __DARWIN_10_6_AND_LATER_ALIAS(__DARWIN_ALIAS(popen));
+//Begin-Libc
+#else /* LIBC_ALIAS_POPEN */
+FILE *popen(const char *, const char *) LIBC_ALIAS(popen);
+#endif /* !LIBC_ALIAS_POPEN */
+//End-Libc
+#endif /* >= 10.6 && (_DARWIN_UNLIMITED_STREAMS || _DARWIN_C_SOURCE) */
int putc_unlocked(int, FILE *);
int putchar_unlocked(int);
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
&& defined(_USE_EXTENDED_LOCALES_) && !defined(MB_CUR_MAX_L)
#define MB_CUR_MAX_L(x) (___mb_cur_max_l(x))
#endif
+//Begin-Libc
+/* f must be a literal string */
+#define LIBC_ABORT(f,...) abort_report_np("%s:%s:%u: " f, __FILE__, __func__, __LINE__, ## __VA_ARGS__)
+//End-Libc
__BEGIN_DECLS
void abort(void) __dead2;
+//Begin-Libc
+__private_extern__
+void abort_report_np(const char *, ...) __dead2 __printflike(1, 2);
+//End-Libc
int abs(int) __pure2;
int atexit(void (*)(void));
double atof(const char *);
int mblen(const char *, size_t);
size_t mbstowcs(wchar_t * __restrict , const char * __restrict, size_t);
int mbtowc(wchar_t * __restrict, const char * __restrict, size_t);
+int posix_memalign(void **, size_t, size_t);
void qsort(void *, size_t, size_t,
int (*)(const void *, const void *));
int rand(void);
arc4random(void);
void arc4random_addrandom(unsigned char *dat, int datlen);
void arc4random_stir(void);
+#ifdef __BLOCKS__
+int atexit_b(void (^)(void));
+void *bsearch_b(const void *, const void *, size_t,
+ size_t, int (^)(const void *, const void *));
+#endif /* __BLOCKS__ */
/* getcap(3) functions */
char *cgetcap(char *, const char *, int);
int heapsort(void *, size_t, size_t,
int (*)(const void *, const void *));
+#ifdef __BLOCKS__
+int heapsort_b(void *, size_t, size_t,
+ int (^)(const void *, const void *));
+#endif /* __BLOCKS__ */
int mergesort(void *, size_t, size_t,
int (*)(const void *, const void *));
+#ifdef __BLOCKS__
+int mergesort_b(void *, size_t, size_t,
+ int (^)(const void *, const void *));
+#endif /* __BLOCKS__ */
+void psort(void *, size_t, size_t,
+ int (*)(const void *, const void *));
+#ifdef __BLOCKS__
+void psort_b(void *, size_t, size_t,
+ int (^)(const void *, const void *));
+#endif /* __BLOCKS__ */
+void psort_r(void *, size_t, size_t, void *,
+ int (*)(void *, const void *, const void *));
+#ifdef __BLOCKS__
+void qsort_b(void *, size_t, size_t,
+ int (^)(const void *, const void *));
+#endif /* __BLOCKS__ */
void qsort_r(void *, size_t, size_t, void *,
int (*)(void *, const void *, const void *));
int radixsort(const unsigned char **, int, const unsigned char *,
#define LIBC_EXTSN(sym) __asm("_" __STRING(sym) LIBC_SUF_EXTSN)
#define LIBC_EXTSN_C(sym) __asm("_" __STRING(sym) LIBC_SUF_EXTSN LIBC_SUF_NON_CANCELABLE)
+extern int __pthread_tsd_first;
+extern int pthread_key_init_np(int, void (*)(void *));
+
+#define __LIBC_PTHREAD_KEY(x) (__pthread_tsd_first + (x))
+
+/*
+ * Libc pthread key assignments
+ */
+#define __LIBC_PTHREAD_KEY_XLOCALE __LIBC_PTHREAD_KEY(0)
+#define __LIBC_PTHREAD_KEY_TTYNAME __LIBC_PTHREAD_KEY(1)
+#define __LIBC_PTHREAD_KEY_LOCALTIME __LIBC_PTHREAD_KEY(2)
+#define __LIBC_PTHREAD_KEY_GMTIME __LIBC_PTHREAD_KEY(3)
+#define __LIBC_PTHREAD_KEY_GDTOA_BIGINT __LIBC_PTHREAD_KEY(4)
+#define __LIBC_PTHREAD_KEY_PARSEFLOAT __LIBC_PTHREAD_KEY(5)
+
#endif /* _LIBC_SYS_CDEFS_H_ */
/*
- * Copyright (c) 2002 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2002, 2008, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* @APPLE_LICENSE_HEADER_END@
*/
+/*
+ * These routines are DEPRECATED and should not be used.
+ */
#ifndef _UCONTEXT_H_
#define _UCONTEXT_H_
#include <sys/cdefs.h>
+
+//Begin-Libc
+#ifdef __LIBC__
+#include <sys/ucontext.h>
+__BEGIN_DECLS
+int getcontext(ucontext_t *);
+void makecontext(ucontext_t *, void (*)(), int, ...);
+int setcontext(const ucontext_t *);
+int swapcontext(ucontext_t * __restrict, const ucontext_t * __restrict);
+__END_DECLS
+#else /* !__LIBC__ */
+//End-Libc
+#ifdef _XOPEN_SOURCE
#include <sys/ucontext.h>
__BEGIN_DECLS
int getcontext(ucontext_t *);
-void makecontext(ucontext_t *, void (*)(void), int, ...);
+void makecontext(ucontext_t *, void (*)(), int, ...);
int setcontext(const ucontext_t *);
int swapcontext(ucontext_t * __restrict, const ucontext_t * __restrict);
__END_DECLS
+#else /* !_XOPEN_SOURCE */
+#error ucontext routines are deprecated, and require _XOPEN_SOURCE to be defined
+#endif /* _XOPEN_SOURCE */
+//Begin-Libc
+#endif /* __LIBC__ */
+//End-Libc
#endif /* _UCONTEXT_H_ */
/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000, 2002-2006, 2008, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
gid_t getegid(void);
uid_t geteuid(void);
gid_t getgid(void);
+#if defined(_DARWIN_UNLIMITED_GETGROUPS) || defined(_DARWIN_C_SOURCE)
+int getgroups(int, gid_t []) __DARWIN_EXTSN(getgroups);
+#else /* !_DARWIN_UNLIMITED_GETGROUPS && !_DARWIN_C_SOURCE */
int getgroups(int, gid_t []);
+#endif /* _DARWIN_UNLIMITED_GETGROUPS || _DARWIN_C_SOURCE */
long gethostid(void);
int gethostname(char *, size_t);
char *getlogin(void);
int getdtablesize(void);
int getdomainname(char *, int);
int getgrouplist(const char *, int, int *, int *);
+int gethostuuid(uuid_t, const struct timespec *);
mode_t getmode(const void *, mode_t);
int getpagesize(void) __pure2;
char *getpass(const char *);
int setkey(const char *);
#endif /* __DARWIN_UNIX03 */
int setlogin(const char *);
-void *setmode(const char *);
+//Begin-Libc
+#ifndef LIBC_ALIAS_SETMODE
+//End-Libc
+void *setmode(const char *) __DARWIN_ALIAS(setmode);
+//Begin-Libc
+#else /* LIBC_ALIAS_SETMODE */
+void *setmode(const char *) LIBC_ALIAS(setmode);
+#endif /* !LIBC_ALIAS_SETMODE */
+//End-Libc
int setrgid(gid_t);
int setruid(uid_t);
int setsgroups_np(int, const uuid_t);
/* HFS & HFS Plus semantics system calls go here */
#ifdef __LP64__
+int fgetattrlist(int,void*,void*,size_t,unsigned int);
+int fsetattrlist(int,void*,void*,size_t,unsigned int);
//Begin-Libc
#ifndef LIBC_ALIAS_GETATTRLIST
//End-Libc
//End-Libc
int exchangedata(const char*,const char*,unsigned int);
int getdirentriesattr(int,void*,void*,size_t,unsigned int*,unsigned int*,unsigned int*,unsigned int);
-int searchfs(const char*,void*,void*,unsigned int,unsigned int,void*);
-int fsctl(const char *,unsigned int,void*,unsigned int);
#else /* __LP64__ */
+int fgetattrlist(int,void*,void*,size_t,unsigned long);
+int fsetattrlist(int,void*,void*,size_t,unsigned long);
//Begin-Libc
#ifndef LIBC_ALIAS_GETATTRLIST
//End-Libc
//End-Libc
int exchangedata(const char*,const char*,unsigned long);
int getdirentriesattr(int,void*,void*,size_t,unsigned long*,unsigned long*,unsigned long*,unsigned long);
-int searchfs(const char*,void*,void*,unsigned long,unsigned long,void*);
-int fsctl(const char *,unsigned long,void*,unsigned long);
#endif /* __LP64__ */
+struct fssearchblock;
+struct searchstate;
+
+int searchfs(const char *, struct fssearchblock *, unsigned long *, unsigned int, unsigned int, struct searchstate *);
+int fsctl(const char *,unsigned long,void*,unsigned int);
+int ffsctl(int,unsigned long,void*,unsigned int);
+
extern int optreset;
#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */
*/
#include <_types.h>
-#include <Availability.h>
#ifndef _TIME_T
#define _TIME_T
time_t ll_time;
char ll_line[UT_LINESIZE];
char ll_host[UT_HOSTSIZE];
-} __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0,__MAC_10_5,__IPHONE_NA,__IPHONE_NA);
+} __deprecated;
struct utmp {
char ut_line[UT_LINESIZE];
char ut_name[UT_NAMESIZE];
char ut_host[UT_HOSTSIZE];
long ut_time;
-} __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0,__MAC_10_5,__IPHONE_NA,__IPHONE_NA);
+} __deprecated;
#endif /* !_UTMP_H_ */
#define LC_NUMERIC_MASK (1 << 4)
#define LC_TIME_MASK (1 << 5)
-#define _LC_LAST_MASK (1 << (6 - 1))
+#define _LC_NUM_MASK 6
+#define _LC_LAST_MASK (1 << (_LC_NUM_MASK - 1))
#define LC_GLOBAL_LOCALE ((locale_t)-1)
/*
- * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2005, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#define _XLOCALE__MONETARY_H_
__BEGIN_DECLS
-ssize_t strfmon_l(char *, size_t, locale_t, const char *, ...);
+ssize_t strfmon_l(char *, size_t, locale_t, const char *, ...)
+ __strfmonlike(4, 5);
__END_DECLS
#endif /* _XLOCALE__MONETARY_H_ */
/*
- * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2005, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
__BEGIN_DECLS
int asprintf_l(char **, locale_t, const char *, ...)
- __DARWIN_LDBL_COMPAT2(asprintf_l);
+ __DARWIN_LDBL_COMPAT2(asprintf_l) __printflike(3, 4);
int fprintf_l(FILE * __restrict, locale_t, const char * __restrict, ...)
- __DARWIN_LDBL_COMPAT2(fprintf_l);
+ __DARWIN_LDBL_COMPAT2(fprintf_l) __printflike(3, 4);
int fscanf_l(FILE * __restrict, locale_t, const char * __restrict, ...)
- __DARWIN_LDBL_COMPAT2(fscanf_l);
+ __DARWIN_LDBL_COMPAT2(fscanf_l) __scanflike(3, 4);
int printf_l(locale_t, const char * __restrict, ...)
- __DARWIN_LDBL_COMPAT2(printf_l);
+ __DARWIN_LDBL_COMPAT2(printf_l) __printflike(2, 3);
int scanf_l(locale_t, const char * __restrict, ...)
- __DARWIN_LDBL_COMPAT2(scanf_l);
+ __DARWIN_LDBL_COMPAT2(scanf_l) __scanflike(2, 3);
int snprintf_l(char * __restrict, size_t, locale_t,
- const char * __restrict, ...) __DARWIN_LDBL_COMPAT2(snprintf_l);
+ const char * __restrict, ...)
+ __DARWIN_LDBL_COMPAT2(snprintf_l) __printflike(4, 5);
int sprintf_l(char * __restrict, locale_t, const char * __restrict, ...)
- __DARWIN_LDBL_COMPAT2(sprintf_l);
+ __DARWIN_LDBL_COMPAT2(sprintf_l) __printflike(3, 4);
int sscanf_l(const char * __restrict, locale_t, const char * __restrict,
- ...) __DARWIN_LDBL_COMPAT2(sscanf_l);
+ ...) __DARWIN_LDBL_COMPAT2(sscanf_l) __scanflike(3, 4);
int vasprintf_l(char **, locale_t, const char *, va_list)
- __DARWIN_LDBL_COMPAT2(vasprintf_l);
+ __DARWIN_LDBL_COMPAT2(vasprintf_l) __printflike(3, 0);
int vfprintf_l(FILE * __restrict, locale_t, const char * __restrict,
- va_list) __DARWIN_LDBL_COMPAT2(vfprintf_l);
+ va_list) __DARWIN_LDBL_COMPAT2(vfprintf_l) __printflike(3, 0);
int vfscanf_l(FILE * __restrict, locale_t, const char * __restrict,
- va_list) __DARWIN_LDBL_COMPAT2(vfscanf_l);
+ va_list) __DARWIN_LDBL_COMPAT2(vfscanf_l) __scanflike(3, 0);
int vprintf_l(locale_t, const char * __restrict, va_list)
- __DARWIN_LDBL_COMPAT2(vprintf_l);
+ __DARWIN_LDBL_COMPAT2(vprintf_l) __printflike(2, 0);
int vscanf_l(locale_t, const char * __restrict, va_list)
- __DARWIN_LDBL_COMPAT2(vscanf_l);
+ __DARWIN_LDBL_COMPAT2(vscanf_l) __scanflike(2, 0);
int vsnprintf_l(char * __restrict, size_t, locale_t,
const char * __restrict, va_list)
- __DARWIN_LDBL_COMPAT2(vsnprintf_l);
+ __DARWIN_LDBL_COMPAT2(vsnprintf_l) __printflike(4, 0);
int vsprintf_l(char * __restrict, locale_t, const char * __restrict,
- va_list) __DARWIN_LDBL_COMPAT2(vsprintf_l);
+ va_list) __DARWIN_LDBL_COMPAT2(vsprintf_l) __printflike(3, 0);
int vsscanf_l(const char * __restrict, locale_t, const char * __restrict,
- va_list) __DARWIN_LDBL_COMPAT2(vsscanf_l);
+ va_list) __DARWIN_LDBL_COMPAT2(vsscanf_l) __scanflike(3, 0);
__END_DECLS
#endif /* _XLOCALE__STDIO_H_ */
/*
- * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2005, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
//End-Libc
size_t strftime_l(char * __restrict, size_t, const char * __restrict,
const struct tm * __restrict, locale_t)
- __DARWIN_ALIAS(strftime_l);
+ __DARWIN_ALIAS(strftime_l) __strftimelike(3);
//Begin-Libc
#else /* LIBC_ALIAS_STRFTIME_L */
size_t strftime_l(char * __restrict, size_t, const char * __restrict,
const struct tm * __restrict, locale_t)
- LIBC_ALIAS(strftime_l);
+ LIBC_ALIAS(strftime_l) __strftimelike(3);
#endif /* !LIBC_ALIAS_STRFTIME_L */
//End-Libc
//Begin-Libc
#ifndef LIBC_ALIAS_STRPTIME_L
//End-Libc
char *strptime_l(const char * __restrict, const char * __restrict,
- struct tm * __restrict, locale_t) __DARWIN_ALIAS(strptime_l);
+ struct tm * __restrict, locale_t)
+ __DARWIN_ALIAS(strptime_l) __strftimelike(2);
//Begin-Libc
#else /* LIBC_ALIAS_STRPTIME_L */
char *strptime_l(const char * __restrict, const char * __restrict,
- struct tm * __restrict, locale_t) LIBC_ALIAS(strptime_l);
+ struct tm * __restrict, locale_t)
+ LIBC_ALIAS(strptime_l) __strftimelike(2);
#endif /* !LIBC_ALIAS_STRPTIME_L */
//End-Libc
__END_DECLS
---- ldpart.c.orig 2004-11-25 11:38:17.000000000 -0800
-+++ ldpart.c 2005-02-13 01:59:35.000000000 -0800
+--- ldpart.c.orig 2008-06-03 18:15:42.000000000 -0700
++++ ldpart.c 2008-06-18 13:23:20.000000000 -0700
@@ -27,6 +27,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/locale/ldpart.c,v 1.15 2004/04/25 19:56:50 ache Exp $");
#include "namespace.h"
#include <sys/types.h>
#include <sys/stat.h>
-@@ -44,9 +46,9 @@
+@@ -44,9 +46,9 @@ __FBSDID("$FreeBSD: src/lib/libc/locale/
static int split_lines(char *, const char *);
char **locale_buf,
const char *category_filename,
int locale_buf_size_max,
-@@ -60,20 +62,6 @@
+@@ -60,20 +62,6 @@ __part_load_locale(const char *name,
struct stat st;
size_t namesize, bufsize;
/*
* Slurp the locale file into the cache.
*/
-@@ -164,3 +152,9 @@
+@@ -115,9 +103,7 @@ __part_load_locale(const char *name,
+ num_lines = split_lines(p, plim);
+ if (num_lines >= locale_buf_size_max)
+ num_lines = locale_buf_size_max;
+- else if (num_lines >= locale_buf_size_min)
+- num_lines = locale_buf_size_min;
+- else {
++ else if (num_lines < locale_buf_size_min) {
+ errno = EFTYPE;
+ goto bad_lbuf;
+ }
+@@ -164,3 +150,9 @@ split_lines(char *p, const char *plim)
return (i);
}
---- localeconv.c.orig 2008-03-15 10:50:38.000000000 -0700
-+++ localeconv.c 2008-03-26 16:49:24.000000000 -0700
+--- localeconv.c.orig 2008-10-09 11:37:42.000000000 -0700
++++ localeconv.c 2008-10-10 01:37:33.000000000 -0700
@@ -38,11 +38,71 @@ static char sccsid[] = "@(#)localeconv.c
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/locale/localeconv.c,v 1.13 2003/06/26 10:46:16 phantom Exp $");
/*
* The localeconv() function constructs a struct lconv from the current
* monetary and numeric locales.
-@@ -52,25 +112,37 @@ __FBSDID("$FreeBSD: src/lib/libc/locale/
+@@ -52,25 +112,28 @@ __FBSDID("$FreeBSD: src/lib/libc/locale/
* lconv structure are computed only when the monetary or numeric
* locale has been changed.
*/
+localeconv_l(locale_t loc)
{
- static struct lconv ret;
-+ struct __xlocale_st_localeconv *lc;
++ struct lconv *lc;
+
+ NORMALIZE_LOCALE(loc);
-+ if (loc->__lc_localeconv && !loc->__mlocale_changed && !loc->__nlocale_changed)
-+ return &loc->__lc_localeconv->__ret;
-+
-+ lc = (struct __xlocale_st_localeconv *)malloc(sizeof(struct __xlocale_st_localeconv));
-+ lc->__refcount = 1;
-+ lc->__free_extra = NULL;
-+ if (loc->__lc_localeconv)
-+ lc->__ret = loc->__lc_localeconv->__ret;
-+ else {
-+ loc->__mlocale_changed = 1;
-+ loc->__nlocale_changed = 1;
-+ }
- if (__mlocale_changed) {
+ if (loc->__mlocale_changed) {
++ XL_LOCK(loc);
++ if (loc->__mlocale_changed) {
/* LC_MONETARY part */
struct lc_monetary_T * mptr;
++ struct lconv *lc = &loc->__lc_localeconv;
-#define M_ASSIGN_STR(NAME) (ret.NAME = (char*)mptr->NAME)
-#define M_ASSIGN_CHAR(NAME) (ret.NAME = mptr->NAME[0])
-+#define M_ASSIGN_STR(NAME) (lc->__ret.NAME = (char*)mptr->NAME)
-+#define M_ASSIGN_CHAR(NAME) (lc->__ret.NAME = mptr->NAME[0])
++#define M_ASSIGN_STR(NAME) (lc->NAME = (char*)mptr->NAME)
++#define M_ASSIGN_CHAR(NAME) (lc->NAME = mptr->NAME[0])
- mptr = __get_current_monetary_locale();
+ mptr = __get_current_monetary_locale(loc);
M_ASSIGN_STR(int_curr_symbol);
M_ASSIGN_STR(currency_symbol);
M_ASSIGN_STR(mon_decimal_point);
-@@ -92,21 +164,41 @@ localeconv()
+@@ -92,21 +155,45 @@ localeconv()
M_ASSIGN_CHAR(int_n_sep_by_space);
M_ASSIGN_CHAR(int_p_sign_posn);
M_ASSIGN_CHAR(int_n_sign_posn);
- __mlocale_changed = 0;
+ loc->__mlocale_changed = 0;
++ }
++ XL_UNLOCK(loc);
}
- if (__nlocale_changed) {
+ if (loc->__nlocale_changed) {
++ XL_LOCK(loc);
++ if (loc->__nlocale_changed) {
/* LC_NUMERIC part */
struct lc_numeric_T * nptr;
++ struct lconv *lc = &loc->__lc_localeconv;
-#define N_ASSIGN_STR(NAME) (ret.NAME = (char*)nptr->NAME)
-+#define N_ASSIGN_STR(NAME) (lc->__ret.NAME = (char*)nptr->NAME)
++#define N_ASSIGN_STR(NAME) (lc->NAME = (char*)nptr->NAME)
- nptr = __get_current_numeric_locale();
+ nptr = __get_current_numeric_locale(loc);
N_ASSIGN_STR(grouping);
- __nlocale_changed = 0;
+ loc->__nlocale_changed = 0;
++ }
++ XL_UNLOCK(loc);
}
- return (&ret);
-+ XL_RELEASE(loc->__lc_localeconv);
-+ loc->__lc_localeconv = lc;
-+
-+ return (&lc->__ret);
++ return &loc->__lc_localeconv;
+}
+
+/*
---- nl_langinfo.c.orig 2005-03-15 23:26:13.000000000 -0800
-+++ nl_langinfo.c 2005-03-15 23:38:20.000000000 -0800
+--- nl_langinfo.c.orig 2008-01-15 11:30:57.000000000 -0800
++++ nl_langinfo.c 2008-01-17 23:58:24.000000000 -0800
@@ -27,6 +27,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/locale/nl_langinfo.c,v 1.17 2003/06/26 10:46:16 phantom Exp $");
#include <langinfo.h>
#include <limits.h>
#include <locale.h>
-@@ -36,20 +38,22 @@
+@@ -36,62 +38,66 @@
#include "lnumeric.h"
#include "lmessages.h"
#include "lmonetary.h"
if ((cs = strchr(s, '.')) != NULL)
ret = cs + 1;
else if (strcmp(s, "C") == 0 ||
-@@ -58,40 +62,40 @@
+ strcmp(s, "POSIX") == 0)
+ ret = "US-ASCII";
++ else if (strcmp(s, "UTF-8") == 0)
++ ret = "UTF-8";
}
break;
case D_T_FMT:
break;
case ERA:
/* XXX: need to be implemented */
-@@ -114,16 +118,16 @@
+@@ -114,16 +120,16 @@
ret = "";
break;
case RADIXCHAR:
break;
/*
* YESSTR and NOSTR items marked with LEGACY are available, but not
-@@ -131,25 +135,25 @@
+@@ -131,25 +137,25 @@
* they're subject to remove in future specification editions.
*/
case YESSTR: /* LEGACY */
psn = '.';
} else
psn = pos ? '-' : '+';
-@@ -166,10 +170,19 @@
+@@ -166,10 +172,19 @@
}
break;
case D_MD_ORDER: /* FreeBSD local extension */
---- setlocale.c.orig 2004-11-25 11:38:19.000000000 -0800
-+++ setlocale.c 2005-04-27 13:37:01.000000000 -0700
-@@ -41,6 +41,8 @@
+--- setlocale.c.orig 2008-01-24 17:13:46.000000000 -0800
++++ setlocale.c 2008-02-17 13:23:02.000000000 -0800
+@@ -41,6 +41,8 @@ static char sccsid[] = "@(#)setlocale.c
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/locale/setlocale.c,v 1.50 2004/01/31 19:15:32 ache Exp $");
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
-@@ -56,7 +58,7 @@
+@@ -56,7 +58,7 @@ __FBSDID("$FreeBSD: src/lib/libc/locale/
#include "lmessages.h" /* for __messages_load_locale() */
#include "setlocale.h"
#include "ldpart.h"
/*
* Category names for getenv()
-@@ -99,15 +101,16 @@
+@@ -99,15 +101,18 @@ static char current_locale_string[_LC_LA
static char *currentlocale(void);
static char *loadlocale(int);
-static const char *__get_locale_env(int);
+__private_extern__ const char *__get_locale_env(int);
++
++#define UNLOCK_AND_RETURN(x) {XL_UNLOCK(&__global_locale); return (x);}
char *
setlocale(category, locale)
if (category < LC_ALL || category >= _LC_LAST) {
errno = EINVAL;
-@@ -193,6 +196,9 @@
+@@ -118,6 +123,7 @@ setlocale(category, locale)
+ return (category != LC_ALL ?
+ current_categories[category] : currentlocale());
+
++ XL_LOCK(&__global_locale);
+ /*
+ * Default to the current locale for everything.
+ */
+@@ -133,7 +139,7 @@ setlocale(category, locale)
+ env = __get_locale_env(i);
+ if (strlen(env) > ENCODING_LEN) {
+ errno = EINVAL;
+- return (NULL);
++ UNLOCK_AND_RETURN (NULL);
+ }
+ (void)strcpy(new_categories[i], env);
+ }
+@@ -141,21 +147,21 @@ setlocale(category, locale)
+ env = __get_locale_env(category);
+ if (strlen(env) > ENCODING_LEN) {
+ errno = EINVAL;
+- return (NULL);
++ UNLOCK_AND_RETURN (NULL);
+ }
+ (void)strcpy(new_categories[category], env);
+ }
+ } else if (category != LC_ALL) {
+ if (strlen(locale) > ENCODING_LEN) {
+ errno = EINVAL;
+- return (NULL);
++ UNLOCK_AND_RETURN (NULL);
+ }
+ (void)strcpy(new_categories[category], locale);
+ } else {
+ if ((r = strchr(locale, '/')) == NULL) {
+ if (strlen(locale) > ENCODING_LEN) {
+ errno = EINVAL;
+- return (NULL);
++ UNLOCK_AND_RETURN (NULL);
+ }
+ for (i = 1; i < _LC_LAST; ++i)
+ (void)strcpy(new_categories[i], locale);
+@@ -164,14 +170,14 @@ setlocale(category, locale)
+ ;
+ if (!r[1]) {
+ errno = EINVAL;
+- return (NULL); /* Hmm, just slashes... */
++ UNLOCK_AND_RETURN (NULL); /* Hmm, just slashes... */
+ }
+ do {
+ if (i == _LC_LAST)
+ break; /* Too many slashes... */
+ if ((len = r - locale) > ENCODING_LEN) {
+ errno = EINVAL;
+- return (NULL);
++ UNLOCK_AND_RETURN (NULL);
+ }
+ (void)strlcpy(new_categories[i], locale,
+ len + 1);
+@@ -191,8 +197,11 @@ setlocale(category, locale)
+ }
+
if (category != LC_ALL)
- return (loadlocale(category));
+- return (loadlocale(category));
++ UNLOCK_AND_RETURN (loadlocale(category));
+ save__numeric_fp_cvt = __global_locale.__numeric_fp_cvt;
+ save__lc_numeric_loc = __global_locale.__lc_numeric_loc;
for (i = 1; i < _LC_LAST; ++i) {
(void)strcpy(saved_categories[i], current_categories[i]);
if (loadlocale(i) == NULL) {
-@@ -205,10 +211,14 @@
+@@ -205,11 +214,15 @@ setlocale(category, locale)
(void)loadlocale(j);
}
}
+ __global_locale.__lc_numeric_loc = save__lc_numeric_loc;
+ XL_RELEASE(save__lc_numeric_loc);
errno = saverr;
- return (NULL);
+- return (NULL);
++ UNLOCK_AND_RETURN (NULL);
}
}
+- return (currentlocale());
+ XL_RELEASE(save__lc_numeric_loc);
- return (currentlocale());
++ UNLOCK_AND_RETURN (currentlocale());
}
-@@ -237,7 +247,7 @@
+ static char *
+@@ -237,7 +250,7 @@ loadlocale(category)
{
char *new = new_categories[category];
char *old = current_categories[category];
int saved_errno;
if ((new[0] == '.' &&
-@@ -280,15 +290,26 @@
+@@ -280,15 +293,26 @@ loadlocale(category)
if (strcmp(new, old) == 0)
return (old);
__get_locale_env(category)
int category;
{
-@@ -315,7 +336,7 @@
+@@ -315,7 +339,7 @@ __get_locale_env(category)
/*
* Detect locale storage location and store its value to _PathLocale variable
*/
---- setrunelocale.c.orig 2008-04-28 16:25:31.000000000 -0700
-+++ setrunelocale.c 2008-04-28 17:02:02.000000000 -0700
+--- setrunelocale.c.orig 2008-05-12 17:37:36.000000000 -0700
++++ setrunelocale.c 2008-05-13 00:32:37.000000000 -0700
@@ -37,6 +37,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/locale/setrunelocale.c,v 1.44 2004/10/18 02:06:18 ache Exp $");
#include <runetype.h>
#include <errno.h>
#include <limits.h>
-@@ -49,67 +51,69 @@ __FBSDID("$FreeBSD: src/lib/libc/locale/
+@@ -49,67 +51,66 @@ __FBSDID("$FreeBSD: src/lib/libc/locale/
#include "mblocal.h"
#include "setlocale.h"
+extern int _UTF2_init(struct __xlocale_st_runelocale *); /* deprecated */
+extern struct __xlocale_st_runelocale *_Read_RuneMagi(FILE *);
+
-+extern void spin_lock(int *);
-+extern void spin_unlock(int *);
-+
+#ifdef LEGACY_RUNE_APIS
+/* depreciated interfaces */
+rune_t sgetrune(const char *, size_t, char const **);
- mbstate_t * __restrict);
+ static struct __xlocale_st_runelocale *CachedRuneLocale;
+ extern int __mb_cur_max;
-+ static int cache_lock = 0;
++ static pthread_lock_t cache_lock = LOCK_INITIALIZER;
/*
* The "C" and "POSIX" locale are always here.
/*
* If the locale name is the same as our cache, use the cache.
*/
-+ spin_lock(&cache_lock);
++ LOCK(cache_lock);
if (CachedRuneLocale != NULL &&
- strcmp(encoding, ctype_encoding) == 0) {
- _CurrentRuneLocale = CachedRuneLocale;
+ _CurrentRuneLocale = &loc->__lc_ctype->_CurrentRuneLocale;
+ __mb_cur_max = loc->__lc_ctype->__mb_cur_max;
+ }
-+ spin_unlock(&cache_lock);
++ UNLOCK(cache_lock);
return (0);
}
-+ spin_unlock(&cache_lock);
++ UNLOCK(cache_lock);
/*
* Slurp the locale file into the cache.
-@@ -124,63 +128,81 @@ __setrunelocale(const char *encoding)
+@@ -124,63 +125,86 @@ __setrunelocale(const char *encoding)
if ((fp = fopen(name, "r")) == NULL)
return (errno == 0 ? ENOENT : errno);
- Cached__wcrtomb = __wcrtomb;
- Cached__wcsnrtombs = __wcsnrtombs;
- (void)strcpy(ctype_encoding, encoding);
-+ spin_lock(&cache_lock);
++ LOCK(cache_lock);
+ XL_RELEASE(CachedRuneLocale);
+ CachedRuneLocale = xrl;
+ XL_RETAIN(CachedRuneLocale);
-+ spin_unlock(&cache_lock);
++ UNLOCK(cache_lock);
} else
- free(rl);
+ XL_RELEASE(xrl);
-__wrap_setrunelocale(const char *locale)
+setrunelocale(const char *encoding)
+{
-+ return __setrunelocale(encoding, &__global_locale);
++ int ret;
++
++ XL_LOCK(&__global_locale);
++ ret = __setrunelocale(encoding, &__global_locale);
++ XL_UNLOCK(&__global_locale);
++ return ret;
+}
+#endif /* LEGACY_RUNE_APIS */
+
---- wcstod.c.orig 2007-03-16 01:15:20.000000000 -0700
-+++ wcstod.c 2007-03-16 03:03:41.000000000 -0700
-@@ -27,9 +27,12 @@
+--- wcstod.c.orig 2008-10-09 11:50:53.000000000 -0700
++++ wcstod.c 2008-10-29 00:50:24.000000000 -0700
+@@ -27,9 +27,31 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/locale/wcstod.c,v 1.4 2004/04/07 09:47:56 tjr Exp $");
#include <wchar.h>
#include <wctype.h>
+#include <_simple.h>
++
++/*
++ * __wcs_end_offset calculates the offset to the end within the wide character
++ * string, assuming numbers and letters are single bytes in multibyte
++ * representation, get the actual decimal string for localeconv_l. If the
++ * decimal point was within the string, compensate for the fact that the
++ * (possibly more than one byte) decimal point only takes one wide character.
++ */
++__private_extern__ size_t
++__wcs_end_offset(const char * __restrict buf, const char * __restrict end, locale_t loc)
++{
++ const char *decimalpoint = localeconv_l(loc)->decimal_point;
++ size_t n = end - buf;
++ char *p;
++
++ if ((p = strnstr(buf, decimalpoint, n)) != NULL)
++ n -= strlen(decimalpoint) - 1;
++ return n;
++}
/*
* Convert a string to a double-precision number.
-@@ -41,42 +44,43 @@
- * for at least the digits, radix character and letters.
+@@ -38,45 +60,48 @@ __FBSDID("$FreeBSD: src/lib/libc/locale/
+ * have to duplicate the code of strtod() here, we convert the supplied
+ * wide character string to multibyte and call strtod() on the result.
+ * This assumes that the multibyte encoding is compatible with ASCII
+- * for at least the digits, radix character and letters.
++ * for at least the digits and letters. The radix character can be more
++ * than one byte.
*/
++
double
-wcstod(const wchar_t * __restrict nptr, wchar_t ** __restrict endptr)
+wcstod_l(const wchar_t * __restrict nptr, wchar_t ** __restrict endptr,
+ char mb[MB_CUR_MAX + 1];
+ const wchar_t *nptr0 = nptr;
+ const wchar_t *first;
-
-- while (iswspace(*nptr))
++
+ NORMALIZE_LOCALE(loc);
+ ctype = __numeric_ctype(loc);
-+
+
+- while (iswspace(*nptr))
+ while (iswspace_l(*nptr, ctype))
nptr++;
/*
* We only know where the number ended in the _multibyte_
-@@ -86,9 +90,15 @@
+@@ -86,9 +111,15 @@ wcstod(const wchar_t * __restrict nptr,
*/
if (endptr != NULL)
/* XXX Assume each wide char is one byte. */
- *endptr = (wchar_t *)nptr + (end - buf);
-+ *endptr = (end == buf) ? (wchar_t *)nptr0 : ((wchar_t *)first + (end - buf));
++ *endptr = (end == buf) ? (wchar_t *)nptr0 : ((wchar_t *)first + __wcs_end_offset(buf, end, loc));
- free(buf);
+ _simple_sfree(b);
---- wcstof.c.orig 2007-03-16 01:15:20.000000000 -0700
-+++ wcstof.c 2007-03-16 03:04:01.000000000 -0700
-@@ -27,44 +27,64 @@
+--- wcstof.c.orig 2008-10-09 11:50:52.000000000 -0700
++++ wcstof.c 2008-10-29 00:51:43.000000000 -0700
+@@ -27,44 +27,67 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/locale/wcstof.c,v 1.3 2004/04/07 09:47:56 tjr Exp $");
/*
* See wcstod() for comments as to the logic used.
*/
++
++extern size_t __wcs_end_offset(const char * __restrict buf, const char * __restrict end, locale_t loc);
++
float
-wcstof(const wchar_t * __restrict nptr, wchar_t ** __restrict endptr)
+wcstof_l(const wchar_t * __restrict nptr, wchar_t ** __restrict endptr,
if (endptr != NULL)
- *endptr = (wchar_t *)nptr + (end - buf);
-+ *endptr = (end == buf) ? (wchar_t *)nptr0 : ((wchar_t *)first + (end - buf));
++ *endptr = (end == buf) ? (wchar_t *)nptr0 : ((wchar_t *)first + __wcs_end_offset(buf, end, loc));
- free(buf);
+ _simple_sfree(b);
---- wcstold.c.orig 2007-03-16 01:15:20.000000000 -0700
-+++ wcstold.c 2007-03-16 03:04:39.000000000 -0700
-@@ -27,44 +27,64 @@
+--- wcstold.c.orig 2008-10-09 11:50:53.000000000 -0700
++++ wcstold.c 2008-10-29 00:51:34.000000000 -0700
+@@ -27,44 +27,67 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/locale/wcstold.c,v 1.4 2004/04/07 09:47:56 tjr Exp $");
/*
* See wcstod() for comments as to the logic used.
*/
++
++extern size_t __wcs_end_offset(const char * __restrict buf, const char * __restrict end, locale_t loc);
++
long double
-wcstold(const wchar_t * __restrict nptr, wchar_t ** __restrict endptr)
+wcstold_l(const wchar_t * __restrict nptr, wchar_t ** __restrict endptr,
if (endptr != NULL)
- *endptr = (wchar_t *)nptr + (end - buf);
-+ *endptr = (end == buf) ? (wchar_t *)nptr0 : ((wchar_t *)first + (end - buf));
++ *endptr = (end == buf) ? (wchar_t *)nptr0 : ((wchar_t *)first + __wcs_end_offset(buf, end, loc));
- free(buf);
+ _simple_sfree(b);
.for _cwd in ${CWD}
AUTOPATCHSRCS+= ${_cwd}/utf2-fbsd.c
${_cwd}/utf2-fbsd.c: ${_cwd}/FreeBSD/utf8.c
- cp ${.ALLSRC} ${.TARGET}
- patch ${.TARGET} ${.ALLSRC:S/utf8/utf2/}.patch
+ ${CP} ${.ALLSRC} ${.TARGET}
+ ${PATCH} ${.TARGET} ${.ALLSRC:S/utf8/utf2/}.patch
.endfor # _cwd
.else # !autopatch
LEGACYSRCS += wcsftime.c
+CFLAGS-collate-fbsd.c += -D_DARWIN_UNLIMITED_STREAMS
+CFLAGS-setrunelocale-fbsd.c += -D_DARWIN_UNLIMITED_STREAMS
+
# set the LIBC_ALIAS_* macros so we can decorate the symbol independent
# of other macro settings
CFLAGS-wcsftime-fbsd.c += -DLIBC_ALIAS_WCSFTIME -DLIBC_ALIAS_WCSFTIME_L
+# For LP64, we need to create rune32.h
+# This happens at installsrc time, not build time, so the compiler we need
+# is HOSTCC.
.ifmake autopatch
-# for LP64, we need to create rune32.h
-# the following is good enough for ppc, ppc64, i386 and x86_64
# This .for statement forces evaluation of ${CWD}
.for _cwd in ${CWD}
-_ARCH != arch
+# The following is good enough for ppc, ppc64, i386 and x86_64
+_ARCH != ${ARCH}
.if $(_ARCH) == x86_64
ARCH32 = i386
.else
ARCH32 = $(_ARCH:C/64$//)
.endif
${_cwd}/rune32.h: ${_cwd}/rune-fbsd.c
- ${CC} -arch ${ARCH32} -D_LIBC_NO_FEATURE_VERIFICATION -I${.CURDIR}/include -DRUNEOFF32 -o ${_cwd}/rune32 ${.ALLSRC}
+ ${HOSTCC} -arch ${ARCH32} -D_LIBC_NO_FEATURE_VERIFICATION -I${.CURDIR}/include -DRUNEOFF32 -o ${_cwd}/rune32 ${.ALLSRC}
${_cwd}/rune32 > ${.TARGET}
- rm -f ${_cwd}/rune32
+ ${RM} ${_cwd}/rune32
AUTOPATCHHDRS+= ${_cwd}/rune32.h
.endfor # _cwd
num_lines = split_lines(p, plim);
if (num_lines >= locale_buf_size_max)
num_lines = locale_buf_size_max;
- else if (num_lines >= locale_buf_size_min)
- num_lines = locale_buf_size_min;
- else {
+ else if (num_lines < locale_buf_size_min) {
errno = EFTYPE;
goto bad_lbuf;
}
struct lconv *
localeconv_l(locale_t loc)
{
- struct __xlocale_st_localeconv *lc;
+ struct lconv *lc;
NORMALIZE_LOCALE(loc);
- if (loc->__lc_localeconv && !loc->__mlocale_changed && !loc->__nlocale_changed)
- return &loc->__lc_localeconv->__ret;
-
- lc = (struct __xlocale_st_localeconv *)malloc(sizeof(struct __xlocale_st_localeconv));
- lc->__refcount = 1;
- lc->__free_extra = NULL;
- if (loc->__lc_localeconv)
- lc->__ret = loc->__lc_localeconv->__ret;
- else {
- loc->__mlocale_changed = 1;
- loc->__nlocale_changed = 1;
- }
if (loc->__mlocale_changed) {
+ XL_LOCK(loc);
+ if (loc->__mlocale_changed) {
/* LC_MONETARY part */
struct lc_monetary_T * mptr;
+ struct lconv *lc = &loc->__lc_localeconv;
-#define M_ASSIGN_STR(NAME) (lc->__ret.NAME = (char*)mptr->NAME)
-#define M_ASSIGN_CHAR(NAME) (lc->__ret.NAME = mptr->NAME[0])
+#define M_ASSIGN_STR(NAME) (lc->NAME = (char*)mptr->NAME)
+#define M_ASSIGN_CHAR(NAME) (lc->NAME = mptr->NAME[0])
mptr = __get_current_monetary_locale(loc);
M_ASSIGN_STR(int_curr_symbol);
M_ASSIGN_CHAR(int_p_sign_posn);
M_ASSIGN_CHAR(int_n_sign_posn);
loc->__mlocale_changed = 0;
+ }
+ XL_UNLOCK(loc);
}
if (loc->__nlocale_changed) {
+ XL_LOCK(loc);
+ if (loc->__nlocale_changed) {
/* LC_NUMERIC part */
struct lc_numeric_T * nptr;
+ struct lconv *lc = &loc->__lc_localeconv;
-#define N_ASSIGN_STR(NAME) (lc->__ret.NAME = (char*)nptr->NAME)
+#define N_ASSIGN_STR(NAME) (lc->NAME = (char*)nptr->NAME)
nptr = __get_current_numeric_locale(loc);
N_ASSIGN_STR(decimal_point);
N_ASSIGN_STR(thousands_sep);
N_ASSIGN_STR(grouping);
loc->__nlocale_changed = 0;
+ }
+ XL_UNLOCK(loc);
}
- XL_RELEASE(loc->__lc_localeconv);
- loc->__lc_localeconv = lc;
-
- return (&lc->__ret);
+ return &loc->__lc_localeconv;
}
/*
else if (strcmp(s, "C") == 0 ||
strcmp(s, "POSIX") == 0)
ret = "US-ASCII";
+ else if (strcmp(s, "UTF-8") == 0)
+ ret = "UTF-8";
}
break;
case D_T_FMT:
static char *loadlocale(int);
__private_extern__ const char *__get_locale_env(int);
+#define UNLOCK_AND_RETURN(x) {XL_UNLOCK(&__global_locale); return (x);}
+
char *
setlocale(category, locale)
int category;
return (category != LC_ALL ?
current_categories[category] : currentlocale());
+ XL_LOCK(&__global_locale);
/*
* Default to the current locale for everything.
*/
env = __get_locale_env(i);
if (strlen(env) > ENCODING_LEN) {
errno = EINVAL;
- return (NULL);
+ UNLOCK_AND_RETURN (NULL);
}
(void)strcpy(new_categories[i], env);
}
env = __get_locale_env(category);
if (strlen(env) > ENCODING_LEN) {
errno = EINVAL;
- return (NULL);
+ UNLOCK_AND_RETURN (NULL);
}
(void)strcpy(new_categories[category], env);
}
} else if (category != LC_ALL) {
if (strlen(locale) > ENCODING_LEN) {
errno = EINVAL;
- return (NULL);
+ UNLOCK_AND_RETURN (NULL);
}
(void)strcpy(new_categories[category], locale);
} else {
if ((r = strchr(locale, '/')) == NULL) {
if (strlen(locale) > ENCODING_LEN) {
errno = EINVAL;
- return (NULL);
+ UNLOCK_AND_RETURN (NULL);
}
for (i = 1; i < _LC_LAST; ++i)
(void)strcpy(new_categories[i], locale);
;
if (!r[1]) {
errno = EINVAL;
- return (NULL); /* Hmm, just slashes... */
+ UNLOCK_AND_RETURN (NULL); /* Hmm, just slashes... */
}
do {
if (i == _LC_LAST)
break; /* Too many slashes... */
if ((len = r - locale) > ENCODING_LEN) {
errno = EINVAL;
- return (NULL);
+ UNLOCK_AND_RETURN (NULL);
}
(void)strlcpy(new_categories[i], locale,
len + 1);
}
if (category != LC_ALL)
- return (loadlocale(category));
+ UNLOCK_AND_RETURN (loadlocale(category));
save__numeric_fp_cvt = __global_locale.__numeric_fp_cvt;
save__lc_numeric_loc = __global_locale.__lc_numeric_loc;
__global_locale.__lc_numeric_loc = save__lc_numeric_loc;
XL_RELEASE(save__lc_numeric_loc);
errno = saverr;
- return (NULL);
+ UNLOCK_AND_RETURN (NULL);
}
}
XL_RELEASE(save__lc_numeric_loc);
- return (currentlocale());
+ UNLOCK_AND_RETURN (currentlocale());
}
static char *
extern int _UTF2_init(struct __xlocale_st_runelocale *); /* deprecated */
extern struct __xlocale_st_runelocale *_Read_RuneMagi(FILE *);
-extern void spin_lock(int *);
-extern void spin_unlock(int *);
-
#ifdef LEGACY_RUNE_APIS
/* depreciated interfaces */
rune_t sgetrune(const char *, size_t, char const **);
int saverr, ret;
static struct __xlocale_st_runelocale *CachedRuneLocale;
extern int __mb_cur_max;
- static int cache_lock = 0;
+ static pthread_lock_t cache_lock = LOCK_INITIALIZER;
/*
* The "C" and "POSIX" locale are always here.
/*
* If the locale name is the same as our cache, use the cache.
*/
- spin_lock(&cache_lock);
+ LOCK(cache_lock);
if (CachedRuneLocale != NULL &&
strcmp(encoding, CachedRuneLocale->__ctype_encoding) == 0) {
XL_RELEASE(loc->__lc_ctype);
_CurrentRuneLocale = &loc->__lc_ctype->_CurrentRuneLocale;
__mb_cur_max = loc->__lc_ctype->__mb_cur_max;
}
- spin_unlock(&cache_lock);
+ UNLOCK(cache_lock);
return (0);
}
- spin_unlock(&cache_lock);
+ UNLOCK(cache_lock);
/*
* Slurp the locale file into the cache.
_CurrentRuneLocale = &loc->__lc_ctype->_CurrentRuneLocale;
__mb_cur_max = loc->__lc_ctype->__mb_cur_max;
}
- spin_lock(&cache_lock);
+ LOCK(cache_lock);
XL_RELEASE(CachedRuneLocale);
CachedRuneLocale = xrl;
XL_RETAIN(CachedRuneLocale);
- spin_unlock(&cache_lock);
+ UNLOCK(cache_lock);
} else
XL_RELEASE(xrl);
int
setrunelocale(const char *encoding)
{
- return __setrunelocale(encoding, &__global_locale);
+ int ret;
+
+ XL_LOCK(&__global_locale);
+ ret = __setrunelocale(encoding, &__global_locale);
+ XL_UNLOCK(&__global_locale);
+ return ret;
}
#endif /* LEGACY_RUNE_APIS */
#include <wctype.h>
#include <_simple.h>
+/*
+ * __wcs_end_offset calculates the offset to the end within the wide character
+ * string, assuming numbers and letters are single bytes in multibyte
+ * representation, get the actual decimal string for localeconv_l. If the
+ * decimal point was within the string, compensate for the fact that the
+ * (possibly more than one byte) decimal point only takes one wide character.
+ */
+__private_extern__ size_t
+__wcs_end_offset(const char * __restrict buf, const char * __restrict end, locale_t loc)
+{
+ const char *decimalpoint = localeconv_l(loc)->decimal_point;
+ size_t n = end - buf;
+ char *p;
+
+ if ((p = strnstr(buf, decimalpoint, n)) != NULL)
+ n -= strlen(decimalpoint) - 1;
+ return n;
+}
+
/*
* Convert a string to a double-precision number.
*
* have to duplicate the code of strtod() here, we convert the supplied
* wide character string to multibyte and call strtod() on the result.
* This assumes that the multibyte encoding is compatible with ASCII
- * for at least the digits, radix character and letters.
+ * for at least the digits and letters. The radix character can be more
+ * than one byte.
*/
+
double
wcstod_l(const wchar_t * __restrict nptr, wchar_t ** __restrict endptr,
locale_t loc)
*/
if (endptr != NULL)
/* XXX Assume each wide char is one byte. */
- *endptr = (end == buf) ? (wchar_t *)nptr0 : ((wchar_t *)first + (end - buf));
+ *endptr = (end == buf) ? (wchar_t *)nptr0 : ((wchar_t *)first + __wcs_end_offset(buf, end, loc));
_simple_sfree(b);
/*
* See wcstod() for comments as to the logic used.
*/
+
+extern size_t __wcs_end_offset(const char * __restrict buf, const char * __restrict end, locale_t loc);
+
float
wcstof_l(const wchar_t * __restrict nptr, wchar_t ** __restrict endptr,
locale_t loc)
val = strtof_l(buf, &end, loc);
if (endptr != NULL)
- *endptr = (end == buf) ? (wchar_t *)nptr0 : ((wchar_t *)first + (end - buf));
+ *endptr = (end == buf) ? (wchar_t *)nptr0 : ((wchar_t *)first + __wcs_end_offset(buf, end, loc));
_simple_sfree(b);
/*
* See wcstod() for comments as to the logic used.
*/
+
+extern size_t __wcs_end_offset(const char * __restrict buf, const char * __restrict end, locale_t loc);
+
long double
wcstold_l(const wchar_t * __restrict nptr, wchar_t ** __restrict endptr,
locale_t loc)
val = strtold_l(buf, &end, loc);
if (endptr != NULL)
- *endptr = (end == buf) ? (wchar_t *)nptr0 : ((wchar_t *)first + (end - buf));
+ *endptr = (end == buf) ? (wchar_t *)nptr0 : ((wchar_t *)first + __wcs_end_offset(buf, end, loc));
_simple_sfree(b);
/*
- * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2005, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
0, NULL, \
{}, {}, {}, {}, {}, \
{}, {}, {}, {}, {}, \
+ LOCK_INITIALIZER, \
XMAGIC, \
1, 0, 0, 0, 0, 0, 1, 1, 0, \
NULL, \
}
/*
- * Make a copy of a locale_t, locking/unlocking the source as determined
- * by the lock flag. A NULL locale_t means to make a copy of the current
+ * Make a copy of a locale_t, locking/unlocking the source.
+ * A NULL locale_t means to make a copy of the current
* locale while LC_GLOBAL_LOCALE means to copy the global locale. If
* &__c_locale is passed (meaning a C locale is desired), just make
* a copy.
return NULL;
new->__refcount = 1;
new->__free_extra = (__free_extra_t)_releaselocale;
+ new->__lock = LOCK_INITIALIZER;
if (loc == NULL)
loc = __current_locale();
else if (loc == LC_GLOBAL_LOCALE)
*new = __c_locale;
return new;
}
+ XL_LOCK(loc);
_copylocale(new, loc);
+ XL_UNLOCK(loc);
/* __mbs_mblen is the first of NMBSTATET mbstate_t buffers */
bzero(&new->__mbs_mblen, offsetof(struct _xlocale, __magic)
- offsetof(struct _xlocale, __mbs_mblen));
XL_RETAIN(new->__lc_numeric_loc);
/* time */
XL_RETAIN(new->__lc_time);
- /* newale_t */
- XL_RETAIN(new->__lc_localeconv);
return new;
}
XL_RELEASE(loc->__lc_numeric_loc);
/* time */
XL_RELEASE(loc->__lc_time);
- /* locale_t */
- XL_RELEASE(loc->__lc_localeconv);
}
/*
querylocale(int mask, locale_t loc)
{
int m;
+ const char *ret;
if (_checklocale(loc) < 0 || (mask & LC_ALL_MASK) == 0) {
errno = EINVAL;
loc = __current_locale();
else if (loc == LC_GLOBAL_LOCALE)
loc = &__global_locale;
- for(m = 1; m <= _LC_LAST_MASK; m <<= 1) {
- if (m & mask) {
- switch(m) {
- case LC_COLLATE_MASK:
- return (loc->__collate_load_error ? C : loc->__lc_collate->__encoding);
- case LC_CTYPE_MASK:
- return loc->__lc_ctype->__ctype_encoding;
- case LC_MESSAGES_MASK:
- return (loc->_messages_using_locale ? loc->__lc_messages->_messages_locale_buf : C);
- case LC_MONETARY_MASK:
- return (loc->_monetary_using_locale ? loc->__lc_monetary->_monetary_locale_buf : C);
- case LC_NUMERIC_MASK:
- return (loc->_numeric_using_locale ? loc->__lc_numeric->_numeric_locale_buf : C);
- case LC_TIME_MASK:
- return (loc->_time_using_locale ? loc->__lc_time->_time_locale_buf : C);
- }
- }
+ m = ffs(mask);
+ if (m == 0 || m > _LC_NUM_MASK) {
+ errno = EINVAL;
+ return NULL;
+ }
+ XL_LOCK(loc);
+ switch(1 << (m - 1)) {
+ case LC_COLLATE_MASK:
+ ret = (loc->__collate_load_error ? C : loc->__lc_collate->__encoding);
+ break;
+ case LC_CTYPE_MASK:
+ ret = loc->__lc_ctype->__ctype_encoding;
+ break;
+ case LC_MESSAGES_MASK:
+ ret = (loc->_messages_using_locale ? loc->__lc_messages->_messages_locale_buf : C);
+ break;
+ case LC_MONETARY_MASK:
+ ret = (loc->_monetary_using_locale ? loc->__lc_monetary->_monetary_locale_buf : C);
+ break;
+ case LC_NUMERIC_MASK:
+ ret = (loc->_numeric_using_locale ? loc->__lc_numeric->_numeric_locale_buf : C);
+ break;
+ case LC_TIME_MASK:
+ ret = (loc->_time_using_locale ? loc->__lc_time->_time_locale_buf : C);
+ break;
+ default:
+ /* should never get here */
+ XL_UNLOCK(loc);
+ errno = EINVAL;
+ return NULL;
}
- /* should never get here */
- errno = EINVAL;
- return NULL;
+ XL_UNLOCK(loc);
+ return ret;
}
/*
return __locale_ptr(loc)->__lc_ctype->__mb_cur_max;
}
+static void
+__xlocale_release(void *loc)
+{
+ XL_RELEASE((locale_t)loc);
+}
+
/*
* Called from the Libc initializer to setup the thread-specific key.
*/
* part for libSystem. The libSystem part starts at __pthread_tsd_first = 10.
* dyld will set this value to 1.
*/
-extern int __pthread_tsd_first;
__private_extern__ void
__xlocale_init(void)
{
- if (__locale_key == (pthread_key_t)-1)
- __locale_key = __pthread_tsd_first;
+ if (__locale_key == (pthread_key_t)-1) {
+ __locale_key = __LIBC_PTHREAD_KEY_XLOCALE;
+ pthread_key_init_np(__locale_key, __xlocale_release);
+ }
}
/*
- * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2005, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <locale.h>
#include <libkern/OSAtomic.h>
#include <pthread.h>
+#include <pthread_spinlock.h>
#include <limits.h>
#include "setlocale.h"
#include "collate.h"
struct lc_time_T _time_locale;
};
-struct __xlocale_st_localeconv {
- __STRUCT_COMMON
- struct lconv __ret;
-};
-
/* the extended locale structure */
/* values for __numeric_fp_cvt */
#define LC_NUMERIC_FP_UNINITIALIZED 0
__darwin_mbstate_t __mbs_wcsnrtombs;
__darwin_mbstate_t __mbs_wcsrtombs;
__darwin_mbstate_t __mbs_wctomb;
+ pthread_lock_t __lock;
/* magic (Here up to the end is copied when duplicating locale_t's) */
int64_t __magic;
/* flags */
/* time */
struct __xlocale_st_time *__lc_time;
/* localeconv */
- struct __xlocale_st_localeconv *__lc_localeconv;
+ struct lconv __lc_localeconv;
};
#define NORMALIZE_LOCALE(x) if ((x) == NULL) { \
(x) = &__global_locale; \
}
-#define XL_RELEASE(x) if ((x) && (x)->__free_extra != XPERMANENT && OSAtomicDecrement32Barrier(&(x)->__refcount) <= 0) { \
+#define XL_LOCK(x) LOCK((x)->__lock);
+#define XL_RELEASE(x) if ((x) && (x)->__free_extra != XPERMANENT && OSAtomicDecrement32Barrier(&(x)->__refcount) == 0) { \
if ((x)->__free_extra) \
(*(x)->__free_extra)((x)); \
free((x)); \
}
#define XL_RETAIN(x) if ((x) && (x)->__free_extra != XPERMANENT) { OSAtomicIncrement32Barrier(&(x)->__refcount); }
+#define XL_UNLOCK(x) UNLOCK((x)->__lock);
__private_extern__ struct __xlocale_st_runelocale _DefaultRuneXLocale;
__private_extern__ struct _xlocale __global_locale;
static inline __attribute__((always_inline)) locale_t
__locale_ptr(locale_t __loc)
{
- return (__loc == LC_GLOBAL_LOCALE ? &__global_locale : __loc);
+ NORMALIZE_LOCALE(__loc);
+ return __loc;
}
__END_DECLS
--- /dev/null
+.\" Copyright (c) 1990, 1991, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software contributed to Berkeley by
+.\" the American National Standards Committee X3, on Information
+.\" Processing Systems.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. All advertising materials mentioning features or use of this software
+.\" must display the following acknowledgement:
+.\" This product includes software developed by the University of
+.\" California, Berkeley and its contributors.
+.\" 4. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" @(#)stdarg.3 8.1 (Berkeley) 6/5/93
+.\" $FreeBSD: src/share/man/man3/stdarg.3,v 1.15 2005/01/21 08:36:36 ru Exp $
+.\"
+.Dd October 25, 2002
+.Dt STDARG 3
+.Os
+.Sh NAME
+.Nm stdarg
+.Nd variable argument lists
+.Sh SYNOPSIS
+.In stdarg.h
+.Ft void
+.Fn va_start "va_list ap" last
+.Ft type
+.Fn va_arg "va_list ap" type
+.Ft void
+.Fn va_copy "va_list dest" "va_list src"
+.Ft void
+.Fn va_end "va_list ap"
+.Sh DESCRIPTION
+A function may be called with a varying number of arguments of varying
+types.
+The include file
+.In stdarg.h
+declares a type
+.Pq Em va_list
+and defines three macros for stepping
+through a list of arguments whose number and types are not known to
+the called function.
+.Pp
+The called function must declare an object of type
+.Em va_list
+which is used by the macros
+.Fn va_start ,
+.Fn va_arg ,
+.Fn va_copy ,
+and
+.Fn va_end .
+.Pp
+The
+.Fn va_start
+macro initializes
+.Fa ap
+for subsequent use by
+.Fn va_arg
+and
+.Fn va_end ,
+and must be called first.
+.Pp
+The parameter
+.Fa last
+is the name of the last parameter before the variable argument list,
+i.e., the last parameter of which the calling function knows the type.
+.Pp
+Because the address of this parameter is used in the
+.Fn va_start
+macro, it should not be declared as a register variable, or as a
+function or an array type.
+.Pp
+The
+.Fn va_start
+macro returns no value.
+.Pp
+The
+.Fn va_arg
+macro expands to an expression that has the type and value of the next
+argument in the call.
+The parameter
+.Fa ap
+is the
+.Em va_list Fa ap
+initialized by
+.Fn va_start .
+Each call to
+.Fn va_arg
+modifies
+.Fa ap
+so that the next call returns the next argument.
+The parameter
+.Fa type
+is a type name specified so that the type of a pointer to an
+object that has the specified type can be obtained simply by
+adding a *
+to
+.Fa type .
+.Pp
+If there is no next argument, or if
+.Fa type
+is not compatible with the type of the actual next argument
+(as promoted according to the default argument promotions),
+random errors will occur.
+.Pp
+The first use of the
+.Fn va_arg
+macro after that of the
+.Fn va_start
+macro returns the argument after
+.Fa last .
+Successive invocations return the values of the remaining
+arguments.
+.Pp
+The
+.Fn va_copy
+macro copies a variable argument list, previously initialized by
+.Fn va_start ,
+from
+.Fa src
+to
+.Fa dest .
+The state is preserved such that it is equivalent to calling
+.Fn va_start
+with the same second argument used with
+.Fa src ,
+and calling
+.Fn va_arg
+the same number of times as called with
+.Fa src .
+.Pp
+The
+.Fn va_copy
+macro returns no value.
+.Pp
+The
+.Fn va_end
+macro handles a normal return from the function whose variable argument
+list was initialized by
+.Fn va_start .
+.Pp
+The
+.Fn va_end
+macro returns no value.
+.Sh EXAMPLES
+The function
+.Em foo
+takes a string of format characters and prints out the argument
+associated with each format character based on the type.
+.Bd -literal -offset indent
+void foo(char *fmt, ...)
+{
+ va_list ap;
+ int d;
+ char c, *s;
+
+ va_start(ap, fmt);
+ while (*fmt)
+ switch(*fmt++) {
+ case 's': /* string */
+ s = va_arg(ap, char *);
+ printf("string %s\en", s);
+ break;
+ case 'd': /* int */
+ d = va_arg(ap, int);
+ printf("int %d\en", d);
+ break;
+ case 'c': /* char */
+ /* Note: char is promoted to int. */
+ c = va_arg(ap, int);
+ printf("char %c\en", c);
+ break;
+ }
+ va_end(ap);
+}
+.Ed
+.Sh COMPATIBILITY
+These macros are
+.Em not
+compatible with the historic macros they replace.
+A backward compatible version can be found in the include
+file
+.In varargs.h .
+.Sh STANDARDS
+The
+.Fn va_start ,
+.Fn va_arg ,
+.Fn va_copy ,
+and
+.Fn va_end
+macros conform to
+.St -isoC-99 .
+.Sh BUGS
+Unlike the
+.Em varargs
+macros, the
+.Nm
+macros do not permit programmers to
+code a function with no fixed arguments.
+This problem generates work mainly when converting
+.Em varargs
+code to
+.Nm
+code,
+but it also creates difficulties for variadic functions that
+wish to pass all of their arguments on to a function
+that takes a
+.Em va_list
+argument, such as
+.Xr vfprintf 3 .
--- /dev/null
+--- stdarg.3.orig 2008-07-30 02:46:51.000000000 -0700
++++ stdarg.3 2008-07-30 04:06:35.000000000 -0700
+@@ -74,13 +74,21 @@
+ .Pp
+ The
+ .Fn va_start
+-macro initializes
+-.Fa ap
+-for subsequent use by
++macro must be called first, and it initializes
++.Fa ap ,
++which can be passed to
+ .Fn va_arg
+-and
++for each argument to be processed.
++Calling
++.Fn va_end
++signals that there are no further arguments, and causes
++.Fa ap
++to be invalidated.
++Note that each call to
++.Fn va_start
++must be matched by a call to
+ .Fn va_end ,
+-and must be called first.
++from within the same function.
+ .Pp
+ The parameter
+ .Fa last
+@@ -93,10 +101,6 @@
+ function or an array type.
+ .Pp
+ The
+-.Fn va_start
+-macro returns no value.
+-.Pp
+-The
+ .Fn va_arg
+ macro expands to an expression that has the type and value of the next
+ argument in the call.
+@@ -136,34 +140,38 @@
+ .Pp
+ The
+ .Fn va_copy
+-macro copies a variable argument list, previously initialized by
++macro copies the state of the variable argument list,
++.Fa src ,
++previously initialized by
+ .Fn va_start ,
+-from
+-.Fa src
+-to
+-.Fa dest .
+-The state is preserved such that it is equivalent to calling
++to the variable argument list,
++.Fa dest ,
++which must not have been previously initialized by
++.Fn va_start ,
++without an intervening call to
++.Fn va_end .
++The state preserved in
++.Fa dest
++is equivalent to calling
+ .Fn va_start
+-with the same second argument used with
+-.Fa src ,
+-and calling
++and
+ .Fn va_arg
+-the same number of times as called with
++on
++.Fa dest
++in the same way as was used on
+ .Fa src .
+-.Pp
+-The
+-.Fn va_copy
+-macro returns no value.
+-.Pp
+-The
++The copied variable argument list can subsequently be passed to
++.Fn va_arg ,
++and must finally be passed to
+ .Fn va_end
+-macro handles a normal return from the function whose variable argument
+-list was initialized by
+-.Fn va_start .
++when through with it.
+ .Pp
+-The
+-.Fn va_end
+-macro returns no value.
++After a variable argument list is invalidated by
++.Fn va_end ,
++it can be reinitialized with
++.Fn va_start
++or made a copy of another variable argument list with
++.Fn va_copy .
+ .Sh EXAMPLES
+ The function
+ .Em foo
+@@ -172,11 +180,12 @@
+ .Bd -literal -offset indent
+ void foo(char *fmt, ...)
+ {
+- va_list ap;
++ va_list ap, ap2;
+ int d;
+ char c, *s;
+
+ va_start(ap, fmt);
++ va_copy(ap2, ap);
+ while (*fmt)
+ switch(*fmt++) {
+ case 's': /* string */
+@@ -194,6 +203,10 @@
+ break;
+ }
+ va_end(ap);
++ ...
++ /* use ap2 to iterate over the arguments again */
++ ...
++ va_end(ap2);
+ }
+ .Ed
+ .Sh COMPATIBILITY
# miscellaneous man pages
.PATH: ${.CURDIR}/man
+CWD := ${.CURDIR}/man
.if ${LIB} == "c"
-MAN3 += assert.3 bitstring.3 stdarg.3
+MAN2 += gethostuuid.2
+MAN3 += assert.3 bitstring.3
.ifdef FEATURE_LEGACY_UTMP_APIS
MAN5 += utmp.5
-MLINKS += utmp.5 lastlog.5
-MLINKS += utmp.5 wtmp.5
.endif
MAN7 += environ.7
+.include "Makefile.fbsd_begin"
+FBSDMAN3 = stdarg.3
+.include "Makefile.fbsd_end"
+
+
MLINKS += bitstring.3 bit_alloc.3
MLINKS += bitstring.3 bit_clear.3
MLINKS += bitstring.3 bit_decl.3
MLINKS += bitstring.3 bit_set.3
MLINKS += bitstring.3 bit_test.3
MLINKS += bitstring.3 bitstr_size.3
+.ifdef FEATURE_LEGACY_UTMP_APIS
+MLINKS += utmp.5 lastlog.5
+MLINKS += utmp.5 wtmp.5
+.endif
+
+MLINKS += stdarg.3 va_arg.3
+MLINKS += stdarg.3 va_copy.3
+MLINKS += stdarg.3 va_end.3
+MLINKS += stdarg.3 va_start.3
.endif
.Fd #include <bitstring.h>
.Ft bitstr_t *
.Fn bit_alloc "int nbits"
-.Fn bit_decl "bit_str name" "int nbits"
-.Fn bit_clear "bit_str name" "int bit"
-.Fn bit_ffc "bit_str name" "int nbits" "int *value"
-.Fn bit_ffs "bit_str name" "int nbits" "int *value"
-.Fn bit_nclear "bit_str name" "int start" "int stop"
-.Fn bit_nset "bit_str name" "int start" "int stop"
-.Fn bit_set "bit_str name" "int bit"
+.Fn bit_decl "bitstr_t *name" "int nbits"
+.Fn bit_clear "bitstr_t *name" "int bit"
+.Fn bit_ffc "bitstr_t *name" "int nbits" "int *value"
+.Fn bit_ffs "bitstr_t *name" "int nbits" "int *value"
+.Fn bit_nclear "bitstr_t *name" "int start" "int stop"
+.Fn bit_nset "bitstr_t *name" "int start" "int stop"
+.Fn bit_set "bitstr_t *name" "int bit"
+.Ft int
.Fn bitstr_size "int nbits"
-.Fn bit_test "bit_str name" "int bit"
+.Ft int
+.Fn bit_test "bitstr_t *name" "int bit"
.Sh DESCRIPTION
These macros operate on strings of bits.
.Pp
.Fa value
is set to \-1.
.Pp
-The arguments to these macros are evaluated only once and may safely
+The macros
+.Fn bit_clear ,
+.Fn bit_set
+and
+.Fn bit_test
+will evaluate the
+.Fa bit
+argument more than once, so avoid using pre- or post-, increment or decrement.
+The arguments to the other macros are evaluated only once and may safely
have side effects.
.Sh EXAMPLE
.Bd -literal -offset indent
--- /dev/null
+.Dd Nov 5, 2008
+.Dt GETHOSTUUID \&2 "Mac OS X System Calls Manual"
+.Os "Mac OS X"
+.Sh NAME
+.Nm gethostuuid
+.Nd return a unique identifier for the current machine
+.Sh SYNOPSIS
+.In unistd.h
+.Ft int
+.Fo gethostuuid
+.Fa "uuid_t id"
+.Fa "const struct timespec *wait"
+.Fc
+.Sh DESCRIPTION
+The
+.Fn gethostuuid
+function returns a 16-byte
+.Ft uuid_t
+specified by
+.Fa id ,
+that uniquely identifies the current machine.
+Be aware that the hardware identifiers that
+.Fn gethostuuid
+uses to generate the UUID can themselves be modified.
+.Pp
+The
+.Fa wait
+argument is a pointer to a
+.Ft "struct timespec"
+that specifies the maximum time to wait for the result.
+Setting the
+.Fa tv_sec
+and
+.Fa tv_nsec
+fields to zero means to wait indefinitely until it completes.
+.Sh RETURN VALUES
+The
+.Fn gethostuuid
+function returns zero on success or -1 on error.
+.Sh ERRORS
+The
+.Fn gethostuuid
+function fails if:
+.Bl -tag -width Er
+.It Bq Er EFAULT
+.Fa wait
+points to memory that is not a valid part of the process
+address space.
+.It Bq Er EWOULDBLOCK
+The
+.Fa wait
+timeout expired before the UUID could be obtained.
+.El
+.Sh SEE ALSO
+.Xr uuid 3
-.\" $NetBSD: stdarg.3,v 1.3 1994/11/30 15:24:37 jtc Exp $
-.\"
.\" Copyright (c) 1990, 1991, 1993
.\" The Regents of the University of California. All rights reserved.
.\"
.\" SUCH DAMAGE.
.\"
.\" @(#)stdarg.3 8.1 (Berkeley) 6/5/93
+.\" $FreeBSD: src/share/man/man3/stdarg.3,v 1.15 2005/01/21 08:36:36 ru Exp $
.\"
-.Dd June 5, 1993
+.Dd October 25, 2002
.Dt STDARG 3
.Os
.Sh NAME
.Nm stdarg
.Nd variable argument lists
.Sh SYNOPSIS
-.Fd #include <stdarg.h>
+.In stdarg.h
.Ft void
.Fn va_start "va_list ap" last
.Ft type
.Fn va_arg "va_list ap" type
.Ft void
+.Fn va_copy "va_list dest" "va_list src"
+.Ft void
.Fn va_end "va_list ap"
.Sh DESCRIPTION
A function may be called with a varying number of arguments of varying
types.
The include file
-.Aq Pa stdarg.h
+.In stdarg.h
declares a type
.Pq Em va_list
and defines three macros for stepping
which is used by the macros
.Fn va_start ,
.Fn va_arg ,
+.Fn va_copy ,
and
.Fn va_end .
.Pp
The
.Fn va_start
-macro initializes
-.Fa ap
-for subsequent use by
+macro must be called first, and it initializes
+.Fa ap ,
+which can be passed to
.Fn va_arg
-and
+for each argument to be processed.
+Calling
+.Fn va_end
+signals that there are no further arguments, and causes
+.Fa ap
+to be invalidated.
+Note that each call to
+.Fn va_start
+must be matched by a call to
.Fn va_end ,
-and must be called first.
+from within the same function.
.Pp
The parameter
.Fa last
is the name of the last parameter before the variable argument list,
-i.e. the last parameter of which the calling function knows the type.
+i.e., the last parameter of which the calling function knows the type.
.Pp
Because the address of this parameter is used in the
.Fn va_start
function or an array type.
.Pp
The
-.Fn va_start
-macro returns no value.
-.Pp
-The
.Fn va_arg
macro expands to an expression that has the type and value of the next
argument in the call.
The parameter
.Fa ap
-is the
+is the
.Em va_list Fa ap
initialized by
.Fn va_start .
-Each call to
+Each call to
.Fn va_arg
modifies
.Fa ap
The parameter
.Fa type
is a type name specified so that the type of a pointer to an
-object that has the specified type can be obtained simply by
+object that has the specified type can be obtained simply by
adding a *
to
.Fa type .
.Pp
The first use of the
.Fn va_arg
-macro after that of the
+macro after that of the
.Fn va_start
-macro returns the argument after
+macro returns the argument after
.Fa last .
Successive invocations return the values of the remaining
arguments.
.Pp
The
+.Fn va_copy
+macro copies the state of the variable argument list,
+.Fa src ,
+previously initialized by
+.Fn va_start ,
+to the variable argument list,
+.Fa dest ,
+which must not have been previously initialized by
+.Fn va_start ,
+without an intervening call to
+.Fn va_end .
+The state preserved in
+.Fa dest
+is equivalent to calling
+.Fn va_start
+and
+.Fn va_arg
+on
+.Fa dest
+in the same way as was used on
+.Fa src .
+The copied variable argument list can subsequently be passed to
+.Fn va_arg ,
+and must finally be passed to
.Fn va_end
-macro handles a normal return from the function whose variable argument
-list was initialized by
-.Fn va_start .
+when through with it.
.Pp
-The
-.Fn va_end
-macro returns no value.
+After a variable argument list is invalidated by
+.Fn va_end ,
+it can be reinitialized with
+.Fn va_start
+or made a copy of another variable argument list with
+.Fn va_copy .
.Sh EXAMPLES
The function
.Em foo
.Bd -literal -offset indent
void foo(char *fmt, ...)
{
- va_list ap;
+ va_list ap, ap2;
int d;
- char c, *p, *s;
+ char c, *s;
va_start(ap, fmt);
+ va_copy(ap2, ap);
while (*fmt)
switch(*fmt++) {
case 's': /* string */
printf("int %d\en", d);
break;
case 'c': /* char */
- c = va_arg(ap, char);
+ /* Note: char is promoted to int. */
+ c = va_arg(ap, int);
printf("char %c\en", c);
break;
}
va_end(ap);
+ ...
+ /* use ap2 to iterate over the arguments again */
+ ...
+ va_end(ap2);
}
.Ed
+.Sh COMPATIBILITY
+These macros are
+.Em not
+compatible with the historic macros they replace.
+A backward compatible version can be found in the include
+file
+.In varargs.h .
.Sh STANDARDS
The
.Fn va_start ,
.Fn va_arg ,
+.Fn va_copy ,
and
.Fn va_end
macros conform to
-.St -ansiC .
-.Sh COMPATIBILITY
-These macros are
-.Em not
-compatible with the historic macros they replace.
-A backward compatible version can be found in the include
-file
-.Aq Pa varargs.h .
+.St -isoC-99 .
.Sh BUGS
Unlike the
.Em varargs
macros, the
-.Nm stdarg
+.Nm
macros do not permit programmers to
code a function with no fixed arguments.
This problem generates work mainly when converting
.Em varargs
code to
-.Nm stdarg
+.Nm
code,
but it also creates difficulties for variadic functions that
wish to pass all of their arguments on to a function
+.Sh LEGACY SYNOPSIS
+.Fd #include <sys/types.h>
+.Fd #include <sys/socket.h>
-+.Fd #include <sys/netinet/in.h>
-+.Fd #include <sys/arpa/inet.h>
++.Fd #include <netinet/in.h>
++.Fd #include <arpa/inet.h>
+.Pp
+These include files are necessary for all functions.
.Sh SEE ALSO
-/* $KAME: inet_addr.c,v 1.5 2001/08/20 02:32:40 itojun Exp $ */
-
/*
- * ++Copyright++ 1983, 1990, 1993
- * -
* Copyright (c) 1983, 1990, 1993
* The Regents of the University of California. All rights reserved.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- * -
+ */
+
+/*
* Portions Copyright (c) 1993 by Digital Equipment Corporation.
*
* Permission to use, copy, modify, and distribute this software for any
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
- * -
- * --Copyright--
+ */
+
+/*
+ * Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
+ * Portions Copyright (c) 1996-1999 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
-static char sccsid[] = "@(#)inet_addr.c 8.1 (Berkeley) 6/17/93";
+static const char sccsid[] = "@(#)inet_addr.c 8.1 (Berkeley) 6/17/93";
+static const char rcsid[] = "$Id: inet_addr.c,v 1.4.18.1 2005/04/27 05:00:52 sra Exp $";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/inet_addr.c,v 1.16 2002/04/19 04:46:20 suz Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/inet/inet_addr.c,v 1.4 2007/06/03 17:20:26 ume Exp $");
+
+#include "port_before.h"
+#include <sys/types.h>
#include <sys/param.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <ctype.h>
-#include <errno.h>
-#include <string.h>
-#include <stdlib.h>
-/*
- * ASCII internet address interpretation routine.
+#include "port_after.h"
+
+/*%
+ * Ascii internet address interpretation routine.
* The value returned is in network order.
*/
in_addr_t /* XXX should be struct in_addr :( */
-inet_addr(cp)
- const char *cp;
-{
+inet_addr(const char *cp) {
struct in_addr val;
if (inet_aton(cp, &val))
return (INADDR_NONE);
}
-/*
- * Check whether "cp" is a valid ASCII representation
+/*%
+ * Check whether "cp" is a valid ascii representation
* of an Internet address and convert to a binary address.
* Returns 1 if the address is valid, 0 if not.
* This replaces inet_addr, the return value from which
* cannot distinguish between failure and a local broadcast address.
*/
int
-inet_aton(cp, addr)
- const char *cp;
- struct in_addr *addr;
-{
- u_long parts[4];
- in_addr_t val;
- char *c;
- char *endptr;
- int gotend, n;
-
- c = (char *)cp;
- n = 0;
- /*
- * Run through the string, grabbing numbers until
- * the end of the string, or some error
- */
- gotend = 0;
- while (!gotend) {
- errno = 0;
- val = strtoul(c, &endptr, 0);
-
- if (errno == ERANGE) /* Fail completely if it overflowed. */
- return (0);
-
- /*
- * If the whole string is invalid, endptr will equal
- * c.. this way we can make sure someone hasn't
- * gone '.12' or something which would get past
- * the next check.
+inet_aton(const char *cp, struct in_addr *addr) {
+ u_long val;
+ int base, n;
+ char c;
+ u_int8_t parts[4];
+ u_int8_t *pp = parts;
+ int digit;
+
+ c = *cp;
+ for (;;) {
+ /*
+ * Collect number up to ``.''.
+ * Values are specified as for C:
+ * 0x=hex, 0=octal, isdigit=decimal.
*/
- if (endptr == c)
+ if (!isdigit((unsigned char)c))
return (0);
- parts[n] = val;
- c = endptr;
-
- /* Check the next character past the previous number's end */
- switch (*c) {
- case '.' :
- /* Make sure we only do 3 dots .. */
- if (n == 3) /* Whoops. Quit. */
- return (0);
- n++;
- c++;
- break;
-
- case '\0':
- gotend = 1;
- break;
-
- default:
- if (isspace((unsigned char)*c)) {
- gotend = 1;
- break;
+ val = 0; base = 10; digit = 0;
+ if (c == '0') {
+ c = *++cp;
+ if (c == 'x' || c == 'X')
+ base = 16, c = *++cp;
+ else {
+ base = 8;
+ digit = 1 ;
+ }
+ }
+ for (;;) {
+ if (isascii(c) && isdigit((unsigned char)c)) {
+ if (base == 8 && (c == '8' || c == '9'))
+ return (0);
+ val = (val * base) + (c - '0');
+ c = *++cp;
+ digit = 1;
+ } else if (base == 16 && isascii(c) &&
+ isxdigit((unsigned char)c)) {
+ val = (val << 4) |
+ (c + 10 - (islower((unsigned char)c) ? 'a' : 'A'));
+ c = *++cp;
+ digit = 1;
} else
- return (0); /* Invalid character, so fail */
+ break;
}
-
+ if (c == '.') {
+ /*
+ * Internet format:
+ * a.b.c.d
+ * a.b.c (with c treated as 16 bits)
+ * a.b (with b treated as 24 bits)
+ */
+ if (pp >= parts + 3 || val > 0xffU)
+ return (0);
+ *pp++ = val;
+ c = *++cp;
+ } else
+ break;
}
-
+ /*
+ * Check for trailing characters.
+ */
+ if (c != '\0' && (!isascii(c) || !isspace((unsigned char)c)))
+ return (0);
+ /*
+ * Did we get a valid digit?
+ */
+ if (!digit)
+ return (0);
/*
* Concoct the address according to
* the number of parts specified.
*/
-
+ n = pp - parts + 1;
switch (n) {
- case 0: /* a -- 32 bits */
- /*
- * Nothing is necessary here. Overflow checking was
- * already done in strtoul().
- */
+ case 1: /*%< a -- 32 bits */
break;
- case 1: /* a.b -- 8.24 bits */
- if (val > 0xffffff || parts[0] > 0xff)
+
+ case 2: /*%< a.b -- 8.24 bits */
+ if (val > 0xffffffU)
return (0);
val |= parts[0] << 24;
break;
- case 2: /* a.b.c -- 8.8.16 bits */
- if (val > 0xffff || parts[0] > 0xff || parts[1] > 0xff)
+ case 3: /*%< a.b.c -- 8.8.16 bits */
+ if (val > 0xffffU)
return (0);
val |= (parts[0] << 24) | (parts[1] << 16);
break;
- case 3: /* a.b.c.d -- 8.8.8.8 bits */
- if (val > 0xff || parts[0] > 0xff || parts[1] > 0xff ||
- parts[2] > 0xff)
+ case 4: /*%< a.b.c.d -- 8.8.8.8 bits */
+ if (val > 0xffU)
return (0);
val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8);
break;
}
-
if (addr != NULL)
addr->s_addr = htonl(val);
return (1);
__weak_reference(__inet_addr, inet_addr);
#undef inet_aton
__weak_reference(__inet_aton, inet_aton);
+
+/*! \file */
---- inet_addr.c.orig 2003-05-20 15:22:14.000000000 -0700
-+++ inet_addr.c 2005-02-24 17:08:54.000000000 -0800
-@@ -61,6 +61,8 @@
- #include <sys/cdefs.h>
- __FBSDID("$FreeBSD: src/lib/libc/net/inet_addr.c,v 1.16 2002/04/19 04:46:20 suz Exp $");
-
-+#include "xlocale_private.h"
+--- inet_addr.c.orig 2008-09-01 20:32:46.000000000 -0700
++++ inet_addr.c 2008-09-01 20:45:46.000000000 -0700
+@@ -68,6 +68,10 @@
+ static const char sccsid[] = "@(#)inet_addr.c 8.1 (Berkeley) 6/17/93";
+ static const char rcsid[] = "$Id: inet_addr.c,v 1.4.18.1 2005/04/27 05:00:52 sra Exp $";
+ #endif /* LIBC_SCCS and not lint */
+
- #include <sys/param.h>
-
- #include <netinet/in.h>
-@@ -103,6 +105,7 @@
- char *c;
- char *endptr;
- int gotend, n;
-+ locale_t loc = __current_locale();
-
- c = (char *)cp;
- n = 0;
-@@ -113,7 +116,7 @@
- gotend = 0;
- while (!gotend) {
- errno = 0;
-- val = strtoul(c, &endptr, 0);
-+ val = strtoul_l(c, &endptr, 0, loc);
-
- if (errno == ERANGE) /* Fail completely if it overflowed. */
- return (0);
-@@ -144,7 +147,7 @@
- break;
++/* the algorithms only can deal with ASCII, so we optimize for it */
++#define USE_ASCII
++
+ #include <sys/cdefs.h>
+ __FBSDID("$FreeBSD: src/lib/libc/inet/inet_addr.c,v 1.4 2007/06/03 17:20:26 ume Exp $");
- default:
-- if (isspace((unsigned char)*c)) {
-+ if (isspace_l((unsigned char)*c, loc)) {
- gotend = 1;
- break;
- } else
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*/
#if defined(LIBC_SCCS) && !defined(lint)
-static char sccsid[] = "@(#)inet_lnaof.c 8.1 (Berkeley) 6/4/93";
+static const char sccsid[] = "@(#)inet_lnaof.c 8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/inet_lnaof.c,v 1.5 2002/03/22 21:52:29 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/inet/inet_lnaof.c,v 1.4 2007/06/03 17:20:26 ume Exp $");
+
+#include "port_before.h"
#include <sys/param.h>
#include <netinet/in.h>
#include <arpa/inet.h>
-/*
+#include "port_after.h"
+
+/*%
* Return the local network address portion of an
* internet address; handles class a/b/c network
* number formats.
*/
#undef inet_lnaof
__weak_reference(__inet_lnaof, inet_lnaof);
+
+/*! \file */
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*/
#if defined(LIBC_SCCS) && !defined(lint)
-static char sccsid[] = "@(#)inet_makeaddr.c 8.1 (Berkeley) 6/4/93";
+static const char sccsid[] = "@(#)inet_makeaddr.c 8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/inet_makeaddr.c,v 1.4 2002/03/22 21:52:29 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/inet/inet_makeaddr.c,v 1.4 2007/06/03 17:20:26 ume Exp $");
+
+#include "port_before.h"
#include <sys/param.h>
#include <netinet/in.h>
#include <arpa/inet.h>
-/*
+#include "port_after.h"
+
+/*%
* Formulate an Internet address from network + host. Used in
* building addresses stored in the ifnet structure.
*/
inet_makeaddr(net, host)
in_addr_t net, host;
{
- in_addr_t addr;
+ struct in_addr a;
- if (net < 128)
- addr = (net << IN_CLASSA_NSHIFT) | (host & IN_CLASSA_HOST);
- else if (net < 65536)
- addr = (net << IN_CLASSB_NSHIFT) | (host & IN_CLASSB_HOST);
+ if (net < 128U)
+ a.s_addr = (net << IN_CLASSA_NSHIFT) | (host & IN_CLASSA_HOST);
+ else if (net < 65536U)
+ a.s_addr = (net << IN_CLASSB_NSHIFT) | (host & IN_CLASSB_HOST);
else if (net < 16777216L)
- addr = (net << IN_CLASSC_NSHIFT) | (host & IN_CLASSC_HOST);
+ a.s_addr = (net << IN_CLASSC_NSHIFT) | (host & IN_CLASSC_HOST);
else
- addr = net | host;
- addr = htonl(addr);
- return (*(struct in_addr *)&addr);
+ a.s_addr = net | host;
+ a.s_addr = htonl(a.s_addr);
+ return (a);
}
/*
*/
#undef inet_makeaddr
__weak_reference(__inet_makeaddr, inet_makeaddr);
+
+/*! \file */
/*
- * Copyright (c) 1996 by Internet Software Consortium.
+ * Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (c) 1996,1999 by Internet Software Consortium.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
- * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
- * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
- * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
- * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
- * SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
-static const char orig_rcsid[] = "From Id: inet_net_ntop.c,v 8.2 1996/08/08 06:54:44 vixie Exp";
+static const char rcsid[] = "$Id: inet_net_ntop.c,v 1.3.18.2 2006/06/20 02:51:32 marka Exp $";
#endif
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/inet_net_ntop.c,v 1.7 2002/03/22 21:52:29 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/inet/inet_net_ntop.c,v 1.4 2007/06/03 17:20:26 ume Exp $");
+
+#include "port_before.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <string.h>
#include <stdlib.h>
+#include "port_after.h"
+
#ifdef SPRINTF_CHAR
# define SPRINTF(x) strlen(sprintf/**/x)
#else
static char * inet_net_ntop_ipv4(const u_char *src, int bits, char *dst,
size_t size);
+static char * inet_net_ntop_ipv6(const u_char *src, int bits, char *dst,
+ size_t size);
-/*
+/*%
* char *
* inet_net_ntop(af, src, bits, dst, size)
* convert network number from network to presentation format.
switch (af) {
case AF_INET:
return (inet_net_ntop_ipv4(src, bits, dst, size));
+ case AF_INET6:
+ return (inet_net_ntop_ipv6(src, bits, dst, size));
default:
errno = EAFNOSUPPORT;
return (NULL);
}
}
-/*
+/*%
* static char *
* inet_net_ntop_ipv4(src, bits, dst, size)
* convert IPv4 network number from network to presentation format.
* pointer to dst, or NULL if an error occurred (check errno).
* note:
* network byte order assumed. this means 192.5.5.240/28 has
- * 0x11110000 in its fourth octet.
+ * 0b11110000 in its fourth octet.
* author:
* Paul Vixie (ISC), July 1996
*/
errno = EINVAL;
return (NULL);
}
+
if (bits == 0) {
if (size < sizeof "0")
goto emsgsize;
*dst++ = '0';
+ size--;
*dst = '\0';
}
/* Format whole octets. */
for (b = bits / 8; b > 0; b--) {
- if (size < sizeof "255.")
+ if (size <= sizeof "255.")
goto emsgsize;
t = dst;
dst += SPRINTF((dst, "%u", *src++));
/* Format partial octet. */
b = bits % 8;
if (b > 0) {
- if (size < sizeof ".255")
+ if (size <= sizeof ".255")
goto emsgsize;
t = dst;
if (dst != odst)
}
/* Format CIDR /width. */
- if (size < sizeof "/32")
+ if (size <= sizeof "/32")
goto emsgsize;
dst += SPRINTF((dst, "/%u", bits));
return (odst);
return (NULL);
}
+/*%
+ * static char *
+ * inet_net_ntop_ipv6(src, bits, fakebits, dst, size)
+ * convert IPv6 network number from network to presentation format.
+ * generates CIDR style result always. Picks the shortest representation
+ * unless the IP is really IPv4.
+ * always prints specified number of bits (bits).
+ * return:
+ * pointer to dst, or NULL if an error occurred (check errno).
+ * note:
+ * network byte order assumed. this means 192.5.5.240/28 has
+ * 0b11110000 in its fourth octet.
+ * author:
+ * Vadim Kogan (UCB), June 2001
+ * Original version (IPv4) by Paul Vixie (ISC), July 1996
+ */
+
+static char *
+inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size) {
+ u_int m;
+ int b;
+ int p;
+ int zero_s, zero_l, tmp_zero_s, tmp_zero_l;
+ int i;
+ int is_ipv4 = 0;
+ unsigned char inbuf[16];
+ char outbuf[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255/128")];
+ char *cp;
+ int words;
+ u_char *s;
+
+ if (bits < 0 || bits > 128) {
+ errno = EINVAL;
+ return (NULL);
+ }
+
+ cp = outbuf;
+
+ if (bits == 0) {
+ *cp++ = ':';
+ *cp++ = ':';
+ *cp = '\0';
+ } else {
+ /* Copy src to private buffer. Zero host part. */
+ p = (bits + 7) / 8;
+ memcpy(inbuf, src, p);
+ memset(inbuf + p, 0, 16 - p);
+ b = bits % 8;
+ if (b != 0) {
+ m = ~0 << (8 - b);
+ inbuf[p-1] &= m;
+ }
+
+ s = inbuf;
+
+ /* how many words need to be displayed in output */
+ words = (bits + 15) / 16;
+ if (words == 1)
+ words = 2;
+
+ /* Find the longest substring of zero's */
+ zero_s = zero_l = tmp_zero_s = tmp_zero_l = 0;
+ for (i = 0; i < (words * 2); i += 2) {
+ if ((s[i] | s[i+1]) == 0) {
+ if (tmp_zero_l == 0)
+ tmp_zero_s = i / 2;
+ tmp_zero_l++;
+ } else {
+ if (tmp_zero_l && zero_l < tmp_zero_l) {
+ zero_s = tmp_zero_s;
+ zero_l = tmp_zero_l;
+ tmp_zero_l = 0;
+ }
+ }
+ }
+
+ if (tmp_zero_l && zero_l < tmp_zero_l) {
+ zero_s = tmp_zero_s;
+ zero_l = tmp_zero_l;
+ }
+
+ if (zero_l != words && zero_s == 0 && ((zero_l == 6) ||
+ ((zero_l == 5 && s[10] == 0xff && s[11] == 0xff) ||
+ ((zero_l == 7 && s[14] != 0 && s[15] != 1)))))
+ is_ipv4 = 1;
+
+ /* Format whole words. */
+ for (p = 0; p < words; p++) {
+ if (zero_l != 0 && p >= zero_s && p < zero_s + zero_l) {
+ /* Time to skip some zeros */
+ if (p == zero_s)
+ *cp++ = ':';
+ if (p == words - 1)
+ *cp++ = ':';
+ s++;
+ s++;
+ continue;
+ }
+
+ if (is_ipv4 && p > 5 ) {
+ *cp++ = (p == 6) ? ':' : '.';
+ cp += SPRINTF((cp, "%u", *s++));
+ /* we can potentially drop the last octet */
+ if (p != 7 || bits > 120) {
+ *cp++ = '.';
+ cp += SPRINTF((cp, "%u", *s++));
+ }
+ } else {
+ if (cp != outbuf)
+ *cp++ = ':';
+ cp += SPRINTF((cp, "%x", *s * 256 + s[1]));
+ s += 2;
+ }
+ }
+ }
+ /* Format CIDR /width. */
+ sprintf(cp, "/%u", bits);
+ if (strlen(outbuf) + 1 > size)
+ goto emsgsize;
+ strcpy(dst, outbuf);
+
+ return (dst);
+
+emsgsize:
+ errno = EMSGSIZE;
+ return (NULL);
+}
+
/*
* Weak aliases for applications that use certain private entry points,
* and fail to include <arpa/inet.h>.
*/
#undef inet_net_ntop
__weak_reference(__inet_net_ntop, inet_net_ntop);
+
+/*! \file */
/*
- * Copyright (c) 1996 by Internet Software Consortium.
+ * Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (c) 1996,1999 by Internet Software Consortium.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
- * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
- * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
- * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
- * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
- * SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
-static const char orig_rcsid[] = "From Id: inet_neta.c,v 8.2 1996/08/08 06:54:44 vixie Exp";
+static const char rcsid[] = "$Id: inet_neta.c,v 1.2.18.1 2005/04/27 05:00:53 sra Exp $";
#endif
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/inet_neta.c,v 1.9 2003/01/01 18:48:43 schweikh Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/inet/inet_neta.c,v 1.3 2007/06/03 17:20:26 ume Exp $");
+
+#include "port_before.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>
+#include "port_after.h"
+
#ifdef SPRINTF_CHAR
# define SPRINTF(x) strlen(sprintf/**/x)
#else
# define SPRINTF(x) ((size_t)sprintf x)
#endif
-/*
+/*%
* char *
* inet_neta(src, dst, size)
* format an in_addr_t network number into presentation format.
*/
#undef inet_neta
__weak_reference(__inet_neta, inet_neta);
+
+/*! \file */
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*/
#if defined(LIBC_SCCS) && !defined(lint)
-static char sccsid[] = "@(#)inet_netof.c 8.1 (Berkeley) 6/4/93";
+static const char sccsid[] = "@(#)inet_netof.c 8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/inet_netof.c,v 1.5 2002/03/22 21:52:29 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/inet/inet_netof.c,v 1.4 2007/06/03 17:20:26 ume Exp $");
+
+#include "port_before.h"
#include <sys/param.h>
#include <netinet/in.h>
#include <arpa/inet.h>
-/*
+#include "port_after.h"
+
+/*%
* Return the network number from an internet
* address; handles class a/b/c network #'s.
*/
*/
#undef inet_netof
__weak_reference(__inet_netof, inet_netof);
+
+/*! \file */
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*/
#if defined(LIBC_SCCS) && !defined(lint)
-static char sccsid[] = "@(#)inet_network.c 8.1 (Berkeley) 6/4/93";
+static const char sccsid[] = "@(#)inet_network.c 8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/inet_network.c,v 1.9 2002/03/22 21:52:29 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/inet/inet_network.c,v 1.5 2008/01/14 22:55:20 cperciva Exp $");
+
+#include "port_before.h"
#include <sys/types.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <ctype.h>
-/*
+#include "port_after.h"
+
+/*%
* Internet network address interpretation routine.
* The library routines call this routine to interpret
* network numbers.
in_addr_t val, base, n;
char c;
in_addr_t parts[4], *pp = parts;
- int i;
+ int i, digit;
again:
- val = 0; base = 10;
+ val = 0; base = 10; digit = 0;
if (*cp == '0')
- base = 8, cp++;
+ digit = 1, base = 8, cp++;
if (*cp == 'x' || *cp == 'X')
base = 16, cp++;
while ((c = *cp) != 0) {
if (isdigit((unsigned char)c)) {
+ if (base == 8U && (c == '8' || c == '9'))
+ return (INADDR_NONE);
val = (val * base) + (c - '0');
cp++;
+ digit = 1;
continue;
}
- if (base == 16 && isxdigit((unsigned char)c)) {
- val = (val << 4) + (c + 10 - (islower((unsigned char)c) ? 'a' : 'A'));
+ if (base == 16U && isxdigit((unsigned char)c)) {
+ val = (val << 4) +
+ (c + 10 - (islower((unsigned char)c) ? 'a' : 'A'));
cp++;
+ digit = 1;
continue;
}
break;
}
+ if (!digit)
+ return (INADDR_NONE);
+ if (pp >= parts + 4 || val > 0xffU)
+ return (INADDR_NONE);
if (*cp == '.') {
- if (pp >= parts + 3)
- return (INADDR_NONE);
*pp++ = val, cp++;
goto again;
}
- if (*cp && !isspace((unsigned char)*cp))
+ if (*cp && !isspace(*cp&0xff))
return (INADDR_NONE);
*pp++ = val;
n = pp - parts;
+ if (n > 4U)
+ return (INADDR_NONE);
for (val = 0, i = 0; i < n; i++) {
val <<= 8;
val |= parts[i] & 0xff;
*/
#undef inet_network
__weak_reference(__inet_network, inet_network);
+
+/*! \file */
---- inet_network.c.orig 2003-05-20 15:22:14.000000000 -0700
-+++ inet_network.c 2005-02-24 16:50:11.000000000 -0800
-@@ -37,6 +37,8 @@
- #include <sys/cdefs.h>
- __FBSDID("$FreeBSD: src/lib/libc/net/inet_network.c,v 1.9 2002/03/22 21:52:29 obrien Exp $");
+--- inet_network.c.orig 2008-09-01 20:56:15.000000000 -0700
++++ inet_network.c 2008-09-01 20:57:20.000000000 -0700
+@@ -29,6 +29,10 @@
-+#include "xlocale_private.h"
+ #if defined(LIBC_SCCS) && !defined(lint)
+ static const char sccsid[] = "@(#)inet_network.c 8.1 (Berkeley) 6/4/93";
+
- #include <sys/types.h>
- #include <netinet/in.h>
- #include <arpa/inet.h>
-@@ -55,6 +57,7 @@
- char c;
- in_addr_t parts[4], *pp = parts;
- int i;
-+ locale_t loc = __current_locale();
-
- again:
- val = 0; base = 10;
-@@ -63,13 +66,13 @@
- if (*cp == 'x' || *cp == 'X')
- base = 16, cp++;
- while ((c = *cp) != 0) {
-- if (isdigit((unsigned char)c)) {
-+ if (isdigit_l((unsigned char)c, loc)) {
- val = (val * base) + (c - '0');
- cp++;
- continue;
- }
-- if (base == 16 && isxdigit((unsigned char)c)) {
-- val = (val << 4) + (c + 10 - (islower((unsigned char)c) ? 'a' : 'A'));
-+ if (base == 16 && isxdigit_l((unsigned char)c, loc)) {
-+ val = (val << 4) + (c + 10 - (islower_l((unsigned char)c, loc) ? 'a' : 'A'));
- cp++;
- continue;
- }
-@@ -81,7 +84,7 @@
- *pp++ = val, cp++;
- goto again;
- }
-- if (*cp && !isspace((unsigned char)*cp))
-+ if (*cp && !isspace_l((unsigned char)*cp, loc))
- return (INADDR_NONE);
- *pp++ = val;
- n = pp - parts;
++/* the algorithms only can deal with ASCII, so we optimize for it */
++#define USE_ASCII
++
+ #endif /* LIBC_SCCS and not lint */
+ #include <sys/cdefs.h>
+ __FBSDID("$FreeBSD: src/lib/libc/inet/inet_network.c,v 1.5 2008/01/14 22:55:20 cperciva Exp $");
-/*
+/*-
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*/
#if defined(LIBC_SCCS) && !defined(lint)
-static char sccsid[] = "@(#)inet_ntoa.c 8.1 (Berkeley) 6/4/93";
+static const char sccsid[] = "@(#)inet_ntoa.c 8.1 (Berkeley) 6/4/93";
+static const char rcsid[] = "$Id: inet_ntoa.c,v 1.1.352.1 2005/04/27 05:00:54 sra Exp $";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/inet_ntoa.c,v 1.6 2002/03/22 21:52:29 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/inet/inet_ntoa.c,v 1.6 2007/06/14 07:13:28 delphij Exp $");
+
+#include "port_before.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
+
#include <stdio.h>
#include <string.h>
-/*
+#include "port_after.h"
+
+/*%
* Convert network-format internet address
* to base 256 d.d.d.d representation.
*/
-char *
-inet_ntoa(in)
- struct in_addr in;
-{
+/*const*/ char *
+inet_ntoa(struct in_addr in) {
static char ret[18];
strcpy(ret, "[inet_ntoa error]");
return (ret);
}
+char *
+inet_ntoa_r(struct in_addr in, char *buf, socklen_t size)
+{
+
+ (void) inet_ntop(AF_INET, &in, buf, size);
+ return (buf);
+}
+
/*
* Weak aliases for applications that use certain private entry points,
* and fail to include <arpa/inet.h>.
*/
#undef inet_ntoa
__weak_reference(__inet_ntoa, inet_ntoa);
+__weak_reference(__inet_ntoa_r, inet_ntoa_r);
+
+/*! \file */
--- /dev/null
+--- inet_ntoa.c.orig 2008-09-01 21:00:28.000000000 -0700
++++ inet_ntoa.c 2008-09-01 21:00:53.000000000 -0700
+@@ -59,6 +59,7 @@ inet_ntoa(struct in_addr in) {
+ return (ret);
+ }
+
++#if 0
+ char *
+ inet_ntoa_r(struct in_addr in, char *buf, socklen_t size)
+ {
+@@ -66,6 +67,7 @@ inet_ntoa_r(struct in_addr in, char *buf
+ (void) inet_ntop(AF_INET, &in, buf, size);
+ return (buf);
+ }
++#endif
+
+ /*
+ * Weak aliases for applications that use certain private entry points,
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
static char sccsid[] = "@(#)linkaddr.c 8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/linkaddr.c,v 1.3 2002/03/21 18:49:23 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/net/linkaddr.c,v 1.4 2007/01/09 00:28:02 imp Exp $");
#include <sys/types.h>
#include <sys/socket.h>
/*
- * Copyright (c) 1996, 1998 by Internet Software Consortium.
+ * Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (c) 1996-1999 by Internet Software Consortium.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
- * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
- * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
- * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
- * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
- * SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#if defined(LIBC_SCCS) && !defined(lint)
+static const char rcsid[] = "$Id: nsap_addr.c,v 1.3.18.2 2005/07/28 07:38:08 marka Exp $";
+#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/nsap_addr.c,v 1.9 2002/03/22 21:52:29 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/inet/nsap_addr.c,v 1.3 2007/06/03 17:20:26 ume Exp $");
+
+#include "port_before.h"
#include <sys/types.h>
#include <sys/param.h>
#include <sys/socket.h>
+
#include <netinet/in.h>
#include <arpa/inet.h>
#include <arpa/nameser.h>
+
#include <ctype.h>
#include <resolv.h>
+#include <resolv_mt.h>
+
+#include "port_after.h"
static char
-xtob(c)
- int c;
-{
+xtob(int c) {
return (c - (((c >= '0') && (c <= '9')) ? '0' : '7'));
}
u_int
-inet_nsap_addr(ascii, binary, maxlen)
- const char *ascii;
- u_char *binary;
- int maxlen;
-{
+inet_nsap_addr(const char *ascii, u_char *binary, int maxlen) {
u_char c, nib;
u_int len = 0;
+ if (ascii[0] != '0' || (ascii[1] != 'x' && ascii[1] != 'X'))
+ return (0);
+ ascii += 2;
+
while ((c = *ascii++) != '\0' && len < (u_int)maxlen) {
if (c == '.' || c == '+' || c == '/')
continue;
}
char *
-inet_nsap_ntoa(binlen, binary, ascii)
- int binlen;
- const u_char *binary;
- char *ascii;
-{
+inet_nsap_ntoa(int binlen, const u_char *binary, char *ascii) {
int nib;
int i;
- static char tmpbuf[255*3];
+ char *tmpbuf = inet_nsap_ntoa_tmpbuf;
char *start;
if (ascii)
start = tmpbuf;
}
+ *ascii++ = '0';
+ *ascii++ = 'x';
+
if (binlen > 255)
binlen = 255;
__weak_reference(__inet_nsap_addr, inet_nsap_addr);
#undef inet_nsap_ntoa
__weak_reference(__inet_nsap_ntoa, inet_nsap_ntoa);
+
+/*! \file */
---- nsap_addr.c.orig 2003-05-20 15:22:14.000000000 -0700
-+++ nsap_addr.c 2005-02-24 16:48:08.000000000 -0800
-@@ -18,6 +18,8 @@
- #include <sys/cdefs.h>
- __FBSDID("$FreeBSD: src/lib/libc/net/nsap_addr.c,v 1.9 2002/03/22 21:52:29 obrien Exp $");
+--- nsap_addr.c.orig 2008-09-01 21:04:36.000000000 -0700
++++ nsap_addr.c 2008-09-01 21:11:10.000000000 -0700
+@@ -17,6 +17,10 @@
-+#include "xlocale_private.h"
+ #if defined(LIBC_SCCS) && !defined(lint)
+ static const char rcsid[] = "$Id: nsap_addr.c,v 1.3.18.2 2005/07/28 07:38:08 marka Exp $";
++
++/* the algorithms only can deal with ASCII, so we optimize for it */
++#define USE_ASCII
+
- #include <sys/types.h>
- #include <sys/param.h>
- #include <sys/socket.h>
-@@ -26,6 +28,7 @@
- #include <arpa/nameser.h>
+ #endif /* LIBC_SCCS and not lint */
+ #include <sys/cdefs.h>
+ __FBSDID("$FreeBSD: src/lib/libc/inet/nsap_addr.c,v 1.3 2007/06/03 17:20:26 ume Exp $");
+@@ -33,10 +37,12 @@ __FBSDID("$FreeBSD: src/lib/libc/inet/ns
+
#include <ctype.h>
#include <resolv.h>
-+#include <stdlib.h>
+-#include <resolv_mt.h>
++//#include <resolv_mt.h>
- static char
- xtob(c)
-@@ -42,20 +45,21 @@
- {
- u_char c, nib;
- u_int len = 0;
-+ locale_t loc = __current_locale();
+ #include "port_after.h"
- while ((c = *ascii++) != '\0' && len < (u_int)maxlen) {
- if (c == '.' || c == '+' || c == '/')
- continue;
- if (!isascii(c))
- return (0);
-- if (islower(c))
-- c = toupper(c);
-- if (isxdigit(c)) {
-+ if (islower_l(c, loc))
-+ c = toupper_l(c, loc);
-+ if (isxdigit_l(c, loc)) {
- nib = xtob(c);
- c = *ascii++;
- if (c != '\0') {
-- c = toupper(c);
-- if (isxdigit(c)) {
-+ c = toupper_l(c, loc);
-+ if (isxdigit_l(c, loc)) {
- *binary++ = (nib << 4) | xtob(c);
- len++;
- } else
-@@ -78,9 +82,14 @@
- {
++#include <stdlib.h>
++
+ static char
+ xtob(int c) {
+ return (c - (((c >= '0') && (c <= '9')) ? '0' : '7'));
+@@ -82,9 +88,13 @@ char *
+ inet_nsap_ntoa(int binlen, const u_char *binary, char *ascii) {
int nib;
int i;
-- static char tmpbuf[255*3];
-+ static char *tmpbuf = NULL;
+- char *tmpbuf = inet_nsap_ntoa_tmpbuf;
++ char *tmpbuf = NULL;
char *start;
+ if (tmpbuf == NULL) {
+ tmpbuf = malloc(255*3);
-+ if (tmpbuf == NULL)
-+ return NULL;
++ if (tmpbuf == NULL) return NULL;
+ }
if (ascii)
start = ascii;
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
static char sccsid[] = "@(#)recv.c 8.2 (Berkeley) 2/21/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/recv.c,v 1.3 2002/03/22 21:52:29 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/net/recv.c,v 1.4 2007/01/09 00:28:02 imp Exp $");
#include "namespace.h"
#include <sys/types.h>
---- recv.c.orig 2006-09-16 19:12:41.000000000 -0700
-+++ recv.c 2006-09-17 00:11:11.000000000 -0700
-@@ -44,11 +44,21 @@
+--- recv.c.orig 2008-09-01 21:12:58.000000000 -0700
++++ recv.c 2008-09-01 21:13:04.000000000 -0700
+@@ -40,11 +40,21 @@ __FBSDID("$FreeBSD: src/lib/libc/net/rec
#include <stddef.h>
#include "un-namespace.h"
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
static char sccsid[] = "@(#)send.c 8.2 (Berkeley) 2/21/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/send.c,v 1.3 2002/03/22 21:52:30 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/net/send.c,v 1.4 2007/01/09 00:28:02 imp Exp $");
#include "namespace.h"
#include <sys/types.h>
---- send.c.orig 2006-09-16 19:12:41.000000000 -0700
-+++ send.c 2006-09-17 00:07:27.000000000 -0700
-@@ -44,11 +44,21 @@
+--- send.c.orig 2008-09-01 21:13:33.000000000 -0700
++++ send.c 2008-09-01 21:13:41.000000000 -0700
+@@ -40,11 +40,21 @@ __FBSDID("$FreeBSD: src/lib/libc/net/sen
#include <stddef.h>
#include "un-namespace.h"
.Sh LEGACY SYNOPSIS
.Fd #include <sys/types.h>
.Fd #include <sys/socket.h>
-.Fd #include <sys/netinet/in.h>
-.Fd #include <sys/arpa/inet.h>
+.Fd #include <netinet/in.h>
+.Fd #include <arpa/inet.h>
.Pp
These include files are necessary for all functions.
.Sh SEE ALSO
-/* $KAME: inet_addr.c,v 1.5 2001/08/20 02:32:40 itojun Exp $ */
-
/*
- * ++Copyright++ 1983, 1990, 1993
- * -
* Copyright (c) 1983, 1990, 1993
* The Regents of the University of California. All rights reserved.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- * -
+ */
+
+/*
* Portions Copyright (c) 1993 by Digital Equipment Corporation.
*
* Permission to use, copy, modify, and distribute this software for any
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
- * -
- * --Copyright--
+ */
+
+/*
+ * Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
+ * Portions Copyright (c) 1996-1999 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
-static char sccsid[] = "@(#)inet_addr.c 8.1 (Berkeley) 6/17/93";
+static const char sccsid[] = "@(#)inet_addr.c 8.1 (Berkeley) 6/17/93";
+static const char rcsid[] = "$Id: inet_addr.c,v 1.4.18.1 2005/04/27 05:00:52 sra Exp $";
#endif /* LIBC_SCCS and not lint */
+
+/* the algorithms only can deal with ASCII, so we optimize for it */
+#define USE_ASCII
+
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/inet_addr.c,v 1.16 2002/04/19 04:46:20 suz Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/inet/inet_addr.c,v 1.4 2007/06/03 17:20:26 ume Exp $");
-#include "xlocale_private.h"
+#include "port_before.h"
+#include <sys/types.h>
#include <sys/param.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <ctype.h>
-#include <errno.h>
-#include <string.h>
-#include <stdlib.h>
-/*
- * ASCII internet address interpretation routine.
+#include "port_after.h"
+
+/*%
+ * Ascii internet address interpretation routine.
* The value returned is in network order.
*/
in_addr_t /* XXX should be struct in_addr :( */
-inet_addr(cp)
- const char *cp;
-{
+inet_addr(const char *cp) {
struct in_addr val;
if (inet_aton(cp, &val))
return (INADDR_NONE);
}
-/*
- * Check whether "cp" is a valid ASCII representation
+/*%
+ * Check whether "cp" is a valid ascii representation
* of an Internet address and convert to a binary address.
* Returns 1 if the address is valid, 0 if not.
* This replaces inet_addr, the return value from which
* cannot distinguish between failure and a local broadcast address.
*/
int
-inet_aton(cp, addr)
- const char *cp;
- struct in_addr *addr;
-{
- u_long parts[4];
- in_addr_t val;
- char *c;
- char *endptr;
- int gotend, n;
- locale_t loc = __current_locale();
-
- c = (char *)cp;
- n = 0;
- /*
- * Run through the string, grabbing numbers until
- * the end of the string, or some error
- */
- gotend = 0;
- while (!gotend) {
- errno = 0;
- val = strtoul_l(c, &endptr, 0, loc);
-
- if (errno == ERANGE) /* Fail completely if it overflowed. */
- return (0);
-
- /*
- * If the whole string is invalid, endptr will equal
- * c.. this way we can make sure someone hasn't
- * gone '.12' or something which would get past
- * the next check.
+inet_aton(const char *cp, struct in_addr *addr) {
+ u_long val;
+ int base, n;
+ char c;
+ u_int8_t parts[4];
+ u_int8_t *pp = parts;
+ int digit;
+
+ c = *cp;
+ for (;;) {
+ /*
+ * Collect number up to ``.''.
+ * Values are specified as for C:
+ * 0x=hex, 0=octal, isdigit=decimal.
*/
- if (endptr == c)
+ if (!isdigit((unsigned char)c))
return (0);
- parts[n] = val;
- c = endptr;
-
- /* Check the next character past the previous number's end */
- switch (*c) {
- case '.' :
- /* Make sure we only do 3 dots .. */
- if (n == 3) /* Whoops. Quit. */
- return (0);
- n++;
- c++;
- break;
-
- case '\0':
- gotend = 1;
- break;
-
- default:
- if (isspace_l((unsigned char)*c, loc)) {
- gotend = 1;
- break;
+ val = 0; base = 10; digit = 0;
+ if (c == '0') {
+ c = *++cp;
+ if (c == 'x' || c == 'X')
+ base = 16, c = *++cp;
+ else {
+ base = 8;
+ digit = 1 ;
+ }
+ }
+ for (;;) {
+ if (isascii(c) && isdigit((unsigned char)c)) {
+ if (base == 8 && (c == '8' || c == '9'))
+ return (0);
+ val = (val * base) + (c - '0');
+ c = *++cp;
+ digit = 1;
+ } else if (base == 16 && isascii(c) &&
+ isxdigit((unsigned char)c)) {
+ val = (val << 4) |
+ (c + 10 - (islower((unsigned char)c) ? 'a' : 'A'));
+ c = *++cp;
+ digit = 1;
} else
- return (0); /* Invalid character, so fail */
+ break;
}
-
+ if (c == '.') {
+ /*
+ * Internet format:
+ * a.b.c.d
+ * a.b.c (with c treated as 16 bits)
+ * a.b (with b treated as 24 bits)
+ */
+ if (pp >= parts + 3 || val > 0xffU)
+ return (0);
+ *pp++ = val;
+ c = *++cp;
+ } else
+ break;
}
-
+ /*
+ * Check for trailing characters.
+ */
+ if (c != '\0' && (!isascii(c) || !isspace((unsigned char)c)))
+ return (0);
+ /*
+ * Did we get a valid digit?
+ */
+ if (!digit)
+ return (0);
/*
* Concoct the address according to
* the number of parts specified.
*/
-
+ n = pp - parts + 1;
switch (n) {
- case 0: /* a -- 32 bits */
- /*
- * Nothing is necessary here. Overflow checking was
- * already done in strtoul().
- */
+ case 1: /*%< a -- 32 bits */
break;
- case 1: /* a.b -- 8.24 bits */
- if (val > 0xffffff || parts[0] > 0xff)
+
+ case 2: /*%< a.b -- 8.24 bits */
+ if (val > 0xffffffU)
return (0);
val |= parts[0] << 24;
break;
- case 2: /* a.b.c -- 8.8.16 bits */
- if (val > 0xffff || parts[0] > 0xff || parts[1] > 0xff)
+ case 3: /*%< a.b.c -- 8.8.16 bits */
+ if (val > 0xffffU)
return (0);
val |= (parts[0] << 24) | (parts[1] << 16);
break;
- case 3: /* a.b.c.d -- 8.8.8.8 bits */
- if (val > 0xff || parts[0] > 0xff || parts[1] > 0xff ||
- parts[2] > 0xff)
+ case 4: /*%< a.b.c.d -- 8.8.8.8 bits */
+ if (val > 0xffU)
return (0);
val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8);
break;
}
-
if (addr != NULL)
addr->s_addr = htonl(val);
return (1);
__weak_reference(__inet_addr, inet_addr);
#undef inet_aton
__weak_reference(__inet_aton, inet_aton);
+
+/*! \file */
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*/
#if defined(LIBC_SCCS) && !defined(lint)
-static char sccsid[] = "@(#)inet_network.c 8.1 (Berkeley) 6/4/93";
+static const char sccsid[] = "@(#)inet_network.c 8.1 (Berkeley) 6/4/93";
+
+/* the algorithms only can deal with ASCII, so we optimize for it */
+#define USE_ASCII
+
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/inet_network.c,v 1.9 2002/03/22 21:52:29 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/inet/inet_network.c,v 1.5 2008/01/14 22:55:20 cperciva Exp $");
-#include "xlocale_private.h"
+#include "port_before.h"
#include <sys/types.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <ctype.h>
-/*
+#include "port_after.h"
+
+/*%
* Internet network address interpretation routine.
* The library routines call this routine to interpret
* network numbers.
in_addr_t val, base, n;
char c;
in_addr_t parts[4], *pp = parts;
- int i;
- locale_t loc = __current_locale();
+ int i, digit;
again:
- val = 0; base = 10;
+ val = 0; base = 10; digit = 0;
if (*cp == '0')
- base = 8, cp++;
+ digit = 1, base = 8, cp++;
if (*cp == 'x' || *cp == 'X')
base = 16, cp++;
while ((c = *cp) != 0) {
- if (isdigit_l((unsigned char)c, loc)) {
+ if (isdigit((unsigned char)c)) {
+ if (base == 8U && (c == '8' || c == '9'))
+ return (INADDR_NONE);
val = (val * base) + (c - '0');
cp++;
+ digit = 1;
continue;
}
- if (base == 16 && isxdigit_l((unsigned char)c, loc)) {
- val = (val << 4) + (c + 10 - (islower_l((unsigned char)c, loc) ? 'a' : 'A'));
+ if (base == 16U && isxdigit((unsigned char)c)) {
+ val = (val << 4) +
+ (c + 10 - (islower((unsigned char)c) ? 'a' : 'A'));
cp++;
+ digit = 1;
continue;
}
break;
}
+ if (!digit)
+ return (INADDR_NONE);
+ if (pp >= parts + 4 || val > 0xffU)
+ return (INADDR_NONE);
if (*cp == '.') {
- if (pp >= parts + 3)
- return (INADDR_NONE);
*pp++ = val, cp++;
goto again;
}
- if (*cp && !isspace_l((unsigned char)*cp, loc))
+ if (*cp && !isspace(*cp&0xff))
return (INADDR_NONE);
*pp++ = val;
n = pp - parts;
+ if (n > 4U)
+ return (INADDR_NONE);
for (val = 0, i = 0; i < n; i++) {
val <<= 8;
val |= parts[i] & 0xff;
*/
#undef inet_network
__weak_reference(__inet_network, inet_network);
+
+/*! \file */
+++ /dev/null
-./inet_ntoa.c
\ No newline at end of file
--- /dev/null
+/*-
+ * Copyright (c) 1983, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static const char sccsid[] = "@(#)inet_ntoa.c 8.1 (Berkeley) 6/4/93";
+static const char rcsid[] = "$Id: inet_ntoa.c,v 1.1.352.1 2005/04/27 05:00:54 sra Exp $";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/inet/inet_ntoa.c,v 1.6 2007/06/14 07:13:28 delphij Exp $");
+
+#include "port_before.h"
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <stdio.h>
+#include <string.h>
+
+#include "port_after.h"
+
+/*%
+ * Convert network-format internet address
+ * to base 256 d.d.d.d representation.
+ */
+/*const*/ char *
+inet_ntoa(struct in_addr in) {
+ static char ret[18];
+
+ strcpy(ret, "[inet_ntoa error]");
+ (void) inet_ntop(AF_INET, &in, ret, sizeof ret);
+ return (ret);
+}
+
+#if 0
+char *
+inet_ntoa_r(struct in_addr in, char *buf, socklen_t size)
+{
+
+ (void) inet_ntop(AF_INET, &in, buf, size);
+ return (buf);
+}
+#endif
+
+/*
+ * Weak aliases for applications that use certain private entry points,
+ * and fail to include <arpa/inet.h>.
+ */
+#undef inet_ntoa
+__weak_reference(__inet_ntoa, inet_ntoa);
+__weak_reference(__inet_ntoa_r, inet_ntoa_r);
+
+/*! \file */
/*
- * Copyright (c) 1996, 1998 by Internet Software Consortium.
+ * Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (c) 1996-1999 by Internet Software Consortium.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
- * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
- * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
- * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
- * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
- * SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#if defined(LIBC_SCCS) && !defined(lint)
+static const char rcsid[] = "$Id: nsap_addr.c,v 1.3.18.2 2005/07/28 07:38:08 marka Exp $";
+
+/* the algorithms only can deal with ASCII, so we optimize for it */
+#define USE_ASCII
+
+#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/nsap_addr.c,v 1.9 2002/03/22 21:52:29 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/inet/nsap_addr.c,v 1.3 2007/06/03 17:20:26 ume Exp $");
-#include "xlocale_private.h"
+#include "port_before.h"
#include <sys/types.h>
#include <sys/param.h>
#include <sys/socket.h>
+
#include <netinet/in.h>
#include <arpa/inet.h>
#include <arpa/nameser.h>
+
#include <ctype.h>
#include <resolv.h>
+//#include <resolv_mt.h>
+
+#include "port_after.h"
+
#include <stdlib.h>
static char
-xtob(c)
- int c;
-{
+xtob(int c) {
return (c - (((c >= '0') && (c <= '9')) ? '0' : '7'));
}
u_int
-inet_nsap_addr(ascii, binary, maxlen)
- const char *ascii;
- u_char *binary;
- int maxlen;
-{
+inet_nsap_addr(const char *ascii, u_char *binary, int maxlen) {
u_char c, nib;
u_int len = 0;
- locale_t loc = __current_locale();
+
+ if (ascii[0] != '0' || (ascii[1] != 'x' && ascii[1] != 'X'))
+ return (0);
+ ascii += 2;
while ((c = *ascii++) != '\0' && len < (u_int)maxlen) {
if (c == '.' || c == '+' || c == '/')
continue;
if (!isascii(c))
return (0);
- if (islower_l(c, loc))
- c = toupper_l(c, loc);
- if (isxdigit_l(c, loc)) {
+ if (islower(c))
+ c = toupper(c);
+ if (isxdigit(c)) {
nib = xtob(c);
c = *ascii++;
if (c != '\0') {
- c = toupper_l(c, loc);
- if (isxdigit_l(c, loc)) {
+ c = toupper(c);
+ if (isxdigit(c)) {
*binary++ = (nib << 4) | xtob(c);
len++;
} else
}
char *
-inet_nsap_ntoa(binlen, binary, ascii)
- int binlen;
- const u_char *binary;
- char *ascii;
-{
+inet_nsap_ntoa(int binlen, const u_char *binary, char *ascii) {
int nib;
int i;
- static char *tmpbuf = NULL;
+ char *tmpbuf = NULL;
char *start;
if (tmpbuf == NULL) {
tmpbuf = malloc(255*3);
- if (tmpbuf == NULL)
- return NULL;
+ if (tmpbuf == NULL) return NULL;
}
if (ascii)
start = ascii;
start = tmpbuf;
}
+ *ascii++ = '0';
+ *ascii++ = 'x';
+
if (binlen > 255)
binlen = 255;
__weak_reference(__inet_nsap_addr, inet_nsap_addr);
#undef inet_nsap_ntoa
__weak_reference(__inet_nsap_ntoa, inet_nsap_ntoa);
+
+/*! \file */
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
static char sccsid[] = "@(#)recv.c 8.2 (Berkeley) 2/21/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/recv.c,v 1.3 2002/03/22 21:52:29 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/net/recv.c,v 1.4 2007/01/09 00:28:02 imp Exp $");
#include "namespace.h"
#include <sys/types.h>
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
static char sccsid[] = "@(#)send.c 8.2 (Berkeley) 2/21/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/net/send.c,v 1.3 2002/03/22 21:52:30 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/net/send.c,v 1.4 2007/01/09 00:28:02 imp Exp $");
#include "namespace.h"
#include <sys/types.h>
FBSDHDRS= msgcat.h
.include "Makefile.fbsd_end"
+CFLAGS-msgcat-fbsd.c += -D_DARWIN_UNLIMITED_STREAMS
+
# Install msgcat.h for usage by gencat (in adv_cmds)
LOCALHDRS+= ${.CURDIR}/nls/msgcat.h
MLINKS+= acl_set.3 acl_set_fd.3 \
acl_set.3 acl_set_fd_np.3 \
acl_set.3 acl_set_file.3 \
- acl_set.3 acl_set_link.3
+ acl_set.3 acl_set_link_np.3
MLINKS+= acl_valid.3 acl_valid_fd_np.3 \
.Xr acl_set_tag_type 3 ,
.Xr acl_to_text 3 ,
.Xr acl_valid 3 ,
-.Xr posix1e 3 ,
-.Xr acl 9
+.Xr posix1e 3
+.\".Xr acl 9
.Sh UNSUPPORTED FUNCTIONS
.Xr acl_calc_mask 3 ,
.Fn acl_delete_def_file
acl_set_qualifier(acl_entry_t entry, const void *tag_qualifier_p)
{
acl_tag_t tag_type;
- int error;
_ACL_VALIDATE_ENTRY(entry);
- if ((error = acl_get_tag_type(entry, &tag_type)) != 0)
- return(error);
+ if (acl_get_tag_type(entry, &tag_type) != 0)
+ return(-1);
switch(tag_type) {
case ACL_EXTENDED_ALLOW:
case ACL_EXTENDED_DENY:
bcopy(tag_qualifier_p, &entry->ae_applicable, sizeof(guid_t));
- error = 0;
break;
default:
- error = EINVAL;
+ errno = EINVAL;
+ return(-1);
}
- return(error);
+ return(0);
}
int
entry->ae_tag = tag_type;
break;
default:
- return(EINVAL);
+ errno = EINVAL;
+ return(-1);
}
return(0);
}
#include "aclvar.h"
static acl_t acl_get_file1(const char *path, acl_type_t acl_type, int follow);
-static int acl_set_file1(const char *path, acl_type_t acl_type, acl_t acl, int follow);
int
acl_delete_fd_np(int filedes, acl_type_t type)
return(acl_set_fd_np(fd, acl, ACL_TYPE_EXTENDED));
}
-static int
-acl_set_file1(const char *path, acl_type_t acl_type, acl_t acl, int follow)
+int
+acl_set_file(const char *path, acl_type_t acl_type, acl_t acl)
{
filesec_t fsec;
int error;
- if (follow == 0) { /* XXX this requires some thought - can links have ACLs? */
- errno = ENOTSUP;
- return(-1);
- }
-
if ((fsec = filesec_init()) == NULL)
return(-1);
if (filesec_set_property(fsec, FILESEC_ACL, &acl) != 0) {
return((error == 0) ? 0 : -1);
}
-int
-acl_set_file(const char *path, acl_type_t acl_type, acl_t acl)
-{
- return(acl_set_file1(path, acl_type, acl, 1));
-}
-
int
acl_set_link_np(const char *path, acl_type_t acl_type, acl_t acl)
{
- return(acl_set_file1(path, acl_type, acl, 0));
+ struct stat s;
+
+ if(lstat(path, &s) < 0)
+ return(-1);
+ if(S_ISLNK(s.st_mode)) {
+ errno = ENOTSUP;
+ return(-1);
+ }
+ return(acl_set_file(path, acl_type, acl));
}
/*
where as
.Fn acl_set_fd_np
allows the setting of ACLs of any type.
+.Pp
The
.Fn acl_set_link_np
function acts on a symlink rather than its target, if the target of the
path is a symlink.
+In the case of a symlink, this function will return an error
+because ACLs are not currently supported on symlinks.
+Note that the implementation is not atomic, and so the target could change
+between the time it is checked as not being a symlink, and the
+time the ACL is set.
.Sh RETURN VALUES
.Rv -std
.Sh ERRORS
.\"
.\" $FreeBSD: src/lib/libc/posix1e/acl_set_qualifier.3,v 1.5 2002/12/18 12:45:09 ru Exp $
.\"
-.Dd March 10, 2001
+.Dd Sept 2, 2008
.Dt ACL_SET_QUALIFIER 3
.Os
.Sh NAME
.Fa entry_d
to the value referred to by
.Fa tag_qualifier_p .
+.Pp
+The tag type of the ACL entry, as previously set with
+.Xr acl_set_tag_type 3 ,
+must be either
+.Dv ACL_EXTENDED_ALLOW
+or
+.Dv ACL_EXTENDED_DENY ,
+and
+.Fa tag_qualifier_p
+should be a pointer to a global unique identifier (of type
+.Ft guid_t ) .
.Sh RETURN VALUES
.Rv -std acl_set_qualifier
.Sh ERRORS
.Sh SEE ALSO
.Xr acl 3 ,
.Xr acl_get_qualifier 3 ,
+.Xr acl_set_tag_type 3 ,
.Xr posix1e 3
.Sh STANDARDS
POSIX.1e is described in IEEE POSIX.1e draft 17.
int i, error = 0, need_tag, ug_tag;
char *buf, *orig_buf;
char *entry, *field, *sub;
- uuid_t *uu;
+ uuid_t *uu = NULL;
struct passwd *tpass = NULL;
struct group *tgrp = NULL;
acl_entry_t acl_entry;
/* field 1: <user|group> */
field = strsep(&entry, ":");
- if((uu = calloc(1, sizeof(uuid_t))) == NULL)
+ if(uu)
+ bzero(uu, sizeof(uuid_t));
+ else if((uu = calloc(1, sizeof(uuid_t))) == NULL)
{
error = errno;
goto exit;
acl_set_qualifier(acl_entry, *uu);
}
exit:
+ if(uu)
+ free(uu);
free(orig_buf);
if (error)
{
{
_ACL_VALIDATE_ACL(acl);
- return(_ACL_HEADER_SIZE + acl->a_entries * _ACL_ENTRY_SIZE);
+ return(KAUTH_FILESEC_SIZE(acl->a_entries));
}
/*
- * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004, 2008 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <sys/kauth.h>
-#define _ACL_HEADER_SIZE sizeof(struct kauth_filesec)
-#define _ACL_ENTRY_SIZE sizeof(struct kauth_ace)
-
/*
* Internal access control list entry representation.
*/
/*
- * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
}
if (stacksize == 0) { /* main thread doesn't have pthread stack size */
- rlim_t rlim;
+ struct rlimit rlim;
if (0 == getrlimit(RLIMIT_STACK, &rlim))
- stacksize = rlim;
+ stacksize = rlim.rlim_cur;
}
uctx->uc_stack.ss_size = stacksize;
/*
- * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
setcontext((const ucontext_t *)ucp->uc_link);
- abort(); /* should never return from above call */
+ LIBC_ABORT("setcontext failed"); /* should never return from above call */
}
}
void
-makecontext(ucontext_t *ucp, void (*start)(void), int argc, ...)
+makecontext(ucontext_t *ucp, void (*start)(), int argc, ...)
{
mcontext_t mc;
char *sp;
+++ /dev/null
-.PATH: ${.CURDIR}/ppc/mach
-MDSRCS += mach_absolute_time.s
+++ /dev/null
-/*
- * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-#define __APPLE_API_PRIVATE
-#include <machine/cpu_capabilities.h>
-#undef __APPLE_API_PRIVATE
-
-.text
-.align 4
-.globl _mach_absolute_time
-_mach_absolute_time:
- ba _COMM_PAGE_ABSOLUTE_TIME
/*
- * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2003, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
int _cpu_capabilities = 0;
int _cpu_has_altivec = 0; // DEPRECATED: use _cpu_capabilities instead
+extern int _get_cpu_capabilities(void);
+
__private_extern__ void
_init_cpu_capabilities( void )
{
.text
.align 5
.globl _bzero
+ .globl ___bzero
// *************
// * B Z E R O *
// This function has migrated to the commpage.
_bzero: // void bzero(void *b, size_t len);
+___bzero:
ba _COMM_PAGE_BZERO
/* int32_t OSAtomicAdd32( int32_t theAmount, int32_t *theValue ); */
-#if defined(__ppc__)
MI_ENTRY_POINT(_OSAtomicAdd32)
ba _COMM_PAGE_ATOMIC_ADD32
-#elif defined(__ppc64__)
-MI_ENTRY_POINT(_OSAtomicAdd32)
- mflr r12 // save return address
- bla _COMM_PAGE_ATOMIC_ADD32
- mtlr r12
- extsw r3,r3 // sign extend return value
- blr
-#else
-#error undefined architecture
-#endif
/* int32_t OSAtomicOr32( int32_t theMask, int32_t *theValue ); */
cmpwi r3,0 // did swap occur?
beq-- 1b // compare-and-swap failed, try again
mtlr r12 // restore return adddress
-#if defined(__ppc__)
mr r3,r4 // return new value
-#elif defined(__ppc64__)
- extsw r3,r4 // sign extend return value
-#else
-#error undefined architecture
-#endif
blr
+++ /dev/null
-/*
- * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved.
- *
- * File: SYS.h
- *
- * Definition of the user side of the UNIX system call interface
- * for M98K.
- *
- * Errors are flagged by the location of the trap return (ie., which
- * instruction is executed upon rfi):
- *
- * SC PC + 4: Error (typically branch to cerror())
- * SC PC + 8: Success
- *
- * HISTORY
- * 18-Nov-92 Ben Fathi (benf@next.com)
- * Ported to m98k.
- *
- * 9-Jan-92 Peter King (king@next.com)
- * Created.
- */
-
-/*
- * Header files.
- */
-#include <architecture/ppc/mode_independent_asm.h>
-#import <sys/syscall.h>
-
-/* From rhapsody kernel mach/ppc/syscall_sw.h */
-#define kernel_trap_args_0
-#define kernel_trap_args_1
-#define kernel_trap_args_2
-#define kernel_trap_args_3
-#define kernel_trap_args_4
-#define kernel_trap_args_5
-#define kernel_trap_args_6
-#define kernel_trap_args_7
-
-/*
- * simple_kernel_trap -- Mach system calls with 8 or less args
- * Args are passed in a0 - a7, system call number in r0.
- * Do a "sc" instruction to enter kernel.
- */
-#define simple_kernel_trap(trap_name, trap_number) \
- .globl _##trap_name @\
-_##trap_name: @\
- li r0,trap_number @\
- sc @\
- blr
-
-#define kernel_trap_0(trap_name,trap_number) \
- simple_kernel_trap(trap_name,trap_number)
-
-#define kernel_trap_1(trap_name,trap_number) \
- simple_kernel_trap(trap_name,trap_number)
-
-#define kernel_trap_2(trap_name,trap_number) \
- simple_kernel_trap(trap_name,trap_number)
-
-#define kernel_trap_3(trap_name,trap_number) \
- simple_kernel_trap(trap_name,trap_number)
-
-#define kernel_trap_4(trap_name,trap_number) \
- simple_kernel_trap(trap_name,trap_number)
-
-#define kernel_trap_5(trap_name,trap_number) \
- simple_kernel_trap(trap_name,trap_number)
-
-#define kernel_trap_6(trap_name,trap_number) \
- simple_kernel_trap(trap_name,trap_number)
-
-#define kernel_trap_7(trap_name,trap_number) \
- simple_kernel_trap(trap_name,trap_number)
-
-#define kernel_trap_8(trap_name,trap_number) \
- simple_kernel_trap(trap_name,trap_number)
-
-#define kernel_trap_9(trap_name,trap_number) \
- simple_kernel_trap(trap_name,trap_number)
-
-/* End of rhapsody kernel mach/ppc/syscall_sw.h */
-
-/*
- * Macros.
- */
-
-/*
- * This is the same as SYSCALL, but it can call an alternate error
- * return function. It's generic to support potential future callers.
- */
-#define SYSCALL_ERR(name, nargs, error_ret) \
- .globl error_ret @\
- MI_ENTRY_POINT(_##name) @\
- kernel_trap_args_##nargs @\
- li r0,SYS_##name @\
- sc @\
- b 1f @\
- blr @\
-1: MI_BRANCH_EXTERNAL(error_ret)
-
-#define SYSCALL(name, nargs) \
- .globl cerror @\
- MI_ENTRY_POINT(_##name) @\
- kernel_trap_args_##nargs @\
- li r0,SYS_##name @\
- sc @\
- b 1f @\
- blr @\
-1: MI_BRANCH_EXTERNAL(cerror)
-
-
-#define SYSCALL_NONAME(name, nargs) \
- .globl cerror @\
- kernel_trap_args_##nargs @\
- li r0,SYS_##name @\
- sc @\
- b 1f @\
- b 2f @\
-1: MI_BRANCH_EXTERNAL(cerror) @\
-2:
-
-
-#define PSEUDO(pseudo, name, nargs) \
- .globl _##pseudo @\
- .text @\
- .align 2 @\
-_##pseudo: @\
- SYSCALL_NONAME(name, nargs)
-
-#define PSEUDO_ERR(pseudo, name, nargs, error_ret) \
- .globl _##pseudo @\
- .globl error_ret @\
- .text @\
- .align 2 @\
-_##pseudo: @\
- kernel_trap_args_##nargs @\
- li r0,SYS_##name @\
- sc @\
- b 1f @\
- blr @\
-1: MI_BRANCH_EXTERNAL(error_ret)
-
-
-#undef END
-#import <mach/ppc/syscall_sw.h>
*/
#include <architecture/ppc/mode_independent_asm.h>
-#include "SYS.h"
#include "_setjmp.h"
/*
*/
/* Copyright 1998 Apple Computer, Inc. */
-#include "SYS.h"
+#include <architecture/ppc/mode_independent_asm.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
*/
#include <architecture/ppc/mode_independent_asm.h>
-#include "SYS.h"
#include "_setjmp.h"
/*
+++ /dev/null
-# searching ppc directory as a fallback to avoid unnecessary code duplication
-.PATH: ${.CURDIR}/ppc/mach
-
-MDSRCS += mach_absolute_time.s
.ifdef FEATURE_PLOCKSTAT
${SYMROOTINC}/plockstat.h: ${.CURDIR}/pthreads/plockstat.d
- mkdir -p ${SYMROOTINC}
- dtrace -o ${.TARGET} -C -h -s ${.ALLSRC}
+ ${MKDIR} ${SYMROOTINC}
+ ${DTRACE} -o ${.TARGET} -C -h -s ${.ALLSRC}
.for _src in pthread_cond.c pthread_mutex.c pthread_rwlock.c
${_src:R}.${OBJSUFFIX}: ${SYMROOTINC}/plockstat.h
CFLAGS-pthread_mutex.c += -DLIBC_ALIAS_PTHREAD_MUTEXATTR_DESTROY
CFLAGS-pthread_rwlock.c += -DLIBC_ALIAS_PTHREAD_RWLOCK_DESTROY -DLIBC_ALIAS_PTHREAD_RWLOCK_INIT -DLIBC_ALIAS_PTHREAD_RWLOCK_RDLOCK -DLIBC_ALIAS_PTHREAD_RWLOCK_TRYRDLOCK -DLIBC_ALIAS_PTHREAD_RWLOCK_TRYWRLOCK -DLIBC_ALIAS_PTHREAD_RWLOCK_UNLOCK -DLIBC_ALIAS_PTHREAD_RWLOCK_WRLOCK
+.if defined(CCARCH) && ${CCARCH} == armv6
+CFLAGS-pthread.c += -mno-thumb
+CFLAGS-pthread_cancelable.c += -mno-thumb
+CFLAGS-pthread_cond.c += -mno-thumb
+CFLAGS-pthread_mutex.c += -mno-thumb
+CFLAGS-pthread_rwlock.c += -mno-thumb
+CFLAGS-pthread_tsd.c += -mno-thumb
+.endif
+
STRIP_HDRS += pthread.h
.if ${LIB} == "c"
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#endif
+extern int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
+extern int __pthread_sigmask(int, const sigset_t *, sigset_t *);
+
#ifndef BUILDING_VARIANT /* [ */
__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
+int32_t workq_targetconc[WORKQ_NUM_PRIOQUEUE];
+
/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);
static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread);
static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread);
-void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero);
+static void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero);
static void _pthread_tsd_reinit(pthread_t t);
static int _new_pthread_create_suspended(pthread_t *thread,
const pthread_attr_t *attr,
/* Same implementation as LOCK, but without the __is_threaded check */
int _spin_tries = 0;
+extern kern_return_t syscall_thread_switch(mach_port_name_t, int, mach_msg_timeout_t);
__private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
{
int tries = _spin_tries;
static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT;
static void _pthread_exit(pthread_t self, void *value_ptr);
-int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
static pthread_attr_t _pthread_attr_default = {0};
static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr);
-static int handle_removeitem(pthread_workqueue_t workq, pthread_workitem_t item);
static int kernel_workq_setup = 0;
static volatile int32_t kernel_workq_count = 0;
static volatile unsigned int user_workq_count = 0;
#define KERNEL_WORKQ_ELEM_MAX 64 /* Max number of elements in the kerrel */
static int wqreadyprio = 0; /* current highest prio queue ready with items */
+static int __pthread_workqueue_affinity = 1; /* 0 means no affinity */
__private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head);
__private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head);
struct _pthread_workqueue_head __pthread_workq0_head;
struct _pthread_workqueue_head __pthread_workq1_head;
struct _pthread_workqueue_head __pthread_workq2_head;
-struct _pthread_workqueue_head __pthread_workq3_head;
-struct _pthread_workqueue_head __pthread_workq4_head;
-pthread_workqueue_head_t __pthread_wq_head_tbl[WQ_NUM_PRIO_QS] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head, &__pthread_workq3_head, &__pthread_workq4_head};
+pthread_workqueue_head_t __pthread_wq_head_tbl[WQ_NUM_PRIO_QS] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head};
static void workqueue_list_lock(void);
static void workqueue_list_unlock(void);
static int post_nextworkitem(pthread_workqueue_t workq);
static void _pthread_workq_return(pthread_t self);
static pthread_workqueue_attr_t _pthread_wq_attr_default = {0};
-void _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
extern void start_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
-extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, int flags);
+extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
static pthread_workitem_t alloc_workitem(void);
static void free_workitem(pthread_workitem_t);
static pthread_workqueue_t alloc_workqueue(void);
static int _pthread_work_internal_init(void);
static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item);
-/* workq_ops commands */
+void pthread_workqueue_atfork_prepare(void);
+void pthread_workqueue_atfork_parent(void);
+void pthread_workqueue_atfork_child(void);
+
+extern void dispatch_atfork_prepare(void);
+extern void dispatch_atfork_parent(void);
+extern void dispatch_atfork_child(void);
+
+/* workq_kernreturn commands */
#define WQOPS_QUEUE_ADD 1
#define WQOPS_QUEUE_REMOVE 2
#define WQOPS_THREAD_RETURN 4
+#define WQOPS_THREAD_SETCONC 8
/*
* Flags filed passed to bsdthread_create and back in pthread_start
| flags(8) | policy(8) | importance(16) |
-----------------------------------------
*/
+__private_extern__
void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
+__private_extern__
+void _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
+
#define PTHREAD_START_CUSTOM 0x01000000
#define PTHREAD_START_SETSCHED 0x02000000
#define PTHREAD_START_DETACHED 0x04000000
#define PTHREAD_START_IMPORTANCE_MASK 0xffff
static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
-extern pthread_t __bsdthread_create(void (*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
+extern pthread_t __bsdthread_create(void *(*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
+extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int), int,void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int),__uint64_t);
extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
+extern __uint64_t __thread_selfid( void );
+extern int __pthread_canceled(int);
+extern void _pthread_keys_init(void);
+extern int __pthread_kill(mach_port_t, int);
+extern int __pthread_markcancel(int);
+extern int __workq_open(void);
+
+#define WORKQUEUE_OVERCOMMIT 0x10000
+
+extern int __workq_kernreturn(int, pthread_workitem_t, int, int);
#if defined(__ppc__) || defined(__ppc64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
mach_port_t kport;
semaphore_t joinsem = SEMAPHORE_NULL;
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);
#endif
kport = t->kernel_thread;
if (freestruct != 0) {
TAILQ_REMOVE(&__pthread_head, t, plist);
/* if parent has not returned from create yet keep pthread_t */
-#if WQ_TRACE
+#if PTH_LISTTRACE
__kdebug_trace(0x9000010, t, 0, 0, 1, 0);
#endif
if (t->parentcheck == 0)
thread_count = --_pthread_count;
UNLOCK(_pthread_list_lock);
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
#endif
if (thread_count <=0)
exit(0);
else
- __bsdthread_terminate(freeaddr, freesize, kport, joinsem);
- abort();
+ __bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
+ LIBC_ABORT("thread %p didn't terminate", t);
} else {
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
#endif
res = vm_deallocate(mach_task_self(), freeaddr, freesize);
LOCK(_pthread_list_lock);
if (freestruct != 0) {
TAILQ_REMOVE(&__pthread_head, t, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
__kdebug_trace(0x9000010, t, 0, 0, 2, 0);
#endif
}
UNLOCK(_pthread_list_lock);
if (freestruct) {
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000008, t, 0, 0, 2, 0);
#endif
free(t);
freeaddr = 0;
freesize = 0;
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
#endif
exit(0);
else
__bsdthread_terminate(NULL, 0, kport, joinsem);
- abort();
+ LIBC_ABORT("thread %p didn't terminate", t);
} else if (freestruct) {
t->sig = _PTHREAD_NO_SIG;
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000024, t, 0, 0, 2, 0);
#endif
free(t);
char * stackaddr;
if ((pflags & PTHREAD_START_CUSTOM) == 0) {
- stackaddr = self;
+ stackaddr = (char *)self;
_pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
+ #if defined(__i386__) || defined(__x86_64__)
+ _pthread_set_self(self);
+ #endif
LOCK(_pthread_list_lock);
if (pflags & PTHREAD_START_SETSCHED) {
self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
self->detached &= ~PTHREAD_CREATE_JOINABLE;
self->detached |= PTHREAD_CREATE_DETACHED;
}
- } else
+ } else {
+ #if defined(__i386__) || defined(__x86_64__)
+ _pthread_set_self(self);
+ #endif
LOCK(_pthread_list_lock);
+ }
self->kernel_thread = kport;
self->fun = fun;
self->arg = funarg;
/* Add to the pthread list */
if (self->parentcheck == 0) {
TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
__kdebug_trace(0x900000c, self, 0, 0, 3, 0);
#endif
_pthread_count++;
}
self->childrun = 1;
UNLOCK(_pthread_list_lock);
-#if defined(__i386__) || defined(__x86_64__)
- _pthread_set_self(self);
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+ if( (self->thread_id = __thread_selfid()) == (__uint64_t)-1)
+ printf("Failed to set thread_id in pthread_start\n");
#endif
#if WQ_DEBUG
pself = pthread_self();
if (self != pself)
- abort();
+ LIBC_ABORT("self %p != pself %p", self, pself);
#endif
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000030, self, pflags, 0, 0, 0);
#endif
void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero)
{
- mach_vm_offset_t stackaddr = (mach_vm_offset_t)stack;
+ mach_vm_offset_t stackaddr = (mach_vm_offset_t)(long)stack;
if (nozero == 0) {
memset(t, 0, sizeof(*t));
t->schedset = attrs->schedset;
t->tsd[0] = t;
if (kernalloc != 0) {
- stackaddr = (mach_vm_offset_t)t;
+ stackaddr = (mach_vm_offset_t)(long)t;
/* if allocated from kernel set values appropriately */
t->stacksize = stacksize;
- t->stackaddr = stackaddr;
+ t->stackaddr = (void *)(long)stackaddr;
t->freeStackOnExit = 1;
- t->freeaddr = stackaddr - stacksize - vm_page_size;
+ t->freeaddr = (void *)(long)(stackaddr - stacksize - vm_page_size);
t->freesize = pthreadsize + stacksize + vm_page_size;
} else {
t->stacksize = attrs->stacksize;
{
mach_port_t kport = MACH_PORT_NULL;
+ if (t == NULL)
+ goto out;
+
+ /*
+ * If the call is on self, return the kernel port. We cannot
+ * add this bypass for main thread as it might have exited,
+ * and we should not return stale port info.
+ */
+ if (t == pthread_self())
+ {
+ kport = t->kernel_thread;
+ goto out;
+ }
+
if (_pthread_lookup_thread(t, &kport, 0) != 0)
- return(NULL);
+ return((mach_port_t)0);
+out:
return(kport);
}
size_t
pthread_get_stacksize_np(pthread_t t)
{
- int ret;
+ int ret,nestingDepth=0;
size_t size = 0;
+ vm_address_t address=0;
+ vm_size_t region_size=0;
+ struct vm_region_submap_info_64 info;
+ mach_msg_type_number_t count;
if (t == NULL)
return(ESRCH);
+ if ( t == pthread_self() || t == &_thread ) //since the main thread will not get de-allocated from underneath us
+ {
+ size=t->stacksize;
+ return size;
+ }
+
+
LOCK(_pthread_list_lock);
if ((ret = _pthread_find_thread(t)) != 0) {
UNLOCK(_pthread_list_lock);
return(ret);
}
- size = t->stacksize;
+
+ size=t->stacksize;
UNLOCK(_pthread_list_lock);
+
return(size);
}
void * addr = NULL;
if (t == NULL)
- return(ESRCH);
+ return((void *)(long)ESRCH);
+ if(t == pthread_self() || t == &_thread) //since the main thread will not get deallocated from underneath us
+ return t->stackaddr;
+
LOCK(_pthread_list_lock);
if ((ret = _pthread_find_thread(t)) != 0) {
UNLOCK(_pthread_list_lock);
- return(ret);
+ return((void *)(long)ret);
}
addr = t->stackaddr;
UNLOCK(_pthread_list_lock);
return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+/* if we are passed in a pthread_t that is NULL, then we return
+ the current thread's thread_id. So folks don't have to call
+ pthread_self, in addition to us doing it, if they just want
+ their thread_id.
+*/
+int
+pthread_threadid_np(pthread_t thread, __uint64_t *thread_id)
+{
+ int rval=0;
+ pthread_t self = pthread_self();
+
+ if (thread_id == NULL) {
+ return(EINVAL);
+ } else if (thread == NULL || thread == self) {
+ *thread_id = self->thread_id;
+ return rval;
+ }
+
+ LOCK(_pthread_list_lock);
+ if ((rval = _pthread_find_thread(thread)) != 0) {
+ UNLOCK(_pthread_list_lock);
+ return(rval);
+ }
+ *thread_id = thread->thread_id;
+ UNLOCK(_pthread_list_lock);
+ return rval;
+}
+#endif
+
+int
+pthread_getname_np(pthread_t thread, char *threadname, size_t len)
+{
+ int rval;
+ rval = 0;
+
+ if (thread == NULL)
+ return(ESRCH);
+
+ LOCK(_pthread_list_lock);
+ if ((rval = _pthread_find_thread(thread)) != 0) {
+ UNLOCK(_pthread_list_lock);
+ return(rval);
+ }
+ strlcpy(threadname, thread->pthread_name, len);
+ UNLOCK(_pthread_list_lock);
+ return rval;
+}
+
+int
+pthread_setname_np(const char *threadname)
+{
+ int rval;
+ size_t len;
+
+ rval = 0;
+ len = strlen(threadname);
+ rval = sysctlbyname("kern.threadname", NULL, 0, threadname, len);
+ if(rval == 0)
+ {
+ strlcpy((pthread_self())->pthread_name, threadname, len+1);
+ }
+ return rval;
+
+}
+
static int
_new_pthread_create_suspended(pthread_t *thread,
const pthread_attr_t *attr,
void *stack;
int error;
unsigned int flags;
- pthread_t t;
+ pthread_t t,t2;
kern_return_t kern_res;
mach_port_t kernel_thread = MACH_PORT_NULL;
int needresume;
/* Rosetta or pthread_create_suspended() */
/* running under rosetta */
/* Allocate a stack for the thread */
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
#endif
if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
/* Now set it up to execute */
LOCK(_pthread_list_lock);
TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
__kdebug_trace(0x900000c, t, 0, 0, 4, 0);
#endif
_pthread_count++;
t->fun = start_routine;
t->newstyle = 1;
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000004, t, flags, 0, 0, 0);
#endif
- if ((t = __bsdthread_create(start_routine, arg, stack, t, flags)) == -1) {
+ if ((t2 = __bsdthread_create(start_routine, arg, stack, t, flags)) == (pthread_t)-1) {
_pthread_free_pthread_onstack(t, 1, 0);
return (EAGAIN);
}
+ else t=t2;
LOCK(_pthread_list_lock);
t->parentcheck = 1;
if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
/* detached child exited, mop up */
UNLOCK(_pthread_list_lock);
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000008, t, 0, 0, 1, 0);
#endif
+ if(t->freeStackOnExit)
+ vm_deallocate(self, (mach_vm_address_t)(long)t, pthreadsize);
+ else
free(t);
} else if (t->childrun == 0) {
TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
_pthread_count++;
-#if WQ_TRACE
+#if PTH_LISTTRACE
__kdebug_trace(0x900000c, t, 0, 0, 1, 0);
#endif
UNLOCK(_pthread_list_lock);
*thread = t;
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000014, t, 0, 0, 1, 0);
#endif
return (0);
} else {
/* kernel allocation */
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
#endif
- if ((t = __bsdthread_create(start_routine, arg, attrs->stacksize, NULL, flags)) == -1)
+ if ((t = __bsdthread_create(start_routine, arg, (void *)attrs->stacksize, NULL, flags)) == (pthread_t)-1)
return (EAGAIN);
/* Now set it up to execute */
LOCK(_pthread_list_lock);
if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
/* detached child exited, mop up */
UNLOCK(_pthread_list_lock);
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
#endif
- vm_deallocate(self, t, pthreadsize);
+ vm_deallocate(self, (mach_vm_address_t)(long)t, pthreadsize);
} else if (t->childrun == 0) {
TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
_pthread_count++;
-#if WQ_TRACE
+#if PTH_LISTTRACE
__kdebug_trace(0x900000c, t, 0, 0, 2, 0);
#endif
UNLOCK(_pthread_list_lock);
*thread = t;
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000014, t, 0, 0, 2, 0);
#endif
return(0);
/* Now set it up to execute */
LOCK(_pthread_list_lock);
TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
__kdebug_trace(0x900000c, t, 0, 0, 5, 0);
#endif
_pthread_count++;
/*
* pthread_kill call to system call
*/
-
-extern int __pthread_kill(mach_port_t, int);
-
int
pthread_kill (
pthread_t th,
if (_pthread_lookup_thread(th, &kport, 0) != 0)
return (ESRCH); /* Not a valid thread */
+ /* if the thread is a workqueue thread, just return error */
+ if ((th->wqthread != 0) && (th->wqkillset ==0)) {
+ return(ENOTSUP);
+ }
+
error = __pthread_kill(kport, sig);
if (error == -1)
return(error);
}
+int
+__pthread_workqueue_setkill(int enable)
+{
+ pthread_t self = pthread_self();
+
+ LOCK(self->lock);
+ if (enable == 0)
+ self->wqkillset = 0;
+ else
+ self->wqkillset = 1;
+ UNLOCK(self->lock);
+
+ return(0);
+
+}
+
/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
*value_ptr = th->exit_value;
if (conforming) {
if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
- (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
+ (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL))
*value_ptr = PTHREAD_CANCELED;
th->sig = _PTHREAD_NO_SIG;
}
mach_port_t kernel_thread = msg.header.msgh_remote_port;
pthread_t thread = msg.thread;
- if (_pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
+ /* deal with race with thread_create_running() */
+ if (kernel_thread == MACH_PORT_NULL &&
+ kernel_thread != thread->kernel_thread) {
+ kernel_thread = thread->kernel_thread;
+ }
+
+ if ( kernel_thread == MACH_PORT_NULL ||
+ _pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
{
/* not dead yet, put it back for someone else to reap, stop here */
_pthread_become_available(thread, kernel_thread);
return;
}
+
ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
sizeof msg, thread_recycle_port,
MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
/* Make this thread not to receive any signals */
__disable_threadsignal(1);
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x900001c, self, newstyle, 0, 0, 0);
#endif
UNLOCK(self->lock);
LOCK(_pthread_list_lock);
TAILQ_REMOVE(&__pthread_head, self, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
__kdebug_trace(0x9000010, self, 0, 0, 5, 0);
#endif
thread_count = --_pthread_count;
} else {
semaphore_t joinsem = SEMAPHORE_NULL;
- if ((self->joiner_notify == NULL) && (self->detached & PTHREAD_CREATE_JOINABLE))
+ if ((self->joiner_notify == (mach_port_t)0) && (self->detached & PTHREAD_CREATE_JOINABLE))
joinsem = new_sem_from_pool();
LOCK(self->lock);
self->detached |= _PTHREAD_EXITED;
self->exit_value = value_ptr;
if (self->detached & PTHREAD_CREATE_JOINABLE) {
- if (self->joiner_notify == NULL) {
+ if (self->joiner_notify == (mach_port_t)0) {
self->joiner_notify = joinsem;
joinsem = SEMAPHORE_NULL;
}
_pthread_free_pthread_onstack(self, 1, 1);
}
}
- abort();
+ LIBC_ABORT("thread %p didn't exit", self);
}
void
pthread_exit(void *value_ptr)
{
pthread_t self = pthread_self();
- if (self->wqthread != 0)
- workqueue_exit(self, self->cur_workq, self->cur_workitem);
- else
+ /* if the current thread is a workqueue thread, just crash the app, as per libdispatch folks */
+ if (self->wqthread == 0) {
_pthread_exit(self, value_ptr);
+ } else {
+ LIBC_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
+ }
}
/*
_spin_lock(&once_control->lock);
if (once_control->sig == _PTHREAD_ONCE_SIG_init)
{
- pthread_cleanup_push(__pthread_once_cancel_handler, once_control);
+ pthread_cleanup_push((void (*)(void *))__pthread_once_cancel_handler, once_control);
(*init_routine)();
pthread_cleanup_pop(0);
once_control->sig = _PTHREAD_ONCE_SIG;
/*
* Perform package initialization - called automatically when application starts
*/
-
__private_extern__ int
pthread_init(void)
{
host_t host;
mach_msg_type_number_t count;
int mib[2];
+ int ncpus = 0;
size_t len;
void *stackaddr;
thread = &_thread;
TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
_pthread_set_self(thread);
-
+#if PTH_LISTTRACE
+ __kdebug_trace(0x900000c, thread, 0, 0, 10, 0);
+#endif
+
/* In case of dyld reset the tsd keys from 1 - 10 */
_pthread_keys_init();
if (sysctl (mib, 2, &stackaddr, &len, NULL, 0) != 0)
stackaddr = (void *)USRSTACK;
_pthread_create(thread, attrs, stackaddr, mach_thread_self());
+ thread->stacksize = DFLSSIZ; //initialize main thread's stacksize based on vmparam.h
thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;
_init_cpu_capabilities();
- if (_NumCPUs() > 1)
+ if ((ncpus = _NumCPUs()) > 1)
_spin_tries = MP_SPIN_TRIES;
+ workq_targetconc[WORKQ_HIGH_PRIOQUEUE] = ncpus;
+ workq_targetconc[WORKQ_DEFAULT_PRIOQUEUE] = ncpus;
+ workq_targetconc[WORKQ_LOW_PRIOQUEUE] = ncpus;
+
mach_port_deallocate(mach_task_self(), host);
#if defined(__ppc__)
mig_init(1); /* enable multi-threaded mig interfaces */
if (__oldstyle == 0) {
#if defined(__i386__) || defined(__x86_64__)
- __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)));
+ __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (__uint64_t)(&thread->tsd[__PTK_LIBDISPATCH_KEY0]) - (__uint64_t)thread);
#else
- __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)));
+ __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, &workq_targetconc[0], (__uint64_t)&thread->tsd[__PTK_LIBDISPATCH_KEY0] - (__uint64_t)thread);
#endif
}
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+ if( (thread->thread_id = __thread_selfid()) == (__uint64_t)-1)
+ printf("Failed to set thread_id in pthread_init\n");
+#endif
	return 0;
}
int sched_yield(void)
TAILQ_INIT(&__pthread_head);
LOCK_INIT(_pthread_list_lock);
TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
+#if PTH_LISTTRACE
+ __kdebug_trace(0x900000c, p, 0, 0, 10, 0);
+#endif
_pthread_count = 1;
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+ if( (p->thread_id = __thread_selfid()) == (__uint64_t)-1)
+ printf("Failed to set thread_id in pthread_fork_child\n");
+#endif
}
/*
kern_return_t res;
int detached = 0, ret;
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000028, thread, 0, 0, 1, 0);
#endif
/* The scenario where the joiner was waiting for the thread and
}
/* It is still a joinable thread and needs to be reaped */
TAILQ_REMOVE(&__pthread_head, thread, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
__kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
#endif
UNLOCK(_pthread_list_lock);
*value_ptr = thread->exit_value;
if (conforming) {
if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
- (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) {
+ (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL)) {
*value_ptr = PTHREAD_CANCELED;
}
}
}
if (thread->freeStackOnExit) {
thread->sig = _PTHREAD_NO_SIG;
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
#endif
- vm_deallocate(mach_task_self(), thread, pthreadsize);
+ vm_deallocate(mach_task_self(), (mach_vm_address_t)(long)thread, pthreadsize);
} else {
thread->sig = _PTHREAD_NO_SIG;
-#if WQ_TRACE
+#if PTH_TRACE
__kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
#endif
free(thread);
int
pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attrp)
{
- attrp->stacksize = DEFAULT_STACK_SIZE;
- attrp->istimeshare = 1;
- attrp->importance = 0;
- attrp->affinity = 0;
- attrp->queueprio = WORK_QUEUE_NORMALIZER;
- attrp->sig = PTHEAD_WRKQUEUE_ATTR_SIG;
+ attrp->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
+ attrp->sig = PTHREAD_WORKQUEUE_ATTR_SIG;
+ attrp->overcommit = 0;
return(0);
}
int
pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr)
{
- if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG)
+ if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG)
{
return (0);
} else
}
}
-#ifdef NOTYET /* [ */
-int
-pthread_workqueue_attr_getstacksize_np(const pthread_workqueue_attr_t * attr, size_t * stacksizep)
-{
- if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
- *stacksizep = attr->stacksize;
- return (0);
- } else {
- return (EINVAL); /* Not an attribute structure! */
- }
-}
-
int
-pthread_workqueue_attr_setstacksize_np(pthread_workqueue_attr_t * attr, size_t stacksize)
+pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
{
- if ((attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
- attr->stacksize = stacksize;
+ if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
+ *qpriop = attr->queueprio;
return (0);
} else {
return (EINVAL); /* Not an attribute structure! */
int
-pthread_workqueue_attr_getthreadtimeshare_np(const pthread_workqueue_attr_t * attr, int * istimesahrep)
+pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
{
- if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
- *istimesahrep = attr->istimeshare;
- return (0);
- } else {
- return (EINVAL); /* Not an attribute structure! */
- }
-}
+int error = 0;
-int
-pthread_workqueue_attr_settthreadtimeshare_np(pthread_workqueue_attr_t * attr, int istimeshare)
-{
- if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
- if (istimeshare != 0)
- attr->istimeshare = istimeshare;
- else
- attr->istimeshare = 0;
- return (0);
+ if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
+ switch(qprio) {
+ case WORKQ_HIGH_PRIOQUEUE:
+ case WORKQ_DEFAULT_PRIOQUEUE:
+ case WORKQ_LOW_PRIOQUEUE:
+ attr->queueprio = qprio;
+ break;
+ default:
+ error = EINVAL;
+ }
} else {
- return (EINVAL); /* Not an attribute structure! */
+ error = EINVAL;
}
+ return (error);
}
-int
-pthread_workqueue_attr_getthreadimportance_np(const pthread_workqueue_attr_t * attr, int * importancep)
-{
- if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
- *importancep = attr->importance;
- return (0);
- } else {
- return (EINVAL); /* Not an attribute structure! */
- }
-}
int
-pthread_workqueue_attr_settthreadimportance_np(pthread_workqueue_attr_t * attr, int importance)
+pthread_workqueue_attr_getovercommit_np(const pthread_workqueue_attr_t * attr, int * ocommp)
{
- if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG){
- attr->importance = importance;
+ if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
+ *ocommp = attr->overcommit;
return (0);
} else {
return (EINVAL); /* Not an attribute structure! */
}
}
-int
-pthread_workqueue_attr_getthreadaffinity_np(const pthread_workqueue_attr_t * attr, int * affinityp)
-{
- if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
- *affinityp = attr->affinity;
- return (0);
- } else {
- return (EINVAL); /* Not an attribute structure! */
- }
-}
int
-pthread_workqueue_attr_settthreadaffinity_np(pthread_workqueue_attr_t * attr, int affinity)
+pthread_workqueue_attr_setovercommit_np(pthread_workqueue_attr_t * attr, int ocomm)
{
- if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG){
- attr->affinity = affinity;
- return (0);
- } else {
- return (EINVAL); /* Not an attribute structure! */
- }
-}
-
-#endif /* NOTYET ] */
+int error = 0;
-int
-pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
-{
- if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
- *qpriop = (attr->queueprio - WORK_QUEUE_NORMALIZER);
- return (0);
+ if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
+ attr->overcommit = ocomm;
} else {
- return (EINVAL); /* Not an attribute structure! */
+ error = EINVAL;
}
+ return (error);
}
-
-int
-pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
-{
- /* only -2 to +2 is valid */
- if ((attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) && (qprio <= 2) && (qprio >= -2)) {
- attr->queueprio = (qprio + WORK_QUEUE_NORMALIZER);
- return (0);
- } else {
- return (EINVAL); /* Not an attribute structure! */
- }
-}
-
/* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */
static void
return(ret);
}
+int
+pthread_workqueue_requestconcurrency_np(int queue, int request_concurrency)
+{
+ int error = 0;
+
+	if (queue < 0 || queue >= WORKQ_NUM_PRIOQUEUE)
+		return(EINVAL);
+
+	error = __workq_kernreturn(WQOPS_THREAD_SETCONC, NULL, request_concurrency, queue);
+
+ if (error == -1)
+ return(errno);
+ return(0);
+}
+
+void
+pthread_workqueue_atfork_prepare(void)
+{
+ /*
+ * NOTE: Any workq additions here
+ * should be for i386,x86_64 only
+ */
+ dispatch_atfork_prepare();
+}
+
+void
+pthread_workqueue_atfork_parent(void)
+{
+ /*
+ * NOTE: Any workq additions here
+ * should be for i386,x86_64 only
+ */
+ dispatch_atfork_parent();
+}
+
+void
+pthread_workqueue_atfork_child(void)
+{
+#if defined(__i386__) || defined(__x86_64__)
+ /*
+ * NOTE: workq additions here
+ * are for i386,x86_64 only as
+ * ppc and arm do not support it
+ */
+ __workqueue_list_lock = OS_SPINLOCK_INIT;
+ if (kernel_workq_setup != 0){
+ kernel_workq_setup = 0;
+ _pthread_work_internal_init();
+ }
+#endif
+ dispatch_atfork_child();
+}
+
static int
_pthread_work_internal_init(void)
{
if (kernel_workq_setup == 0) {
#if defined(__i386__) || defined(__x86_64__)
- __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)));
+ __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)),NULL,NULL, NULL);
#else
- __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)));
+ __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)),NULL,NULL, NULL);
#endif
- _pthread_wq_attr_default.stacksize = DEFAULT_STACK_SIZE;
- _pthread_wq_attr_default.istimeshare = 1;
- _pthread_wq_attr_default.importance = 0;
- _pthread_wq_attr_default.affinity = 0;
- _pthread_wq_attr_default.queueprio = WORK_QUEUE_NORMALIZER;
- _pthread_wq_attr_default.sig = PTHEAD_WRKQUEUE_ATTR_SIG;
+ _pthread_wq_attr_default.queueprio = WORKQ_DEFAULT_PRIOQUEUE;
+ _pthread_wq_attr_default.sig = PTHREAD_WORKQUEUE_ATTR_SIG;
for( i = 0; i< WQ_NUM_PRIO_QS; i++) {
headp = __pthread_wq_head_tbl[i];
if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
workqueue_list_unlock();
witem = malloc(sizeof(struct _pthread_workitem));
+ witem->gencount = 0;
workqueue_list_lock();
} else {
witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
static void
free_workitem(pthread_workitem_t witem)
{
+ witem->gencount++;
TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
}
{
bzero(wq, sizeof(struct _pthread_workqueue));
if (attr != NULL) {
- wq->stacksize = attr->stacksize;
- wq->istimeshare = attr->istimeshare;
- wq->importance = attr->importance;
- wq->affinity = attr->affinity;
wq->queueprio = attr->queueprio;
+ wq->overcommit = attr->overcommit;
} else {
- wq->stacksize = DEFAULT_STACK_SIZE;
- wq->istimeshare = 1;
- wq->importance = 0;
- wq->affinity = 0;
- wq->queueprio = WORK_QUEUE_NORMALIZER;
+ wq->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
+ wq->overcommit = 0;
}
LOCK_INIT(wq->lock);
wq->flags = 0;
TAILQ_INIT(&wq->item_listhead);
TAILQ_INIT(&wq->item_kernhead);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080ac, wq, &wq->item_listhead, wq->item_listhead.tqh_first, wq->item_listhead.tqh_last, 0);
+#endif
wq->wq_list.tqe_next = 0;
wq->wq_list.tqe_prev = 0;
- wq->sig = PTHEAD_WRKQUEUE_SIG;
+ wq->sig = PTHREAD_WORKQUEUE_SIG;
wq->headp = __pthread_wq_head_tbl[wq->queueprio];
}
int
valid_workq(pthread_workqueue_t workq)
{
- if (workq->sig == PTHEAD_WRKQUEUE_SIG)
+ if (workq->sig == PTHREAD_WORKQUEUE_SIG)
return(1);
else
return(0);
pthread_workqueue_t workq;
pthread_workqueue_t nworkq = NULL;
+#if WQ_TRACE
+ __kdebug_trace(0x9008098, kernel_workq_count, 0, 0, 0, 0);
+#endif
loop:
while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
found = 0;
headp->next_workq = TAILQ_NEXT(workq, wq_list);
if (headp->next_workq == NULL)
headp->next_workq = TAILQ_FIRST(&headp->wqhead);
+#if WQ_TRACE
+ __kdebug_trace(0x9008098, kernel_workq_count, workq, 0, 1, 0);
+#endif
val = post_nextworkitem(workq);
if (val != 0) {
static int
post_nextworkitem(pthread_workqueue_t workq)
{
- int error;
+ int error, prio;
pthread_workitem_t witem;
pthread_workqueue_head_t headp;
void (*func)(pthread_workqueue_t, void *);
if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
return(0);
}
+#if WQ_TRACE
+ __kdebug_trace(0x900809c, workq, workq->item_listhead.tqh_first, 0, 1, 0);
+#endif
if (TAILQ_EMPTY(&workq->item_listhead)) {
return(0);
}
+ if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON)
+ return(0);
+
witem = TAILQ_FIRST(&workq->item_listhead);
headp = workq->headp;
+#if WQ_TRACE
+ __kdebug_trace(0x900809c, workq, witem, 0, 0xee, 0);
+#endif
if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
+#if WQ_TRACE
+ __kdebug_trace(0x9000064, workq, 0, 0, 2, 0);
+#endif
if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
return(0);
__kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
#endif
if (witem->func != NULL) {
+ /* since we are going to drop list lock */
+ witem->flags |= PTH_WQITEM_APPLIED;
+ workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
workqueue_list_unlock();
- func = witem->func;
+ func = (void (*)(pthread_workqueue_t, void *))witem->func;
(*func)(workq, witem->func_arg);
+#if WQ_TRACE
+ __kdebug_trace(0x9000064, 3, workq->barrier_count, 0, 0, 0);
+#endif
workqueue_list_lock();
+ workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
}
TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
witem->flags = 0;
free_workitem(witem);
+#if WQ_TRACE
+ __kdebug_trace(0x9000064, 4, workq->barrier_count, 0, 0, 0);
+#endif
return(1);
}
} else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
witem->flags |= PTH_WQITEM_APPLIED;
workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
workq->barrier_count = workq->kq_count;
- workq->term_callback = witem->func;
+ workq->term_callback = (void (*)(struct _pthread_workqueue *,void *))witem->func;
workq->term_callarg = witem->func_arg;
TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
#if WQ_TRACE
}
free_workqueue(workq);
return(1);
- } else
+ } else {
TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
+ }
#if WQ_TRACE
__kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
#endif
__kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
#endif
TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
workq->kq_count++;
}
OSAtomicIncrement32(&kernel_workq_count);
workqueue_list_unlock();
- if (( error =__workq_ops(WQOPS_QUEUE_ADD, witem, 0)) == -1) {
+
+ prio = workq->queueprio;
+ if (workq->overcommit != 0) {
+ prio |= WORKQUEUE_OVERCOMMIT;
+ }
+
+ if (( error =__workq_kernreturn(WQOPS_QUEUE_ADD, witem, workq->affinity, prio)) == -1) {
OSAtomicDecrement32(&kernel_workq_count);
workqueue_list_lock();
#if WQ_TRACE
#endif
TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
workq->flags |= PTHREAD_WORKQ_REQUEUED;
} else
/* noone should come here */
#if 1
printf("error in logic for next workitem\n");
- abort();
+ LIBC_ABORT("error in logic for next workitem");
#endif
return(0);
}
int ret;
pthread_attr_t *attrs = &_pthread_attr_default;
pthread_workqueue_t workq;
+#if WQ_DEBUG
pthread_t pself;
+#endif
workq = item->workq;
/* reuse is set to 0, when a thread is newly created to run a workitem */
_pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
self->wqthread = 1;
+ self->wqkillset = 0;
self->parentcheck = 1;
/* These are not joinable threads */
__kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
#endif
self->kernel_thread = kport;
- self->fun = item->func;
+ self->fun = (void *(*)(void *))item->func;
self->arg = item->func_arg;
/* Add to the pthread list */
LOCK(_pthread_list_lock);
TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
__kdebug_trace(0x900000c, self, 0, 0, 10, 0);
#endif
_pthread_count++;
UNLOCK(_pthread_list_lock);
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+ if( (self->thread_id = __thread_selfid()) == (__uint64_t)-1)
+ printf("Failed to set thread_id in pthread_wqthread\n");
+#endif
+
} else {
/* reuse is set to 1, when a thread is resued to run another work item */
#if WQ_TRACE
__kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
#endif
/* reset all tsd from 1 to KEYS_MAX */
- _pthread_tsd_reinit(self);
+ if (self == NULL)
+ LIBC_ABORT("_pthread_wqthread: pthread %p setup to be NULL", self);
- self->fun = item->func;
+ self->fun = (void *(*)(void *))item->func;
self->arg = item->func_arg;
}
pself = pthread_self();
if (self != pself) {
printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
- abort();
+ LIBC_ABORT("(3)pthread_self not set in reuse: pself %p, passed in %p", pself, self);
}
}
#endif /* WQ_DEBUG */
self->cur_workitem = item;
OSAtomicDecrement32(&kernel_workq_count);
- ret = (*self->fun)(self->arg);
+ ret = (int)(intptr_t)(*self->fun)(self->arg);
+ /* If we reach here without going through the above initialization path then don't go through
+ * with the teardown code path ( e.g. setjmp/longjmp ). Instead just exit this thread.
+ */
+ if(self != pthread_self()) {
+ pthread_exit(PTHREAD_CANCELED);
+ }
+
workqueue_exit(self, workq, item);
}
/* if the front item is a barrier and call back is registered, run that */
if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
workqueue_list_unlock();
- func = baritem->func;
+ func = (void (*)(pthread_workqueue_t, void *))baritem->func;
(*func)(workq, baritem->func_arg);
workqueue_list_lock();
}
TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
baritem->flags = 0;
free_workitem(baritem);
workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
static void
_pthread_workq_return(pthread_t self)
{
- struct __darwin_pthread_handler_rec *handler;
- int value = 0;
- int * value_ptr=&value;
-
- /* set cancel state to disable and type to deferred */
- _pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);
-
- /* Make this thread not to receive any signals */
- __disable_threadsignal(1);
-
- while ((handler = self->__cleanup_stack) != 0)
- {
- (handler->__routine)(handler->__arg);
- self->__cleanup_stack = handler->__next;
- }
- _pthread_tsd_cleanup(self);
-
- __workq_ops(WQOPS_THREAD_RETURN, NULL, 0);
+ __workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
/* This is the way to terminate the thread */
_pthread_exit(self, NULL);
}
-/* returns 0 if it handles it, otherwise 1 */
-static int
-handle_removeitem(pthread_workqueue_t workq, pthread_workitem_t item)
-{
- pthread_workitem_t baritem;
- pthread_workqueue_head_t headp;
- void (*func)(pthread_workqueue_t, void *);
-
- if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
- workq->barrier_count--;
- if (workq->barrier_count <= 0 ) {
- /* Need to remove barrier item from the list */
- baritem = TAILQ_FIRST(&workq->item_listhead);
-#if WQ_DEBUG
- if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY| PTH_WQITEM_APPLIED)) == 0)
- printf("Incorect bar item being removed in barrier processing\n");
-#endif /* WQ_DEBUG */
- /* if the front item is a barrier and call back is registered, run that */
- if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER)
- && (baritem->func != NULL)) {
- workqueue_list_unlock();
- func = baritem->func;
- (*func)(workq, baritem->func_arg);
- workqueue_list_lock();
- }
- TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
- baritem->flags = 0;
- free_workitem(baritem);
- item->flags = 0;
- free_workitem(item);
- workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
-#if WQ_TRACE
- __kdebug_trace(0x9000058, pthread_self(), item, item->func_arg, 0, 0);
-#endif
- if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
- headp = __pthread_wq_head_tbl[workq->queueprio];
- workq->flags |= PTHREAD_WORKQ_DESTROYED;
-#if WQ_TRACE
- __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
-#endif
- if (headp->next_workq == workq) {
- headp->next_workq = TAILQ_NEXT(workq, wq_list);
- if (headp->next_workq == NULL) {
- headp->next_workq = TAILQ_FIRST(&headp->wqhead);
- if (headp->next_workq == workq)
- headp->next_workq = NULL;
- }
- }
- TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
- workq->sig = 0;
- if (workq->term_callback != NULL) {
- workqueue_list_unlock();
- (*workq->term_callback)(workq, workq->term_callarg);
- workqueue_list_lock();
- }
- free_workqueue(workq);
- pick_nextworkqueue_droplock();
- return(0);
- } else {
- /* if there are higher prio schedulabel item reset to wqreadyprio */
- if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
- wqreadyprio = workq->queueprio;
- free_workitem(item);
- pick_nextworkqueue_droplock();
- return(0);
- }
- }
- }
- return(1);
-}
/* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */
int
pthread_workqueue_t wq;
pthread_workqueue_head_t headp;
- if ((attr != NULL) && (attr->sig != PTHEAD_WRKQUEUE_ATTR_SIG)) {
+#if defined(__arm__)
+ /* not supported under arm */
+ return(ENOTSUP);
+#endif
+#if defined(__ppc__)
+ IF_ROSETTA() {
+ return(ENOTSUP);
+ }
+#endif
+ if ((attr != NULL) && (attr->sig != PTHREAD_WORKQUEUE_ATTR_SIG)) {
return(EINVAL);
}
}
int
-pthread_workqueue_destroy_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg)
-{
- pthread_workitem_t witem;
- pthread_workqueue_head_t headp;
-
- if (valid_workq(workq) == 0) {
- return(EINVAL);
- }
-
- workqueue_list_lock();
-
- /*
- * Allocate the workitem here as it can drop the lock.
- * Also we can evaluate the workqueue state only once.
- */
- witem = alloc_workitem();
- witem->item_entry.tqe_next = 0;
- witem->item_entry.tqe_prev = 0;
- witem->func = callback_func;
- witem->func_arg = callback_arg;
- witem->flags = PTH_WQITEM_DESTROY;
-
- if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_TERM_ON | PTHREAD_WORKQ_DESTROYED)) == 0) {
- workq->flags |= PTHREAD_WORKQ_IN_TERMINATE;
- /* If nothing queued or running, destroy now */
- if ((TAILQ_EMPTY(&workq->item_listhead)) && (TAILQ_EMPTY(&workq->item_kernhead))) {
- workq->flags |= (PTHREAD_WORKQ_TERM_ON | PTHREAD_WORKQ_DESTROYED);
- headp = __pthread_wq_head_tbl[workq->queueprio];
- workq->term_callback = callback_func;
- workq->term_callarg = callback_arg;
- if (headp->next_workq == workq) {
- headp->next_workq = TAILQ_NEXT(workq, wq_list);
- if (headp->next_workq == NULL) {
- headp->next_workq = TAILQ_FIRST(&headp->wqhead);
- if (headp->next_workq == workq)
- headp->next_workq = NULL;
- }
- }
- TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
- workq->sig = 0;
- free_workitem(witem);
- if (workq->term_callback != NULL) {
- workqueue_list_unlock();
- (*workq->term_callback)(workq, workq->term_callarg);
- workqueue_list_lock();
- }
-#if WQ_TRACE
- __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 3, 0);
-#endif
- free_workqueue(workq);
- workqueue_list_unlock();
- return(0);
- }
- TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
- } else {
- free_workitem(witem);
- workqueue_list_unlock();
- return(EINPROGRESS);
- }
- workqueue_list_unlock();
- return(0);
-}
-
-
-int
-pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep)
+pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp)
{
pthread_workitem_t witem;
if (itemhandlep != NULL)
*itemhandlep = (pthread_workitem_handle_t *)witem;
+ if (gencountp != NULL)
+ *gencountp = witem->gencount;
+#if WQ_TRACE
+ __kdebug_trace(0x9008090, witem, witem->func, witem->func_arg, workq, 0);
+#endif
TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
- if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
- wqreadyprio = workq->queueprio;
-
- pick_nextworkqueue_droplock();
-
- return(0);
-}
-
-int
-pthread_workqueue_removeitem_np(pthread_workqueue_t workq, pthread_workitem_handle_t itemhandle)
-{
- pthread_workitem_t item, baritem;
- pthread_workqueue_head_t headp;
- int error;
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080a4, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
- if (valid_workq(workq) == 0) {
- return(EINVAL);
- }
-
- workqueue_list_lock();
- if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
- workqueue_list_unlock();
- return(ESRCH);
- }
-
- TAILQ_FOREACH(item, &workq->item_listhead, item_entry) {
- if (item == (pthread_workitem_t)itemhandle) {
- TAILQ_REMOVE(&workq->item_listhead, item, item_entry);
- if ((item->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_APPLIED)) == (PTH_WQITEM_BARRIER | PTH_WQITEM_APPLIED)) {
- workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
- workq->barrier_count = 0;
- if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead)))) {
- wqreadyprio = workq->queueprio;
- }
- } else if ((item->flags & PTH_WQITEM_KERN_COUNT) == PTH_WQITEM_KERN_COUNT) {
- workq->kq_count--;
- item->flags |= PTH_WQITEM_REMOVED;
- if (handle_removeitem(workq, item) == 0)
- return(0);
- }
- item->flags |= PTH_WQITEM_NOTINLIST;
- free_workitem(item);
- workqueue_list_unlock();
- return(0);
- }
- }
-
- TAILQ_FOREACH(item, &workq->item_kernhead, item_entry) {
- if (item == (pthread_workitem_t)itemhandle) {
- workqueue_list_unlock();
- if ((error = __workq_ops(WQOPS_QUEUE_REMOVE, item, 0)) == 0) {
- workqueue_list_lock();
- TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
- OSAtomicDecrement32(&kernel_workq_count);
- workq->kq_count--;
- item->flags |= PTH_WQITEM_REMOVED;
- if (handle_removeitem(workq, item) != 0) {
- free_workitem(item);
- pick_nextworkqueue_droplock();
- }
- return(0);
- } else {
- return(EBUSY);
- }
- }
- }
- workqueue_list_unlock();
- return(EINVAL);
-}
-
-
-int
-pthread_workqueue_addbarrier_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg, __unused int waitforcallback, pthread_workitem_handle_t *itemhandlep)
-{
- pthread_workitem_t witem;
-
- if (valid_workq(workq) == 0) {
- return(EINVAL);
- }
-
- workqueue_list_lock();
-
- /*
- * Allocate the workitem here as it can drop the lock.
- * Also we can evaluate the workqueue state only once.
- */
- witem = alloc_workitem();
- witem->item_entry.tqe_next = 0;
- witem->item_entry.tqe_prev = 0;
- witem->func = callback_func;
- witem->func_arg = callback_arg;
- witem->flags = PTH_WQITEM_BARRIER;
-
- /* alloc workitem can drop the lock, check the state */
- if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
- free_workitem(witem);
- workqueue_list_unlock();
- return(ESRCH);
- }
-
- if (itemhandlep != NULL)
- *itemhandlep = (pthread_workitem_handle_t *)witem;
-
- TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
wqreadyprio = workq->queueprio;
return(0);
}
-int
-pthread_workqueue_suspend_np(pthread_workqueue_t workq)
+int
+pthread_workqueue_getovercommit_np(pthread_workqueue_t workq, unsigned int *ocommp)
{
- if (valid_workq(workq) == 0) {
- return(EINVAL);
- }
- workqueue_list_lock();
- if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
- workqueue_list_unlock();
- return(ESRCH);
- }
+ pthread_workitem_t witem;
- workq->flags |= PTHREAD_WORKQ_SUSPEND;
- workq->suspend_count++;
- workqueue_list_unlock();
+ if (valid_workq(workq) == 0) {
+ return(EINVAL);
+ }
+
+ if (ocommp != NULL)
+ *ocommp = workq->overcommit;
return(0);
}
-int
-pthread_workqueue_resume_np(pthread_workqueue_t workq)
-{
- if (valid_workq(workq) == 0) {
- return(EINVAL);
- }
- workqueue_list_lock();
- if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
- workqueue_list_unlock();
- return(ESRCH);
- }
-
- workq->suspend_count--;
- if (workq->suspend_count <= 0) {
- workq->flags &= ~PTHREAD_WORKQ_SUSPEND;
- if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
- wqreadyprio = workq->queueprio;
-
- pick_nextworkqueue_droplock();
- } else
- workqueue_list_unlock();
-
- return(0);
-}
+/* DEPRECATED
+int pthread_workqueue_removeitem_np(pthread_workqueue_t workq, pthread_workitem_handle_t itemhandle, unsigned int gencount)
+int pthread_workqueue_addbarrier_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg, pthread_workitem_handle_t *itemhandlep, unsigned int *gencountp)
+int pthread_workqueue_suspend_np(pthread_workqueue_t workq)
+int pthread_workqueue_resume_np(pthread_workqueue_t workq)
+*/
#else /* !BUILDING_VARIANT ] [ */
extern int __unix_conforming;
#if WQ_TRACE
__kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
#endif
- if (newstyle = 0) {
+ if (newstyle == 0) {
death = thread->death;
if (!already_exited){
thread->joiner = (struct _pthread *)NULL;
if (_pthread_lookup_thread(thread, NULL, 0) != 0)
return(ESRCH);
+ /* if the thread is a workqueue thread, then return error */
+ if (thread->wqthread != 0) {
+ return(ENOTSUP);
+ }
#if __DARWIN_UNIX03
int state;
/* returns non-zero if pthread_create or cthread_fork have been called */
int pthread_is_threaded_np(void);
+#if defined(__i386__) || defined(__x86_64__)
+int pthread_threadid_np(pthread_t,__uint64_t*);
+#endif
+
+/*SPI to set and get pthread name*/
+int pthread_getname_np(pthread_t,char*,size_t);
+int pthread_setname_np(const char*);
/* returns non-zero if the current thread is the main thread */
int pthread_main_np(void);
--- /dev/null
+.\" Copyright (c) 2004-2007 Apple Inc. All rights reserved.
+.Dd December 31, 2007
+.Dt PTHREAD_ATTR 3
+.Os
+.Sh NAME
+.Nm pthread_attr_destroy ,
+.Nm pthread_attr_init
+.Nd thread attribute operations
+.Sh SYNOPSIS
+.Fd #include <pthread.h>
+.Ft int
+.Fo pthread_attr_destroy
+.Fa "pthread_attr_t *attr"
+.Fc
+.Ft int
+.Fo pthread_attr_init
+.Fa "pthread_attr_t *attr"
+.Fc
+.Sh DESCRIPTION
+Thread attributes are used to specify parameters to
+.Fn pthread_create .
+One attribute object can be used in multiple calls to
+.Fn pthread_create ,
+with or without modifications between calls.
+.Pp
+The
+.Fn pthread_attr_init
+function initializes
+.Fa attr
+with all the default thread attributes.
+.Pp
+The
+.Fn pthread_attr_destroy
+function destroys
+.Fa attr .
+.Sh RETURN VALUES
+If successful, these functions return 0.
+Otherwise, an error number is returned to indicate the error.
+.Sh ERRORS
+.Fn pthread_attr_init
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er ENOMEM
+Out of memory.
+.El
+.Pp
+.Fn pthread_attr_destroy
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr .
+.El
+.Pp
+.Sh SEE ALSO
+.Xr pthread_create 3
+.Sh STANDARDS
+.Fn pthread_attr_init ,
+.Fn pthread_attr_destroy
+conform to
+.St -p1003.1-96
+.Pp
--- /dev/null
+.\" Copyright (c) 2004-2007 Apple Inc. All rights reserved.
+.Dd December 31, 2007
+.Dt PTHREAD_ATTR 3
+.Os
+.Sh NAME
+.Nm pthread_attr_getdetachstate ,
+.Nm pthread_attr_setdetachstate
+.Nd thread attribute operations
+.Sh SYNOPSIS
+.Fd #include <pthread.h>
+.Ft int
+.Fo pthread_attr_getdetachstate
+.Fa "const pthread_attr_t *attr"
+.Fa "int *detachstate"
+.Fc
+.Ft int
+.Fo pthread_attr_setdetachstate
+.Fa "pthread_attr_t *attr"
+.Fa "int detachstate"
+.Fc
+.Sh DESCRIPTION
+Thread attributes are used to specify parameters to
+.Fn pthread_create .
+One attribute object can be used in multiple calls to
+.Fn pthread_create ,
+with or without modifications between calls.
+.Pp
+One of these thread attributes governs the creation state of the new thread. The new thread
+can be either created "detached" or "joinable". The constants corresponding to these states are PTHREAD_CREATE_DETACHED and PTHREAD_CREATE_JOINABLE respectively.
+Creating a "joinable" thread allows the user
+to call
+.Fn pthread_join
+and
+.Fn pthread_detach ,
+with the new thread's ID. A "detached" thread's ID cannot be used with
+.Fn pthread_join
+and
+.Fn pthread_detach .
+The default value for the "detachstate" attribute is PTHREAD_CREATE_JOINABLE.
+.Pp
+The
+.Fn pthread_attr_setdetachstate
+function sets the thread's "detachstate" attribute.
+.Pp
+The "detachstate" attribute is set within the
+.Fa attr
+argument, which can subsequently be used as an argument to
+.Fn pthread_create .
+.Sh RETURN VALUES
+If successful, these functions return 0.
+Otherwise, an error number is returned to indicate the error.
+.Fn pthread_attr_getdetachstate ,
+on success, will copy the value of the thread's "detachstate" attribute
+to the location pointed to by the second function parameter.
+.Sh ERRORS
+.Fn pthread_attr_getdetachstate
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr
+.El
+.Pp
+.Fn pthread_attr_setdetachstate
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr
+or
+.Fa detachstate .
+.El
+.Pp
+.Sh SEE ALSO
+.Xr pthread_create 3 ,
+.Xr pthread_join 3 ,
+.Xr pthread_attr_init 3 ,
+.Xr pthread_detach 3
+.Sh STANDARDS
+.Fn pthread_attr_setdetachstate ,
+.Fn pthread_attr_getdetachstate
+conform to
+.St -p1003.1-96
+.Pp
--- /dev/null
+.\" Copyright (c) 2004-2007 Apple Inc. All rights reserved.
+.Dd December 31, 2007
+.Dt PTHREAD_ATTR 3
+.Os
+.Sh NAME
+.Nm pthread_attr_getinheritsched ,
+.Nm pthread_attr_setinheritsched
+.Nd thread attribute operations
+.Sh SYNOPSIS
+.Fd #include <pthread.h>
+.Ft int
+.Fo pthread_attr_getinheritsched
+.Fa "const pthread_attr_t *restrict attr"
+.Fa "int *restrict inheritsched"
+.Fc
+.Ft int
+.Fo pthread_attr_setinheritsched
+.Fa "pthread_attr_t *attr"
+.Fa "int inheritsched"
+.Fc
+.Sh DESCRIPTION
+Thread attributes are used to specify parameters to
+.Fn pthread_create .
+One attribute object can be used in multiple calls to
+.Fn pthread_create ,
+with or without modifications between calls.
+.Pp
+One of the thread attributes of interest is the "inheritsched" attribute. This attribute
+controls the scheduling policy and related attributes of the newly created thread. The values of the
+"inheritsched" attribute can be either PTHREAD_INHERIT_SCHED or PTHREAD_EXPLICIT_SCHED.
+.Pp
+PTHREAD_INHERIT_SCHED
+.Pp
+ Indicates that the newly created thread should inherit all its scheduling related attributes from its creating
+thread. It ignores the values of the relevant attributes within the
+.Fa attr
+argument.
+.Pp
+PTHREAD_EXPLICIT_SCHED
+.Pp
+ Indicates that the newly created thread should set its scheduling related attributes based on
+.Fa attr
+argument.
+.Pp
+The
+.Fn pthread_attr_setinheritsched
+functions set the "inheritsched" attribute within the
+.Fa attr
+argument to the desired value.
+.Pp
+The
+.Fn pthread_attr_getinheritsched
+functions copy the value of the "inheritsched" attribute to the location pointed to by the second function parameter.
+.Sh RETURN VALUES
+If successful, these functions return 0.
+Otherwise, an error number is returned to indicate the error.
+.Sh ERRORS
+.Pp
+.Fn pthread_attr_getinheritsched
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr .
+.El
+.Pp
+.Fn pthread_attr_setinheritsched
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr .
+.El
+.Sh SEE ALSO
+.Xr pthread_create 3 ,
+.Xr pthread_attr_init 3 ,
+.Xr pthread_attr_setschedparam 3
+.Sh STANDARDS
+.Pp
+.Fn pthread_attr_setinheritsched ,
+.Fn pthread_attr_getinheritsched
+conform to
+.St -susv2
--- /dev/null
+.\" Copyright (c) 2004-2007 Apple Inc. All rights reserved.
+.Dd December 31, 2007
+.Dt PTHREAD_ATTR 3
+.Os
+.Sh NAME
+.Nm pthread_attr_getschedparam ,
+.Nm pthread_attr_setschedparam
+.Nd thread attribute operations
+.Sh SYNOPSIS
+.Fd #include <pthread.h>
+.Ft int
+.Fo pthread_attr_getschedparam
+.Fa "const pthread_attr_t *restrict attr"
+.Fa "struct sched_param *restrict param"
+.Fc
+.Ft int
+.Fo pthread_attr_setschedparam
+.Fa "pthread_attr_t *restrict attr"
+.Fa "const struct sched_param *restrict param"
+.Fc
+.Sh DESCRIPTION
+Thread attributes are used to specify parameters to
+.Fn pthread_create .
+One attribute object can be used in multiple calls to
+.Fn pthread_create ,
+with or without modifications between calls.
+.Pp
+.Fn pthread_attr_getschedparam
+and
+.Fn pthread_attr_setschedparam
+get and set the scheduling parameters within the
+.Fa attr
+argument. See
+.Fd /usr/include/sched.h
+for the definition of
+.Fa struct sched_param .
+The
+.Fa sched_priority
+field of
+.Fa struct sched_param
+can be set to SCHED_OTHER, SCHED_FIFO and SCHED_RR.
+.Sh RETURN VALUES
+If successful, these functions return 0.
+Otherwise, an error number is returned to indicate the error.
+.Fn pthread_attr_getschedparam ,
+on success, will copy the value of the thread's scheduling parameter attribute
+to the location pointed to by the second function parameter.
+.Sh ERRORS
+.Pp
+.Fn pthread_attr_getschedparam
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr .
+.\" ========
+.El
+.Pp
+.Fn pthread_attr_setschedparam
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr .
+.\" ========
+.It Bq Er ENOTSUP
+Invalid value for
+.Fa param .
+.El
+.Sh SEE ALSO
+.Xr pthread_create 3 ,
+.Xr pthread_attr_init 3 ,
+.Xr pthread_attr_setinheritsched 3
+.Sh STANDARDS
+.Pp
+.Fn pthread_attr_setschedparam ,
+.Fn pthread_attr_getschedparam
+conform to
+.St -susv2
--- /dev/null
+.\" Copyright (c) 2004-2007 Apple Inc. All rights reserved.
+.Dd December 31, 2007
+.Dt PTHREAD_ATTR 3
+.Os
+.Sh NAME
+.Nm pthread_attr_getschedpolicy ,
+.Nm pthread_attr_setschedpolicy
+.Nd thread attribute operations
+.Sh SYNOPSIS
+.Fd #include <pthread.h>
+.Ft int
+.Fo pthread_attr_getschedpolicy
+.Fa "const pthread_attr_t *restrict attr"
+.Fa "int *restrict policy"
+.Fc
+.Ft int
+.Fo pthread_attr_setschedpolicy
+.Fa "pthread_attr_t *attr"
+.Fa "int policy"
+.Fc
+.Sh DESCRIPTION
+Thread attributes are used to specify parameters to
+.Fn pthread_create .
+One attribute object can be used in multiple calls to
+.Fn pthread_create ,
+with or without modifications between calls.
+.Pp
+The functions
+.Fn pthread_attr_setschedpolicy
+and
+.Fn pthread_attr_getschedpolicy ,
+set and get the attribute in the
+.Fa attr
+argument related to the scheduling policy.
+The value for the aforementioned attribute can be SCHED_FIFO, SCHED_RR and SCHED_OTHER.
+.Sh RETURN VALUES
+If successful, these functions return 0.
+Otherwise, an error number is returned to indicate the error.
+.Fn pthread_attr_getschedpolicy ,
+on success, will copy the value of the thread's scheduling policy attribute
+to the location pointed to by the second function parameter.
+.Sh ERRORS
+.Pp
+.Fn pthread_attr_getschedpolicy
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr .
+.El
+.Pp
+.Fn pthread_attr_setschedpolicy
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr .
+.It Bq Er ENOTSUP
+Invalid or unsupported value for
+.Fa policy .
+.El
+.Sh SEE ALSO
+.Xr pthread_create 3 ,
+.Xr pthread_attr_init 3 ,
+.Xr pthread_attr_setschedparam 3 ,
+.Xr pthread_attr_setinheritsched 3
+.Sh STANDARDS
+.Fn pthread_attr_setschedpolicy ,
+.Fn pthread_attr_getschedpolicy
+conform to
+.St -susv2
--- /dev/null
+.\" Copyright (c) 2004-2007 Apple Inc. All rights reserved.
+.Dd December 31, 2007
+.Dt PTHREAD_ATTR 3
+.Os
+.Sh NAME
+.Nm pthread_attr_getscope ,
+.Nm pthread_attr_setscope
+.Nd thread attribute operations
+.Sh SYNOPSIS
+.Fd #include <pthread.h>
+.Ft int
+.Fo pthread_attr_getscope
+.Fa "const pthread_attr_t *restrict attr"
+.Fa "int *restrict contentionscope"
+.Fc
+.Ft int
+.Fo pthread_attr_setscope
+.Fa "pthread_attr_t *attr"
+.Fa "int contentionscope"
+.Fc
+.Sh DESCRIPTION
+Thread attributes are used to specify parameters to
+.Fn pthread_create .
+One attribute object can be used in multiple calls to
+.Fn pthread_create ,
+with or without modifications between calls.
+.Pp
+The
+.Fn pthread_attr_setscope
+and
+.Fn pthread_attr_getscope
+functions, respectively, set and get the attribute within
+.Fa attr
+argument that controls the contention scope of the thread.
+The acceptable values are PTHREAD_SCOPE_SYSTEM, indicating a scheduling contention scope that
+is system-wide, and PTHREAD_SCOPE_PROCESS, which indicates a process scheduling contention scope.
+Currently on Mac OS X we only support PTHREAD_SCOPE_SYSTEM.
+.Sh RETURN VALUES
+If successful, these functions return 0.
+Otherwise, an error number is returned to indicate the error.
+.Sh ERRORS
+.Pp
+.Fn pthread_attr_getscope
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr .
+.\" ========
+.El
+.Pp
+.Fn pthread_attr_setscope
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr .
+.\" ========
+.It Bq Er ENOTSUP
+Invalid or unsupported value for
+.Fa contentionscope .
+.El
+.Sh SEE ALSO
+.Xr pthread_create 3 ,
+.Xr pthread_attr_init 3 ,
+.Xr pthread_attr_setinheritsched 3 ,
+.Xr pthread_attr_setschedpolicy 3 ,
+.Xr pthread_attr_setschedparam 3
+.Sh STANDARDS
+.Fn pthread_attr_setscope ,
+.Fn pthread_attr_getscope
+conform to
+.St -susv2
--- /dev/null
+.\" Copyright (c) 2004-2007 Apple Inc. All rights reserved.
+.Dd December 31, 2007
+.Dt PTHREAD_ATTR 3
+.Os
+.Sh NAME
+.Nm pthread_attr_getstackaddr ,
+.Nm pthread_attr_setstackaddr
+.Nd thread attribute operations
+.Sh SYNOPSIS
+.Fd #include <pthread.h>
+.Ft int
+.Fo pthread_attr_getstackaddr
+.Fa "const pthread_attr_t *restrict attr"
+.Fa "void **restrict stackaddr"
+.Fc
+.Ft int
+.Fo pthread_attr_setstackaddr
+.Fa "pthread_attr_t *attr"
+.Fa "void *stackaddr"
+.Fc
+.Sh DESCRIPTION
+Thread attributes are used to specify parameters to
+.Fn pthread_create .
+One attribute object can be used in multiple calls to
+.Fn pthread_create ,
+with or without modifications between calls.
+.Pp
+The functions
+.Fn pthread_attr_setstackaddr
+and
+.Fn pthread_attr_getstackaddr
+respectively, set and get the address at which the stack of the newly created thread should be located.
+The stackaddr attribute is set within the
+.Fa attr
+argument, which can subsequently be used as an argument to
+.Fn pthread_create .
+.Sh RETURN VALUES
+If successful, these functions return 0.
+Otherwise, an error number is returned to indicate the error.
+.Fn pthread_attr_getstackaddr
+returns the stackaddr attribute value in
+.Fa stackaddr
+if successful.
+.Sh ERRORS
+.Fn pthread_attr_setstackaddr
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr .
+.\" ========
+.El
+.Pp
+.Fn pthread_attr_getstackaddr
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr .
+.\" ========
+.El
+.Pp
+.Sh SEE ALSO
+.Xr pthread_create 3 ,
+.Xr pthread_attr_init 3 ,
+.Xr pthread_attr_setdetachstate 3 ,
+.Xr pthread_attr_setstacksize 3
+.Sh STANDARDS
+.Fn pthread_attr_setstackaddr ,
+.Fn pthread_attr_getstackaddr ,
+conform to
+.St -p1003.1-96
--- /dev/null
+.\" Copyright (c) 2004-2007 Apple Inc. All rights reserved.
+.Dd December 31, 2007
+.Dt PTHREAD_ATTR 3
+.Os
+.Sh NAME
+.Nm pthread_attr_getstacksize ,
+.Nm pthread_attr_setstacksize
+.Nd thread attribute operations
+.Sh SYNOPSIS
+.Fd #include <pthread.h>
+.Ft int
+.Fo pthread_attr_getstacksize
+.Fa "const pthread_attr_t *restrict attr"
+.Fa "size_t *restrict stacksize"
+.Fc
+.Ft int
+.Fo pthread_attr_setstacksize
+.Fa "pthread_attr_t *attr"
+.Fa "size_t stacksize"
+.Fc
+.Sh DESCRIPTION
+Thread attributes are used to specify parameters to
+.Fn pthread_create .
+One attribute object can be used in multiple calls to
+.Fn pthread_create ,
+with or without modifications between calls.
+.Pp
+The functions
+.Fn pthread_attr_setstacksize
+and
+.Fn pthread_attr_getstacksize ,
+respectively, set and get the size of the stack that is to be created for the new thread. The stack size attribute is set within the
+.Fa attr
+argument, which can subsequently be used as an argument to
+.Fn pthread_create .
+.Sh RETURN VALUES
+If successful, these functions return 0.
+Otherwise, an error number is returned to indicate the error.
+.Fn pthread_attr_getstacksize
+returns the stacksize attribute value in
+.Fa stacksize
+if successful.
+.Sh ERRORS
+.Fn pthread_attr_getstacksize
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr .
+.El
+.Pp
+.Fn pthread_attr_setstacksize
+will fail if:
+.Bl -tag -width Er
+.\" ========
+.It Bq Er EINVAL
+Invalid value for
+.Fa attr .
+.\" ========
+.It Bq Er EINVAL
+.Fa stacksize
+is less than
+.Dv PTHREAD_STACK_MIN .
+.\" ========
+.It Bq Er EINVAL
+.Fa stacksize
+is not a multiple of the system page size.
+.El
+.Sh SEE ALSO
+.Xr pthread_create 3 ,
+.Xr pthread_attr_init 3 ,
+.Xr pthread_attr_setstackaddr 3
+.Sh STANDARDS
+.Fn pthread_attr_setstacksize ,
+.Fn pthread_attr_getstacksize
+conform to
+.St -p1003.1-96
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
const struct timespec *abstime,
int isRelative,
int isconforming);
+extern int __semwait_signal(int cond_sem, int mutex_sem, int timeout, int relative, __int64_t tv_sec, __int32_t tv_nsec);
extern int __sigwait(const sigset_t *set, int *sig);
/*
pthread_join(pthread_t thread,
void **value_ptr)
{
- kern_return_t kern_res;
int res = 0;
pthread_t self = pthread_self();
- mach_port_t ignore;
mach_port_t kthport;
int conforming = 0;
- task_t tself = mach_task_self();
+#if !__DARWIN_UNIX03
+ kern_return_t kern_res;
+#endif
#if __DARWIN_UNIX03
if (__unix_conforming == 0)
/* Wait for it to signal... */
pthread_cleanup_push(__posix_join_cleanup, (void *)thread);
do {
- res = __semwait_signal(death, 0, 0, 0, 0, 0);
+ res = __semwait_signal(death, 0, 0, 0, (int64_t)0, (int32_t)0);
} while ((res < 0) && (errno == EINTR));
pthread_cleanup_pop(0);
semaphore_t death = SEMAPHORE_NULL; /* in case we need it */
semaphore_t joinsem = SEMAPHORE_NULL;
- if (thread->joiner_notify == NULL)
+ if (thread->joiner_notify == MACH_PORT_NULL)
death = new_sem_from_pool();
LOCK(thread->lock);
assert(thread->kernel_thread == kthport);
if (thread != self && (self == NULL || self->joiner != thread))
{
- int already_exited;
-
- if (thread->joiner_notify == NULL) {
+ if (thread->joiner_notify == MACH_PORT_NULL) {
if (death == SEMAPHORE_NULL)
- abort();
+ LIBC_ABORT("thread %p: death == SEMAPHORE_NULL", thread);
thread->joiner_notify = death;
death = SEMAPHORE_NULL;
}
/* Wait for it to signal... */
pthread_cleanup_push(__posix_join_cleanup, (void *)thread);
do {
- res = __semwait_signal(joinsem, 0, 0, 0, 0, 0);
+ res = __semwait_signal(joinsem, 0, 0, 0, (int64_t)0, (int32_t)0);
} while ((res < 0) && (errno == EINTR));
pthread_cleanup_pop(0);
#else /* __DARWIN_UNIX03 */
if (__sigwait(set, sig) == -1) {
err = errno;
+
+ /*
+ * EINTR that isn't a result of pthread_cancel()
+ * is translated to 0.
+ */
+ if (err == EINTR) {
+ err = 0;
+ }
}
return(err);
#else /* __DARWIN_UNIX03 */
- return(__sigwait(set, sig));
+ if (__sigwait(set, sig) == -1) {
+ /*
+ * EINTR that isn't a result of pthread_cancel()
+ * is translated to 0.
+ */
+ if (errno != EINTR) {
+ return -1;
+ }
+ }
+ return 0;
#endif /* __DARWIN_UNIX03 */
}
If there is no cleanup routine,
.Fn pthread_cleanup_pop
does nothing.
+.Pp
+.Fn pthread_cleanup_pop
+must be paired with a corresponding
+.Xr pthread_cleanup_push 3
+in the same lexical scope.
.Sh RETURN VALUES
.Fn pthread_cleanup_pop
does not return any value.
is called, it is passed
.Fa arg
as its only argument.
+.Fn pthread_cleanup_push
+must be paired with a corresponding
+.Xr pthread_cleanup_pop 3
+in the same lexical scope.
.Sh RETURN VALUES
.Fn pthread_cleanup_push
does not return any value.
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */
-extern void _pthread_mutex_remove(pthread_mutex_t *, pthread_t);
+
+extern int __semwait_signal(int, int, int, int, int64_t, int32_t);
+extern int _pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *, int);
extern int __unix_conforming;
+#ifdef PR_5243343
+/* 5243343 - temporary hack to detect if we are running the conformance test */
+extern int PR_5243343_flag;
+#endif /* PR_5243343 */
+
+#if defined(__i386__) || defined(__x86_64__)
+__private_extern__ int __new_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int isRelative, int isconforming);
+extern int _new_pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *, int);
+extern int _new_pthread_cond_destroy(pthread_cond_t *);
+extern int _new_pthread_cond_destroy_locked(pthread_cond_t *);
+int _new_pthread_cond_broadcast(pthread_cond_t *cond);
+int _new_pthread_cond_signal_thread_np(pthread_cond_t *cond, pthread_t thread);
+int _new_pthread_cond_signal(pthread_cond_t *cond);
+int _new_pthread_cond_timedwait_relative_np(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime);
+int _new_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
+int _new_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime);
+static void _new_cond_cleanup(void *arg);
+static void _new_cond_dropwait(npthread_cond_t * cond);
+
+
+#if defined(__LP64__)
+#define COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt) \
+{ \
+ if (cond->misalign != 0) { \
+ c_lseqcnt = &cond->c_seq[1]; \
+ c_useqcnt = &cond->c_seq[2]; \
+ } else { \
+ /* aligned */ \
+ c_lseqcnt = &cond->c_seq[0]; \
+ c_useqcnt = &cond->c_seq[1]; \
+ } \
+}
+#else /* __LP64__ */
+#define COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt) \
+{ \
+ if (cond->misalign != 0) { \
+ c_lseqcnt = &cond->c_seq[1]; \
+ c_useqcnt = &cond->c_seq[2]; \
+ } else { \
+ /* aligned */ \
+ c_lseqcnt = &cond->c_seq[0]; \
+ c_useqcnt = &cond->c_seq[1]; \
+ } \
+}
+#endif /* __LP64__ */
+
+
+#define _KSYN_TRACE_ 0
+
+#if _KSYN_TRACE_
+/* The Function qualifiers */
+#define DBG_FUNC_START 1
+#define DBG_FUNC_END 2
+#define DBG_FUNC_NONE 0
+
+int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
+
+#define _KSYN_TRACE_UM_LOCK 0x9000060
+#define _KSYN_TRACE_UM_UNLOCK 0x9000064
+#define _KSYN_TRACE_UM_MHOLD 0x9000068
+#define _KSYN_TRACE_UM_MDROP 0x900006c
+#define _KSYN_TRACE_UM_CVWAIT 0x9000070
+#define _KSYN_TRACE_UM_CVSIG 0x9000074
+#define _KSYN_TRACE_UM_CVBRD 0x9000078
+
+#endif /* _KSYN_TRACE_ */
+#endif /* __i386__ || __x86_64__ */
+
+
#ifndef BUILDING_VARIANT /* [ */
/*
int sig = cond->sig;
/* to provide backwards compat for apps using united condtn vars */
- if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init) && (sig != _PTHREAD_KERN_COND_SIG))
+ if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
return(EINVAL);
LOCK(cond->lock);
if (cond->sig == _PTHREAD_COND_SIG)
{
+#if defined(__i386__) || defined(__x86_64__)
+ if (cond->pshared == PTHREAD_PROCESS_SHARED) {
+ ret = _new_pthread_cond_destroy_locked(cond);
+ UNLOCK(cond->lock);
+ return(ret);
+ }
+#endif /* __i386__ || __x86_64__ */
if (cond->busy == (pthread_mutex_t *)NULL)
{
cond->sig = _PTHREAD_NO_SIG;
ret = 0;
} else
ret = EBUSY;
- } else if (cond->sig == _PTHREAD_KERN_COND_SIG) {
- int condid = cond->_pthread_cond_kernid;
- UNLOCK(cond->lock);
- if (__pthread_cond_destroy(condid) == -1)
- return(errno);
- cond->sig = _PTHREAD_NO_SIG;
- return(0);
} else
ret = EINVAL; /* Not an initialized condition variable structure */
UNLOCK(cond->lock);
int sig = cond->sig;
/* to provide backwards compat for apps using united condtn vars */
- if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init) && (sig != _PTHREAD_KERN_COND_SIG))
+ if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
return(EINVAL);
LOCK(cond->lock);
{
_pthread_cond_init(cond, NULL, 0);
res = 0;
- } else if (cond->sig == _PTHREAD_KERN_COND_SIG) {
- int condid = cond->_pthread_cond_kernid;
- UNLOCK(cond->lock);
- if (__pthread_cond_broadcast(condid) == -1)
- return(errno);
- return(0);
} else
res = EINVAL; /* Not a condition variable */
UNLOCK(cond->lock);
return (res);
}
+#if defined(__i386__) || defined(__x86_64__)
+ else if (cond->pshared == PTHREAD_PROCESS_SHARED) {
+ UNLOCK(cond->lock);
+ return(_new_pthread_cond_broadcast(cond));
+ }
+#endif /* __i386__ || __x86_64__ */
else if ((sem = cond->sem) == SEMAPHORE_NULL)
{
/* Avoid kernel call since there are no waiters... */
/* to provide backwards compat for apps using united condtn vars */
- if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init) && (sig != _PTHREAD_KERN_COND_SIG))
+ if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
return(EINVAL);
LOCK(cond->lock);
if (cond->sig != _PTHREAD_COND_SIG)
{
_pthread_cond_init(cond, NULL, 0);
ret = 0;
- } else if (cond->sig == _PTHREAD_KERN_COND_SIG) {
- int condid = cond->_pthread_cond_kernid;
- UNLOCK(cond->lock);
- if (__pthread_cond_signal(condid) == -1)
- return(errno);
- return(0);
} else
ret = EINVAL; /* Not a condition variable */
UNLOCK(cond->lock);
return (ret);
}
+#if defined(__i386__) || defined(__x86_64__)
+ else if (cond->pshared == PTHREAD_PROCESS_SHARED) {
+ UNLOCK(cond->lock);
+ return(_new_pthread_cond_signal_thread_np(cond, thread));
+ }
+#endif /* __i386__ || __x86_64__ */
else if ((sem = cond->sem) == SEMAPHORE_NULL)
{
/* Avoid kernel call since there are not enough waiters... */
}
}
-static void cond_cleanup(void *arg)
+static void
+cond_cleanup(void *arg)
{
pthread_cond_t *cond = (pthread_cond_t *)arg;
pthread_mutex_t *mutex;
int isRelative,
int isconforming)
{
- int res, saved_error;
- kern_return_t kern_res;
- int wait_res;
+ int res;
+ kern_return_t kern_res = KERN_SUCCESS;
+ int wait_res = 0;
pthread_mutex_t *busy;
- mach_timespec_t then;
+ mach_timespec_t then = {0, 0};
struct timespec cthen = {0,0};
int sig = cond->sig;
int msig = mutex->sig;
extern void _pthread_testcancel(pthread_t thread, int isconforming);
/* to provide backwards compat for apps using united condtn vars */
- if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init) && (sig != _PTHREAD_KERN_COND_SIG))
+ if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
return(EINVAL);
if (isconforming) {
- if((msig != _PTHREAD_MUTEX_SIG) && (msig != _PTHREAD_MUTEX_SIG_init) && (msig != _PTHREAD_KERN_MUTEX_SIG))
+ if((msig != _PTHREAD_MUTEX_SIG) && (msig != _PTHREAD_MUTEX_SIG_init))
return(EINVAL);
if (isconforming > 0)
_pthread_testcancel(pthread_self(), 1);
{
if (cond->sig != _PTHREAD_COND_SIG_init)
{
- if ((cond->sig == _PTHREAD_KERN_COND_SIG) && (mutex->sig == _PTHREAD_KERN_MUTEX_SIG)) {
- int condid = cond->_pthread_cond_kernid;
- int mutexid = mutex->_pthread_mutex_kernid;
- UNLOCK(cond->lock);
-
- if (abstime) {
- struct timespec now;
- struct timeval tv;
- gettimeofday(&tv, NULL);
- TIMEVAL_TO_TIMESPEC(&tv, &now);
-
- /* Compute relative time to sleep */
- then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
- then.tv_sec = abstime->tv_sec - now.tv_sec;
- if (then.tv_nsec < 0)
- {
- then.tv_nsec += NSEC_PER_SEC;
- then.tv_sec--;
- }
- if (((int)then.tv_sec < 0) ||
- ((then.tv_sec == 0) && (then.tv_nsec == 0)))
- {
- UNLOCK(cond->lock);
- return ETIMEDOUT;
- }
- if ((res = pthread_mutex_unlock(mutex)) != 0)
- return (res);
-
- if ((__pthread_cond_timedwait(condid, mutexid, &then)) == -1)
- saved_error = errno;
- else
- saved_error = 0;
- } else {
- if ((res = pthread_mutex_unlock(mutex)) != 0)
- return (res);
- if(( __pthread_cond_wait(condid, mutexid)) == -1)
- saved_error = errno;
- else
- saved_error = 0;
- }
- if ((res = pthread_mutex_lock(mutex)) != 0)
- return (res);
- return(saved_error);
- } else {
UNLOCK(cond->lock);
return (EINVAL); /* Not a condition variable */
- }
}
_pthread_cond_init(cond, NULL, 0);
}
+#if defined(__i386__) || defined(__x86_64__)
+ else if (cond->pshared == PTHREAD_PROCESS_SHARED) {
+ UNLOCK(cond->lock);
+ return(__new_pthread_cond_wait(cond, mutex, abstime, isRelative, isconforming));
+ }
+#endif /* __i386__ || __x86_64__ */
if (abstime) {
if (!isconforming)
* we change the kernel do this anyway
*/
cthen.tv_sec = abstime->tv_sec;
- cthen.tv_nsec = abstime->tv_nsec;
- if ((cthen.tv_sec < 0) || (cthen.tv_nsec < 0)) {
- UNLOCK(cond->lock);
- return EINVAL;
- }
- if (cthen.tv_nsec >= NSEC_PER_SEC) {
- UNLOCK(cond->lock);
- return EINVAL;
- }
- }
+ cthen.tv_nsec = abstime->tv_nsec;
+ if ((cthen.tv_sec < 0) || (cthen.tv_nsec < 0)) {
+ UNLOCK(cond->lock);
+ return EINVAL;
+ }
+ if (cthen.tv_nsec >= NSEC_PER_SEC) {
+ UNLOCK(cond->lock);
+ return EINVAL;
+ }
+ }
}
if (++cond->waiters == 1)
}
UNLOCK(cond->lock);
-#if defined(DEBUG)
- _pthread_mutex_remove(mutex, pthread_self());
-#endif
LOCK(mutex->lock);
- if (--mutex->lock_count == 0)
+ if (--mutex->mtxopts.options.lock_count == 0)
{
- PLOCKSTAT_MUTEX_RELEASE(mutex, (mutex->type == PTHREAD_MUTEX_RECURSIVE)? 1:0);
+ PLOCKSTAT_MUTEX_RELEASE(mutex, (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)? 1:0);
if (mutex->sem == SEMAPHORE_NULL)
mutex->sem = new_sem_from_pool();
} else {
pthread_cleanup_push(cond_cleanup, (void *)cond);
wait_res = __semwait_signal(cond->sem, mutex->sem, abstime != NULL, isRelative,
- cthen.tv_sec, cthen.tv_nsec);
+ (int64_t)cthen.tv_sec, (int32_t)cthen.tv_nsec);
pthread_cleanup_pop(0);
}
} else {
- PLOCKSTAT_MUTEX_RELEASE(mutex, (mutex->type == PTHREAD_MUTEX_RECURSIVE)? 1:0);
+ PLOCKSTAT_MUTEX_RELEASE(mutex, (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)? 1:0);
UNLOCK(mutex->lock);
if (!isconforming) {
if (abstime) {
}
} else {
pthread_cleanup_push(cond_cleanup, (void *)cond);
- wait_res = __semwait_signal(cond->sem, NULL, abstime != NULL, isRelative,
- cthen.tv_sec, cthen.tv_nsec);
+ wait_res = __semwait_signal(cond->sem, 0, abstime != NULL, isRelative,
+ (int64_t)cthen.tv_sec, (int32_t)cthen.tv_nsec);
pthread_cleanup_pop(0);
}
}
}
-#ifdef PR_5243343
-/* 5243343 - temporary hack to detect if we are running the conformance test */
-extern int PR_5243343_flag;
-#endif /* PR_5243343 */
__private_extern__ int
_pthread_cond_init(pthread_cond_t *cond,
cond->waiters = 0;
cond->sigspending = 0;
if (conforming) {
- if (attr) {
+ if (attr)
cond->pshared = attr->pshared;
- if (cond->pshared == PTHREAD_PROCESS_SHARED) {
- cond->sem = SEMAPHORE_NULL;
- cond->sig = 0;
- if( __pthread_cond_init(cond, attr) == -1)
- return(errno);
- cond->sig = _PTHREAD_KERN_COND_SIG;
- return(0);
- }
- }
else
cond->pshared = _PTHREAD_DEFAULT_PSHARED;
} else
}
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * Initialize a "new-style" condition variable (the i386/x86_64
+ * implementation layered on the __psynch_* kernel calls).  The public
+ * pthread_cond_t storage is reinterpreted as an npthread_cond_t.
+ *
+ * ocond      - condition variable to initialize
+ * attr       - optional attributes; only pshared is consulted here
+ * conforming - non-zero for UNIX03-conforming callers (honor attr)
+ *
+ * Always returns 0.
+ */
+__private_extern__ int
+_new_pthread_cond_init(pthread_cond_t *ocond,
+		  const pthread_condattr_t *attr,
+		  int conforming)
+{
+	npthread_cond_t * cond = (npthread_cond_t *)ocond;
+
+	/* no mutex is associated yet; zero all three sequence words */
+	cond->busy = (npthread_mutex_t *)NULL;
+	cond->c_seq[0] = 0;
+	cond->c_seq[1] = 0;
+	cond->c_seq[2] = 0;
+
+	cond->rfu = 0;
+	/*
+	 * Record whether the structure is 8-byte aligned;
+	 * COND_GETSEQ_ADDR selects which c_seq words hold the lock/unlock
+	 * sequence counters based on this flag.
+	 */
+	if (((uintptr_t)cond & 0x07) != 0) {
+		cond->misalign = 1;
+	} else {
+		cond->misalign = 0;
+	}
+	/* pshared comes from attr only for conforming callers */
+	if (conforming) {
+		if (attr)
+			cond->pshared = attr->pshared;
+		else
+			cond->pshared = _PTHREAD_DEFAULT_PSHARED;
+	} else
+		cond->pshared = _PTHREAD_DEFAULT_PSHARED;
+	cond->sig = _PTHREAD_COND_SIG;
+	return (0);
+}
+
+/*
+ * Destroy a new-style condition variable.  Thin locking wrapper:
+ * takes the cond lock and delegates to
+ * _new_pthread_cond_destroy_locked(), which does the actual checks.
+ * Returns 0, EBUSY (waiters pending) or EINVAL (not initialized).
+ */
+int
+_new_pthread_cond_destroy(pthread_cond_t * ocond)
+{
+	npthread_cond_t *cond = (npthread_cond_t *)ocond;
+	int ret;
+
+	LOCK(cond->lock);
+	ret = _new_pthread_cond_destroy_locked(ocond);
+	UNLOCK(cond->lock);
+
+	return(ret);
+}
+
+/*
+ * Destroy a new-style condition variable; caller must already hold
+ * cond->lock (see _new_pthread_cond_destroy()).
+ *
+ * The cond is destroyable only when the lock-sequence and
+ * unlock-sequence counters match, i.e. no waiters are pending.
+ *
+ * Returns 0 on success, EBUSY if waiters are pending, EINVAL if the
+ * structure is not an initialized condition variable.
+ */
+int
+_new_pthread_cond_destroy_locked(pthread_cond_t * ocond)
+{
+	npthread_cond_t *cond = (npthread_cond_t *)ocond;
+	int ret;
+	int sig = cond->sig;
+	uint32_t * c_lseqcnt;
+	uint32_t * c_useqcnt;
+	uint32_t lgenval , ugenval;
+
+	/* to provide backwards compat for apps using united condtn vars */
+	if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
+		return(EINVAL);
+
+	if (cond->sig == _PTHREAD_COND_SIG)
+	{
+		COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt);
+		/*
+		 * Snapshot both sequence counters; equal values mean no
+		 * pending waiters, so the cond may be invalidated.
+		 * (A stale "retry:" label was removed here: nothing in this
+		 * function jumps back, unlike _new_pthread_cond_broadcast.)
+		 */
+		lgenval = *c_lseqcnt;
+		ugenval = *c_useqcnt;
+		if (lgenval == ugenval)
+		{
+			cond->sig = _PTHREAD_NO_SIG;
+			ret = 0;
+		} else
+			ret = EBUSY;
+	} else
+		ret = EINVAL;	/* Not an initialized condition variable structure */
+	return (ret);
+}
+
+/*
+ * Signal a condition variable, waking up all threads waiting for it.
+ *
+ * Computes the number of pending waiters from the difference between
+ * the lock-sequence and unlock-sequence counters, reserves them via a
+ * 32-bit CAS on the unlock counter, then asks the kernel to wake them
+ * with __psynch_cvbroad().  Finally attempts to reset both counters to
+ * zero with a 64-bit CAS (harmless if new waiters arrived meanwhile).
+ *
+ * Returns 0 on success, EINVAL if ocond is not a condition variable.
+ */
+int
+_new_pthread_cond_broadcast(pthread_cond_t *ocond)
+{
+	npthread_cond_t * cond = (npthread_cond_t *)ocond;
+	int sig = cond->sig;
+	npthread_mutex_t * mutex;
+	uint32_t lgenval, ugenval, mgen, ugen, flags, mtxgen, mtxugen, notify;
+	int diffgen, retval, dropcount, mutexrefs;
+	uint64_t oldval64, newval64;
+	uint32_t * c_lseqcnt;
+	uint32_t * c_useqcnt;
+	uint32_t * pmtx = NULL;
+
+
+	/* to provide backwards compat for apps using united condtn vars */
+	if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
+		return(EINVAL);
+
+	/* lazily initialize a statically-initialized cond on first use */
+	if (sig != _PTHREAD_COND_SIG)
+	{
+		int res;
+
+		LOCK(cond->lock);
+		if (cond->sig == _PTHREAD_COND_SIG_init)
+		{
+			_new_pthread_cond_init(ocond, NULL, 0);
+			res = 0;
+		} else if (cond->sig != _PTHREAD_COND_SIG) {
+			res = EINVAL;	/* Not a condition variable */
+			UNLOCK(cond->lock);
+			return (res);
+		}
+		UNLOCK(cond->lock);
+	}
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
+#endif
+
+	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt);
+retry:
+	lgenval = *c_lseqcnt;
+	ugenval = *c_useqcnt;
+	diffgen = lgenval - ugenval;	/* pending waiters */
+
+	if (diffgen <= 0) {
+		/* no waiters: trace the exit BEFORE returning (the trace was
+		 * previously dead code placed after the return) */
+#if _KSYN_TRACE_
+		(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
+#endif
+		return(0);
+	}
+
+	mutex = cond->busy;
+
+	/* reserve all pending waiters; retry if another signaller raced us */
+	if (OSAtomicCompareAndSwap32(ugenval, ugenval+diffgen, (volatile int *)c_useqcnt) != TRUE)
+		goto retry;
+
+#ifdef COND_MTX_WAITQUEUEMOVE
+
+	if ((mutex != NULL) && cond->pshared != PTHREAD_PROCESS_SHARED) {
+#if _KSYN_TRACE_
+		(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, 1, diffgen, 0, 0);
+#endif
+		(void)__mtx_holdlock(mutex, diffgen, &flags, &pmtx, &mgen, &ugen);
+		mutexrefs = 1;
+	} else {
+		if (cond->pshared != PTHREAD_PROCESS_SHARED)
+			flags = _PTHREAD_MTX_OPT_NOHOLD;
+		else
+			flags = _PTHREAD_MTX_OPT_NOHOLD | _PTHREAD_MTX_OPT_PSHARED;
+		mgen = ugen = 0;
+		mutexrefs = 0;
+		pmtx = NULL;
+	}
+#else /* COND_MTX_WAITQUEUEMOVE */
+
+	if (cond->pshared != PTHREAD_PROCESS_SHARED)
+		flags = _PTHREAD_MTX_OPT_NOHOLD;
+	else
+		flags = _PTHREAD_MTX_OPT_NOHOLD | _PTHREAD_MTX_OPT_PSHARED;
+	pmtx = NULL;
+	mgen = ugen = 0;
+	mutexrefs = 0;
+#endif /* COND_MTX_WAITQUEUEMOVE */
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, 3, diffgen, 0, 0);
+#endif
+	retval = __psynch_cvbroad(ocond, lgenval, diffgen, (pthread_mutex_t *)pmtx, mgen, ugen , (uint64_t)0, flags);
+
+#ifdef COND_MTX_WAITQUEUEMOVE
+	if ((retval != -1) && (retval != 0)) {
+		if ((mutexrefs != 0) && (retval <= PTHRW_MAX_READERS/2)) {
+			dropcount = (retval);
+#if _KSYN_TRACE_
+			(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, 2, dropcount, 0, 0);
+#endif
+			/* NOTE: was garbled "&#172;ify" (mis-decoded HTML entity); must be &notify */
+			retval = __mtx_droplock(mutex, dropcount, &flags, &pmtx, &mtxgen, &mtxugen, &notify);
+		}
+	}
+#endif /* COND_MTX_WAITQUEUEMOVE */
+
+	/* try to reset both counters to zero iff they still hold the values
+	 * we acted on; failure just means new waiters arrived */
+	oldval64 = (((uint64_t)(ugenval+diffgen)) << 32);
+	oldval64 |= lgenval;
+	newval64 = 0;
+
+	OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt);
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
+#endif
+	return(0);
+}
+
+
+/*
+ * Signal a condition variable, waking a specified thread.
+ */
+int
+_new_pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
+{
+	npthread_cond_t * cond = (npthread_cond_t *)ocond;
+	int sig = cond->sig;
+	npthread_mutex_t * mutex;
+	int retval, dropcount;
+	uint32_t lgenval, ugenval, diffgen, mgen, ugen, flags, mtxgen, mtxugen, notify;
+	uint32_t * c_lseqcnt;
+	uint32_t * c_useqcnt;
+	uint64_t oldval64, newval64;
+	int mutexrefs;
+	uint32_t * pmtx = NULL;
+
+	/* to provide backwards compat for apps using united condition vars */
+
+	if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
+		return(EINVAL);
+	/* lazily promote a statically initialized condvar under its lock */
+	if (cond->sig != _PTHREAD_COND_SIG) {
+		LOCK(cond->lock);
+		if (cond->sig != _PTHREAD_COND_SIG) {
+			if (cond->sig == _PTHREAD_COND_SIG_init) {
+				_new_pthread_cond_init(ocond, NULL, 0);
+			} else {
+				UNLOCK(cond->lock);
+				return(EINVAL);
+			}
+		}
+		UNLOCK(cond->lock);
+	}
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
+#endif
+	/* locate the lock/unlock sequence counter words for this condvar */
+	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt);
+retry:
+	lgenval = *c_lseqcnt;
+	ugenval = *c_useqcnt;
+	diffgen = lgenval - ugenval;	/* pending waiters */
+	/* NOTE(review): diffgen is uint32_t here, so "<= 0" only catches 0;
+	 * the broadcast path declares it as int — TODO confirm intent. */
+	if (diffgen <= 0) {
+#if _KSYN_TRACE_
+		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
+#endif
+		return(0);
+	}
+
+	mutex = cond->busy;
+
+	/* reserve exactly one waiter to wake; retry if another signaller raced us */
+	if (OSAtomicCompareAndSwap32(ugenval, ugenval+1, (volatile int *)c_useqcnt) != TRUE)
+		goto retry;
+
+#ifdef COND_MTX_WAITQUEUEMOVE
+	if ((mutex != NULL) && (cond->pshared != PTHREAD_PROCESS_SHARED)) {
+#if _KSYN_TRACE_
+		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 1, 0, 0, 0);
+#endif
+		/* hold the mutex so the woken waiter can be queued directly on it */
+		(void)__mtx_holdlock(mutex, 1, &flags, &pmtx, &mgen, &ugen);
+		mutexrefs = 1;
+	} else {
+		if (cond->pshared != PTHREAD_PROCESS_SHARED)
+			flags = _PTHREAD_MTX_OPT_NOHOLD;
+		else
+			flags = _PTHREAD_MTX_OPT_NOHOLD | _PTHREAD_MTX_OPT_PSHARED;
+		mgen = ugen = 0;
+		mutexrefs = 0;
+	}
+#else /* COND_MTX_WAITQUEUEMOVE */
+	if (cond->pshared != PTHREAD_PROCESS_SHARED)
+		flags = _PTHREAD_MTX_OPT_NOHOLD;
+	else
+		flags = _PTHREAD_MTX_OPT_NOHOLD | _PTHREAD_MTX_OPT_PSHARED;
+	mgen = ugen = 0;
+	mutexrefs = 0;
+
+#endif /* COND_MTX_WAITQUEUEMOVE */
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 3, lgenval, ugenval+1, 0);
+#endif
+	/* ask the kernel to wake the given thread (MACH_PORT_NULL picks any waiter) */
+	retval = __psynch_cvsignal(ocond, lgenval, ugenval+1,(pthread_mutex_t *)mutex, mgen, ugen, pthread_mach_thread_np(thread), flags);
+
+#ifdef COND_MTX_WAITQUEUEMOVE
+	if ((retval != -1) && (retval != 0) && (mutexrefs != 0)) {
+		dropcount = retval;
+#if _KSYN_TRACE_
+		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 4, dropcount, 0, 0);
+#endif
+		/* release the reference taken by __mtx_holdlock above */
+		retval = __mtx_droplock(mutex, dropcount, &flags, &pmtx, &mtxgen, &mtxugen, &notify);
+	}
+#endif /* COND_MTX_WAITQUEUEMOVE */
+
+	/* last pending waiter consumed: try to reset both counters to zero */
+	if (lgenval == ugenval+1){
+		oldval64 = (((uint64_t)(ugenval+1)) << 32);
+		oldval64 |= lgenval;
+		newval64 = 0;
+		OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt);
+#if _KSYN_TRACE_
+		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 5, 0, 0, 0);
+#endif
+	}
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
+#endif
+	return (0);
+}
+
+/*
+ * Signal a condition variable, waking only one thread.
+ */
+int
+_new_pthread_cond_signal(pthread_cond_t *cond)
+{
+	/* NULL thread means "wake any one waiter" in the _thread_np variant */
+	return _new_pthread_cond_signal_thread_np(cond, NULL);
+}
+
+/*
+ * Manage a list of condition variables associated with a mutex
+ */
+
+
+/*
+ * Suspend waiting for a condition variable.
+ * Note: we have to keep a list of condition variables which are using
+ * this same mutex variable so we can detect invalid 'destroy' sequences.
+ * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
+ * remaining conforming behavior..
+ */
+__private_extern__ int
+__new_pthread_cond_wait(pthread_cond_t *ocond,
+			pthread_mutex_t *omutex,
+			const struct timespec *abstime,
+			int isRelative,
+			int isconforming)
+{
+	int retval;
+	npthread_cond_t * cond = (npthread_cond_t *)ocond;
+	npthread_mutex_t * mutex = (npthread_mutex_t * )omutex;
+	mach_timespec_t then = {0,0};
+	struct timespec cthen = {0,0};
+	int sig = cond->sig;
+	int msig = mutex->sig;
+	int firstfit = 0;
+	npthread_mutex_t * pmtx;
+	uint32_t mtxgen, mtxugen, flags, updateval, notify;
+	uint32_t lgenval, ugenval;
+	uint32_t * c_lseqcnt;
+	uint32_t * c_useqcnt;
+	uint32_t * npmtx = NULL;
+
+extern void _pthread_testcancel(pthread_t thread, int isconforming);
+
+	/* to provide backwards compat for apps using united condition vars */
+	if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
+		return(EINVAL);
+
+	/* conforming callers get mutex validation and a cancellation point */
+	if (isconforming) {
+		if((msig != _PTHREAD_MUTEX_SIG) && (msig != _PTHREAD_MUTEX_SIG_init))
+			return(EINVAL);
+		if (isconforming > 0)
+			_pthread_testcancel(pthread_self(), 1);
+	}
+	/* lazily promote a statically initialized condvar under its lock */
+	if (cond->sig != _PTHREAD_COND_SIG)
+	{
+		LOCK(cond->lock);
+		if (cond->sig != _PTHREAD_COND_SIG_init)
+		{
+			UNLOCK(cond->lock);
+			return (EINVAL);	/* Not a condition variable */
+		}
+		_new_pthread_cond_init(ocond, NULL, 0);
+		UNLOCK(cond->lock);
+	}
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_START, (uint32_t)cond, 0, 0, (uint32_t)abstime, 0);
+#endif
+	/* locate the lock/unlock sequence counter words for this condvar */
+	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt);
+
+	/* send relative time to kernel */
+	if (abstime) {
+		if (isRelative == 0) {
+			/* caller gave absolute time: convert to a relative interval */
+			struct timespec now;
+			struct timeval tv;
+			gettimeofday(&tv, NULL);
+			TIMEVAL_TO_TIMESPEC(&tv, &now);
+
+			/* Compute relative time to sleep */
+			then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
+			then.tv_sec = abstime->tv_sec - now.tv_sec;
+			if (then.tv_nsec < 0)
+			{
+				then.tv_nsec += NSEC_PER_SEC;
+				then.tv_sec--;
+			}
+			if (((int)then.tv_sec < 0) ||
+				((then.tv_sec == 0) && (then.tv_nsec == 0)))
+			{
+				/* deadline already passed */
+				/* NOTE(review): cond->lock does not appear to be
+				 * held on this path — TODO confirm the UNLOCK */
+				UNLOCK(cond->lock);
+				return ETIMEDOUT;
+			}
+			if (isconforming != 0) {
+				/* conforming mode also validates the caller's raw timespec */
+				cthen.tv_sec = abstime->tv_sec;
+				cthen.tv_nsec = abstime->tv_nsec;
+				if ((cthen.tv_sec < 0) || (cthen.tv_nsec < 0)) {
+					UNLOCK(cond->lock);
+					return EINVAL;
+				}
+				if (cthen.tv_nsec >= NSEC_PER_SEC) {
+					UNLOCK(cond->lock);
+					return EINVAL;
+				}
+			}
+		} else {
+			/* caller already supplied a relative interval */
+			then.tv_sec = abstime->tv_sec;
+			then.tv_nsec = abstime->tv_nsec;
+		}
+		if(isconforming && ((then.tv_sec < 0) || (then.tv_nsec < 0))) {
+			return EINVAL;
+		}
+		if (then.tv_nsec >= NSEC_PER_SEC) {
+			return EINVAL;
+		}
+	}
+
+	/* record the associated mutex; _new_cond_dropwait/_new_cond_cleanup use it */
+	cond->busy = mutex;
+	pmtx = mutex;
+
+	/* register as a waiter by bumping the lock-sequence counter */
+	ugenval = *c_useqcnt;
+	lgenval = OSAtomicIncrement32((volatile int32_t *)c_lseqcnt);
+
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 1, lgenval, ugenval, 0);
+#endif
+	notify = 0;
+	/* drop the mutex before sleeping; kernel rewakes us with it (or we relock) */
+	retval = __mtx_droplock(pmtx, 1, &flags, &npmtx, &mtxgen, &mtxugen, &notify);
+	if (retval != 0)
+		return(EINVAL);
+	if ((notify & 1) == 0) {
+		npmtx = NULL;
+	}
+	/* fold the kernel notify hint bits (0xc0000000) into the nsec argument */
+	if ((notify & 0xc0000000) != 0)
+		then.tv_nsec |= (notify & 0xc0000000);
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 3, (uint32_t)mutex, 0, 0);
+#endif
+
+	if (isconforming) {
+		/* cleanup handler re-balances the waiter count if we are cancelled */
+		pthread_cleanup_push(_new_cond_cleanup, (void *)cond);
+		updateval = __psynch_cvwait(ocond, lgenval, ugenval, (pthread_mutex_t *)npmtx, mtxgen, mtxugen, (uint64_t)then.tv_sec, (uint64_t)then.tv_nsec);
+		pthread_cleanup_pop(0);
+	} else {
+		updateval = __psynch_cvwait(ocond, lgenval, ugenval, (pthread_mutex_t *)npmtx, mtxgen, mtxugen, (uint64_t)then.tv_sec, (uint64_t)then.tv_nsec);
+
+	}
+
+	retval = 0;
+
+#ifdef COND_MTX_WAITQUEUEMOVE
+	/* Needs to handle timedout */
+	if (updateval == (uint32_t)-1) {
+		/* syscall failed: errno carries the reason (e.g. timeout) */
+		retval = errno;
+#if _KSYN_TRACE_
+		(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 4, retval, 0, 0);
+#endif
+		/* add unlock ref to show one less waiter */
+		_new_cond_dropwait(cond);
+
+		pthread_mutex_lock(omutex);
+
+	} else if ((updateval & PTHRW_MTX_NONE) != 0) {
+#if _KSYN_TRACE_
+		(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 5, updateval, 0, 0);
+#endif
+		/* kernel did not hand us the mutex; reacquire it ourselves */
+		pthread_mutex_lock(omutex);
+	} else {
+		/* on successful return mutex held */
+		/* returns 0 on succesful update */
+#if _KSYN_TRACE_
+		(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 6, updateval, 0, 0);
+#endif
+		firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
+		if (__mtx_updatebits( mutex, updateval, firstfit, 1) == 1) {
+			/* not expected to be here */
+			LIBC_ABORT("CONDWAIT mutex acquire mishap");
+		}
+		if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
+			mutex->mtxopts.options.lock_count++;
+	}
+#else /* COND_MTX_WAITQUEUEMOVE */
+	if (updateval == (uint32_t)-1) {
+		if (errno == ETIMEDOUT) {
+			retval = ETIMEDOUT;
+		} else if (errno == EINTR) {
+			/*
+			**  EINTR can be treated as a spurious wakeup unless we were canceled.
+			*/
+			retval = 0;
+		} else
+			retval = EINVAL;
+
+		/* add unlock ref to show one less waiter */
+		_new_cond_dropwait(cond);
+	} else
+		retval = 0;
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 4, retval, 0, 0);
+#endif
+	/* always reacquire the mutex before returning to the caller */
+	pthread_mutex_lock(omutex);
+
+#endif /* COND_MTX_WAITQUEUEMOVE */
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0, 0, retval, 0);
+#endif
+	return(retval);
+}
+
+static void
+_new_cond_cleanup(void *arg)
+{
+	/*
+	 * Cancellation cleanup handler installed around __psynch_cvwait():
+	 * if the thread was cancelled while waiting, drop its waiter
+	 * reference and reacquire the associated mutex on the way out.
+	 */
+	npthread_cond_t *cond = (npthread_cond_t *)arg;
+	pthread_mutex_t *mutex;
+
+// 4597450: begin
+	pthread_t thread = pthread_self();
+	int thcanceled = 0;
+
+	LOCK(thread->lock);
+	thcanceled = (thread->detached & _PTHREAD_WASCANCEL);
+	UNLOCK(thread->lock);
+
+	/* only act when this cleanup runs due to an actual cancellation */
+	if (thcanceled == 0)
+		return;
+
+// 4597450: end
+	mutex = cond->busy;
+
+	/* add unlock ref to show one less waiter */
+	_new_cond_dropwait(cond);
+
+	/*
+	** Can't do anything if this fails -- we're on the way out
+	*/
+	if (mutex != NULL)
+		(void)pthread_mutex_lock(mutex);
+
+}
+
+void
+_new_cond_dropwait(npthread_cond_t * cond)
+{
+	/*
+	 * Account for one waiter that left without being signalled
+	 * (timeout, EINTR, or cancellation): bump the unlock-sequence
+	 * counter and, if that was the last waiter, tell the kernel to
+	 * erase any pre-post and reset the counters.
+	 */
+	int sig = cond->sig;
+	int retval;
+	uint32_t lgenval, ugenval, diffgen, mgen, ugen, flags;
+	uint32_t * c_lseqcnt;
+	uint32_t * c_useqcnt;
+	uint64_t oldval64, newval64;
+
+	/* to provide backwards compat for apps using united condition vars */
+
+	if (sig != _PTHREAD_COND_SIG)
+		return;
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0xee, 0);
+#endif
+	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt);
+retry:
+	lgenval = *c_lseqcnt;
+	ugenval = *c_useqcnt;
+	diffgen = lgenval - ugenval;	/* pending waiters */
+
+	if (diffgen <= 0) {
+#if _KSYN_TRACE_
+		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, 1, 0, 0xee, 0);
+#endif
+		return;
+	}
+
+	/* consume one waiter slot; retry if a signaller raced us */
+	if (OSAtomicCompareAndSwap32(ugenval, ugenval+1, (volatile int *)c_useqcnt) != TRUE)
+		goto retry;
+
+	if (lgenval == ugenval+1) {
+		/* last one */
+		/* send last drop notify to erase pre post */
+		flags = _PTHREAD_MTX_OPT_LASTDROP;
+
+		if (cond->pshared == PTHREAD_PROCESS_SHARED)
+			flags |= _PTHREAD_MTX_OPT_PSHARED;
+		mgen = ugen = 0;
+
+#if _KSYN_TRACE_
+		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 1, 0, 0xee, 0);
+#endif
+		retval = __psynch_cvsignal((pthread_cond_t *)cond, lgenval, ugenval+1,(pthread_mutex_t *)NULL, mgen, ugen, MACH_PORT_NULL, flags);
+
+		/* if counters are unchanged since we consumed them, reset to zero */
+		oldval64 = (((uint64_t)(ugenval+1)) << 32);
+		oldval64 |= lgenval;
+		newval64 = 0;
+		OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt);
+	}
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, 2, 0, 0xee, 0);
+#endif
+	return;
+}
+
+
+/* Wait with a RELATIVE timeout (isRelative=1), non-conforming semantics. */
+int
+_new_pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
+		       pthread_mutex_t *mutex,
+		       const struct timespec *abstime)
+{
+	return (__new_pthread_cond_wait(cond, mutex, abstime, 1, 0));
+}
+
+
+/* Untimed wait (NULL abstime), conforming semantics (cancellation point). */
+int
+_new_pthread_cond_wait(pthread_cond_t *cond,
+		   pthread_mutex_t *mutex)
+{
+	return(__new_pthread_cond_wait(cond, mutex, 0, 0, 1));
+}
+
+/* Wait with an ABSOLUTE deadline (isRelative=0), conforming semantics. */
+int
+_new_pthread_cond_timedwait(pthread_cond_t *cond,
+			pthread_mutex_t *mutex,
+			const struct timespec *abstime)
+{
+	return(__new_pthread_cond_wait(cond, mutex, abstime, 0, 1));
+}
+
+#endif /* __i386__ || __x86_64__ */
#else /* !BUILDING_VARIANT */
+
extern int _pthread_cond_wait(pthread_cond_t *cond,
pthread_mutex_t *mutex,
const struct timespec *abstime,
int isRelative,
int isconforming);
-extern int
-_pthread_cond_init(pthread_cond_t *cond,
- const pthread_condattr_t *attr,
- int conforming);
-
#endif /* !BUILDING_VARIANT ] */
/*
* Initialize a condition variable. Note: 'attr' is ignored.
conforming = 0;
#endif /* __DARWIN_UNIX03 */
-
LOCK_INIT(cond->lock);
+#if defined(__i386__) || defined(__x86_64__)
+ if ((attr != NULL) && (attr->pshared == PTHREAD_PROCESS_SHARED)) {
+ return(_new_pthread_cond_init(cond, attr, conforming));
+ }
+#endif /* __i386__ || __x86_64__ */
+
return (_pthread_cond_init(cond, attr, conforming));
}
.Fa abstime ,
and the current thread reacquires the lock on
.Fa mutex .
+.Pp
+Values for struct timespec can be obtained by adding the required
+time interval to the current time obtained using
+.Xr gettimeofday 2 .
+.Pp
+.Em Note
+that struct timeval and struct timespec use different units to specify
+the time. Hence, the user should always take care to perform the time unit
+conversions accordingly.
+.Sh EXAMPLE
+.Pp
+ struct timeval tv;
+ struct timespec ts;
+ gettimeofday(&tv, NULL);
+ ts.tv_sec = tv.tv_sec + 0;
+ ts.tv_nsec = 0;
+.Pp
.Sh RETURN VALUES
If successful, the
.Fn pthread_cond_timedwait
.Xr pthread_cond_destroy 3 ,
.Xr pthread_cond_init 3 ,
.Xr pthread_cond_signal 3 ,
-.Xr pthread_cond_wait 3
+.Xr pthread_cond_wait 3 ,
+.Xr gettimeofday 2
.Sh STANDARDS
.Fn pthread_cond_timedwait
conforms to
.Fa mutex
argument and waits on the
.Fa cond
-argument.
+argument. Before returning control to the calling function,
+.Fn pthread_cond_wait
+re-acquires the
.Fa mutex .
.Sh RETURN VALUES
If successful, the
.Fn pthread_cond_wait
#include <errno.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
+#include <libkern/OSAtomic.h>
#ifndef __POSIX_LIB__
/*
* Threads
*/
+#define MAXTHREADNAMESIZE 64
#define _PTHREAD_T
typedef struct _pthread
{
long sig; /* Unique signature for this structure */
struct __darwin_pthread_handler_rec *__cleanup_stack;
pthread_lock_t lock; /* Used for internal mutex on structure */
- u_int32_t detached:8,
+ uint32_t detached:8,
inherit:8,
policy:8,
freeStackOnExit:1,
kernalloc:1,
schedset:1,
wqthread:1,
- pad:3;
+ wqkillset:1,
+ pad:2;
size_t guardsize; /* size in bytes to guard stack overflow */
#if !defined(__LP64__)
int pad0; /* for backwards compatibility */
#endif
void *cthread_self; /* cthread_self() if somebody calls cthread_set_self() */
/* protected by list lock */
- u_int32_t childrun:1,
+ uint32_t childrun:1,
parentcheck:1,
childexit:1,
pad3:29;
void * freeaddr;
size_t freesize;
mach_port_t joiner_notify;
- char pthread_name[64]; /* including nulll the name */
+	char pthread_name[MAXTHREADNAMESIZE];		/* includes the terminating null */
int max_tsd_key;
void * cur_workq;
void * cur_workitem;
+ uint64_t thread_id;
} *pthread_t;
/*
{
long sig; /* Unique signature for this structure */
pthread_lock_t lock;
- u_int32_t detached:8,
+ uint32_t detached:8,
inherit:8,
policy:8,
freeStackOnExit:1,
/*
* Mutex attributes
*/
+#define _PTHREAD_MUTEX_POLICY_NONE 0
+#define _PTHREAD_MUTEX_POLICY_FAIRSHARE 1
+#define _PTHREAD_MUTEX_POLICY_FIRSTFIT 2
+#define _PTHREAD_MUTEX_POLICY_REALTIME 3
+#define _PTHREAD_MUTEX_POLICY_ADAPTIVE 4
+#define _PTHREAD_MUTEX_POLICY_PRIPROTECT 5
+#define _PTHREAD_MUTEX_POLICY_PRIINHERIT 6
+
#define _PTHREAD_MUTEXATTR_T
typedef struct
{
long sig; /* Unique signature for this structure */
int prioceiling;
- u_int32_t protocol:2, /* protocol attribute */
+ uint32_t protocol:2, /* protocol attribute */
type:2, /* mutex type */
pshared:2,
- rfu:26;
+ policy:3,
+ rfu:23;
} pthread_mutexattr_t;
/*
* Mutex variables
*/
+struct _pthread_mutex_options {
+ uint32_t protocol:2, /* protocol */
+ type:2, /* mutex type */
+ pshared:2, /* mutex type */
+ policy:3,
+ hold:2,
+ misalign:1, /* 8 byte aligned? */
+ rfu:4,
+ lock_count:16;
+};
+
+
+#define _PTHREAD_MTX_OPT_PSHARED 0x010
+#define _PTHREAD_MTX_OPT_HOLD 0x200
+#define _PTHREAD_MTX_OPT_NOHOLD 0x400
+#define _PTHREAD_MTX_OPT_LASTDROP (_PTHREAD_MTX_OPT_NOHOLD | _PTHREAD_MTX_OPT_HOLD)
+
+
#define _PTHREAD_MUTEX_T
typedef struct _pthread_mutex
{
long sig; /* Unique signature for this structure */
pthread_lock_t lock; /* Used for internal mutex on structure */
- u_int32_t waiters; /* Count of threads waiting for this mutex */
-#define _pthread_mutex_kernid waiters
+ union {
+ uint32_t value;
+ struct _pthread_mutex_options options;
+ } mtxopts;
+ int16_t prioceiling;
+ int16_t priority; /* Priority to restore when mutex unlocked */
+ uint32_t waiters; /* Count of threads waiting for this mutex */
pthread_t owner; /* Which thread has this mutex locked */
- semaphore_t sem; /* Semaphore used for waiting */
- u_int32_t protocol:2, /* protocol */
- type:2, /* mutex type */
- pshared:2, /* mutex type */
- rfu:10,
- lock_count:16;
struct _pthread_mutex *next, *prev; /* List of other mutexes he owns */
struct _pthread_cond *busy; /* List of condition variables using this mutex */
- int16_t prioceiling;
- int16_t priority; /* Priority to restore when mutex unlocked */
+ semaphore_t sem; /* Semaphore used for waiting */
semaphore_t order;
} pthread_mutex_t;
+typedef struct _npthread_mutex
+{
+/* keep same as pthread_mutex_t from here to .. */
+ long sig; /* Unique signature for this structure */
+ pthread_lock_t lock; /* Used for static init sequencing */
+ union {
+ uint32_t value;
+ struct _pthread_mutex_options options;
+ } mtxopts;
+ int16_t prioceiling;
+ int16_t priority; /* Priority to restore when mutex unlocked */
+ uint32_t m_seq[3];
+#if defined(__LP64__)
+ uint64_t m_tid; /* Which thread has this mutex locked */
+ uint32_t * m_lseqaddr;
+ uint32_t * m_useqaddr;
+ uint32_t reserved[2];
+#else
+ uint32_t * m_lseqaddr;
+ uint64_t m_tid; /* Which thread has this mutex locked */
+ uint32_t * m_useqaddr;
+#endif
+} npthread_mutex_t;
+
+
+
/*
* Condition variable attributes
typedef struct
{
long sig; /* Unique signature for this structure */
- u_int32_t pshared:2, /* pshared */
+ uint32_t pshared:2, /* pshared */
unsupported:30;
} pthread_condattr_t;
{
long sig; /* Unique signature for this structure */
pthread_lock_t lock; /* Used for internal mutex on structure */
- semaphore_t sem; /* Kernel semaphore */
-#define _pthread_cond_kernid sem
- struct _pthread_cond *next, *prev; /* List of condition variables using mutex */
- struct _pthread_mutex *busy; /* mutex associated with variable */
- u_int32_t waiters:15, /* Number of threads waiting */
+ uint32_t waiters:15, /* Number of threads waiting */
sigspending:15, /* Number of outstanding signals */
pshared:2;
+ struct _pthread_cond *next, *prev; /* List of condition variables using mutex */
+ struct _pthread_mutex *busy; /* mutex associated with variable */
+ semaphore_t sem; /* Kernel semaphore */
} pthread_cond_t;
+
+typedef struct _npthread_cond
+{
+ long sig; /* Unique signature for this structure */
+ pthread_lock_t lock; /* Used for internal mutex on structure */
+ uint32_t rfu:29, /* not in use*/
+ misalign: 1, /* structure is not aligned to 8 byte boundary */
+ pshared:2;
+ struct _npthread_mutex *busy; /* mutex associated with variable */
+ uint32_t c_seq[3];
+#if defined(__LP64__)
+ uint32_t reserved[3];
+#endif /* __LP64__ */
+} npthread_cond_t;
+
/*
* Initialization control (once) variables
*/
long sig;
pthread_mutex_t lock; /* monitor lock */
int state;
-#define _pthread_rwlock_kernid state
pthread_cond_t read_signal;
pthread_cond_t write_signal;
int blocked_writers;
- int pshared;
- pthread_t owner;
- int rfu[2];
+ int reserved;
+ pthread_t owner;
+ int rfu[1];
+ int pshared;
} pthread_rwlock_t;
+#define _PTHREAD_RWLOCK_T
+typedef struct {
+ long sig;
+ pthread_lock_t lock;
+#if defined(__LP64__)
+ int reserv;
+ volatile uint32_t rw_seq[4];
+ pthread_t rw_owner;
+#else /* __LP64__ */
+ volatile uint32_t rw_seq[4];
+ pthread_t rw_owner;
+ int reserv;
+#endif /* __LP64__ */
+ volatile uint32_t * rw_lseqaddr;
+ volatile uint32_t * rw_wcaddr;
+ volatile uint32_t * rw_useqaddr;
+ uint32_t rw_flags;
+ int misalign;
+#if defined(__LP64__)
+ uint32_t rfu[31];
+#else /* __LP64__ */
+ uint32_t rfu[18];
+#endif /* __LP64__ */
+ int pshared;
+} npthread_rwlock_t;
+
+/* flags for rw_flags */
+#define PTHRW_KERN_PROCESS_SHARED 0x10
+#define PTHRW_KERN_PROCESS_PRIVATE 0x20
+#define PTHRW_KERN_PROCESS_FLAGS_MASK 0x30
+
+#define PTHRW_EBIT 0x01
+#define PTHRW_LBIT 0x02
+#define PTHRW_YBIT 0x04
+#define PTHRW_WBIT 0x08
+#define PTHRW_UBIT 0x10
+#define PTHRW_RETRYBIT 0x20
+#define PTHRW_SHADOW_W 0x20 /* same as 0x20, shadow W bit for rwlock */
+
+#define PTHRW_TRYLKBIT 0x40
+#define PTHRW_RW_HUNLOCK 0x40 /* readers responsible for handling unlock */
+
+#define PTHRW_MTX_NONE 0x80
+#define PTHRW_RW_INIT 0x80 /* reset on the lock bits */
+#define PTHRW_RW_SPURIOUS 0x80 /* same as 0x80, spurious rwlock unlock ret from kernel */
+
+#define PTHRW_INC 0x100
+#define PTHRW_BIT_MASK 0x000000ff
+#define PTHRW_UN_BIT_MASK 0x000000df /* remove shadow bit */
+
+#define PTHRW_COUNT_SHIFT 8
+#define PTHRW_COUNT_MASK 0xffffff00
+#define PTHRW_MAX_READERS 0xffffff00
+
+
+#define PTHREAD_MTX_TID_SWITCHING (uint64_t)-1
+
+#define is_rw_ewubit_set(x) (((x) & (PTHRW_EBIT | PTHRW_WBIT | PTHRW_UBIT)) != 0)
+#define is_rw_lbit_set(x) (((x) & PTHRW_LBIT) != 0)
+#define is_rw_lybit_set(x) (((x) & (PTHRW_LBIT | PTHRW_YBIT)) != 0)
+#define is_rw_ebit_set(x) (((x) & PTHRW_EBIT) != 0)
+#define is_rw_ebit_clear(x) (((x) & PTHRW_EBIT) == 0)
+#define is_rw_uebit_set(x) (((x) & (PTHRW_EBIT | PTHRW_UBIT)) != 0)
+#define is_rw_ewuybit_set(x) (((x) & (PTHRW_EBIT | PTHRW_WBIT | PTHRW_UBIT | PTHRW_YBIT)) != 0)
+#define is_rw_ewuybit_clear(x) (((x) & (PTHRW_EBIT | PTHRW_WBIT | PTHRW_UBIT | PTHRW_YBIT)) == 0)
+#define is_rw_ewubit_set(x) (((x) & (PTHRW_EBIT | PTHRW_WBIT | PTHRW_UBIT)) != 0)
+#define is_rw_ewubit_clear(x) (((x) & (PTHRW_EBIT | PTHRW_WBIT | PTHRW_UBIT)) == 0)
+
+/* is x lower than Y */
+#define is_seqlower(x, y) ((x < y) || ((x - y) > (PTHRW_MAX_READERS/2)))
+/* is x lower than or eq Y */
+#define is_seqlower_eq(x, y) ((x <= y) || ((x - y) > (PTHRW_MAX_READERS/2)))
+
+/* is x greater than Y */
+#define is_seqhigher(x, y) ((x > y) || ((y - x) > (PTHRW_MAX_READERS/2)))
+
+/* Distance from sequence y up to x, accounting for wraparound at
+ * PTHRW_MAX_READERS (sequence counters advance in PTHRW_INC steps). */
+static inline int diff_genseq(uint32_t x, uint32_t y) {
+	if (x > y)  {
+		return(x-y);
+	} else {
+		/* x wrapped past the maximum: measure around the wrap point */
+		return((PTHRW_MAX_READERS - y) + x +1);
+	}
+}
+
/* keep the size to 64bytes for both 64 and 32 */
#define _PTHREAD_WORKQUEUE_ATTR_T
typedef struct {
- u_int32_t sig;
-#if defined(__ppc64__) || defined(__x86_64__)
- u_int32_t resv1;
-#endif
- size_t stacksize;
- int istimeshare;
- int importance;
- int affinity;
- int queueprio;
-#if defined(__ppc64__) || defined(__x86_64__)
- unsigned int resv2[8];
-#else
- unsigned int resv2[10];
-#endif
+ uint32_t sig;
+ int queueprio;
+ int overcommit;
+ unsigned int resv2[13];
} pthread_workqueue_attr_t;
#define _PTHREAD_WORKITEM_T
void * func_arg;
struct _pthread_workqueue * workq;
unsigned int flags;
+ unsigned int gencount;
} * pthread_workitem_t;
#define PTH_WQITEM_INKERNEL_QUEUE 1
TAILQ_HEAD(__pthread_workitem_pool, _pthread_workitem);
extern struct __pthread_workitem_pool __pthread_workitem_pool_head; /* head list of workitem pool */
-#define WQ_NUM_PRIO_QS 5 /* -2 to +2 */
-#define WORK_QUEUE_NORMALIZER 2 /* so all internal usages are from 0 to 4 */
+#define WQ_NUM_PRIO_QS 3 /* WORKQ_HIGH/DEFAULT/LOW_PRIOQUEUE */
#define _PTHREAD_WORKQUEUE_HEAD_T
typedef struct _pthread_workqueue_head {
void (*term_callback)(struct _pthread_workqueue *,void *);
void * term_callarg;
pthread_workqueue_head_t headp;
- int suspend_count;
+ int overcommit;
#if defined(__ppc64__) || defined(__x86_64__)
unsigned int rev2[2];
#else
#include "pthread.h"
-#if defined(__i386__) || defined(__ppc64__) || defined(__x86_64__) || defined(__arm__)
+#if defined(__i386__) || defined(__ppc64__) || defined(__x86_64__) || (defined(__arm__) && (defined(_ARM_ARCH_7) || !defined(_ARM_ARCH_6) || !defined(__thumb__)))
/*
* Inside libSystem, we can use r13 or %gs directly to get access to the
* thread-specific data area. The current thread is in the first slot.
#elif defined(__ppc64__)
register const pthread_t __pthread_self asm ("r13");
ret = __pthread_self;
-#elif defined(__arm__)
+#elif defined(__arm__) && defined(_ARM_ARCH_6)
+ __asm__ ("mrc p15, 0, %0, c13, c0, 3" : "=r"(ret));
+#elif defined(__arm__) && !defined(_ARM_ARCH_6)
register const pthread_t __pthread_self asm ("r9");
ret = __pthread_self;
#endif
extern void _pthread_tsd_cleanup(pthread_t self);
+#if defined(__i386__) || defined(__x86_64__)
+__private_extern__ void __mtx_holdlock(npthread_mutex_t *mutex, uint32_t diff, uint32_t * flagp, uint32_t ** pmtxp, uint32_t * mgenp, uint32_t * ugenp);
+__private_extern__ int __mtx_droplock(npthread_mutex_t *mutex, int count, uint32_t * flagp, uint32_t ** pmtxp, uint32_t * mgenp, uint32_t * ugenp, uint32_t * notifyp);
+__private_extern__ int __mtx_updatebits(npthread_mutex_t *mutex, uint32_t updateval, int firstfiti, int fromcond);
+
+/* syscall interfaces */
+extern uint32_t __psynch_mutexwait(pthread_mutex_t * mutex, uint32_t mgen, uint32_t ugen, uint64_t tid, uint32_t flags);
+extern uint32_t __psynch_mutexdrop(pthread_mutex_t * mutex, uint32_t mgen, uint32_t ugen, uint64_t tid, uint32_t flags);
+extern int __psynch_cvbroad(pthread_cond_t * cv, uint32_t cvgen, uint32_t diffgen, pthread_mutex_t * mutex, uint32_t mgen, uint32_t ugen, uint64_t tid, uint32_t flags);
+extern int __psynch_cvsignal(pthread_cond_t * cv, uint32_t cvgen, uint32_t cvugen, pthread_mutex_t * mutex, uint32_t mgen, uint32_t ugen, int thread_port, uint32_t flags);
+extern uint32_t __psynch_cvwait(pthread_cond_t * cv, uint32_t cvgen, uint32_t cvugen, pthread_mutex_t * mutex, uint32_t mgen, uint32_t ugen, uint64_t sec, uint64_t usec);
+extern uint32_t __psynch_rw_longrdlock(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags);
+extern uint32_t __psynch_rw_yieldwrlock(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags);
+extern int __psynch_rw_downgrade(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags);
+extern uint32_t __psynch_rw_upgrade(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags);
+extern uint32_t __psynch_rw_rdlock(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags);
+extern uint32_t __psynch_rw_wrlock(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags);
+extern uint32_t __psynch_rw_unlock(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags);
+extern uint32_t __psynch_rw_unlock2(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags);
+#endif /* __i386__ || __x86_64__ */
+
__private_extern__ semaphore_t new_sem_from_pool(void);
__private_extern__ void restore_sem_to_pool(semaphore_t);
__private_extern__ void _pthread_atfork_queue_init(void);
int _pthread_lookup_thread(pthread_t thread, mach_port_t * port, int only_joinable);
int _pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming);
+
#endif /* _POSIX_PTHREAD_INTERNALS_H */
/*
- * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2003-2004, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#ifndef __ASSEMBLER__
#include <System/machine/cpu_capabilities.h>
+#ifdef __arm__
+#include <arm/arch.h>
+#endif
/*
** Define macros for inline pthread_getspecific() usage.
#define __PTK_LIBC_TTYNAME_KEY 11
#define __PTK_LIBC_LOCALTIME_KEY 12
#define __PTK_LIBC_GMTIME_KEY 13
+#define __PTK_LIBC_GDTOA_BIGINT_KEY 14
+#define __PTK_LIBC_PARSEFLOAT_KEY 15
+/* Keys 20-25 for libdispatch usage */
+#define __PTK_LIBDISPATCH_KEY0 20
+#define __PTK_LIBDISPATCH_KEY1 21
+#define __PTK_LIBDISPATCH_KEY2 22
+#define __PTK_LIBDISPATCH_KEY3 23
+#define __PTK_LIBDISPATCH_KEY4 24
+#define __PTK_LIBDISPATCH_KEY5 25
/* Keys 30-255 for Non Libsystem usage */
+
+/* Keys 30-39 for Graphic frameworks usage */
#define _PTHREAD_TSD_SLOT_OPENGL 30 /* backwards compat sake */
#define __PTK_FRAMEWORK_OPENGL_KEY 30
+#define __PTK_FRAMEWORK_GRAPHICS_KEY1 31
+#define __PTK_FRAMEWORK_GRAPHICS_KEY2 32
+#define __PTK_FRAMEWORK_GRAPHICS_KEY3 33
+#define __PTK_FRAMEWORK_GRAPHICS_KEY4 34
+#define __PTK_FRAMEWORK_GRAPHICS_KEY5 35
+#define __PTK_FRAMEWORK_GRAPHICS_KEY6 36
+#define __PTK_FRAMEWORK_GRAPHICS_KEY7 37
+#define __PTK_FRAMEWORK_GRAPHICS_KEY8 38
+#define __PTK_FRAMEWORK_GRAPHICS_KEY9 39
+
+/* Keys 40-49 for Objective-C runtime usage */
+#define __PTK_FRAMEWORK_OBJC_KEY0 40
+#define __PTK_FRAMEWORK_OBJC_KEY1 41
+#define __PTK_FRAMEWORK_OBJC_KEY2 42
+#define __PTK_FRAMEWORK_OBJC_KEY3 43
+#define __PTK_FRAMEWORK_OBJC_KEY4 44
+#define __PTK_FRAMEWORK_OBJC_KEY5 45
+#define __PTK_FRAMEWORK_OBJC_KEY6 46
+#define __PTK_FRAMEWORK_OBJC_KEY7 47
+#define __PTK_FRAMEWORK_OBJC_KEY8 48
+#define __PTK_FRAMEWORK_OBJC_KEY9 49
+
+/* Keys 50-59 for Core Foundation usage */
+#define __PTK_FRAMEWORK_COREFOUNDATION_KEY0 50
+#define __PTK_FRAMEWORK_COREFOUNDATION_KEY1 51
+#define __PTK_FRAMEWORK_COREFOUNDATION_KEY2 52
+#define __PTK_FRAMEWORK_COREFOUNDATION_KEY3 53
+#define __PTK_FRAMEWORK_COREFOUNDATION_KEY4 54
+#define __PTK_FRAMEWORK_COREFOUNDATION_KEY5 55
+#define __PTK_FRAMEWORK_COREFOUNDATION_KEY6 56
+#define __PTK_FRAMEWORK_COREFOUNDATION_KEY7 57
+#define __PTK_FRAMEWORK_COREFOUNDATION_KEY8 58
+#define __PTK_FRAMEWORK_COREFOUNDATION_KEY9 59
+
+/* Keys 60-69 for Foundation usage */
+#define __PTK_FRAMEWORK_FOUNDATION_KEY0 60
+#define __PTK_FRAMEWORK_FOUNDATION_KEY1 61
+#define __PTK_FRAMEWORK_FOUNDATION_KEY2 62
+#define __PTK_FRAMEWORK_FOUNDATION_KEY3 63
+#define __PTK_FRAMEWORK_FOUNDATION_KEY4 64
+#define __PTK_FRAMEWORK_FOUNDATION_KEY5 65
+#define __PTK_FRAMEWORK_FOUNDATION_KEY6 66
+#define __PTK_FRAMEWORK_FOUNDATION_KEY7 67
+#define __PTK_FRAMEWORK_FOUNDATION_KEY8 68
+#define __PTK_FRAMEWORK_FOUNDATION_KEY9 69
+
+/* Keys 70-79 for Core Animation/QuartzCore usage */
+#define __PTK_FRAMEWORK_QUARTZCORE_KEY0 70
+#define __PTK_FRAMEWORK_QUARTZCORE_KEY1 71
+#define __PTK_FRAMEWORK_QUARTZCORE_KEY2 72
+#define __PTK_FRAMEWORK_QUARTZCORE_KEY3 73
+#define __PTK_FRAMEWORK_QUARTZCORE_KEY4 74
+#define __PTK_FRAMEWORK_QUARTZCORE_KEY5 75
+#define __PTK_FRAMEWORK_QUARTZCORE_KEY6 76
+#define __PTK_FRAMEWORK_QUARTZCORE_KEY7 77
+#define __PTK_FRAMEWORK_QUARTZCORE_KEY8 78
+#define __PTK_FRAMEWORK_QUARTZCORE_KEY9 79
+
+
+/* Keys 80-89 for Garbage Collection */
+#define __PTK_FRAMEWORK_GC_KEY0 80
+#define __PTK_FRAMEWORK_GC_KEY1 81
+#define __PTK_FRAMEWORK_GC_KEY2 82
+#define __PTK_FRAMEWORK_GC_KEY3 83
+#define __PTK_FRAMEWORK_GC_KEY4 84
+#define __PTK_FRAMEWORK_GC_KEY5 85
+#define __PTK_FRAMEWORK_GC_KEY6 86
+#define __PTK_FRAMEWORK_GC_KEY7 87
+#define __PTK_FRAMEWORK_GC_KEY8 88
+#define __PTK_FRAMEWORK_GC_KEY9 89
+
/*
** Define macros for inline pthread_getspecific() usage.
#endif
extern void *pthread_getspecific(unsigned long);
+/* setup destructor function for static key as it is not created with pthread_key_create() */
int pthread_key_init_np(int, void (*)(void *));
#if defined(__cplusplus)
} else {
return 0;
}
+#elif defined(__arm__) && defined(__thumb__) && defined(_ARM_ARCH_6) && !defined(_ARM_ARCH_7)
+ return 0;
#else
return 1;
#endif
}
+/* To be used with static constant keys only */
inline static void *
-_pthread_getspecific_direct(unsigned long slot)
+_pthread_getspecific_direct(unsigned long slot)
{
- void *ret;
-#if defined(__OPTIMIZE__)
+ void *ret;
#if defined(__i386__) || defined(__x86_64__)
- asm volatile("mov %%gs:%P1, %0" : "=r" (ret) : "i" (slot * sizeof(void *) + _PTHREAD_TSD_OFFSET));
-#elif defined(__ppc__)
- void **__pthread_tsd;
- asm volatile("mfspr %0, 259" : "=r" (__pthread_tsd));
- ret = __pthread_tsd[slot + (_PTHREAD_TSD_OFFSET / sizeof(void *))];
+#if defined(__OPTIMIZE__)
+ asm volatile("mov %%gs:%P1, %0" : "=r" (ret) : "i" (slot * sizeof(void *) + _PTHREAD_TSD_OFFSET));
+#else
+ asm("mov %%gs:%P2(,%1,%P3), %0" : "=r" (ret) : "r" (slot), "i" (_PTHREAD_TSD_OFFSET), "i" (sizeof (void *)));
+#endif
+#elif defined(__ppc__)
+ void **__pthread_tsd;
+ asm volatile("mfspr %0, 259" : "=r" (__pthread_tsd));
+ ret = __pthread_tsd[slot + (_PTHREAD_TSD_OFFSET / sizeof(void *))];
#elif defined(__ppc64__)
- register void **__pthread_tsd asm ("r13");
- ret = __pthread_tsd[slot + (_PTHREAD_TSD_OFFSET / sizeof(void *))];
-#elif defined(__arm__)
- register void **__pthread_tsd asm ("r9");
+ register void **__pthread_tsd asm ("r13");
+ ret = __pthread_tsd[slot + (_PTHREAD_TSD_OFFSET / sizeof(void *))];
+#elif defined(__arm__) && defined(_ARM_ARCH_6)
+ void **__pthread_tsd;
+ __asm__ ("mrc p15, 0, %0, c13, c0, 3" : "=r"(__pthread_tsd));
ret = __pthread_tsd[slot + (_PTHREAD_TSD_OFFSET / sizeof(void *))];
-#else
+#elif defined(__arm__) && !defined(_ARM_ARCH_6)
+ register void **__pthread_tsd asm ("r9");
+ ret = __pthread_tsd[slot + (_PTHREAD_TSD_OFFSET / sizeof(void *))];
+#else
#error no pthread_getspecific_direct implementation for this arch
#endif
-#else /* ! __OPTIMIZATION__ */
- ret = pthread_getspecific(slot);
-#endif
- return ret;
+ return ret;
}
+/* To be used with static constant keys only */
+#define _pthread_setspecific_direct(key, val) pthread_setspecific(key, val)
+
#define LOCK_INIT(l) ((l) = 0)
#define LOCK_INITIALIZER 0
#endif /* PLOCKSTAT */
extern int __unix_conforming;
+extern int __unix_conforming;
+int _pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
+
+#if defined(__i386__) || defined(__x86_64__)
+#define USE_COMPAGE 1
+
+#include <machine/cpu_capabilities.h>
+
+extern int _commpage_pthread_mutex_lock(uint32_t * lvalp, int flags, uint64_t mtid, uint32_t mask, uint64_t * tidp, int *sysret);
+
+int _new_pthread_mutex_destroy(pthread_mutex_t *mutex);
+int _new_pthread_mutex_destroy_locked(pthread_mutex_t *mutex);
+int _new_pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
+int _new_pthread_mutex_lock(pthread_mutex_t *omutex);
+int _new_pthread_mutex_trylock(pthread_mutex_t *omutex);
+int _new_pthread_mutex_unlock(pthread_mutex_t *omutex);
+
+#if defined(__LP64__)
+#define MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr) \
+{ \
+ if (mutex->mtxopts.options.misalign != 0) { \
+ lseqaddr = &mutex->m_seq[0]; \
+ useqaddr = &mutex->m_seq[1]; \
+ } else { \
+ lseqaddr = &mutex->m_seq[1]; \
+ useqaddr = &mutex->m_seq[2]; \
+ } \
+}
+#else /* __LP64__ */
+#define MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr) \
+{ \
+ if (mutex->mtxopts.options.misalign != 0) { \
+ lseqaddr = &mutex->m_seq[1]; \
+ useqaddr = &mutex->m_seq[2]; \
+ }else { \
+ lseqaddr = &mutex->m_seq[0]; \
+ useqaddr = &mutex->m_seq[1]; \
+ } \
+}
+#endif /* __LP64__ */
+
+#define _KSYN_TRACE_ 0
+
+#if _KSYN_TRACE_
+/* The Function qualifiers */
+#define DBG_FUNC_START 1
+#define DBG_FUNC_END 2
+#define DBG_FUNC_NONE 0
+
+int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
+
+#define _KSYN_TRACE_UM_LOCK 0x9000060
+#define _KSYN_TRACE_UM_UNLOCK 0x9000064
+#define _KSYN_TRACE_UM_MHOLD 0x9000068
+#define _KSYN_TRACE_UM_MDROP 0x900006c
+#define _KSYN_TRACE_UM_MUBITS 0x900007c
+
+#endif /* _KSYN_TRACE_ */
+
+#endif /* __i386__ || __x86_64__ */
#ifndef BUILDING_VARIANT /* [ */
#define BLOCK_FAIL_PLOCKSTAT 0
#define BLOCK_SUCCESS_PLOCKSTAT 1
+#ifdef PR_5243343
+/* 5243343 - temporary hack to detect if we are running the conformance test */
+extern int PR_5243343_flag;
+#endif /* PR_5243343 */
+
/* This function is never called and exists to provide never-fired dtrace
* probes so that user d scripts don't get errors.
*/
LOCK(mutex->lock);
if (mutex->sig == _PTHREAD_MUTEX_SIG)
{
+
+#if defined(__i386__) || defined(__x86_64__)
+ if(mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED){
+
+ res = _new_pthread_mutex_destroy_locked(mutex);
+ UNLOCK(mutex->lock);
+ return(res);
+ }
+#endif /* __i386__ || __x86_64__ */
+
if (mutex->owner == (pthread_t)NULL &&
mutex->busy == (pthread_cond_t *)NULL)
{
}
else
res = EBUSY;
- } else if (mutex->sig == _PTHREAD_KERN_MUTEX_SIG) {
- int mutexid = mutex->_pthread_mutex_kernid;
- UNLOCK(mutex->lock);
- if( __pthread_mutex_destroy(mutexid) == -1)
- return(errno);
- mutex->sig = _PTHREAD_NO_SIG;
- return(0);
} else
res = EINVAL;
UNLOCK(mutex->lock);
return (res);
}
-#ifdef PR_5243343
-/* 5243343 - temporary hack to detect if we are running the conformance test */
-extern int PR_5243343_flag;
-#endif /* PR_5243343 */
/*
* Initialize a mutex variable, possibly with additional attributes.
*/
-static int
+int
_pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
if (attr)
{
if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG)
return (EINVAL);
- mutex->prioceiling = attr->prioceiling;
- mutex->protocol = attr->protocol;
- mutex->type = attr->type;
- mutex->pshared = attr->pshared;
+#if defined(__i386__) || defined(__x86_64__)
if (attr->pshared == PTHREAD_PROCESS_SHARED) {
- mutex->lock_count = 0;
- mutex->owner = (pthread_t)NULL;
- mutex->next = (pthread_mutex_t *)NULL;
- mutex->prev = (pthread_mutex_t *)NULL;
- mutex->busy = (pthread_cond_t *)NULL;
- mutex->waiters = 0;
- mutex->sem = SEMAPHORE_NULL;
- mutex->order = SEMAPHORE_NULL;
- mutex->sig = 0;
- if( __pthread_mutex_init(mutex, attr) == -1)
- return(errno);
- mutex->sig = _PTHREAD_KERN_MUTEX_SIG;
- return(0);
+ return(_new_pthread_mutex_init(mutex, attr));
+ } else
+#endif /* __i386__ || __x86_64__ */
+ {
+ mutex->prioceiling = attr->prioceiling;
+ mutex->mtxopts.options.protocol = attr->protocol;
+ mutex->mtxopts.options.policy = attr->policy;
+ mutex->mtxopts.options.type = attr->type;
+ mutex->mtxopts.options.pshared = attr->pshared;
}
} else {
mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
- mutex->protocol = _PTHREAD_DEFAULT_PROTOCOL;
- mutex->type = PTHREAD_MUTEX_DEFAULT;
- mutex->pshared = _PTHREAD_DEFAULT_PSHARED;
+ mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
+ mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
+ mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
+ mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
}
- mutex->lock_count = 0;
+ mutex->mtxopts.options.lock_count = 0;
mutex->owner = (pthread_t)NULL;
mutex->next = (pthread_mutex_t *)NULL;
mutex->prev = (pthread_mutex_t *)NULL;
mutex->waiters = 0;
mutex->sem = SEMAPHORE_NULL;
mutex->order = SEMAPHORE_NULL;
+ mutex->prioceiling = 0;
mutex->sig = _PTHREAD_MUTEX_SIG;
return (0);
}
return (_pthread_mutex_init(mutex, attr));
}
-/*
- * Manage a list of mutex variables owned by a thread
- */
-#if defined(DEBUG)
-static void
-_pthread_mutex_add(pthread_mutex_t *mutex, pthread_t self)
-{
- pthread_mutex_t *m;
- if (self != (pthread_t)0)
- {
- if ((m = self->mutexes) != (pthread_mutex_t *)NULL)
- { /* Add to list */
- m->prev = mutex;
- }
- mutex->next = m;
- mutex->prev = (pthread_mutex_t *)NULL;
- self->mutexes = mutex;
- }
-}
-
-__private_extern__ void
-_pthread_mutex_remove(pthread_mutex_t *mutex, pthread_t self)
-{
- pthread_mutex_t *n, *prev;
- if ((n = mutex->next) != (pthread_mutex_t *)NULL)
- {
- n->prev = mutex->prev;
- }
- if ((prev = mutex->prev) != (pthread_mutex_t *)NULL)
- {
- prev->next = mutex->next;
- } else
- { /* This is the first in the list */
- if (self != (pthread_t)0) {
- self->mutexes = n;
- }
- }
-}
-#endif
-
/*
* Lock a mutex.
* TODO: Priority inheritance stuff
int sig = mutex->sig;
/* To provide backwards compat for apps using mutex incorrectly */
- if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init) && (sig != _PTHREAD_KERN_MUTEX_SIG)) {
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init)) {
PLOCKSTAT_MUTEX_ERROR(mutex, EINVAL);
return(EINVAL);
}
+
LOCK(mutex->lock);
if (mutex->sig != _PTHREAD_MUTEX_SIG)
{
if (mutex->sig != _PTHREAD_MUTEX_SIG_init)
{
- if (mutex->sig == _PTHREAD_KERN_MUTEX_SIG) {
- int mutexid = mutex->_pthread_mutex_kernid;
- UNLOCK(mutex->lock);
-
- PLOCKSTAT_MUTEX_BLOCK(mutex);
- if( __pthread_mutex_lock(mutexid) == -1) {
- PLOCKSTAT_MUTEX_BLOCKED(mutex, BLOCK_FAIL_PLOCKSTAT);
- PLOCKSTAT_MUTEX_ERROR(mutex, errno);
- return(errno);
- }
-
- PLOCKSTAT_MUTEX_BLOCKED(mutex, BLOCK_SUCCESS_PLOCKSTAT);
- PLOCKSTAT_MUTEX_ACQUIRE(mutex, 0, 0);
- return(0);
- } else {
UNLOCK(mutex->lock);
PLOCKSTAT_MUTEX_ERROR(mutex, EINVAL);
return (EINVAL);
- }
}
_pthread_mutex_init(mutex, NULL);
self = _PTHREAD_MUTEX_OWNER_SELF;
}
- else if (mutex->type != PTHREAD_MUTEX_NORMAL)
+#if defined(__i386__) || defined(__x86_64__)
+ else if(mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED){
+ UNLOCK(mutex->lock);
+ return(_new_pthread_mutex_lock(mutex));
+ }
+#endif /* __i386__ || __x86_64__ */
+ else if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL)
{
self = pthread_self();
if (mutex->owner == self)
{
int res;
- if (mutex->type == PTHREAD_MUTEX_RECURSIVE)
+ if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
{
- if (mutex->lock_count < USHRT_MAX)
+ if (mutex->mtxopts.options.lock_count < USHRT_MAX)
{
- mutex->lock_count++;
+ mutex->mtxopts.options.lock_count++;
PLOCKSTAT_MUTEX_ACQUIRE(mutex, 1, 0);
res = 0;
} else {
}
}
- mutex->lock_count = 1;
+ mutex->mtxopts.options.lock_count = 1;
mutex->owner = self;
-#if defined(DEBUG)
- _pthread_mutex_add(mutex, self);
-#endif
UNLOCK(mutex->lock);
PLOCKSTAT_MUTEX_ACQUIRE(mutex, 0, 0);
return (0);
{
kern_return_t kern_res;
pthread_t self;
-
+
LOCK(mutex->lock);
if (mutex->sig != _PTHREAD_MUTEX_SIG)
{
if (mutex->sig != _PTHREAD_MUTEX_SIG_init)
{
-
- if (mutex->sig == _PTHREAD_KERN_MUTEX_SIG) {
- int mutexid = mutex->_pthread_mutex_kernid;
- UNLOCK(mutex->lock);
- if( __pthread_mutex_trylock(mutexid) == -1) {
- PLOCKSTAT_MUTEX_ERROR(mutex, errno);
- return(errno);
- }
- PLOCKSTAT_MUTEX_ACQUIRE(mutex, 0, 0);
- return(0);
- } else {
PLOCKSTAT_MUTEX_ERROR(mutex, EINVAL);
UNLOCK(mutex->lock);
return (EINVAL);
- }
}
_pthread_mutex_init(mutex, NULL);
self = _PTHREAD_MUTEX_OWNER_SELF;
}
- else if (mutex->type != PTHREAD_MUTEX_NORMAL)
+#if defined(__i386__) || defined(__x86_64__)
+ else if(mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED){
+ UNLOCK(mutex->lock);
+ return(_new_pthread_mutex_trylock(mutex));
+ }
+#endif /* __i386__ || __x86_64__ */
+ else if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL)
{
self = pthread_self();
- if (mutex->type == PTHREAD_MUTEX_RECURSIVE)
+ if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
{
if (mutex->owner == self)
{
int res;
- if (mutex->lock_count < USHRT_MAX)
+ if (mutex->mtxopts.options.lock_count < USHRT_MAX)
{
- mutex->lock_count++;
+ mutex->mtxopts.options.lock_count++;
PLOCKSTAT_MUTEX_ACQUIRE(mutex, 1, 0);
res = 0;
} else {
}
}
- mutex->lock_count = 1;
+ mutex->mtxopts.options.lock_count = 1;
mutex->owner = self;
-#if defined(DEBUG)
- _pthread_mutex_add(mutex, self);
-#endif
UNLOCK(mutex->lock);
PLOCKSTAT_MUTEX_ACQUIRE(mutex, 0, 0);
return (0);
int waiters;
int sig = mutex->sig;
+
/* To provide backwards compat for apps using mutex incorrectly */
- if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init) && (sig != _PTHREAD_KERN_MUTEX_SIG)) {
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init)) {
PLOCKSTAT_MUTEX_ERROR(mutex, EINVAL);
return(EINVAL);
}
{
if (mutex->sig != _PTHREAD_MUTEX_SIG_init)
{
- if (mutex->sig == _PTHREAD_KERN_MUTEX_SIG) {
- int mutexid = mutex->_pthread_mutex_kernid;
- UNLOCK(mutex->lock);
- if( __pthread_mutex_unlock(mutexid) == -1) {
- PLOCKSTAT_MUTEX_ERROR(mutex, errno);
- return(errno);
- }
- PLOCKSTAT_MUTEX_RELEASE(mutex, 0);
- return(0);
- } else {
PLOCKSTAT_MUTEX_ERROR(mutex, EINVAL);
UNLOCK(mutex->lock);
return (EINVAL);
- }
}
_pthread_mutex_init(mutex, NULL);
- } else
-
-#if !defined(DEBUG)
- if (mutex->type != PTHREAD_MUTEX_NORMAL)
-#endif
+ }
+#if defined(__i386__) || defined(__x86_64__)
+ else if(mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED){
+ UNLOCK(mutex->lock);
+ return(_new_pthread_mutex_unlock(mutex));
+ }
+#endif /* __i386__ || __x86_64__ */
+ else if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL)
{
pthread_t self = pthread_self();
if (mutex->owner != self)
{
-#if defined(DEBUG)
- abort();
-#endif
PLOCKSTAT_MUTEX_ERROR(mutex, EPERM);
UNLOCK(mutex->lock);
return EPERM;
- } else if (mutex->type == PTHREAD_MUTEX_RECURSIVE &&
- --mutex->lock_count)
+ } else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
+ --mutex->mtxopts.options.lock_count)
{
PLOCKSTAT_MUTEX_RELEASE(mutex, 1);
UNLOCK(mutex->lock);
}
}
- mutex->lock_count = 0;
-#if defined(DEBUG)
- _pthread_mutex_remove(mutex, mutex->owner);
-#endif /* DEBUG */
+ mutex->mtxopts.options.lock_count = 0;
waiters = mutex->waiters;
if (waiters)
{
attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
+ attr->policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
attr->type = PTHREAD_MUTEX_DEFAULT;
attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
attr->pshared = _PTHREAD_DEFAULT_PSHARED;
return (EINVAL); /* Not an initialized 'attribute' structure */
}
}
+
+#ifdef NOTYET
+int
+pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr,
+ int policy)
+{
+ if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
+ {
+ if ((policy == _PTHREAD_MUTEX_POLICY_FAIRSHARE) ||
+ (policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT) ||
+ (policy == _PTHREAD_MUTEX_POLICY_REALTIME) ||
+ (policy == _PTHREAD_MUTEX_POLICY_ADAPTIVE) ||
+ (policy == _PTHREAD_MUTEX_POLICY_PRIPROTECT) ||
+ (policy == _PTHREAD_MUTEX_POLICY_PRIINHERIT))
+ {
+ attr->policy = policy;
+ return (0);
+ } else
+ {
+ return (EINVAL); /* Invalid parameter */
+ }
+ } else
+ {
+ return (EINVAL); /* Not an initialized 'attribute' structure */
+ }
+}
+#endif /* NOTYET */
+
/*
* Set the mutex 'type' value in a mutex attribute structure.
* Note: written as a 'helper' function to hide implementation details.
}
}
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * Acquire lock seq for condition var signalling/broadcast
+ */
+__private_extern__ void
+__mtx_holdlock(npthread_mutex_t * mutex, uint32_t diff, uint32_t * flagp, uint32_t **pmtxp, uint32_t * mgenp, uint32_t * ugenp)
+{
+ uint32_t mgen, ugen, ngen;
+ int hold = 0;
+ int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_START, (uint32_t)mutex, diff, firstfit, 0, 0);
+#endif
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ /* no holds for shared mutexes */
+ hold = 2;
+ mgen = 0;
+ ugen = 0;
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ goto out;
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
+
+retry:
+ mgen = *lseqaddr;
+ ugen = *useqaddr;
+ /* no need to do extra wrap */
+ ngen = mgen + (PTHRW_INC * diff);
+ hold = 0;
+
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_NONE, (uint32_t)mutex, 0, mgen, ngen, 0);
+#endif
+ /* can we acquire the lock ? */
+ if ((mgen & PTHRW_EBIT) == 0) {
+ /* if it is firstfit, no need to hold till the cvar returns */
+ if (firstfit == 0) {
+ ngen |= PTHRW_EBIT;
+ hold = 1;
+ }
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_NONE, (uint32_t)mutex, 1, mgen, ngen, 0);
+#endif
+ }
+
+ /* update lockseq */
+ if (OSAtomicCompareAndSwap32(mgen, ngen, (volatile int32_t *)lseqaddr) != TRUE)
+ goto retry;
+ if (hold == 1) {
+ mutex->m_tid = PTHREAD_MTX_TID_SWITCHING ;
+ }
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_NONE, (uint32_t)mutex, 2, hold, 0, 0);
+#endif
+
+out:
+ if (flagp != NULL) {
+ if (hold == 1) {
+ *flagp = (mutex->mtxopts.value | _PTHREAD_MTX_OPT_HOLD);
+ } else if (hold == 2) {
+ *flagp = (mutex->mtxopts.value | _PTHREAD_MTX_OPT_NOHOLD);
+ } else {
+ *flagp = mutex->mtxopts.value;
+ }
+ }
+ if (mgenp != NULL)
+ *mgenp = mgen;
+ if (ugenp != NULL)
+ *ugenp = ugen;
+ if (pmtxp != NULL)
+ *pmtxp = lseqaddr;
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_END, (uint32_t)mutex, hold, 0, 0, 0);
+#endif
+}
+
+
+/*
+ * Drop the mutex unlock references(from cond wait or mutex_unlock().
+ * mgenp and ugenp valid only if notifyp is set
+ *
+ */
+__private_extern__ int
+__mtx_droplock(npthread_mutex_t * mutex, int count, uint32_t * flagp, uint32_t ** pmtxp, uint32_t * mgenp, uint32_t * ugenp, uint32_t *notifyp)
+{
+ int oldval, newval, lockval, unlockval;
+ uint64_t oldtid;
+ pthread_t self = pthread_self();
+ uint32_t notify = 0;
+ uint64_t oldval64, newval64;
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+ int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_START, (uint32_t)mutex, count, 0, 0, 0);
+#endif
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
+
+ if (flagp != NULL)
+ *flagp = mutex->mtxopts.value;
+
+ if (firstfit != 0)
+ notify |= 0x80000000;
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED)
+ notify |= 0x40000000;
+
+ if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL)
+ {
+ if (mutex->m_tid != (uint64_t)((uintptr_t)self))
+ {
+ PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
+ return(EPERM);
+ } else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
+ --mutex->mtxopts.options.lock_count)
+ {
+ PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
+ goto out;
+ }
+ }
+
+
+ if (mutex->m_tid != (uint64_t)((uintptr_t)self))
+ return(EINVAL);
+
+
+ml0:
+ oldval = *useqaddr;
+ unlockval = oldval + (PTHRW_INC * count);
+ lockval = *lseqaddr;
+
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, 10, lockval, oldval, 0);
+#endif
+#if 1
+ if (lockval == oldval)
+ LIBC_ABORT("same unlock and lockseq \n");
+#endif
+
+ if ((lockval & PTHRW_COUNT_MASK) == unlockval) {
+ oldtid = mutex->m_tid;
+
+ mutex->m_tid = 0;
+
+ oldval64 = (((uint64_t)oldval) << 32);
+ oldval64 |= lockval;
+
+ newval64 = 0;
+
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) == TRUE) {
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, 1, 0, 0, 0);
+#endif
+ goto out;
+ } else {
+ mutex->m_tid = oldtid;
+ /* fall thru for kernel call */
+ goto ml0;
+ }
+ }
+
+ if (firstfit != 0) {
+ /* reset ebit along with unlock */
+ newval = (lockval & ~PTHRW_EBIT);
+
+ lockval = newval;
+ oldval64 = (((uint64_t)oldval) << 32);
+ oldval64 |= lockval;
+
+ newval64 = (((uint64_t)unlockval) << 32);
+ newval64 |= newval;
+
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE) {
+ goto ml0;
+ }
+ lockval = newval;
+ } else {
+ /* fairshare , just update and go to kernel */
+ if (OSAtomicCompareAndSwap32(oldval, unlockval, (volatile int32_t *)useqaddr) != TRUE)
+ goto ml0;
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, 2, oldval, unlockval, 0);
+#endif
+ }
+
+ notify |= 1;
+
+ if (notifyp != 0) {
+ if (mgenp != NULL)
+ *mgenp = lockval;
+ if (ugenp != NULL)
+ *ugenp = unlockval;
+ if (pmtxp != NULL)
+ *pmtxp = lseqaddr;
+ *notifyp = notify;
+ }
+out:
+ if (notifyp != 0) {
+ *notifyp = notify;
+ }
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
+#endif
+ return(0);
+}
+
+int
+__mtx_updatebits(npthread_mutex_t *mutex, uint32_t oupdateval, int firstfit, int fromcond)
+{
+ uint32_t lgenval, newval, bits;
+ int isebit = 0;
+ uint32_t updateval = oupdateval;
+ pthread_mutex_t * omutex = (pthread_mutex_t *)mutex;
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_START, (uint32_t)mutex, oupdateval, firstfit, fromcond, 0);
+#endif
+
+retry:
+ lgenval = *lseqaddr;
+ bits = updateval & PTHRW_BIT_MASK;
+
+ if (lgenval == updateval)
+ goto out;
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 1, lgenval, updateval, 0);
+#endif
+ if ((lgenval & PTHRW_BIT_MASK) == bits)
+ goto out;
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 2, lgenval, bits, 0);
+#endif
+ /* firsfit might not have EBIT */
+ if (firstfit != 0) {
+ lgenval &= ~PTHRW_EBIT; /* see whether EBIT is set */
+ if ((lgenval & PTHRW_EBIT) != 0)
+ isebit = 1;
+ }
+
+ if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 3, lgenval, updateval, 0);
+#endif
+ updateval |= PTHRW_EBIT; /* just in case.. */
+ if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE) {
+ if (firstfit == 0)
+ goto retry;
+ goto handleffit;
+ }
+ /* update succesfully */
+ goto out;
+ }
+
+
+ if (((lgenval & PTHRW_WBIT) != 0) && ((updateval & PTHRW_WBIT) == 0)) {
+ newval = lgenval | (bits | PTHRW_WBIT | PTHRW_EBIT);
+ } else {
+ newval = lgenval | (bits | PTHRW_EBIT);
+ }
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 4, lgenval, newval, 0);
+#endif
+ if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE) {
+ if (firstfit == 0)
+ goto retry;
+ goto handleffit;
+ }
+out:
+ /* succesful bits updation */
+ mutex->m_tid = (uint64_t)((uintptr_t)pthread_self());
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
+#endif
+ return(0);
+
+handleffit:
+ /* firstfit failure */
+ newval = *lseqaddr;
+ if ((newval & PTHRW_EBIT) == 0)
+ goto retry;
+ if (((lgenval & PTHRW_COUNT_MASK) == (newval & PTHRW_COUNT_MASK)) && (isebit == 1)) {
+ if (fromcond == 0)
+ return(1);
+ else {
+ /* called from condition variable code block again */
+ml1:
+#if USE_COMPAGE /* [ */
+ updateval = __psynch_mutexwait((pthread_mutex_t *)lseqaddr, newval | PTHRW_RETRYBIT, *useqaddr, (uint64_t)0,
+ mutex->mtxopts.value);
+#else /* USECOMPAGE ][ */
+ updateval = __psynch_mutexwait(omutex, newval | PTHRW_RETRYBIT, *useqaddr, (uint64_t)0,
+#endif /* USE_COMPAGE ] */
+ if (updateval == (uint32_t)-1) {
+ goto ml1;
+ }
+
+ goto retry;
+ }
+ }
+ /* seqcount changed, retry */
+ goto retry;
+}
+
+int
+_new_pthread_mutex_lock(pthread_mutex_t *omutex)
+{
+ pthread_t self;
+ npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
+ int sig = mutex->sig;
+ int retval;
+ uint32_t oldval, newval, uval, updateval;
+ int gotlock = 0;
+ int firstfit = 0;
+ int retrybit = 0;
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+ int updatebitsonly = 0;
+#if USE_COMPAGE
+ uint64_t mytid;
+ int sysret = 0;
+ uint32_t mask;
+#else
+
+#endif
+
+ /* To provide backwards compat for apps using mutex incorrectly */
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init)) {
+ PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
+ return(EINVAL);
+ }
+ if (sig != _PTHREAD_MUTEX_SIG) {
+ LOCK(mutex->lock);
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig == _PTHREAD_MUTEX_SIG_init)) {
+ /* static initializer, init the mutex */
+ _new_pthread_mutex_init(omutex, NULL);
+ self = _PTHREAD_MUTEX_OWNER_SELF;
+ } else {
+ UNLOCK(mutex->lock);
+ PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
+ return(EINVAL);
+ }
+ UNLOCK(mutex->lock);
+ }
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_START, (uint32_t)mutex, 0, 0, 0, 0);
+#endif
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
+
+ self = pthread_self();
+ if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
+ if (mutex->m_tid == (uint64_t)((uintptr_t)self)) {
+ if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
+ {
+ if (mutex->mtxopts.options.lock_count < USHRT_MAX)
+ {
+ mutex->mtxopts.options.lock_count++;
+ PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
+ retval = 0;
+ } else {
+ retval = EAGAIN;
+ PLOCKSTAT_MUTEX_ERROR(omutex, retval);
+ }
+ } else { /* PTHREAD_MUTEX_ERRORCHECK */
+ retval = EDEADLK;
+ PLOCKSTAT_MUTEX_ERROR(omutex, retval);
+ }
+ return (retval);
+ }
+ }
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 1, 0, 0, 0);
+#endif
+loop:
+#if USE_COMPAGE /* [ */
+
+ mytid = (uint64_t)((uintptr_t)pthread_self());
+
+ml0:
+ mask = PTHRW_EBIT;
+ retval = _commpage_pthread_mutex_lock(lseqaddr, mutex->mtxopts.value, mytid, mask, &mutex->m_tid, &sysret);
+ if (retval == 0) {
+ gotlock = 1;
+ } else if (retval == 1) {
+ gotlock = 1;
+ updateval = sysret;
+ /* returns 0 on succesful update */
+ if (__mtx_updatebits( mutex, updateval, firstfit, 0) == 1) {
+ /* could not acquire, may be locked in ffit case */
+#if USE_COMPAGE
+ LIBC_ABORT("comapge implementatin looping in libc \n");
+#endif
+ goto ml0;
+ }
+ }
+#if NEVERINCOMPAGE
+ else if (retval == 3) {
+ cthread_set_errno_self(sysret);
+ oldval = *lseqaddr;
+ uval = *useqaddr;
+ newval = oldval + PTHRW_INC;
+ gotlock = 0;
+ /* to block in the kerenl again */
+ }
+#endif
+ else {
+ LIBC_ABORT("comapge implementatin bombed \n");
+ }
+
+
+#else /* USECOMPAGE ][ */
+ oldval = *lseqaddr;
+ uval = *useqaddr;
+ newval = oldval + PTHRW_INC;
+
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, oldval, uval, 0);
+
+ if((oldval & PTHRW_EBIT) == 0) {
+ gotlock = 1;
+ newval |= PTHRW_EBIT;
+ } else {
+ gotlock = 0;
+ newval |= PTHRW_WBIT;
+ }
+
+ if (OSAtomicCompareAndSwap32(oldval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+ if (gotlock != 0)
+ mutex->m_tid = (uint64_t)((uintptr_t)self);
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, oldval, newval, 0);
+#endif
+ } else
+ goto loop;
+
+
+ retrybit = 0;
+ if (gotlock == 0) {
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 3, 0, 0, 0);
+#endif
+ firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
+ml1:
+ updateval = __psynch_mutexwait(omutex, newval | retrybit, uval, (uint64_t)0,
+ mutex->mtxopts.value);
+
+ if (updateval == (uint32_t)-1) {
+ updatebitsonly = 0;
+ goto ml1;
+ }
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 4, updateval, 0, 0);
+#endif
+ /* returns 0 on succesful update */
+ if (__mtx_updatebits( mutex, updateval, firstfit, 0) == 1) {
+ /* could not acquire, may be locked in ffit case */
+ retrybit = PTHRW_RETRYBIT;
+#if USE_COMPAGE
+ LIBC_ABORT("comapge implementatin looping in libc \n");
+
+#endif
+ goto ml1;
+ }
+ }
+#endif /* USE_COMPAGE ] */
+
+ if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
+ mutex->mtxopts.options.lock_count++;
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
+#endif
+ return (0);
+}
+
+/*
+ * Attempt to lock a mutex, but don't block if this isn't possible.
+ */
+int
+_new_pthread_mutex_trylock(pthread_mutex_t *omutex)
+{
+ npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
+ int sig = mutex->sig;
+ uint32_t oldval, newval;
+ int error = 0;
+ pthread_t self;
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+
+ /* To provide backwards compat for apps using mutex incorrectly */
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init)) {
+ PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
+ return(EINVAL);
+ }
+
+ if (sig != _PTHREAD_MUTEX_SIG) {
+ LOCK(mutex->lock);
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig == _PTHREAD_MUTEX_SIG_init)) {
+ /* static initializer, init the mutex */
+ _new_pthread_mutex_init(omutex, NULL);
+ self = _PTHREAD_MUTEX_OWNER_SELF;
+ } else {
+ UNLOCK(mutex->lock);
+ PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
+ return(EINVAL);
+ }
+ UNLOCK(mutex->lock);
+ }
+
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
+
+ self = pthread_self();
+ if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
+ if (mutex->m_tid == (uint64_t)((uintptr_t)self)) {
+ if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
+ {
+ if (mutex->mtxopts.options.lock_count < USHRT_MAX)
+ {
+ mutex->mtxopts.options.lock_count++;
+ PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
+ error = 0;
+ } else {
+ error = EAGAIN;
+ PLOCKSTAT_MUTEX_ERROR(omutex, error);
+ }
+ } else { /* PTHREAD_MUTEX_ERRORCHECK */
+ error = EDEADLK;
+ PLOCKSTAT_MUTEX_ERROR(omutex, error);
+ }
+ return (error);
+ }
+ }
+retry:
+ oldval = *lseqaddr;
+
+ if ((oldval & PTHRW_EBIT) != 0) {
+ newval = oldval | PTHRW_TRYLKBIT;
+ if (OSAtomicCompareAndSwap32(oldval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+ error = EBUSY;
+ } else
+ goto retry;
+ } else {
+ newval = (oldval + PTHRW_INC)| PTHRW_EBIT;
+ if ((OSAtomicCompareAndSwap32(oldval, newval, (volatile int32_t *)lseqaddr) == TRUE)) {
+ mutex->m_tid = (uint64_t)((uintptr_t)self);
+ if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
+ mutex->mtxopts.options.lock_count++;
+ } else
+ goto retry;
+ }
+
+ return(error);
+}
+
+/*
+ * Unlock a mutex.
+ * TODO: Priority inheritance stuff
+ */
+int
+_new_pthread_mutex_unlock(pthread_mutex_t *omutex)
+{
+ npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
+ int retval;
+ uint32_t mtxgen, mtxugen, flags, notify;
+ int sig = mutex->sig;
+ pthread_t self = pthread_self();
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+
+ /* To provide backwards compat for apps using mutex incorrectly */
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_START, (uint32_t)mutex, 0, 0, 0, 0);
+#endif
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init)) {
+ PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
+ return(EINVAL);
+ }
+ if (sig != _PTHREAD_MUTEX_SIG) {
+ LOCK(mutex->lock);
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig == _PTHREAD_MUTEX_SIG_init)) {
+ /* static initializer, init the mutex */
+ _new_pthread_mutex_init(omutex, NULL);
+ self = _PTHREAD_MUTEX_OWNER_SELF;
+ } else {
+ UNLOCK(mutex->lock);
+ PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
+ return(EINVAL);
+ }
+ UNLOCK(mutex->lock);
+ }
+
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
+ notify = 0;
+ retval = __mtx_droplock(mutex, 1, &flags, NULL, &mtxgen, &mtxugen, ¬ify);
+ if (retval != 0)
+ return(retval);
+
+ if ((notify & 1) != 0) {
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_NONE, (uint32_t)mutex, 1, 0, 0, 0);
+#endif
+#if USE_COMPAGE /* [ */
+ if ( __psynch_mutexdrop((pthread_mutex_t *)lseqaddr, mtxgen, mtxugen, (uint64_t)0, flags)== (uint32_t)-1)
+#else /* USECOMPAGE ][ */
+ if ( __psynch_mutexdrop(omutex, mtxgen, mtxugen, (uint64_t)0, flags)== (uint32_t)-1)
+#endif /* USE_COMPAGE ] */
+ {
+ if (errno == EINTR)
+ return(0);
+ else
+ return(errno);
+ }
+ }
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
+#endif
+ return(0);
+}
+
+
+/*
+ * Initialize a mutex variable, possibly with additional attributes.
+ */
+int
+_new_pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
+{
+ npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
+
+ if (attr)
+ {
+ if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG)
+ return (EINVAL);
+ mutex->prioceiling = attr->prioceiling;
+ mutex->mtxopts.options.protocol = attr->protocol;
+ mutex->mtxopts.options.policy = attr->policy;
+ mutex->mtxopts.options.type = attr->type;
+ mutex->mtxopts.options.pshared = attr->pshared;
+ } else {
+ mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
+ mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
+ mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
+ mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
+ mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
+ }
+
+ mutex->mtxopts.options.lock_count = 0;
+ /* address 8byte aligned? */
+ if (((uintptr_t)mutex & 0x07) != 0) {
+ /* 4byte alinged */
+ mutex->mtxopts.options.misalign = 1;
+#if defined(__LP64__)
+ mutex->m_lseqaddr = &mutex->m_seq[0];
+ mutex->m_useqaddr = &mutex->m_seq[1];
+#else /* __LP64__ */
+ mutex->m_lseqaddr = &mutex->m_seq[1];
+ mutex->m_useqaddr = &mutex->m_seq[2];
+#endif /* __LP64__ */
+ } else {
+ /* 8byte alinged */
+ mutex->mtxopts.options.misalign = 0;
+#if defined(__LP64__)
+ mutex->m_lseqaddr = &mutex->m_seq[1];
+ mutex->m_useqaddr = &mutex->m_seq[2];
+#else /* __LP64__ */
+ mutex->m_lseqaddr = &mutex->m_seq[0];
+ mutex->m_useqaddr = &mutex->m_seq[1];
+#endif /* __LP64__ */
+ }
+ mutex->m_tid = 0;
+ mutex->m_seq[0] = 0;
+ mutex->m_seq[1] = 0;
+ mutex->m_seq[2] = 0;
+ mutex->prioceiling = 0;
+ mutex->priority = 0;
+ mutex->sig = _PTHREAD_MUTEX_SIG;
+ return (0);
+}
+
+
+
+/*
+ * Destroy a mutex variable.
+ */
+int
+_new_pthread_mutex_destroy(pthread_mutex_t *omutex)
+{
+ int res;
+ npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
+
+ LOCK(mutex->lock);
+ res = _new_pthread_mutex_destroy_locked(omutex);
+ UNLOCK(mutex->lock);
+
+ return(res);
+}
+
+
+int
+_new_pthread_mutex_destroy_locked(pthread_mutex_t *omutex)
+{
+ int res;
+ npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
+ uint32_t lgenval;
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+
+
+ if (mutex->sig == _PTHREAD_MUTEX_SIG)
+ {
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
+
+ lgenval = *(lseqaddr);
+ if ((mutex->m_tid == (uint64_t)0) &&
+ ((lgenval & PTHRW_COUNT_MASK) == 0))
+ {
+ mutex->sig = _PTHREAD_NO_SIG;
+ res = 0;
+ }
+ else
+ res = EBUSY;
+ } else
+ res = EINVAL;
+
+ return (res);
+}
+
+#endif /* __i386__ || __x86_64__ */
#endif /* !BUILDING_VARIANT ] */
.Bl -tag -width Er
.It Bq Er EBUSY
.Fa Mutex
-is locked by another thread.
+is locked by a thread.
.It Bq Er EINVAL
The value specified by
.Fa mutex
.Fn pthread_mutex_unlock
function unlocks
.Fa mutex .
+.Pp
+Calling
+.Fn pthread_mutex_unlock
+with a
+.Fa mutex
+that the calling thread does not hold will result
+in undefined behavior.
.Sh RETURN VALUES
If successful,
.Fn pthread_mutex_unlock
.Fc
.Ft int
.Fo pthread_mutexattr_getprioceiling
-.Fa "const pthread_mutexattr_t *attr"
-.Fa "int *prioceiling"
+.Fa "const pthread_mutexattr_t *restrict attr"
+.Fa "int *restrict prioceiling"
.Fc
.\" To match the SUS, this should be:
.\" .Ft int
.\" .Fc
.Ft int
.Fo pthread_mutexattr_getprotocol
-.Fa "const pthread_mutexattr_t *attr"
-.Fa "int *protocol"
+.Fa "const pthread_mutexattr_t *restrict attr"
+.Fa "int *restrict protocol"
.Fc
.\" To match the SUS, this should be:
.\" .Ft int
.\" .Fc
.Ft int
.Fo pthread_mutexattr_gettype
-.Fa "const pthread_mutexattr_t *attr"
-.Fa "int *type"
+.Fa "const pthread_mutexattr_t *restrict attr"
+.Fa "int *restrict type"
.Fc
.\" To match the SUS, this should be:
.\" .Ft int
and
.Dv PTHREAD_MUTEX_DEFAULT .
The default mutex type for
-.Fn pthread_mutexaddr_init
+.Fn pthread_mutexattr_init
is
.Dv PTHREAD_MUTEX_DEFAULT .
.Pp
*/
#include "pthread_internals.h"
+#include <stdio.h> /* For printf(). */
+
extern int __unix_conforming;
-#ifdef PLOCKSTAT
-#include "plockstat.h"
-#else /* !PLOCKSTAT */
-#define PLOCKSTAT_RW_ERROR(x, y, z)
-#define PLOCKSTAT_RW_BLOCK(x, y)
-#define PLOCKSTAT_RW_BLOCKED(x, y, z)
-#define PLOCKSTAT_RW_ACQUIRE(x, y)
-#define PLOCKSTAT_RW_RELEASE(x, y)
-#endif /* PLOCKSTAT */
+#ifdef PLOCKSTAT
+#include "plockstat.h"
+#else /* !PLOCKSTAT */
+/* No DTrace plockstat provider: compile the probes away. */
+#define PLOCKSTAT_RW_ERROR(x, y, z)
+#define PLOCKSTAT_RW_BLOCK(x, y)
+#define PLOCKSTAT_RW_BLOCKED(x, y, z)
+#define PLOCKSTAT_RW_ACQUIRE(x, y)
+#define PLOCKSTAT_RW_RELEASE(x, y)
+#endif /* PLOCKSTAT */
+
+/* rwlock kind (second argument) passed to the PLOCKSTAT probes */
+#define READ_LOCK_PLOCKSTAT 0
+#define WRITE_LOCK_PLOCKSTAT 1
+
+/* outcome (third argument) passed to PLOCKSTAT_RW_BLOCKED */
+#define BLOCK_FAIL_PLOCKSTAT 0
+#define BLOCK_SUCCESS_PLOCKSTAT 1
+
+/* maximum number of times a read lock may be obtained */
+#define MAX_READ_LOCKS (INT_MAX - 1)
+
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * Gate for the new (psynch kernel assisted) implementation; defined in
+ * the main variant and referenced from the other build variants.
+ */
+#ifndef BUILDING_VARIANT /* [ */
+int usenew_impl = 0;
+#else /* BUILDING_VARIANT */
+extern int usenew_impl;
+#endif /* BUILDING_VARIANT */
+
+
+/*
+ * Locate the lock-sequence (lseq), writer/waiter-count (wc) and
+ * unlock-sequence (useq) words of a process-shared rwlock.  Which
+ * rw_seq[] slots are used depends on the structure's address (the
+ * misalign flag recorded by _new_pthread_rwlock_init), so that the
+ * lseq/wc pair sits 8-byte aligned for the 64-bit CAS used by the
+ * lock paths.  Must mirror the slot assignment done in init.
+ */
+#if defined(__LP64__)
+#define RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr) \
+{ \
+	if (rwlock->misalign != 0) { \
+		lseqaddr = &rwlock->rw_seq[1]; \
+		wcaddr = &rwlock->rw_seq[2]; \
+		useqaddr = &rwlock->rw_seq[3]; \
+	} else { \
+		lseqaddr = &rwlock->rw_seq[0]; \
+		wcaddr = &rwlock->rw_seq[1]; \
+		useqaddr = &rwlock->rw_seq[2]; \
+	} \
+}
+#else /* __LP64__ */
+#define RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr) \
+{ \
+	if (rwlock->misalign != 0) { \
+		lseqaddr = &rwlock->rw_seq[0]; \
+		wcaddr = &rwlock->rw_seq[1]; \
+		useqaddr = &rwlock->rw_seq[2]; \
+	}else { \
+		lseqaddr = &rwlock->rw_seq[1]; \
+		wcaddr = &rwlock->rw_seq[2]; \
+		useqaddr = &rwlock->rw_seq[3]; \
+	} \
+}
+#endif /* __LP64__ */
+
+/* Entry points of the new implementation. */
+int _new_pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
+int _new_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
+int _new_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
+int _new_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
+int _new_pthread_rwlock_longrdlock_np(pthread_rwlock_t *rwlock);
+int _new_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
+int _new_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
+int _new_pthread_rwlock_yieldwrlock_np(pthread_rwlock_t *rwlock);
+int _new_pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
+int _new_pthread_rwlock_downgrade_np(pthread_rwlock_t *rwlock);
+int _new_pthread_rwlock_upgrade_np(pthread_rwlock_t *rwlock);
+
+/* Set to 1 to emit kdebug trace points from the rwlock paths. */
+#define _KSYN_TRACE_ 0
+
+#if _KSYN_TRACE_
+/* The Function qualifiers */
+#define DBG_FUNC_START 1
+#define DBG_FUNC_END 2
+#define DBG_FUNC_NONE 0
+
+int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
+
+/* kdebug trace codes for the rwlock operations */
+#define _KSYN_TRACE_RW_RDLOCK 0x9000080
+#define _KSYN_TRACE_RW_WRLOCK 0x9000084
+#define _KSYN_TRACE_RW_UNLOCK 0x9000088
+#define _KSYN_TRACE_RW_UNACT1 0x900808c
+#define _KSYN_TRACE_RW_UNACT2 0x9008090
+#define _KSYN_TRACE_RW_UNACTK 0x9008094
+#define _KSYN_TRACE_RW_UNACTE 0x9008098
+#endif /* _KSYN_TRACE_ */
+#endif /* __i386__ || __x86_64__ */
+
+#ifndef BUILDING_VARIANT /* [ */
+
+#if defined(__i386__) || defined(__x86_64__)
+/* Internal helpers for propagating kernel unlock state (defined below). */
+static int rwlock_unlock_action_onread(pthread_rwlock_t * rwlock, uint32_t updateval);
+static int rwlock_unlock_action1(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t updateval);
+static int rwlock_unlock_action2(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t updateval);
+static uint32_t modbits(uint32_t lgenval, uint32_t updateval);
+static int rwlock_unlock_action_k(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t updateval);
+static int rwlock_exclusive_lockreturn(pthread_rwlock_t * rwlock, uint32_t updateval);
+static int rw_diffgenseq(uint32_t x, uint32_t y);
+#endif /* __i386__ || __x86_64__ */
+
+
+/*
+ * Initialize a rwlock attribute object to the defaults: valid
+ * signature, process-private sharing.  Always succeeds.
+ */
+int
+pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
+{
+	attr->sig = _PTHREAD_RWLOCK_ATTR_SIG;
+	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
+	return (0);
+}
+
+/*
+ * Invalidate a rwlock attribute object: clears the signature so any
+ * later use fails the signature checks.  Always succeeds.
+ */
+int
+pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
+{
+	attr->sig = _PTHREAD_NO_SIG; /* Uninitialized */
+	attr->pshared = 0;
+	return (0);
+}
+
+/*
+ * Fetch the process-shared setting of an initialized rwlock attribute
+ * object into *pshared; EINVAL if the signature check fails.
+ */
+int
+pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr,
+			int *pshared)
+{
+	if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG)
+	{
+		*pshared = (int)attr->pshared;
+		return (0);
+	} else
+	{
+		return (EINVAL); /* Not an initialized 'attribute' structure */
+	}
+}
+
+
+/*
+ * Set the process-shared attribute.  Under UNIX03 conformance both
+ * PTHREAD_PROCESS_PRIVATE and PTHREAD_PROCESS_SHARED are accepted;
+ * otherwise only PTHREAD_PROCESS_PRIVATE.  EINVAL for a bad value or
+ * an uninitialized attribute object.
+ */
+int
+pthread_rwlockattr_setpshared(pthread_rwlockattr_t * attr, int pshared)
+{
+	if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG)
+	{
+#if __DARWIN_UNIX03
+		if (( pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
+#else /* __DARWIN_UNIX03 */
+		if ( pshared == PTHREAD_PROCESS_PRIVATE)
+#endif /* __DARWIN_UNIX03 */
+		{
+			attr->pshared = pshared ;
+			return (0);
+		} else
+		{
+			return (EINVAL); /* Invalid parameter */
+		}
+	} else
+	{
+		return (EINVAL); /* Not an initialized 'attribute' structure */
+	}
+
+}
+
+#if defined(__i386__) || defined(__x86_64__) /* [ */
+/*
+ * Destroy a rwlock.  EINVAL if the signature is bad.  Under UNIX03 the
+ * lock-sequence and unlock-sequence counts must match (no holder, no
+ * waiters) or EBUSY is returned.  On success the signature is cleared
+ * so further use is rejected.
+ */
+int
+_new_pthread_rwlock_destroy(pthread_rwlock_t *orwlock)
+{
+	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+#if __DARWIN_UNIX03
+	uint32_t rw_lseqcnt, rw_useqcnt;
+	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+#endif /* __DARWIN_UNIX03 */
+
+	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+		return(EINVAL);
+	} else {
+#if __DARWIN_UNIX03
+		/* process-shared locks locate their words via the macro */
+		if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+			RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+		} else {
+			lseqaddr = rwlock->rw_lseqaddr;
+			useqaddr = rwlock->rw_useqaddr;
+			wcaddr = rwlock->rw_wcaddr;
+		}
+
+		rw_lseqcnt = *lseqaddr;
+		rw_useqcnt = *useqaddr;
+
+		/* still in use if acquire and release sequence counts differ */
+		if((rw_lseqcnt & PTHRW_COUNT_MASK) != rw_useqcnt)
+			return(EBUSY);
+
+#endif /* __DARWIN_UNIX03 */
+		//bzero(rwlock, sizeof(npthread_rwlock_t));
+		rwlock->sig = _PTHREAD_NO_SIG;
+		return(0);
+	}
+}
+
+
+/*
+ * Initialize a rwlock.
+ *
+ * Under UNIX03: a non-NULL attr must carry a valid signature (EINVAL
+ * otherwise), and re-initializing a lock that is currently in use
+ * (lock/unlock sequence counts differ) fails with EBUSY.
+ *
+ * The structure is zeroed, the pshared/kernel flags are taken from
+ * attr (defaulting to process-private), and the lseq/wc/useq words are
+ * assigned to rw_seq[] slots chosen by the structure's 8-byte
+ * alignment (recorded in the misalign flag) so the 64-bit CAS in the
+ * lock paths operates on an aligned lseq/wc pair.  The slot layout
+ * must stay in sync with RWLOCK_GETSEQ_ADDR.
+ */
+int
+_new_pthread_rwlock_init(pthread_rwlock_t * orwlock, const pthread_rwlockattr_t *attr)
+{
+	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+#if __DARWIN_UNIX03
+	uint32_t rw_lseqcnt, rw_useqcnt;
+	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+#endif /* __DARWIN_UNIX03 */
+
+#if __DARWIN_UNIX03
+	if (attr && (attr->sig != _PTHREAD_RWLOCK_ATTR_SIG)) {
+		return(EINVAL);
+	}
+
+	/* if already inited check whether it is in use, then return EBUSY */
+	if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
+		if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+			RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+		} else {
+			lseqaddr = rwlock->rw_lseqaddr;
+			useqaddr = rwlock->rw_useqaddr;
+			wcaddr = rwlock->rw_wcaddr;
+		}
+		rw_lseqcnt = *lseqaddr;
+		rw_useqcnt = *useqaddr;
+
+		if ((rw_lseqcnt & PTHRW_COUNT_MASK) != rw_useqcnt)
+			return(EBUSY);
+
+	}
+#endif /* __DARWIN_UNIX03 */
+
+	/* initialize the lock */
+	bzero(rwlock, sizeof(pthread_rwlock_t));
+
+	if ((attr != NULL) && (attr->pshared == PTHREAD_PROCESS_SHARED)) {
+		rwlock->pshared = PTHREAD_PROCESS_SHARED;
+		rwlock->rw_flags = PTHRW_KERN_PROCESS_SHARED;
+
+	} else {
+		rwlock->pshared = _PTHREAD_DEFAULT_PSHARED;
+		rwlock->rw_flags = PTHRW_KERN_PROCESS_PRIVATE;
+	}
+
+	/* pick rw_seq[] slots so the lseq/wc pair is 8-byte aligned */
+	if (((uintptr_t)rwlock & 0x07) != 0) {
+		rwlock->misalign = 1;
+#if defined(__LP64__)
+		rwlock->rw_lseqaddr = &rwlock->rw_seq[1];
+		rwlock->rw_wcaddr = &rwlock->rw_seq[2];
+		rwlock->rw_useqaddr = &rwlock->rw_seq[3];
+		rwlock->rw_seq[1]= PTHRW_RW_INIT;
+#else /* __LP64__ */
+		rwlock->rw_lseqaddr = &rwlock->rw_seq[0];
+		rwlock->rw_wcaddr = &rwlock->rw_seq[1];
+		rwlock->rw_useqaddr = &rwlock->rw_seq[2];
+		rwlock->rw_seq[0]= PTHRW_RW_INIT;
+#endif /* __LP64__ */
+
+	} else {
+		rwlock->misalign = 0;
+#if defined(__LP64__)
+		rwlock->rw_lseqaddr = &rwlock->rw_seq[0];
+		rwlock->rw_wcaddr = &rwlock->rw_seq[1];
+		rwlock->rw_useqaddr = &rwlock->rw_seq[2];
+		rwlock->rw_seq[0]= PTHRW_RW_INIT;
+#else /* __LP64__ */
+		rwlock->rw_lseqaddr = &rwlock->rw_seq[1];
+		rwlock->rw_wcaddr = &rwlock->rw_seq[2];
+		rwlock->rw_useqaddr = &rwlock->rw_seq[3];
+		rwlock->rw_seq[1]= PTHRW_RW_INIT;
+#endif /* __LP64__ */
+
+	}
+	rwlock->sig = _PTHREAD_RWLOCK_SIG;
+
+	return(0);
+}
+
+/*
+ * Acquire a read lock.
+ *
+ * Fast path: when the lock word admits readers (L bit set, or none of
+ * the E/W/U bits set), a single 32-bit CAS bumps the reader count.
+ * Slow path: a 64-bit CAS bumps the lock sequence and the kernel
+ * waiter count together, then the thread blocks in the kernel via
+ * __psynch_rw_rdlock() (retrying on EINTR).
+ *
+ * Returns 0 on success; EDEADLK (UNIX03) if the caller already owns
+ * the lock exclusively; EAGAIN when the reader count would reach
+ * PTHRW_MAX_READERS; EINVAL for a bad lock; or the kernel's errno.
+ */
+int
+_new_pthread_rwlock_rdlock(pthread_rwlock_t * orwlock)
+{
+#if __DARWIN_UNIX03
+	pthread_t self;
+#endif /* __DARWIN_UNIX03 */
+	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+	uint32_t lgenval, ugenval, rw_wc, newval, updateval;
+	int error = 0, ret;
+	uint64_t oldval64, newval64;
+	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+		/* auto-initialize statically initialized locks on first use */
+		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+				PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
+				return(error);
+			}
+		} else {
+			PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
+			return(EINVAL);
+		}
+	}
+
+	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+	} else {
+		lseqaddr = rwlock->rw_lseqaddr;
+		useqaddr = rwlock->rw_useqaddr;
+		wcaddr = rwlock->rw_wcaddr;
+	}
+loop:
+	/* unsynchronized snapshot; the CAS below revalidates it */
+	lgenval = *lseqaddr;
+	ugenval = *useqaddr;
+	rw_wc = *wcaddr;
+#if _KSYN_TRACE_
+	/* NOTE(review): newval is not yet assigned at this trace point */
+	(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgenval, newval, rw_wc, 0);
+#endif
+
+	/* readers admitted: L bit set, or no E/W/U bits pending */
+	if (is_rw_lbit_set(lgenval))
+		goto gotlock;
+	if(is_rw_ewubit_clear(lgenval))
+		goto gotlock;
+
+#if __DARWIN_UNIX03
+	/* exclusive owner is us -> deadlock per SUS */
+	if (is_rw_ebit_set(lgenval)) {
+		self = pthread_self();
+		if(rwlock->rw_owner == self) {
+			error = EDEADLK;
+			goto out;
+		}
+	}
+#endif /* __DARWIN_UNIX03 */
+
+	/* mean Lbit is set and R bit not set; block in kernel */
+	newval = (lgenval + PTHRW_INC);
+
+	oldval64 = (((uint64_t)rw_wc) << 32);
+	oldval64 |= lgenval;
+
+	newval64 = (((uint64_t)(rw_wc + 1)) << 32);
+	newval64 |= newval;
+
+	/* bump lock seq and kernel waiter count atomically; retry on race */
+	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
+		goto loop;
+
+	/* give writers priority over readers */
+	PLOCKSTAT_RW_BLOCK(orwlock, READ_LOCK_PLOCKSTAT);
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lgenval, newval, rw_wc+1, 0);
+#endif
+
+retry:
+	updateval = __psynch_rw_rdlock(orwlock, (newval & ~PTHRW_RW_INIT), ugenval, rw_wc, rwlock->rw_flags);
+
+	if (updateval == (uint32_t)-1) {
+		error = errno;
+	} else
+		error = 0;
+
+	if (error == EINTR)
+		goto retry;
+
+	/* no longer waiting in the kernel */
+	OSAtomicDecrement32((volatile int32_t *)wcaddr);
+
+
+
+	if (error == 0) {
+		/* kernel piggybacked an unlock handoff; apply it here */
+		if ((updateval & PTHRW_RW_HUNLOCK) != 0) {
+			ret = rwlock_unlock_action_onread(orwlock, (updateval & ~PTHRW_RW_HUNLOCK));
+			if (ret != 0) {
+				LIBC_ABORT("rdlock_unlock handling failed");
+			}
+		}
+		PLOCKSTAT_RW_BLOCKED(orwlock, READ_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
+		PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
+		return(0);
+	} else {
+		PLOCKSTAT_RW_BLOCKED(orwlock, READ_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
+		goto out;
+	}
+	/* Not reached */
+
+gotlock:
+	/* check for max readers */
+	ugenval = *useqaddr;
+	if (rw_diffgenseq(lgenval, ugenval) >= PTHRW_MAX_READERS) {
+		error = EAGAIN;
+		goto out;
+	}
+
+	newval = (lgenval + PTHRW_INC);
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lgenval, newval, 0);
+#endif
+
+	/* fast path: single CAS registers this reader */
+	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+		PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
+#if _KSYN_TRACE_
+		(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, 0, 0, 0);
+#endif
+		return(0);
+	} else
+		goto loop;
+out:
+	PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
+#endif
+	return(error);
+}
+
+
+/*
+ * Non-blocking read lock: same fast path as rdlock, but returns EBUSY
+ * instead of blocking when an exclusive owner / pending writer bit is
+ * set.  EAGAIN when the reader count would reach PTHRW_MAX_READERS.
+ */
+int
+_new_pthread_rwlock_tryrdlock(pthread_rwlock_t * orwlock)
+{
+	uint32_t lgenval, newval, ugenval;
+	int error = 0;
+	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+		/* check for static initialization */
+		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+				PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
+				return(error);
+			}
+		} else {
+			PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
+			return(EINVAL);
+		}
+	}
+
+	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+	} else {
+		lseqaddr = rwlock->rw_lseqaddr;
+		useqaddr = rwlock->rw_useqaddr;
+		wcaddr = rwlock->rw_wcaddr;
+	}
+
+loop:
+	lgenval = *lseqaddr;
+	if (is_rw_lbit_set(lgenval))
+		goto gotlock;
+	if (is_rw_ewubit_clear(lgenval))
+		goto gotlock;
+
+
+	/* writer active or queued: do not block, just fail */
+	error = EBUSY;
+	goto out;
+
+gotlock:
+	ugenval = *useqaddr;
+	if (rw_diffgenseq(lgenval, ugenval) >= PTHRW_MAX_READERS) {
+		error = EAGAIN;
+		goto out;
+	}
+
+	newval = (lgenval + PTHRW_INC);
+	/* single CAS registers this reader; retry on race */
+	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+		PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
+		return(0);
+	} else
+		goto loop;
+out:
+	PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
+	return(error);
+}
+
+#ifdef NOTYET
+/*****************************************************************************/
+/* TBD need to add towards MAX_READERS */
+/*
+ * Long read lock (compiled out -- NOTYET): like rdlock but also sets
+ * the L bit so subsequent readers are admitted ahead of queued
+ * writers.  Blocks in the kernel via __psynch_rw_longrdlock() when the
+ * lock is not immediately available.
+ */
+int
+_new_pthread_rwlock_longrdlock_np(pthread_rwlock_t * orwlock)
+{
+	pthread_t self;
+	uint32_t lgenval, ugenval, rw_wc, newval, updateval;
+	int error = 0, ret;
+	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+	uint64_t oldval64, newval64;
+	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+		/* auto-initialize statically initialized locks on first use */
+		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+				PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
+				return(error);
+			}
+		} else {
+			PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
+			return(EINVAL);
+		}
+	}
+
+	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+	} else {
+		lseqaddr = rwlock->rw_lseqaddr;
+		useqaddr = rwlock->rw_useqaddr;
+		wcaddr = rwlock->rw_wcaddr;
+	}
+
+loop:
+
+	lgenval = *lseqaddr;
+	ugenval = *useqaddr;
+	rw_wc = *wcaddr;
+
+	if (is_rw_ewuybit_clear(lgenval))
+		goto gotlock;
+
+	/* if w bit is set ensure there is no deadlock */
+	if (is_rw_ebit_set(lgenval)) {
+		self = pthread_self();
+		if(rwlock->rw_owner == self) {
+			error = EDEADLK;
+			goto out;
+		}
+	}
+
+	newval = (lgenval + PTHRW_INC);
+	/* update lock seq and block in kernel */
+
+	oldval64 = (((uint64_t)rw_wc) << 32);
+	oldval64 |= lgenval;
+
+	newval64 = (((uint64_t)(rw_wc + 1)) << 32);
+	newval64 |= newval;
+
+	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
+		goto loop;
+kblock:
+	updateval = __psynch_rw_longrdlock(orwlock, newval, ugenval, (rw_wc+1), rwlock->rw_flags);
+	if (updateval == (uint32_t)-1) {
+		error = errno;
+	} else
+		error = 0;
+
+	if (error == EINTR)
+		goto kblock;
+
+	/* no longer waiting in the kernel */
+	OSAtomicDecrement32((volatile int32_t *)wcaddr);
+	if (error == 0) {
+
+		/* kernel piggybacked an unlock handoff; apply it */
+		if ((updateval & PTHRW_RW_HUNLOCK) != 0) {
+			ret = rwlock_unlock_action_onread(orwlock, (updateval & ~PTHRW_RW_HUNLOCK));
+			if (ret != 0) {
+				LIBC_ABORT("rdlock_unlock handling failed");
+			}
+		}
+
+		/* set the L (long-read) bit, retrying until the CAS lands */
+		error = FALSE;
+		while (error == FALSE) {
+			lgenval = *lseqaddr;
+			newval = lgenval | PTHRW_LBIT;
+			error = OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr);
+		}
+
+		goto successout;
+	} else
+		goto out;
+	/* NOTE(review): the next statement is unreachable (both branches jump) */
+	goto successout;
+
+gotlock:
+	newval = ((lgenval + PTHRW_INC)| PTHRW_LBIT);
+	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
+		goto loop;
+
+successout:
+	PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
+	return(0);
+out:
+	PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
+	return(error);
+}
+/**************************************************************/
+#endif /* NOTYET */
+
+/*
+ * Non-blocking write lock: succeeds only when the lock word still
+ * holds the pristine PTHRW_RW_INIT value (no readers, writers or
+ * waiters); a single CAS installs the increment plus the exclusive
+ * (E) bit.  Any other state fails immediately with EBUSY.  Records
+ * the owning thread under UNIX03 for deadlock detection.
+ */
+int
+_new_pthread_rwlock_trywrlock(pthread_rwlock_t * orwlock)
+{
+	int error = 0;
+	uint32_t lgenval, newval;
+#if __DARWIN_UNIX03
+	pthread_t self = pthread_self();
+#endif /* __DARWIN_UNIX03 */
+	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+		/* check for static initialization */
+		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+				PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
+				return(error);
+			}
+		} else {
+			PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
+			return(EINVAL);
+		}
+	}
+
+	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+	} else {
+		lseqaddr = rwlock->rw_lseqaddr;
+		useqaddr = rwlock->rw_useqaddr;
+		wcaddr = rwlock->rw_wcaddr;
+	}
+
+	/* expect the pristine init value; anything else means contention */
+	lgenval = PTHRW_RW_INIT;
+	newval = PTHRW_RW_INIT | PTHRW_INC | PTHRW_EBIT;
+	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+#if __DARWIN_UNIX03
+		rwlock->rw_owner = self;
+#endif /* __DARWIN_UNIX03 */
+		PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
+		return(0);
+	}
+	PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EBUSY);
+	return(EBUSY);
+}
+
+/*
+ * Acquire a write (exclusive) lock.
+ *
+ * Fast path: if the lock word is the pristine PTHRW_RW_INIT value, a
+ * single CAS installs the exclusive (E) bit.  Otherwise the W bit (and
+ * its shadow) is set, the lock sequence and kernel waiter count are
+ * bumped together with a 64-bit CAS, and the thread blocks in the
+ * kernel via __psynch_rw_wrlock() (retrying on EINTR).  The kernel
+ * must return with the E bit granted; rwlock_exclusive_lockreturn()
+ * reconciles the returned state and may ask us to block again
+ * (EAGAIN).  Records the owner under UNIX03; EDEADLK if the caller
+ * already owns the lock.
+ */
+int
+_new_pthread_rwlock_wrlock(pthread_rwlock_t * orwlock)
+{
+	uint32_t lgenval, newval, ugenval, updateval, rw_wc;
+	int error = 0;
+#if __DARWIN_UNIX03
+	pthread_t self = pthread_self();
+#endif /* __DARWIN_UNIX03 */
+	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+	uint64_t oldval64, newval64;
+	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+		/* check for static initialization */
+		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+				PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
+				return(error);
+			}
+		} else {
+			PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
+			return(EINVAL);
+		}
+	}
+
+
+	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+	} else {
+		lseqaddr = rwlock->rw_lseqaddr;
+		useqaddr = rwlock->rw_useqaddr;
+		wcaddr = rwlock->rw_wcaddr;
+	}
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
+#endif
+loop:
+	/* unsynchronized snapshot; the CASes below revalidate it */
+	lgenval = *lseqaddr;
+	ugenval = *useqaddr;
+	rw_wc = *wcaddr;
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lgenval, ugenval, rw_wc, 0);
+#endif
+#if __DARWIN_UNIX03
+	/* already exclusively owned by us -> deadlock per SUS */
+	if (is_rw_ebit_set(lgenval)) {
+		if(rwlock->rw_owner == self) {
+			error = EDEADLK;
+			goto out;
+		}
+	}
+#endif /* __DARWIN_UNIX03 */
+
+	/* fast path: lock completely free, grab it with one CAS */
+	if (lgenval == PTHRW_RW_INIT) {
+		newval = ( PTHRW_RW_INIT | PTHRW_INC | PTHRW_EBIT);
+		if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+			goto gotit;
+		}
+	}
+
+	newval = (lgenval + PTHRW_INC) | PTHRW_WBIT | PTHRW_SHADOW_W;
+
+	/* update lock seq and block in kernel */
+	oldval64 = (((uint64_t)rw_wc) << 32);
+	oldval64 |= lgenval;
+
+	newval64 = (((uint64_t)(rw_wc + 1)) << 32);
+	newval64 |= newval;
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lgenval, newval, 0);
+#endif
+	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
+		goto loop;
+
+retry:
+	PLOCKSTAT_RW_BLOCK(orwlock, WRITE_LOCK_PLOCKSTAT);
+retry1:
+	updateval = __psynch_rw_wrlock(orwlock, newval, ugenval, (rw_wc+1), rwlock->rw_flags);
+	if (updateval == (uint32_t)-1) {
+		error = errno;
+	} else
+		error = 0;
+
+	if (error == EINTR) {
+		goto retry1;
+	}
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x33333333, newval, updateval, 0);
+#endif
+	PLOCKSTAT_RW_BLOCKED(orwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
+	if (error != 0) {
+		OSAtomicDecrement32((volatile int32_t *)wcaddr);
+		goto out;
+	}
+
+	if (is_rw_ebit_clear(updateval)) {
+		/* kernel cannot wakeup without granting E bit */
+		abort();
+	}
+
+	/* reconcile kernel-returned state; EAGAIN means block again */
+	error = rwlock_exclusive_lockreturn(orwlock, updateval);
+	if (error == EAGAIN)
+		goto retry;
+
+	OSAtomicDecrement32((volatile int32_t *)wcaddr);
+	if (error == 0) {
+gotit:
+#if __DARWIN_UNIX03
+		rwlock->rw_owner = self;
+#endif /* __DARWIN_UNIX03 */
+		PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
+#if _KSYN_TRACE_
+		(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
+#endif
+		return(0);
+	}
+out:
+	PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
+#endif
+	return(error);
+}
+
+
+#ifdef NOTYET
+/*****************************************************************************/
+/*
+ * Yielding write lock (compiled out -- NOTYET): like wrlock but sets
+ * the Y bit (instead of W) when no writer is already queued, and
+ * blocks via __psynch_rw_yieldwrlock().
+ *
+ * NOTE(review): unlike wrlock there is no retry loop on a failed
+ * 64-bit CAS below -- the PLOCKSTAT_RW_BLOCK call is the if-body and
+ * control falls into the kernel call regardless of whether the CAS
+ * succeeded.  Looks suspect, but this code is not compiled.
+ */
+int
+_new_pthread_rwlock_yieldwrlock_np(pthread_rwlock_t * orwlock)
+{
+	uint32_t lgenval, newval, ugenval, updateval, rw_wc;
+	int error = 0;
+#if __DARWIN_UNIX03
+	pthread_t self = pthread_self();
+#endif /* __DARWIN_UNIX03 */
+	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+	uint64_t oldval64, newval64;
+	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+		/* check for static initialization */
+		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+				PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
+				return(error);
+			}
+		} else {
+			PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
+			return(EINVAL);
+		}
+	}
+
+
+	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+	} else {
+		lseqaddr = rwlock->rw_lseqaddr;
+		useqaddr = rwlock->rw_useqaddr;
+		wcaddr = rwlock->rw_wcaddr;
+	}
+
+	lgenval = *lseqaddr;
+	ugenval = *useqaddr;
+	rw_wc = *wcaddr;
+
+#if __DARWIN_UNIX03
+	if (is_rw_ebit_set(lgenval)) {
+		if (rwlock->rw_owner == self) {
+			error = EDEADLK;
+			goto out;
+		}
+	}
+#endif /* __DARWIN_UNIX03 */
+
+	/* fast path: lock completely free, grab it with one CAS */
+	if (lgenval == PTHRW_RW_INIT) {
+		newval = PTHRW_RW_INIT | PTHRW_INC | PTHRW_EBIT;
+		if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+			goto gotit;
+		}
+	}
+
+	/* set Y (yield) bit only if no writer is already queued */
+	newval = (lgenval + PTHRW_INC);
+	if ((lgenval & PTHRW_WBIT) == 0)
+		newval |= PTHRW_YBIT;
+
+	oldval64 = (((uint64_t)rw_wc) << 32);
+	oldval64 |= lgenval;
+
+	newval64 = (((uint64_t)(rw_wc + 1)) << 32);
+	newval64 |= newval;
+
+	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
+		PLOCKSTAT_RW_BLOCK(orwlock, WRITE_LOCK_PLOCKSTAT);
+retry:
+	updateval = __psynch_rw_yieldwrlock(orwlock, newval, ugenval, (rw_wc+1), rwlock->rw_flags);
+	if (updateval == (uint32_t)-1) {
+		error = errno;
+	} else
+		error = 0;
+
+	if (error == EINTR)
+		goto retry;
+
+
+	PLOCKSTAT_RW_BLOCKED(orwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
+	if (error != 0) {
+		OSAtomicDecrement32((volatile int32_t *)wcaddr);
+		goto out;
+	}
+
+	if (is_rw_ebit_clear(updateval)) {
+		/* kernel cannot wakeup without granting E bit */
+		abort();
+	}
+
+	/* reconcile kernel-returned state; EAGAIN means block again */
+	error = rwlock_exclusive_lockreturn(orwlock, updateval);
+	if (error == EAGAIN)
+		goto retry;
+
+	OSAtomicDecrement32((volatile int32_t *)wcaddr);
+	if (error == 0) {
+gotit:
+#if __DARWIN_UNIX03
+		rwlock->rw_owner = self;
+#endif /* __DARWIN_UNIX03 */
+		PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
+		return(0);
+	} else {
+		PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
+	}
+	return(error);
+out:
+	PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
+	return(error);
+}
+/**************************************************************/
+#endif /* NOTYET */
+
+int
+_new_pthread_rwlock_unlock(pthread_rwlock_t * orwlock)
+{
+ uint32_t lgenval, ugenval, rw_wc, newval, nlval, ulval;
+ int error = 0;
+ int wrlock = 0, kern_trans;
+ uint32_t updateval, bits, newbits;
+ uint32_t isupgrade = 0;
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ int retry_count = 0, retry_count1 = 0;
+ volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+ pthread_t self = NULL;
+ uint64_t threadid = 0;
+ int ubitchanged = 0, initbitset = 0, num;
+
+ if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ /* check for static initialization */
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+ PLOCKSTAT_RW_ERROR(orwlock, wrlock, error);
+ return(error);
+ }
+ } else {
+ PLOCKSTAT_RW_ERROR(orwlock, wrlock, EINVAL);
+ return(EINVAL);
+ }
+ }
+
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ } else {
+ lseqaddr = rwlock->rw_lseqaddr;
+ useqaddr = rwlock->rw_useqaddr;
+ wcaddr = rwlock->rw_wcaddr;
+ }
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
+#endif
+loop:
+ lgenval = *lseqaddr;
+ ugenval = *useqaddr;
+ rw_wc = *wcaddr;
+
+
+loop1:
+ if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
+ retry_count++;
+ sched_yield();
+ if (retry_count < 1024)
+ goto loop;
+ error = EINVAL;
+ goto out;
+ }
+ retry_count = 0;
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lgenval, ugenval, 0);
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, rw_wc, 0, 0);
+#endif
+ if (is_rw_ebit_set(lgenval)) {
+ wrlock = 1;
+#if __DARWIN_UNIX03
+ rwlock->rw_owner = (pthread_t)0;
+#endif /* __DARWIN_UNIX03 */
+ }
+
+ /* last unlock ? */
+ if((lgenval & PTHRW_COUNT_MASK) == (ugenval + PTHRW_INC)) {
+ if (OSAtomicCompareAndSwap32(ugenval, 0, (volatile int32_t *)useqaddr) != TRUE) {
+ goto loop;
+ }
+ if (OSAtomicCompareAndSwap32(lgenval, PTHRW_RW_INIT, (volatile int32_t *)lseqaddr) != TRUE) {
+ if (OSAtomicCompareAndSwap32(0, ugenval, (volatile int32_t *)useqaddr) != TRUE) {
+lp1:
+ ulval = *useqaddr;
+ nlval = ugenval+ulval;
+ if (OSAtomicCompareAndSwap32(ulval, nlval, (volatile int32_t *)useqaddr) != TRUE)
+ goto lp1;
+ }
+
+ goto loop;
+ }
+
+ goto succout;
+ }
+
+ /* do we need kernel trans? */
+
+lp11:
+ nlval = lgenval & PTHRW_COUNT_MASK;
+ if (ubitchanged == 0)
+ ulval = (ugenval + PTHRW_INC) & PTHRW_COUNT_MASK;
+ else
+ ulval = ugenval & PTHRW_COUNT_MASK;
+
+ num = rw_diffgenseq(nlval, ulval);
+ kern_trans = ( num == (rw_wc << PTHRW_COUNT_SHIFT));
+ /* if three more waiters than needed for kernel tras*/
+ if ((ubitchanged ==0) && (kern_trans == 0) && (num < (rw_wc << PTHRW_COUNT_SHIFT))) {
+ retry_count1++;
+ sched_yield();
+ if (retry_count1 < 1024)
+ goto loop;
+ }
+ retry_count1 = 0;
+
+ if (ubitchanged == 0) {
+ if (OSAtomicCompareAndSwap32(ugenval, ugenval+PTHRW_INC, (volatile int32_t *)useqaddr) != TRUE)
+ goto loop;
+ ubitchanged = 1;
+ }
+
+
+ if (kern_trans == 0) {
+ goto succout;
+ }
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 1, ugenval+PTHRW_INC, 0);
+#endif
+ initbitset = 0;
+ bits = lgenval & PTHRW_BIT_MASK;
+ newbits = bits;
+ /* if this is first unlock to kernel, notify kernel of init status */
+ if ((bits & PTHRW_RW_INIT) != 0) {
+ /* reset the initbit if present */
+ newbits &= ~PTHRW_RW_INIT;
+ initbitset = PTHRW_RW_INIT;
+ }
+ if (((bits & PTHRW_EBIT) != 0) && ((bits & PTHRW_WBIT) == 0)) {
+ /* reset E bit is no U bit is set */
+ newbits &= ~PTHRW_EBIT;
+ }
+ /* clear shadow bit, as W is going to be sent to kernel */
+ if ((bits & PTHRW_WBIT) != 0) {
+ newbits &= ~PTHRW_SHADOW_W;
+ }
+
+ /* reset L bit */
+ if (bits & PTHRW_LBIT)
+ newbits &= ~PTHRW_LBIT;
+ if (bits & PTHRW_UBIT) {
+ /* reset U and set E bit */
+ newbits &= ~PTHRW_LBIT;
+ newbits |= PTHRW_EBIT;
+ isupgrade = PTHRW_UBIT;
+ }
+
+ /* updates bits on the L */
+ newval = (lgenval & PTHRW_COUNT_MASK) | newbits;
+ if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE) {
+ /* reread the value */
+ lgenval = *lseqaddr;
+ ugenval = *useqaddr;
+ rw_wc = *wcaddr;
+ /* since lgen changed check for trans again */
+ goto lp11;
+ }
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 2, newval, 0);
+#endif
+
+ /* send upgrade bit to kernel */
+ newval |= (isupgrade | initbitset);
+ updateval = __psynch_rw_unlock(orwlock, newval, ugenval+PTHRW_INC, rw_wc, rwlock->rw_flags);
+ if (updateval == (uint32_t)-1) {
+ error = errno;
+ } else
+ error = 0;
+
+ if(error != 0) {
+ /* not sure what is the scenario */
+ if(error != EINTR)
+ goto out;
+ }
+
+ /*
+ * If the unlock is spurious return. Also if the
+ * exclusive lock is being granted, let that thread
+ * manage the status bits, otherwise stale bits exclusive
+ * bit can be set, if that thread had already unlocked.
+ */
+ if ((updateval & (PTHRW_RW_SPURIOUS | PTHRW_EBIT)) != 0) {
+ goto succout;
+ }
+
+lp2:
+ lgenval = *lseqaddr;
+
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 3, lgenval, 0);
+#endif
+ /* if the kernel antcipated seq and one on the lock are same, set the one from kernel */
+ if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
+ if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE)
+ goto lp2;
+ goto succout;
+ }
+
+ /* state bits are same? */
+ if ((lgenval & PTHRW_BIT_MASK) == (updateval & PTHRW_BIT_MASK)) {
+ /* nothing to do */
+ goto succout;
+ }
+
+ newval = ((lgenval & PTHRW_UN_BIT_MASK) << PTHRW_COUNT_SHIFT) | (updateval & PTHRW_BIT_MASK);
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 4, newval, 0);
+#endif
+ /* high bits are state on the lock; lowbits are one kernel need to set */
+ switch (newval) {
+ /* W States */
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
+ error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
+ //goto ktrans;
+ }
+ break;
+
+
+ /* L states */
+ case ((PTHRW_LBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+
+ /* Y states */
+ case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+ error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
+ //goto ktrans;
+ }
+ break;
+ case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
+ error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
+ //goto ktrans;
+ }
+ break;
+
+ /* YU states */
+ case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+ error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
+ //goto ktrans;
+ }
+ break;
+ case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
+ error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
+ //goto ktrans;
+ }
+ break;
+
+ /* E states */
+ case ((PTHRW_EBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+
+ /* WE states */
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+
+ /* WL states */
+ case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+
+ default:
+ /* illegal states */
+ self = pthread_self();
+ threadid = self->thread_id;
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 6, lgenval, 0);
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 7, updateval, 0);
+#endif
+ LIBC_ABORT("incorect state on return 0x%x: lgenval 0x%x, updateval 0x%x; threadid (0x%x)\n", newval, lgenval, updateval, (uint32_t)threadid);
+
+ };
+
+ if (error != 0)
+ goto lp2;
+succout:
+ PLOCKSTAT_RW_RELEASE(orwlock, wrlock);
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
+#endif
+ return(0);
+out:
+ PLOCKSTAT_RW_ERROR(orwlock, wrlock, error);
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
+#endif
+ return(error);
+}
+
+#ifdef NOTYET
+/*****************************************************************************/
+int
+_new_pthread_rwlock_downgrade_np(pthread_rwlock_t * orwlock)
+{
+ uint32_t lgenval, newval, ugenval, rw_wc;
+ int error = 0;
+ pthread_t self = pthread_self();
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+
+ if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ /* check for static initialization */
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+ return(error);
+ }
+ } else {
+ return(EINVAL);
+ }
+ }
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ } else {
+ lseqaddr = rwlock->rw_lseqaddr;
+ useqaddr = rwlock->rw_useqaddr;
+ wcaddr = rwlock->rw_wcaddr;
+ }
+
+loop:
+ lgenval = *lseqaddr;
+ ugenval = *useqaddr;
+ rw_wc = *wcaddr;
+
+ if ((is_rw_ebit_set(lgenval )) && (rwlock->rw_owner != self)) {
+ return(EINVAL);
+ }
+
+ if ((lgenval & PTHRW_COUNT_MASK) != ugenval) {
+
+ newval = lgenval & ~PTHRW_EBIT;
+
+ if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+#if __DARWIN_UNIX03
+ rwlock->rw_owner = 0;
+#endif /* __DARWIN_UNIX03 */
+ if (rw_wc != 0) {
+ error = __psynch_rw_downgrade(orwlock, newval, ugenval, rw_wc, rwlock->rw_flags);
+
+ }
+ return(0);
+ } else {
+ goto loop;
+ }
+ }
+ return(EINVAL);
+}
+
+
+int
+_new_pthread_rwlock_upgrade_np(pthread_rwlock_t * orwlock)
+{
+ uint32_t lgenval, newval, ugenval, ulval, updateval, rw_wc;
+ int error = 0, kern_trans;
+ pthread_t self = pthread_self();
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ uint64_t oldval64, newval64;
+ volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+ if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ /* check for static initialization */
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+ return(error);
+ }
+ } else {
+ return(EINVAL);
+ }
+ }
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ } else {
+ lseqaddr = rwlock->rw_lseqaddr;
+ useqaddr = rwlock->rw_useqaddr;
+ wcaddr = rwlock->rw_wcaddr;
+ }
+loop:
+ lgenval = *lseqaddr;
+ ugenval = *useqaddr;
+ rw_wc = *wcaddr;
+
+ if (is_rw_uebit_set(lgenval)) {
+ return(EINVAL);
+
+ }
+
+ if ((lgenval & PTHRW_COUNT_MASK) == ugenval)
+ return(EINVAL);
+
+ if (lgenval > ugenval)
+ ulval = (lgenval & PTHRW_COUNT_MASK) - (ugenval & PTHRW_COUNT_MASK);
+ else
+ ulval = (ugenval & PTHRW_COUNT_MASK) - (lgenval & PTHRW_COUNT_MASK);
+
+
+ newval = lgenval | PTHRW_UBIT;
+
+ kern_trans = 1;
+ if (rw_wc != 0) {
+ if (ulval == ((rw_wc - 1) << PTHRW_COUNT_SHIFT))
+ kern_trans = 0;
+ } else if (ulval == 1)
+ kern_trans = 0;
+
+ if (kern_trans == 0) {
+ newval = ((lgenval | PTHRW_EBIT) & ~PTHRW_LBIT);
+ } else {
+ newval = lgenval | PTHRW_UBIT;
+ }
+ if (kern_trans == 0) {
+ if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
+ goto loop;
+
+ } else {
+ newval = (lgenval + PTHRW_INC);
+
+ oldval64 = (((uint64_t)rw_wc) << 32);
+ oldval64 |= lgenval;
+
+ newval64 = (((uint64_t)(rw_wc + 1)) << 32);
+ newval64 |= newval;
+
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
+ goto loop;
+ /* kern_trans == 1 */
+ retry:
+ updateval = __psynch_rw_upgrade(orwlock, newval, ugenval, rw_wc+1, rwlock->rw_flags);
+ if (updateval == (uint32_t)-1) {
+ error = errno;
+ } else
+ error = 0;
+
+ if (error == EINTR)
+ goto retry;
+
+ if (error != 0) {
+ OSAtomicDecrement32((volatile int32_t *)wcaddr);
+ goto out;
+ }
+
+ if (is_rw_ebit_set(updateval)) {
+ /* kernel cannot wakeup without granting E bit */
+ abort();
+ }
+
+ error = rwlock_exclusive_lockreturn(orwlock, updateval);
+ if (error == EAGAIN)
+ goto retry;
+
+ OSAtomicDecrement32((volatile int32_t *)wcaddr);
+
+ }
+ if (error == 0) {
+ rwlock->rw_owner = self;
+ PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
+ return(0);
+ }
+
+out:
+ return(error);
+}
+
+int
+pthread_rwlock_tryupgrade_np(pthread_rwlock_t *orwlock)
+{
+ pthread_t self = pthread_self();
+ uint32_t lgenval, newval, ugenval, ulval, rw_wc;
+ int error = 0, kern_trans;
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+ if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+ return(error);
+ }
+ } else {
+ return(EINVAL);
+ }
+ }
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ } else {
+ lseqaddr = rwlock->rw_lseqaddr;
+ useqaddr = rwlock->rw_useqaddr;
+ wcaddr = rwlock->rw_wcaddr;
+ }
+
+loop:
+ lgenval = *lseqaddr;
+ ugenval = *useqaddr;
+ rw_wc = *wcaddr;
+
+ if (is_rw_uebit_set(lgenval)) {
+ return(EBUSY);
+ }
+
+ if ((lgenval & PTHRW_COUNT_MASK) == ugenval)
+ return(EINVAL);
+
+ if (lgenval > ugenval)
+ ulval = (lgenval & PTHRW_COUNT_MASK) - (ugenval & PTHRW_COUNT_MASK);
+ else
+ ulval = (ugenval & PTHRW_COUNT_MASK) - (lgenval & PTHRW_COUNT_MASK);
+
+
+ newval = lgenval | PTHRW_UBIT;
+
+ kern_trans = 1;
+ if (rw_wc != 0) {
+ /* there is only one reader thread */
+ if (ulval == (rw_wc - 1))
+ kern_trans = 0;
+ } else if (ulval == 1)
+ kern_trans = 0;
+
+ if (kern_trans == 0) {
+ newval = (lgenval | PTHRW_EBIT) & ~PTHRW_LBIT;
+ if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
+ goto loop;
+
+ rwlock->rw_owner = self;
+ PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
+ return(0);
+ }
+ return(EBUSY);
+}
-#define READ_LOCK_PLOCKSTAT 0
-#define WRITE_LOCK_PLOCKSTAT 1
+/* Returns true if the rwlock is held for reading by any thread or held for writing by the current thread */
+int
+pthread_rwlock_held_np(pthread_rwlock_t * orwlock)
+{
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ uint32_t lgenval, ugenval;
+ int error = 0;
+ volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+ if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+ return(0);
+ }
+ } else {
+ return(-1);
+ }
+ }
+
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ } else {
+ lseqaddr = rwlock->rw_lseqaddr;
+ useqaddr = rwlock->rw_useqaddr;
+ wcaddr = rwlock->rw_wcaddr;
+ }
+
+ lgenval = *lseqaddr;
+ ugenval = *useqaddr;
+
+ if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK))
+ return(0);
+
+ return(1);
+}
-#define BLOCK_FAIL_PLOCKSTAT 0
-#define BLOCK_SUCCESS_PLOCKSTAT 1
+/* Returns true if the rwlock is held for reading by any thread */
+int
+pthread_rwlock_rdheld_np(pthread_rwlock_t * orwlock)
+{
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ uint32_t lgenval;
+ int error = 0;
+ volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+ if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+ return(0);
+ }
+ } else {
+ return(-1);
+ }
+ }
+
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ } else {
+ lseqaddr = rwlock->rw_lseqaddr;
+ useqaddr = rwlock->rw_useqaddr;
+ wcaddr = rwlock->rw_wcaddr;
+ }
-/* maximum number of times a read lock may be obtained */
-#define MAX_READ_LOCKS (INT_MAX - 1)
+ lgenval = *lseqaddr;
+
+ if (is_rw_ebit_set(lgenval)) {
+ return(0);
+ }
+ return(0);
+}
+/* Returns true if the rwlock is held for writing by the current thread */
+int
+pthread_rwlock_wrheld_np(pthread_rwlock_t * orwlock)
+{
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ pthread_t self;
+ uint32_t lgenval;
+ int error = 0;
+ volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+ if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+ return(0);
+ }
+ } else {
+ return(-1);
+ }
+ }
+
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ } else {
+ lseqaddr = rwlock->rw_lseqaddr;
+ useqaddr = rwlock->rw_useqaddr;
+ wcaddr = rwlock->rw_wcaddr;
+ }
-#ifndef BUILDING_VARIANT /* [ */
+ self = pthread_self();
+ lgenval = *lseqaddr;
+ if ((is_rw_ebit_set(lgenval)) && (rwlock->rw_owner == self)) {
+ return(1);
+ }
+ return(0);
+}
+/**************************************************************/
+#endif /* NOTYET */
-int
-pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
+static int
+rwlock_unlock_action_onread(pthread_rwlock_t * orwlock, uint32_t updateval)
{
- attr->sig = _PTHREAD_RWLOCK_ATTR_SIG;
- attr->pshared = _PTHREAD_DEFAULT_PSHARED;
- return (0);
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ int error = 0;
+ uint32_t lgenval, newval;
+ volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+ pthread_t self;
+ uint64_t threadid;
+
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ } else {
+ lseqaddr = rwlock->rw_lseqaddr;
+ useqaddr = rwlock->rw_useqaddr;
+ wcaddr = rwlock->rw_wcaddr;
+ }
+
+ lgenval = *lseqaddr;
+
+lp2:
+ lgenval = *lseqaddr;
+
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 3, lgenval, 0);
+#endif
+	/* if the kernel anticipated seq and the one on the lock are the same, set the one from the kernel */
+ if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
+ if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE)
+ goto lp2;
+ goto succout;
+ }
+
+ /* state bits are same? */
+ if ((lgenval & PTHRW_BIT_MASK) == (updateval & PTHRW_BIT_MASK)) {
+ /* nothing to do */
+ goto succout;
+ }
+
+ newval = ((lgenval & PTHRW_UN_BIT_MASK) << PTHRW_COUNT_SHIFT) | (updateval & PTHRW_BIT_MASK);
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 4, newval, 0);
+#endif
+ /* high bits are state on the lock; lowbits are one kernel need to set */
+ switch (newval) {
+ /* W States */
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
+ error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
+ //goto ktrans;
+ }
+ break;
+
+
+ /* L states */
+ case ((PTHRW_LBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+
+ /* Y states */
+ case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+ error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
+ //goto ktrans;
+ }
+ break;
+ case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
+ error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
+ //goto ktrans;
+ }
+ break;
+
+ /* YU states */
+ case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+ error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
+ //goto ktrans;
+ }
+ break;
+ case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
+ error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
+ //goto ktrans;
+ }
+ break;
+
+ /* E states */
+ case ((PTHRW_EBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+
+ /* WE states */
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action2(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+
+ /* WL states */
+ case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+ case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
+ error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+ }
+ break;
+
+ default:
+ /* illegal states */
+ self = pthread_self();
+ threadid = self->thread_id;
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 6, lgenval, 0);
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 7, updateval, 0);
+#endif
+ LIBC_ABORT("incorect state on return 0x%x: lgenval 0x%x, updateval 0x%x; threadid (0x%x)\n", newval, lgenval, updateval, (uint32_t)threadid);
+ };
+
+ if (error != 0)
+ goto lp2;
+
+succout:
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_NONE, lgenval, newval, 0, 0, 0);
+#endif
+ return(0);
}
-int
-pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
+
+static uint32_t
+modbits(uint32_t lgenval, uint32_t updateval)
{
- attr->sig = _PTHREAD_NO_SIG; /* Uninitialized */
- attr->pshared = 0;
- return (0);
+ uint32_t lval = lgenval & PTHRW_BIT_MASK;
+ uint32_t uval = updateval & PTHRW_BIT_MASK;
+ uint32_t rval, nlval;
+
+ nlval = (lval | uval);
+ if ((uval & PTHRW_EBIT) == 0)
+ nlval &= ~PTHRW_EBIT;
+ if ((nlval & (PTHRW_WBIT | PTHRW_YBIT)) == (PTHRW_WBIT | PTHRW_YBIT))
+ nlval &= ~PTHRW_YBIT;
+ /* no new writers and kernel resets w bit, reset W bit on the lock */
+ if (((nlval & (PTHRW_WBIT | PTHRW_SHADOW_W)) == PTHRW_WBIT) && ((updateval & PTHRW_WBIT) == 0))
+ nlval &= ~PTHRW_WBIT;
+
+ rval = (lgenval & PTHRW_COUNT_MASK) | nlval;
+ return(rval);
}
-int
-pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr,
- int *pshared)
+static int
+rwlock_unlock_action1(pthread_rwlock_t * orwlock, uint32_t lgenval, uint32_t updateval)
{
- if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG)
- {
- *pshared = (int)attr->pshared;
- return (0);
- } else
- {
- return (EINVAL); /* Not an initialized 'attribute' structure */
- }
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ int error = 0;
+ uint32_t newval;
+ volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ } else {
+ lseqaddr = rwlock->rw_lseqaddr;
+ useqaddr = rwlock->rw_useqaddr;
+ wcaddr = rwlock->rw_wcaddr;
+ }
+
+ newval = modbits(lgenval, updateval);
+ if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
+ error = EINVAL;
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_NONE, lgenval, newval, 0, 0, 0);
+#endif
+ return(error);
}
-/* Temp: untill pshared is fixed right */
-#ifdef PR_5243343
-/* 5243343 - temporary hack to detect if we are running the conformance test */
-extern int PR_5243343_flag;
-#endif /* PR_5243343 */
+static int
+rwlock_unlock_action2(pthread_rwlock_t * orwlock, uint32_t lgenval, uint32_t updateval)
+{
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ uint32_t newval;
+ volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ } else {
+ lseqaddr = rwlock->rw_lseqaddr;
+ useqaddr = rwlock->rw_useqaddr;
+ wcaddr = rwlock->rw_wcaddr;
+ }
-int
-pthread_rwlockattr_setpshared(pthread_rwlockattr_t * attr, int pshared)
+ newval = modbits(lgenval, updateval);
+ if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+ /* roundtrip kernel */
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT2 | DBG_FUNC_NONE, lgenval, newval, 0, 0, 0);
+#endif
+ (void) __psynch_rw_unlock2(orwlock, lgenval, *useqaddr, *wcaddr, rwlock->rw_flags);
+ return(0);
+ }
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT2 | DBG_FUNC_NONE, 0xffffffff, 0, 0, 0, 0);
+#endif
+
+ return(EINVAL);
+}
+
+/* This is used when an exclusive write lock of any kind is being granted. For unlock thread, it needs to try to set the bit, if not move on */
+static int
+rwlock_unlock_action_k(pthread_rwlock_t * orwlock, uint32_t lgenval, uint32_t updateval)
{
- if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG)
- {
-#if __DARWIN_UNIX03
-#ifdef PR_5243343
- if (( pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED && PR_5243343_flag))
-#else /* !PR_5243343 */
- if (( pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
-#endif /* PR_5243343 */
-#else /* __DARWIN_UNIX03 */
- if ( pshared == PTHREAD_PROCESS_PRIVATE)
-#endif /* __DARWIN_UNIX03 */
- {
- attr->pshared = pshared ;
- return (0);
- } else
- {
- return (EINVAL); /* Invalid parameter */
- }
- } else
- {
- return (EINVAL); /* Not an initialized 'attribute' structure */
- }
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ uint32_t newval;
+ volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ } else {
+ lseqaddr = rwlock->rw_lseqaddr;
+ useqaddr = rwlock->rw_useqaddr;
+ wcaddr = rwlock->rw_wcaddr;
+ }
+
+ newval = modbits(lgenval, updateval);
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTK | DBG_FUNC_NONE, lgenval, updateval, newval, 0, 0);
+#endif
+	/* try to set; if not, not a problem, as the thread taking exclusive will take care of the discrepancy */
+
+ if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTK | DBG_FUNC_NONE, 0x55555555, lgenval, newval, 0, 0);
+#endif
+
+ } else {
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTK | DBG_FUNC_NONE, 0xAAAAAAAA, lgenval, newval, 0, 0);
+#endif
+
+ }
+
+ return(0);
+}
+
+static int
+rwlock_exclusive_lockreturn(pthread_rwlock_t * orwlock, uint32_t updateval)
+{
+	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+	uint32_t lgenval, newval;
+	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+	pthread_t self;
+	uint64_t threadid;
+
+	int error = 0;
+
+	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+	} else {
+		lseqaddr = rwlock->rw_lseqaddr;
+		useqaddr = rwlock->rw_useqaddr;
+		wcaddr = rwlock->rw_wcaddr;
+	}
+
+lp2:
+	lgenval = *lseqaddr;
+
+	/* if the kernel anticipated seq and the one on the lock are the same, set the one from the kernel */
+	if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
+		if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE)
+			goto lp2;
+		goto out;
+	}
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, lgenval, updateval, 1, 0, 0);
+#endif
+	/* state bits are same? */
+	if ((lgenval & PTHRW_BIT_MASK) == (updateval & PTHRW_BIT_MASK)) {
+		/* nothing to do */
+		goto out;
+	}
+
+
+	newval = ((lgenval & PTHRW_UN_BIT_MASK) << PTHRW_COUNT_SHIFT) | (updateval & PTHRW_BIT_MASK);
+
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, newval, 0, 2, 0, 0);
+#endif
+	/* high bits are state on the lock; lowbits are one kernel need to set */
+	switch (newval) {
+		/* W States */
+		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT)) : {
+				error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+			}
+			break;
+		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+				error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+			}
+			break;
+		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
+				error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+			}
+			break;
+		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
+				error = EAGAIN;
+			}
+			break;
+
+
+		/* All L states illegal here */
+
+		/* Y states */
+		case (PTHRW_YBIT << PTHRW_COUNT_SHIFT) : {
+				error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+			} break;	/* break was missing: fallthrough clobbered error with EAGAIN */
+		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+				error = EAGAIN;
+			}
+			break;
+		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
+				error = EAGAIN;
+			}
+			break;
+
+		/* YU states */
+		case ((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) : {
+				error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+			}
+			break;
+		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+				error = EAGAIN;
+			}
+			break;
+
+		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
+				error = EAGAIN;
+			}
+			break;
+
+		/* E states */
+		case ((PTHRW_EBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+				error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+			}
+			break;
+
+		/* WE states */
+		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
+				error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+			}
+			break;
+		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
+				error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+			}
+			break;
+		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
+				error = rwlock_unlock_action1(orwlock, lgenval, updateval);
+			}
+			break;
+
+		/* All WL states are illegal*/
+
+		default:
+			/* illegal states */
+			self = pthread_self();
+			threadid = self->thread_id;
+#if _KSYN_TRACE_
+			(void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 6, lgenval, 0);
+			(void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 7, updateval, 0);
+#endif
+			LIBC_ABORT("rwlock_exclusive_lockreturn: incorect state on return 0x%x: lgenval 0x%x, updateval 0x%x; threadid (0x%x)\n", newval, lgenval, updateval, (uint32_t)threadid);
+	};
+
+	if (error == EINVAL)
+		goto lp2;
+out:
+#if _KSYN_TRACE_
+	(void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, error, 0, 0xffffffff, 0, 0);
+#endif
+	return(error);
+}
+
+/* returns are not bit shifted */
+static int
+rw_diffgenseq(uint32_t x, uint32_t y)
+{
+ uint32_t lx = (x & PTHRW_COUNT_MASK);
+ uint32_t ly = (y &PTHRW_COUNT_MASK);
+
+ if (lx > ly) {
+ return(lx-ly);
+ } else {
+ return((PTHRW_MAX_READERS - y) + lx + PTHRW_INC);
+ }
}
+#endif /* i386 || x86_64 ] */
+
+
#endif /* !BUILDING_VARIANT ] */
int
pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
+#if defined(__i386__) || defined(__x86_64__) || defined(__DARWIN_UNIX03)
int ret;
+#endif /* __i386__ || __x86_64__ */
+
+
+#if defined(__i386__) || defined(__x86_64__)
+ if ((usenew_impl != 0)) {
+ return(_new_pthread_rwlock_destroy(rwlock));
+ }
+#endif /* __i386__ || __x86_64__ */
if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
return(EINVAL);
- } else {
+ }
+#if defined(__i386__) || defined(__x86_64__)
+ else if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ ret = _new_pthread_rwlock_destroy(rwlock);
+ return(ret);
+ }
+#endif /* __i386__ || __x86_64__ */
+ else {
#if __DARWIN_UNIX03
/* grab the monitor lock */
if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0)
pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
int ret;
+
+#if defined(__i386__) || defined(__x86_64__)
+ if ((usenew_impl != 0)) {
+ return(_new_pthread_rwlock_init(rwlock, attr));
+ }
+#endif /* __i386__ || __x86_64__ */
+
#if __DARWIN_UNIX03
if (attr && (attr->sig != _PTHREAD_RWLOCK_ATTR_SIG)) {
return(EINVAL);
}
+#endif /* __DARWIN_UNIX03 */
+
+#if defined(__i386__) || defined(__x86_64__)
+ if ((attr != NULL) && (attr->pshared == PTHREAD_PROCESS_SHARED)) {
+ ret = _new_pthread_rwlock_init(rwlock, attr);
+ return(ret);
+ }
+#endif /* __i386__ || __x86_64__ */
+
+#if __DARWIN_UNIX03
/* if already inited check whether it is in use, then return EBUSY */
if ((rwlock->sig == _PTHREAD_RWLOCK_SIG) && (rwlock->state !=0 )) {
return(EBUSY);
pthread_t self = pthread_self();
#endif
+#if defined(__i386__) || defined(__x86_64__)
+ if ((usenew_impl != 0)) {
+ return(_new_pthread_rwlock_rdlock(rwlock));
+ }
+#endif /* __i386__ || __x86_64__ */
+
if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, EINVAL);
return(EINVAL);
}
+#if defined(__i386__) || defined(__x86_64__)
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ ret = _new_pthread_rwlock_rdlock(rwlock);
+ return(ret);
+ }
+#endif /* __i386__ || __x86_64__ */
/* grab the monitor lock */
if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
int ret;
-#if __DARWIN_UNIX03
- pthread_t self = pthread_self();
-#endif
+
+#if defined(__i386__) || defined(__x86_64__)
+ if ((usenew_impl != 0)) {
+ return(_new_pthread_rwlock_tryrdlock(rwlock));
+ }
+#endif /* __i386__ || __x86_64__ */
/* check for static initialization */
if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, EINVAL);
return(EINVAL);
}
+#if defined(__i386__) || defined(__x86_64__)
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ ret = _new_pthread_rwlock_tryrdlock(rwlock);
+ return(ret);
+ }
+#endif /* __i386__ || __x86_64__ */
+
/* grab the monitor lock */
if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
+#if defined(__i386__) || defined(__x86_64__)
+ if ((usenew_impl != 0)) {
+ return(_new_pthread_rwlock_trywrlock(rwlock));
+ }
+#endif /* __i386__ || __x86_64__ */
+
/* check for static initialization */
if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
return(EINVAL);
}
+
+#if defined(__i386__) || defined(__x86_64__)
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ ret = _new_pthread_rwlock_trywrlock(rwlock);
+ return(ret);
+ }
+#endif /* __i386__ || __x86_64__ */
+
/* grab the monitor lock */
if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
{
int ret;
int writer = (rwlock < 0) ? 1:0;
-#if __DARWIN_UNIX03
- pthread_t self = pthread_self();
-#endif /* __DARWIN_UNIX03 */
+
+#if defined(__i386__) || defined(__x86_64__)
+ if ((usenew_impl != 0)) {
+ return(_new_pthread_rwlock_unlock(rwlock));
+ }
+#endif /* __i386__ || __x86_64__ */
if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
PLOCKSTAT_RW_ERROR(rwlock, writer, EINVAL);
return(EINVAL);
}
+
+#if defined(__i386__) || defined(__x86_64__)
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ ret = _new_pthread_rwlock_unlock(rwlock);
+ return(ret);
+ }
+#endif /* __i386__ || __x86_64__ */
+
+
/* grab the monitor lock */
if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
PLOCKSTAT_RW_ERROR(rwlock, writer, ret);
pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
+#if defined(__i386__) || defined(__x86_64__)
+ if ((usenew_impl != 0)) {
+ return(_new_pthread_rwlock_wrlock(rwlock));
+ }
+#endif /* __i386__ || __x86_64__ */
+
/* check for static initialization */
if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
return(EINVAL);
}
+
+#if defined(__i386__) || defined(__x86_64__)
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ ret = _new_pthread_rwlock_wrlock(rwlock);
+ return(ret);
+ }
+#endif /* __i386__ || __x86_64__ */
+
+
/* grab the monitor lock */
if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
return(ret);
- }
+ }
#if __DARWIN_UNIX03
if ((rwlock->state < 0) && (rwlock->owner == self)) {
return(ret);
}
-
int i, j;
void *param;
+ /* destroy dynamic keys first */
for (j = 0; j < PTHREAD_DESTRUCTOR_ITERATIONS; j++)
{
- /* The first slot is reserved for pthread_self() */
-
- for (i = __pthread_tsd_first; i <= __pthread_tsd_max; i++)
+ for (i = __pthread_tsd_start; i <= self->max_tsd_key; i++)
{
if (_pthread_keys[i].created && (param = self->tsd[i]))
{
}
}
}
- for (i = __pthread_tsd_start; i <= self->max_tsd_key; i++)
+ }
+
+ self->max_tsd_key = 0;
+
+ /*
+ * The first slot is reserved for pthread_self() and there is no cleanup on it.
+ * Destroy rest of the static keys next only if any destructors registered.
+ */
+ for (j = 0; j < PTHREAD_DESTRUCTOR_ITERATIONS; j++) {
+ for (i = __pthread_tsd_first; i <= __pthread_tsd_max; i++)
{
if (_pthread_keys[i].created && (param = self->tsd[i]))
{
}
}
}
- self->max_tsd_key = 0;
}
int
#define __PTHREAD_WORKQ_SIZE__ 128
#define __PTHREAD_WORKQ_ATTR_SIZE__ 60
-#define PTHEAD_WRKQUEUE_SIG 0xBEBEBEBE
-#define PTHEAD_WRKQUEUE_ATTR_SIG 0xBEBEBEBE
+#define PTHREAD_WORKQUEUE_SIG 0xBEBEBEBE
+#define PTHREAD_WORKQUEUE_ATTR_SIG 0xBEBEBEBE
#ifndef __POSIX_LIB__
typedef struct { unsigned int sig; char opaque[__PTHREAD_WORKQ_SIZE__];} *pthread_workqueue_t;
typedef struct { unsigned int sig; char opaque[__PTHREAD_WORKQ_ATTR_SIZE__]; } pthread_workqueue_attr_t;
#endif
typedef void * pthread_workitem_handle_t;
+/* Kernel expected target concurrency of the workqueue clients for the three priority queues */
+
+#define WORKQ_HIGH_PRIOQUEUE 0
+#define WORKQ_DEFAULT_PRIOQUEUE 1
+#define WORKQ_LOW_PRIOQUEUE 2
+
+#define WORKQ_NUM_PRIOQUEUE 3
+
+extern __int32_t workq_targetconc[WORKQ_NUM_PRIOQUEUE];
__BEGIN_DECLS
int pthread_workqueue_init_np(void);
int pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attr);
int pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr);
int pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qprio);
+/* WORKQ_HIGH/DEFAULT/LOW_PRIOQUEUE are the only valid values */
int pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio);
+int pthread_workqueue_attr_getovercommit_np(const pthread_workqueue_attr_t * attr, int * ocommp);
+int pthread_workqueue_attr_setovercommit_np(pthread_workqueue_attr_t * attr, int ocomm);
-#ifdef NOTYET
-/* Following attributes not supported yet */
-int pthread_workqueue_attr_getstacksize_np(const pthread_workqueue_attr_t * attr, size_t * stacksizep);
-int pthread_workqueue_attr_setstacksize_np(pthread_workqueue_attr_t * attr, size_t stacksize);
-int pthread_workqueue_attr_getthreadtimeshare_np(const pthread_workqueue_attr_t * attr, int * istimesahrep);
-int pthread_workqueue_attr_settthreadtimeshare_np(pthread_workqueue_attr_t * attr, int istimeshare);
-int pthread_workqueue_attr_getthreadimportance_np(const pthread_workqueue_attr_t * attr, int * importancep);
-int pthread_workqueue_attr_settthreadimportance_np(pthread_workqueue_attr_t * attr, int importance);
-int pthread_workqueue_attr_getthreadaffinity_np(const pthread_workqueue_attr_t * attr, int * affinityp);
-int pthread_workqueue_attr_settthreadaffinity_np(pthread_workqueue_attr_t * attr, int affinity);
-#endif
int pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr);
-int pthread_workqueue_destroy_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg);
-int pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep);
-int pthread_workqueue_removeitem_np(pthread_workqueue_t workq, pthread_workitem_handle_t itemhandle);
-int pthread_workqueue_addbarrier_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg, int waitforcallback, pthread_workitem_handle_t *itemhandlep);
-int pthread_workqueue_suspend_np(pthread_workqueue_t workq);
-int pthread_workqueue_resume_np(pthread_workqueue_t workq);
-
+int pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp);
+/* If the queue value is WORKQ_NUM_PRIOQUEUE, the request for concurrency is for all queues */
+int pthread_workqueue_requestconcurrency_np(int queue, int concurrency);
+int pthread_workqueue_getovercommit_np(pthread_workqueue_t workq, unsigned int *ocommp);
+/*
+ * If the arg is non zero, it enables kill on current thread.
+ * If the arg is zero, it disables kill on current thread.
+ */
+int __pthread_workqueue_setkill(int);
__END_DECLS
#endif /* _POSIX_PTHREAD_WORKQUEUE_H */
#elif defined(__arm__)
-#define LEAF(name) \
- .align 2 @\
- .globl name @\
-name: @\
- .set __framesize,0
+#import <architecture/arm/asm_help.h>
-LEAF(__sp)
+LEAF(__sp,0)
mov r0,sp
bx lr
/*
* void *_adjust_sp(void *sp)
*/
-LEAF(__adjust_sp)
+LEAF(__adjust_sp,0)
sub r0, r0, #0x100
bx lr
/*
- * Copyright (c) 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <syslog.h>
#include <sys/sysctl.h>
#include <sys/param.h>
+#include <unistd.h>
extern void __abort(void) __dead2;
s1++;
}
- dstlen++;
-
/* Append the string. */
while (len > 0)
{
s1++;
len--;
}
+ *s1 = 0;
return dest;
-Index: fclose.c
-===================================================================
-RCS file: /cvs/root/Libc/stdio/FreeBSD/fclose.c,v
-retrieving revision 1.2
-diff -u -d -b -w -p -r1.2 fclose.c
---- fclose.c 2003/05/20 22:22:40 1.2
-+++ fclose.c 2005/02/14 21:57:28
-@@ -53,6 +53,10 @@ fclose(FILE *fp)
+--- fclose.c.orig 2009-02-15 03:11:22.000000000 -0800
++++ fclose.c 2009-02-15 19:01:59.000000000 -0800
+@@ -53,6 +53,13 @@ fclose(FILE *fp)
{
int r;
++ if (!__sdidinit)
++ __sinit();
++
+ if (fp == NULL) {
+ errno = EFAULT;
+ return (EOF);
if (fp->_flags == 0) { /* not open! */
errno = EBADF;
return (EOF);
+@@ -69,7 +76,7 @@ fclose(FILE *fp)
+ FREELB(fp);
+ fp->_file = -1;
+ fp->_r = fp->_w = 0; /* Mess up if reaccessed. */
+- fp->_flags = 0; /* Release this FILE for reuse. */
++ __sfprelease(fp); /* Release this FILE for reuse. */
+ FUNLOCKFILE(fp);
+ return (r);
+ }
--- /dev/null
+--- fdopen.c.orig 2009-02-15 03:11:22.000000000 -0800
++++ fdopen.c 2009-02-15 17:52:46.000000000 -0800
+@@ -34,6 +34,15 @@
+ * SUCH DAMAGE.
+ */
+
++#ifdef VARIANT_DARWINEXTSN
++#define _DARWIN_UNLIMITED_STREAMS
++#define COUNT 0
++#elif defined(VARIANT_LEGACY)
++#define COUNT 0
++#else
++#define COUNT 1
++#endif
++
+ #if defined(LIBC_SCCS) && !defined(lint)
+ static char sccsid[] = "@(#)fdopen.c 8.1 (Berkeley) 6/4/93";
+ #endif /* LIBC_SCCS and not lint */
+@@ -73,7 +82,7 @@ fdopen(fd, mode)
+ return (NULL);
+ }
+
+- if ((fp = __sfp()) == NULL)
++ if ((fp = __sfp(COUNT)) == NULL)
+ return (NULL);
+ fp->_flags = flags;
+ /*
---- /Volumes/XDisk/tmp/Libc/stdio/FreeBSD/findfp.c.orig 2004-05-22 08:19:41.000000000 -0700
-+++ /Volumes/XDisk/tmp/Libc/stdio/FreeBSD/findfp.c 2004-10-24 17:08:30.000000000 -0700
-@@ -47,6 +47,7 @@
+--- findfp.c.orig 2009-02-15 03:11:22.000000000 -0800
++++ findfp.c 2009-02-15 18:45:19.000000000 -0800
+@@ -46,7 +46,10 @@ __FBSDID("$FreeBSD: src/lib/libc/stdio/f
+ #include <stdio.h>
#include <stdlib.h>
#include <string.h>
++#include <libkern/OSAtomic.h>
++#include <errno.h>
+#include <pthread.h>
#include <spinlock.h>
#include "libc_private.h"
-@@ -62,12 +63,13 @@
+@@ -62,12 +65,19 @@ int __sdidinit;
{0}, __sFX + file}
/* p r w flags file _bf z cookie close read seek write */
/* _ub _extra */
+#define __sFXInit {0, PTHREAD_MUTEX_INITIALIZER}
++ /* set counted */
++#define __sFXInit3 {0, PTHREAD_MUTEX_INITIALIZER, 0, 0, 0, 1}
/* the usual - (stdin + stdout + stderr) */
++
++static int __scounted; /* streams counted against STREAM_MAX */
++static int __stream_max;
++
static FILE usual[FOPEN_MAX - 3];
static struct __sFILEX usual_extra[FOPEN_MAX - 3];
static struct glue uglue = { NULL, FOPEN_MAX - 3, usual };
-static struct __sFILEX __sFX[3];
-+static struct __sFILEX __sFX[3] = {__sFXInit, __sFXInit, __sFXInit};
++static struct __sFILEX __sFX[3] = {__sFXInit3, __sFXInit3, __sFXInit3};
/*
* We can't make this 'static' until 6.0-current due to binary
-@@ -113,7 +115,7 @@
+@@ -113,7 +123,7 @@ moreglue(n)
{
struct glue *g;
static FILE empty;
FILE *p;
struct __sFILEX *fx;
-@@ -179,6 +181,7 @@
+@@ -139,7 +149,7 @@ moreglue(n)
+ * Find a free FILE for fopen et al.
+ */
+ FILE *
+-__sfp()
++__sfp(int count)
+ {
+ FILE *fp;
+ int n;
+@@ -147,6 +157,15 @@ __sfp()
+
+ if (!__sdidinit)
+ __sinit();
++
++ if (count) {
++ if (__scounted >= __stream_max) {
++ THREAD_UNLOCK();
++ errno = EMFILE;
++ return NULL;
++ }
++ OSAtomicIncrement32(&__scounted);
++ }
+ /*
+ * The list must be locked because a FILE may be updated.
+ */
+@@ -179,12 +198,27 @@ found:
fp->_lb._base = NULL; /* no line buffer */
fp->_lb._size = 0;
/* fp->_lock = NULL; */ /* once set always set (reused) */
+ fp->_extra->fl_mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
fp->_extra->orientation = 0;
++ fp->_extra->counted = count ? 1 : 0;
memset(&fp->_extra->mbstate, 0, sizeof(mbstate_t));
return (fp);
+ }
+
+ /*
++ * Mark as free and update count as needed
++ */
++__private_extern__ void
++__sfprelease(FILE *fp)
++{
++ if (fp->_extra->counted) {
++ OSAtomicDecrement32(&__scounted);
++ fp->_extra->counted = 0;
++ }
++ fp->_flags = 0;
++}
++
++/*
+ * XXX. Force immediate allocation of internal memory. Not used by stdio,
+ * but documented historically for certain applications. Bad applications.
+ */
+@@ -244,6 +278,8 @@ __sinit()
+ /* Make sure we clean up on exit. */
+ __cleanup = _cleanup; /* conservative */
+ __sdidinit = 1;
++ __stream_max = sysconf(_SC_STREAM_MAX);
++ __scounted = 3; /* std{in,out,err} already exists */
+ }
+ THREAD_UNLOCK();
+ }
--- /dev/null
+--- fopen.c.orig 2009-02-15 03:11:22.000000000 -0800
++++ fopen.c 2009-02-15 17:53:37.000000000 -0800
+@@ -34,6 +34,15 @@
+ * SUCH DAMAGE.
+ */
+
++#ifdef VARIANT_DARWINEXTSN
++#define _DARWIN_UNLIMITED_STREAMS
++#define COUNT 0
++#elif defined(VARIANT_LEGACY)
++#define COUNT 0
++#else
++#define COUNT 1
++#endif
++
+ #if defined(LIBC_SCCS) && !defined(lint)
+ static char sccsid[] = "@(#)fopen.c 8.1 (Berkeley) 6/4/93";
+ #endif /* LIBC_SCCS and not lint */
+@@ -61,10 +70,10 @@ fopen(file, mode)
+
+ if ((flags = __sflags(mode, &oflags)) == 0)
+ return (NULL);
+- if ((fp = __sfp()) == NULL)
++ if ((fp = __sfp(COUNT)) == NULL)
+ return (NULL);
+ if ((f = _open(file, oflags, DEFFILEMODE)) < 0) {
+- fp->_flags = 0; /* release */
++ __sfprelease(fp); /* release */
+ return (NULL);
+ }
+ fp->_file = f;
--- /dev/null
+--- fread.c.orig 2008-08-29 21:58:50.000000000 -0700
++++ fread.c 2008-09-02 02:18:06.000000000 -0700
+@@ -55,7 +55,7 @@ fread(buf, size, count, fp)
+ {
+ size_t resid;
+ char *p;
+- int r;
++ int r, ret;
+ size_t total;
+
+ /*
+@@ -71,21 +71,70 @@ fread(buf, size, count, fp)
+ fp->_r = 0;
+ total = resid;
+ p = buf;
++ /* first deal with anything left in buffer, plus any ungetc buffers */
+ while (resid > (r = fp->_r)) {
+ (void)memcpy((void *)p, (void *)fp->_p, (size_t)r);
+ fp->_p += r;
+ /* fp->_r = 0 ... done in __srefill */
+ p += r;
+ resid -= r;
+- if (__srefill(fp)) {
++ if ((ret = __srefill0(fp)) > 0)
++ break;
++ else if (ret) {
+ /* no more input: return partial result */
+ FUNLOCKFILE(fp);
+ return ((total - resid) / size);
+ }
+ }
+- (void)memcpy((void *)p, (void *)fp->_p, resid);
+- fp->_r -= resid;
+- fp->_p += resid;
++ /*
++ * 5980080: don't use optimization if __SMBF not set (meaning setvbuf
++ * was called, and the buffer belongs to the user).
++ * 6180417: but for unbuffered (__SMBF is not set), so specifically
++ * test for it.
++ */
++ if ((fp->_flags & (__SMBF | __SNBF)) && resid > fp->_bf._size) {
++ struct __sbuf save;
++ size_t n;
++
++ save = fp->_bf;
++ fp->_bf._base = p;
++ fp->_bf._size = resid;
++ while (fp->_bf._size > 0) {
++ if ((ret = __srefill1(fp)) != 0) {
++ /* no more input: return partial result */
++ resid = fp->_bf._size;
++ fp->_bf = save;
++ fp->_p = fp->_bf._base;
++ /* fp->_r = 0; already set in __srefill1 */
++ FUNLOCKFILE(fp);
++ return ((total - resid) / size);
++ }
++ fp->_bf._base += fp->_r;
++ fp->_bf._size -= fp->_r;
++ }
++ fp->_bf = save;
++ n = fp->_bf._size * ((resid - 1) / fp->_bf._size);
++ r = resid - n;
++ (void)memcpy((void *)fp->_bf._base, (void *)(p + n), (size_t)r);
++ fp->_p = fp->_bf._base + r;
++ fp->_r = 0;
++ } else {
++ while (resid > (r = fp->_r)) {
++ (void)memcpy((void *)p, (void *)fp->_p, (size_t)r);
++ fp->_p += r;
++ /* fp->_r = 0 ... done in __srefill */
++ p += r;
++ resid -= r;
++ if (__srefill1(fp)) {
++ /* no more input: return partial result */
++ FUNLOCKFILE(fp);
++ return ((total - resid) / size);
++ }
++ }
++ (void)memcpy((void *)p, (void *)fp->_p, resid);
++ fp->_r -= resid;
++ fp->_p += resid;
++ }
+ FUNLOCKFILE(fp);
+ return (count);
+ }
-Index: freopen.c
-===================================================================
-RCS file: /cvs/root/Libc/stdio/FreeBSD/freopen.c,v
-retrieving revision 1.3
-diff -u -d -b -w -p -r1.3 freopen.c
---- freopen.c 2004/11/25 19:38:34 1.3
-+++ freopen.c 2005/01/25 18:01:26
+--- freopen.c.orig 2009-02-15 03:11:22.000000000 -0800
++++ freopen.c 2009-02-15 14:26:16.000000000 -0800
@@ -99,7 +99,7 @@ freopen(file, mode, fp)
(oflags & O_ACCMODE)) {
fclose(fp);
}
/* Get a new descriptor to refer to the new file. */
+@@ -191,7 +200,7 @@ finish:
+ memset(&fp->_extra->mbstate, 0, sizeof(mbstate_t));
+
+ if (f < 0) { /* did not get it after all */
+- fp->_flags = 0; /* set it free */
++ __sfprelease(fp); /* set it free */
+ errno = sverrno; /* restore in case _close clobbered */
+ FUNLOCKFILE(fp);
+ return (NULL);
--- /dev/null
+--- funopen.c.orig 2009-02-15 03:11:22.000000000 -0800
++++ funopen.c 2009-02-15 14:02:06.000000000 -0800
+@@ -67,7 +67,8 @@ funopen(cookie, readfn, writefn, seekfn,
+ else
+ flags = __SRW; /* read-write */
+ }
+- if ((fp = __sfp()) == NULL)
++ /* funopen is not covered in SUSv3, so never count the streams */
++ if ((fp = __sfp(0)) == NULL)
+ return (NULL);
+ fp->_flags = flags;
+ fp->_file = -1;
---- local.h.orig 2004-11-25 11:38:35.000000000 -0800
-+++ local.h 2005-02-23 17:26:30.000000000 -0800
+--- local.h.orig 2009-02-15 03:11:22.000000000 -0800
++++ local.h 2009-02-15 18:12:54.000000000 -0800
@@ -37,8 +37,11 @@
* $FreeBSD: src/lib/libc/stdio/local.h,v 1.26 2004/07/16 05:52:51 tjr Exp $
*/
#include <string.h>
#include <wchar.h>
-@@ -53,8 +56,8 @@
+@@ -53,12 +56,15 @@ extern fpos_t _sseek(FILE *, fpos_t, int
extern int _ftello(FILE *, fpos_t *);
extern int _fseeko(FILE *, off_t, int, int);
extern int __fflush(FILE *fp);
+extern wint_t __fgetwc(FILE *, locale_t);
+extern wint_t __fputwc(wchar_t, FILE *, locale_t);
extern int __sflush(FILE *);
- extern FILE *__sfp(void);
+-extern FILE *__sfp(void);
++extern FILE *__sfp(int); /* arg is whether to count against STREAM_MAX or not */
++extern void __sfprelease(FILE *); /* mark free and update count as needed */
extern int __slbexpand(FILE *, size_t);
-@@ -69,16 +72,16 @@
+ extern int __srefill(FILE *);
++extern int __srefill0(FILE *);
++extern int __srefill1(FILE *);
+ extern int __sread(void *, char *, int);
+ extern int __swrite(void *, char const *, int);
+ extern fpos_t __sseek(void *, fpos_t, int);
+@@ -69,16 +75,16 @@ extern void (*__cleanup)(void);
extern void __smakebuf(FILE *);
extern int __swhatbuf(FILE *, size_t *, int *);
extern int _fwalk(int (*)(FILE *));
extern int __sdidinit;
-@@ -124,7 +127,7 @@
+@@ -89,7 +95,8 @@ struct __sFILEX {
+ pthread_mutex_t fl_mutex; /* used for MT-safety */
+ pthread_t fl_owner; /* current owner */
+ int fl_count; /* recursive lock count */
+- int orientation; /* orientation for fwide() */
++ int orientation:2; /* orientation for fwide() */
++ int counted:1; /* stream counted against STREAM_MAX */
+ mbstate_t mbstate; /* multibyte conversion state */
+ };
+
+@@ -124,7 +131,7 @@ struct __sFILEX {
#define INITEXTRA(fp) { \
(fp)->_extra->_up = NULL; \
---- makebuf.c.orig 2006-10-11 20:54:06.000000000 -0700
-+++ makebuf.c 2006-10-12 10:09:09.000000000 -0700
-@@ -49,6 +49,8 @@
+--- makebuf.c.orig 2008-08-28 17:18:09.000000000 -0700
++++ makebuf.c 2008-09-04 14:20:48.000000000 -0700
+@@ -49,6 +49,9 @@ __FBSDID("$FreeBSD: src/lib/libc/stdio/m
#include "local.h"
#include "un-namespace.h"
++#define MAXBUFSIZE (1 << 16)
+#define TTYBUFSIZE 4096
+
/*
* Allocate a file buffer, or switch to unbuffered I/O.
* Per the ANSI C standard, ALL tty devices default to line buffered.
-@@ -71,6 +73,12 @@
+@@ -71,6 +74,12 @@ __smakebuf(fp)
return;
}
flags = __swhatbuf(fp, &size, &couldbetty);
if ((p = malloc(size)) == NULL) {
fp->_flags |= __SNBF;
fp->_bf._base = fp->_p = fp->_nbuf;
-@@ -81,8 +89,6 @@
+@@ -81,8 +90,6 @@ __smakebuf(fp)
flags |= __SMBF;
fp->_bf._base = fp->_p = p;
fp->_bf._size = size;
fp->_flags |= flags;
}
+@@ -115,8 +122,7 @@ __swhatbuf(fp, bufsize, couldbetty)
+ * __sseek is mainly paranoia.) It is safe to set _blksize
+ * unconditionally; it will only be used if __SOPT is also set.
+ */
+- *bufsize = st.st_blksize;
+- fp->_blksize = st.st_blksize;
++ fp->_blksize = *bufsize = st.st_blksize > MAXBUFSIZE ? MAXBUFSIZE : st.st_blksize;
+ return ((st.st_mode & S_IFMT) == S_IFREG && fp->_seek == __sseek ?
+ __SOPT : __SNPT);
+ }
---- mktemp.3 2004-11-25 11:38:35.000000000 -0800
-+++ mktemp.3.edit 2006-07-12 11:24:49.000000000 -0700
+--- mktemp.3.orig 2008-02-29 10:45:39.000000000 -0800
++++ mktemp.3 2008-02-29 11:21:10.000000000 -0800
@@ -36,20 +36,33 @@
.Dt MKTEMP 3
.Os
or
.Fn mkdtemp .
This is common with programs that were developed before
-@@ -226,12 +239,19 @@
- You must provide your own locking around this and other consumers of the
- .Xr arc4random 3
- API.
+@@ -219,19 +232,19 @@
+ This will ensure that the program does not continue blindly
+ in the event that an attacker has already created the file
+ with the intention of manipulating or reading its contents.
+.Sh LEGACY SYNOPSIS
+.Fd #include <unistd.h>
-+.Pp
+ .Pp
+-The implementation of these functions calls
+-.Xr arc4random 3 ,
+-which is not reentrant.
+-You must provide your own locking around this and other consumers of the
+-.Xr arc4random 3
+-API.
+The include file
+.In unistd.h
+is necessary and sufficient for all functions.
---- printf.3 2004-11-25 11:38:35.000000000 -0800
-+++ printf.3.edit 2006-09-06 16:56:37.000000000 -0700
+--- printf.3.orig 2008-02-29 10:45:39.000000000 -0800
++++ printf.3 2008-02-29 12:19:06.000000000 -0800
@@ -40,39 +40,83 @@
.Dt PRINTF 3
.Os
argument, the string was too short
and some of the printed characters were discarded.
The output is always null-terminated.
-@@ -167,7 +217,7 @@
+@@ -167,7 +217,11 @@
.Fn vsprintf
functions
effectively assume an infinite
-.Fa size .
+.Fa n .
++.Pp
++For those routines that write to a user-provided character string,
++that string and the format strings should not overlap, as the
++behavior is undefined.
.Pp
The format string is composed of zero or more directives:
ordinary
-@@ -287,6 +337,20 @@
+@@ -287,6 +341,20 @@
.Xr localeconv 3 .
.El
.It
An optional decimal digit string specifying a minimum field width.
If the converted value has fewer characters than the field width, it will
be padded with spaces on the left (or right, if the left-adjustment
-@@ -379,6 +443,34 @@
+@@ -379,6 +447,34 @@
.It Sy Modifier Ta Cm c Ta Cm s
.It Cm l No (ell) Ta Vt wint_t Ta Vt "wchar_t *"
.El
.It
A character that specifies the type of conversion to be applied.
.El
-@@ -790,14 +882,11 @@
+@@ -790,14 +886,11 @@
.Sh SEE ALSO
.Xr printf 1 ,
.Xr fmtcheck 3 ,
--- /dev/null
+--- refill.c.orig 2008-01-24 17:13:42.000000000 -0800
++++ refill.c 2008-02-17 13:19:27.000000000 -0800
+@@ -68,8 +68,8 @@ lflush(FILE *fp)
+ * Refill a stdio buffer.
+ * Return EOF on eof or error, 0 otherwise.
+ */
+-int
+-__srefill(FILE *fp)
++__private_extern__ int
++__srefill0(FILE *fp)
+ {
+
+ /* make sure stdio is set up */
+@@ -134,6 +134,13 @@ __srefill(FILE *fp)
+ if ((fp->_flags & (__SLBF|__SWR)) == (__SLBF|__SWR))
+ __sflush(fp);
+ }
++ return (1);
++}
++
++__private_extern__ int
++__srefill1(FILE *fp)
++{
++
+ fp->_p = fp->_bf._base;
+ fp->_r = _sread(fp, (char *)fp->_p, fp->_bf._size);
+ fp->_flags &= ~__SMOD; /* buffer contents are again pristine */
+@@ -148,3 +155,13 @@ __srefill(FILE *fp)
+ }
+ return (0);
+ }
++
++int
++__srefill(FILE *fp)
++{
++ int ret;
++
++ if ((ret = __srefill0(fp)) <= 0)
++ return ret;
++ return __srefill1(fp);
++}
---- scanf.3.orig 2007-04-08 18:49:37.000000000 -0700
-+++ scanf.3 2007-04-08 20:14:03.000000000 -0700
+--- scanf.3.orig 2008-07-30 01:54:48.000000000 -0700
++++ scanf.3 2008-07-30 02:06:07.000000000 -0700
@@ -40,35 +40,55 @@
.Dt SCANF 3
.Os
there may be a number of
.Em flag
characters, as follows:
-@@ -440,13 +468,10 @@
+@@ -415,7 +443,8 @@
+ in
+ .Xr printf 3 ) ;
+ the next pointer must be a pointer to
+-.Vt void .
++.Vt "void *"
++(or other pointer type).
+ .It Cm n
+ Nothing is expected;
+ instead, the number of characters consumed thus far from the input
+@@ -440,13 +469,10 @@
causes an immediate return of
.Dv EOF .
.Sh RETURN VALUES
no conversions were assigned;
typically this is due to an invalid input character,
such as an alphabetic character for a
-@@ -463,6 +488,7 @@
+@@ -463,6 +489,7 @@
.Xr getc 3 ,
.Xr mbrtowc 3 ,
.Xr printf 3 ,
.Xr strtod 3 ,
.Xr strtol 3 ,
.Xr strtoul 3 ,
-@@ -473,7 +499,7 @@
+@@ -473,7 +500,7 @@
.Fn scanf ,
.Fn sscanf ,
.Fn vfscanf ,
---- tempnam.c.orig 2006-09-15 00:33:51.000000000 -0700
-+++ tempnam.c 2006-09-15 01:19:22.000000000 -0700
-@@ -38,6 +38,7 @@
- __FBSDID("$FreeBSD: src/lib/libc/stdio/tempnam.c,v 1.10 2002/03/22 21:53:04 obrien Exp $");
-
- #include <sys/param.h>
-+#include <sys/stat.h>
- #include <errno.h>
- #include <stdio.h>
- #include <stdlib.h>
-@@ -57,35 +58,61 @@
+--- tempnam.c.orig 2008-11-12 17:08:45.000000000 -0800
++++ tempnam.c 2008-11-12 17:41:23.000000000 -0800
+@@ -57,35 +57,68 @@ tempnam(dir, pfx)
int sverrno;
char *f, *name;
- if (!(name = malloc(MAXPATHLEN)))
-+#if __DARWIN_UNIX03
-+ struct stat sb;
-+#endif /* __DARWIN_UNIX03 */
+ if (!(name = malloc(MAXPATHLEN))) {
return(NULL);
+ }
+#endif /* !__DARWIN_UNIX03 */
if ((f = (char *)dir)) {
+#if __DARWIN_UNIX03
-+ if (!access(dir, W_OK)) {
++ if (access(dir, W_OK) == 0) {
+#endif /* __DARWIN_UNIX03 */
(void)snprintf(name, MAXPATHLEN, "%s%s%sXXXXXX", f,
*(f + strlen(f) - 1) == '/'? "": "/", pfx);
return(f);
+ }
+#if __DARWIN_UNIX03
-+ }
++ }
+#endif /* __DARWIN_UNIX03 */
}
f = P_tmpdir;
+#if __DARWIN_UNIX03
-+ if (stat(f, &sb) == 0) { /* directory accessible? */
++ if (access(f, W_OK) == 0) { /* directory accessible? */
+#endif /* __DARWIN_UNIX03 */
(void)snprintf(name, MAXPATHLEN, "%s%sXXXXXX", f, pfx);
- if ((f = _mktemp(name)))
+#if __DARWIN_UNIX03
+ }
++ if (issetugid() == 0 && (f = getenv("TMPDIR")) && access(f, W_OK) == 0) {
++ (void)snprintf(name, MAXPATHLEN, "%s%s%sXXXXXX", f,
++ *(f + strlen(f) - 1) == '/'? "": "/", pfx);
++ if ((f = _mktemp(name))) {
++ return(f);
++ }
++ }
+#endif /* __DARWIN_UNIX03 */
f = _PATH_TMP;
+#if __DARWIN_UNIX03
-+ if (stat(f, &sb) < 0) {
++ if (access(f, W_OK) < 0) {
+ f = "./"; /* directory inaccessible */
++ if (access(f, W_OK) < 0) {
++ return(NULL);
++ }
+ }
+#endif /* __DARWIN_UNIX03 */
(void)snprintf(name, MAXPATHLEN, "%s%sXXXXXX", f, pfx);
---- _SB/Libc/stdio/FreeBSD/tmpnam.3 2004-11-25 11:38:35.000000000 -0800
-+++ _SB/Libc/stdio/FreeBSD/tmpnam.3.edit 2006-06-28 16:55:52.000000000 -0700
+--- tmpnam.3.orig 2008-11-13 16:39:10.000000000 -0800
++++ tmpnam.3 2008-11-13 16:45:15.000000000 -0800
+@@ -36,7 +36,7 @@
+ .\" @(#)tmpnam.3 8.2 (Berkeley) 11/17/93
+ .\" $FreeBSD: src/lib/libc/stdio/tmpnam.3,v 1.16 2004/06/21 19:38:25 mpp Exp $
+ .\"
+-.Dd November 17, 1993
++.Dd November 12, 2008
+ .Dt TMPFILE 3
+ .Os
+ .Sh NAME
@@ -49,11 +49,18 @@
.Sh SYNOPSIS
.In stdio.h
is expected to be at least
.Dv L_tmpnam
bytes in length.
-@@ -116,7 +124,7 @@
- The environment variable
- .Ev TMPDIR
- (if set), the argument
+@@ -113,21 +121,23 @@
+ but provides the ability to specify the directory which will
+ contain the temporary file and the file name prefix.
+ .Pp
+-The environment variable
+-.Ev TMPDIR
+-(if set), the argument
-.Fa tmpdir
++The argument
+.Fa dir
(if
.Pf non- Dv NULL ) ,
the directory
-@@ -127,7 +135,7 @@
+ .Dv P_tmpdir ,
+-and the directory
++the environment variable
++.Ev TMPDIR
++(if set),
++the directory
+ .Pa /tmp
++and finally, the current directory,
+ are tried, in the listed order, as directories in which to store the
temporary file.
.Pp
The argument
if
.Pf non- Dv NULL ,
is used to specify a file name prefix, which will be the
-@@ -229,11 +237,6 @@
+@@ -226,14 +236,15 @@
+ interface should not be used in software expected to be used on other systems
+ if there is any possibility that the user does not wish the temporary file to
+ be publicly readable and writable.
++.Sh LEGACY DESCRIPTION
++In legacy mode, the order directories are tried by the
++.Fn tempnam
++function is different; the environment variable
++.Ev TMPDIR
++(if defined) is used first.
.Sh SEE ALSO
.Xr mkstemp 3 ,
.Xr mktemp 3
---- vfprintf.c.orig 2006-10-01 00:03:16.000000000 -0700
-+++ vfprintf.c 2006-10-01 00:21:05.000000000 -0700
-@@ -40,6 +40,8 @@
+--- vfprintf.c.orig 2008-09-07 11:37:54.000000000 -0700
++++ vfprintf.c 2008-09-07 17:33:16.000000000 -0700
+@@ -40,6 +40,8 @@ static char sccsid[] = "@(#)vfprintf.c 8
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/stdio/vfprintf.c,v 1.68 2004/08/26 06:25:28 des Exp $");
/*
* Actual printf innards.
*
-@@ -58,6 +60,7 @@
+@@ -58,6 +60,7 @@ __FBSDID("$FreeBSD: src/lib/libc/stdio/v
#include <stdlib.h>
#include <string.h>
#include <wchar.h>
#include <stdarg.h>
#include "un-namespace.h"
-@@ -66,6 +69,13 @@
+@@ -66,6 +69,13 @@ __FBSDID("$FreeBSD: src/lib/libc/stdio/v
#include "local.h"
#include "fvwrite.h"
union arg {
int intarg;
u_int uintarg;
-@@ -93,6 +103,21 @@
+@@ -93,6 +103,21 @@ union arg {
#endif
wint_t wintarg;
wchar_t *pwchararg;
};
/*
-@@ -103,16 +128,20 @@
+@@ -103,16 +128,20 @@ enum typeid {
T_LONG, T_U_LONG, TP_LONG, T_LLONG, T_U_LLONG, TP_LLONG,
T_PTRDIFFT, TP_PTRDIFFT, T_SIZET, TP_SIZET,
T_INTMAXT, T_UINTMAXT, TP_INTMAXT, TP_VOID, TP_CHAR, TP_SCHAR,
static void __find_arguments(const char *, va_list, union arg **);
static void __grow_type_table(int, enum typeid **, int *);
-@@ -141,7 +170,7 @@
+@@ -141,7 +170,7 @@ __sprint(FILE *fp, struct __suio *uio)
* worries about ungetc buffers and so forth.
*/
static int
{
int ret;
FILE fake;
-@@ -160,7 +189,7 @@
+@@ -160,7 +189,7 @@ __sbprintf(FILE *fp, const char *fmt, va
fake._lbfsize = 0; /* not actually used, but Just In Case */
/* do the work, then copy any error status */
if (ret >= 0 && __fflush(&fake))
ret = EOF;
if (fake._flags & __SERR)
-@@ -336,14 +365,14 @@
+@@ -252,7 +281,7 @@ __ultoa(u_long val, char *endp, int base
+ break;
+
+ default: /* oops */
+- abort();
++ LIBC_ABORT("base = %d", base);
+ }
+ return (cp);
+ }
+@@ -324,7 +353,7 @@ __ujtoa(uintmax_t val, char *endp, int b
+ break;
+
+ default:
+- abort();
++ LIBC_ABORT("base = %d", base);
+ }
+ return (cp);
+ }
+@@ -336,14 +365,14 @@ __ujtoa(uintmax_t val, char *endp, int b
* that the wide char. string ends in a null character.
*/
static char *
/*
* Determine the number of bytes to output and allocate space for
-@@ -354,7 +383,7 @@
+@@ -354,7 +383,7 @@ __wcsconv(wchar_t *wcsarg, int prec)
p = wcsarg;
mbs = initial;
for (;;) {
if (clen == 0 || clen == (size_t)-1 ||
nbytes + clen > prec)
break;
-@@ -363,7 +392,7 @@
+@@ -363,7 +392,7 @@ __wcsconv(wchar_t *wcsarg, int prec)
} else {
p = wcsarg;
mbs = initial;
if (nbytes == (size_t)-1)
return (NULL);
}
-@@ -378,7 +407,7 @@
+@@ -378,7 +407,7 @@ __wcsconv(wchar_t *wcsarg, int prec)
p = wcsarg;
mbs = initial;
while (mbp - convbuf < nbytes) {
if (clen == 0 || clen == (size_t)-1)
break;
mbp += clen;
-@@ -395,6 +424,8 @@
+@@ -395,6 +424,8 @@ __wcsconv(wchar_t *wcsarg, int prec)
/*
* MT-safe version
*/
int
vfprintf(FILE * __restrict fp, const char * __restrict fmt0, va_list ap)
-@@ -402,7 +433,21 @@
+@@ -402,7 +433,21 @@ vfprintf(FILE * __restrict fp, const cha
int ret;
FLOCKFILE(fp);
FUNLOCKFILE(fp);
return (ret);
}
-@@ -451,12 +496,15 @@
+@@ -451,12 +496,15 @@ static int exponent(char *, int, int);
#define PTRDIFFT 0x800 /* ptrdiff_t */
#define INTMAXT 0x1000 /* intmax_t */
#define CHARINT 0x2000 /* print char using int format */
{
char *fmt; /* format string */
int ch; /* character from fmt */
-@@ -502,6 +550,11 @@
+@@ -502,6 +550,11 @@ __vfprintf(FILE *fp, const char *fmt0, v
int nseps; /* number of group separators with ' */
int nrepeats; /* number of repeats of the last group */
#endif
u_long ulval; /* integer arguments %[diouxX] */
uintmax_t ujval; /* %j, %ll, %q, %t, %z integers */
int base; /* base for [diouxX] conversion */
-@@ -599,7 +652,7 @@
+@@ -599,13 +652,13 @@ __vfprintf(FILE *fp, const char *fmt0, v
#define INTMAX_SIZE (INTMAXT|SIZET|PTRDIFFT|LLONGINT)
#define SJARG() \
(flags&INTMAXT ? GETARG(intmax_t) : \
flags&PTRDIFFT ? (intmax_t)GETARG(ptrdiff_t) : \
(intmax_t)GETARG(long long))
#define UJARG() \
-@@ -633,22 +686,24 @@
+ (flags&INTMAXT ? GETARG(uintmax_t) : \
+ flags&SIZET ? (uintmax_t)GETARG(size_t) : \
+- flags&PTRDIFFT ? (uintmax_t)GETARG(ptrdiff_t) : \
++ flags&PTRDIFFT ? (uintmax_t)(unsigned)GETARG(ptrdiff_t) : \
+ (uintmax_t)GETARG(unsigned long long))
+
+ /*
+@@ -633,22 +686,24 @@ __vfprintf(FILE *fp, const char *fmt0, v
val = GETARG (int); \
}
fmt = (char *)fmt0;
argtable = NULL;
-@@ -675,6 +730,9 @@
+@@ -675,6 +730,9 @@ __vfprintf(FILE *fp, const char *fmt0, v
}
if (ch == '\0')
goto done;
fmt++; /* skip over '%' */
flags = 0;
-@@ -683,6 +741,9 @@
+@@ -683,6 +741,9 @@ __vfprintf(FILE *fp, const char *fmt0, v
prec = -1;
sign = '\0';
ox[1] = '\0';
rflag: ch = *fmt++;
reswitch: switch (ch) {
-@@ -698,6 +759,11 @@
+@@ -698,6 +759,11 @@ reswitch: switch (ch) {
case '#':
flags |= ALT;
goto rflag;
case '*':
/*-
* ``A negative field width argument is taken as a
-@@ -718,8 +784,8 @@
+@@ -718,8 +784,8 @@ reswitch: switch (ch) {
goto rflag;
case '\'':
flags |= GROUPING;
goto rflag;
case '.':
if ((ch = *fmt++) == '*') {
-@@ -793,14 +859,18 @@
+@@ -793,14 +859,18 @@ reswitch: switch (ch) {
flags |= LONGINT;
/*FALLTHROUGH*/
case 'c':
if (mbseqlen == (size_t)-1) {
fp->_flags |= __SERR;
goto error;
-@@ -817,6 +887,10 @@
+@@ -817,6 +887,10 @@ reswitch: switch (ch) {
/*FALLTHROUGH*/
case 'd':
case 'i':
if (flags & INTMAX_SIZE) {
ujval = SJARG();
if ((intmax_t)ujval < 0) {
-@@ -835,6 +909,12 @@
+@@ -835,6 +909,12 @@ reswitch: switch (ch) {
#ifndef NO_FLOATING_POINT
case 'a':
case 'A':
if (ch == 'a') {
ox[1] = 'x';
xdigs = xdigs_lower;
-@@ -848,6 +928,12 @@
+@@ -848,6 +928,12 @@ reswitch: switch (ch) {
prec++;
if (dtoaresult != NULL)
freedtoa(dtoaresult);
if (flags & LONGDBL) {
fparg.ldbl = GETARG(long double);
dtoaresult = cp =
-@@ -859,6 +945,7 @@
+@@ -859,6 +945,7 @@ reswitch: switch (ch) {
__hdtoa(fparg.dbl, xdigs, prec,
&expt, &signflag, &dtoaend);
}
if (prec < 0)
prec = dtoaend - cp;
if (expt == INT_MAX)
-@@ -866,6 +953,12 @@
+@@ -866,6 +953,12 @@ reswitch: switch (ch) {
goto fp_common;
case 'e':
case 'E':
expchar = ch;
if (prec < 0) /* account for digit before decpt */
prec = DEFPREC + 1;
-@@ -874,10 +967,22 @@
+@@ -874,10 +967,22 @@ reswitch: switch (ch) {
goto fp_begin;
case 'f':
case 'F':
expchar = ch - ('g' - 'e');
if (prec == 0)
prec = 1;
-@@ -886,6 +991,14 @@
+@@ -886,6 +991,14 @@ fp_begin:
prec = DEFPREC;
if (dtoaresult != NULL)
freedtoa(dtoaresult);
if (flags & LONGDBL) {
fparg.ldbl = GETARG(long double);
dtoaresult = cp =
-@@ -899,6 +1012,7 @@
+@@ -899,6 +1012,7 @@ fp_begin:
if (expt == 9999)
expt = INT_MAX;
}
fp_common:
if (signflag)
sign = '-';
-@@ -993,6 +1107,10 @@
+@@ -993,6 +1107,10 @@ fp_common:
flags |= LONGINT;
/*FALLTHROUGH*/
case 'o':
if (flags & INTMAX_SIZE)
ujval = UJARG();
else
-@@ -1007,6 +1125,10 @@
+@@ -1007,6 +1125,10 @@ fp_common:
* defined manner.''
* -- ANSI X3J11
*/
ujval = (uintmax_t)(uintptr_t)GETARG(void *);
base = 16;
xdigs = xdigs_lower;
-@@ -1025,7 +1147,7 @@
+@@ -1025,7 +1147,7 @@ fp_common:
if ((wcp = GETARG(wchar_t *)) == NULL)
cp = "(null)";
else {
if (convbuf == NULL) {
fp->_flags |= __SERR;
goto error;
-@@ -1056,6 +1178,10 @@
+@@ -1056,6 +1178,10 @@ fp_common:
flags |= LONGINT;
/*FALLTHROUGH*/
case 'u':
if (flags & INTMAX_SIZE)
ujval = UJARG();
else
-@@ -1068,6 +1194,10 @@
+@@ -1068,6 +1194,10 @@ fp_common:
case 'x':
xdigs = xdigs_lower;
hex:
if (flags & INTMAX_SIZE)
ujval = UJARG();
else
-@@ -1093,6 +1223,7 @@
+@@ -1093,6 +1223,7 @@ number: if ((dprec = prec) >= 0)
* ``The result of converting a zero value with an
* explicit precision of zero is no characters.''
* -- ANSI X3J11
*/
cp = buf + BUF;
if (flags & INTMAX_SIZE) {
-@@ -1102,7 +1233,7 @@
+@@ -1102,7 +1233,7 @@ number: if ((dprec = prec) >= 0)
flags & GROUPING, thousands_sep,
grouping);
} else {
cp = __ultoa(ulval, cp, base,
flags & ALT, xdigs,
flags & GROUPING, thousands_sep,
-@@ -1112,6 +1243,11 @@
+@@ -1110,8 +1241,13 @@ number: if ((dprec = prec) >= 0)
+ }
+ size = buf + BUF - cp;
if (size > BUF) /* should never happen */
- abort();
+- abort();
++ LIBC_ABORT("size %d > BUF %d", size, BUF);
break;
+#ifdef VECTORS
+ case 'v':
default: /* "%?" prints ?, unless ? is NUL */
if (ch == '\0')
goto done;
-@@ -1123,6 +1259,290 @@
+@@ -1123,6 +1259,290 @@ number: if ((dprec = prec) >= 0)
break;
}
/*
* All reasonable formats wind up here. At this point, `cp'
* points to a string which (if not flags&LADJUST) should be
-@@ -1178,7 +1598,7 @@
+@@ -1178,7 +1598,7 @@ number: if ((dprec = prec) >= 0)
if (expt <= 0) {
PRINT(zeroes, 1);
if (prec || flags & ALT)
PAD(-expt, zeroes);
/* already handled initial 0's */
prec += expt;
-@@ -1203,14 +1623,14 @@
+@@ -1203,14 +1623,14 @@ number: if ((dprec = prec) >= 0)
cp = dtoaend;
}
if (prec || flags & ALT)
PRINT(cp, ndig-1);
PAD(prec - ndig, zeroes);
} else /* XeYYY */
-@@ -1406,6 +1826,11 @@
+@@ -1406,6 +1826,11 @@ reswitch: switch (ch) {
if (flags & LONGINT)
ADDTYPE(T_WINT);
else
ADDTYPE(T_INT);
break;
case 'D':
-@@ -1413,6 +1838,11 @@
+@@ -1413,6 +1838,11 @@ reswitch: switch (ch) {
/*FALLTHROUGH*/
case 'd':
case 'i':
ADDSARG();
break;
#ifndef NO_FLOATING_POINT
-@@ -1421,8 +1851,14 @@
+@@ -1421,8 +1851,14 @@ reswitch: switch (ch) {
case 'e':
case 'E':
case 'f':
if (flags & LONGDBL)
ADDTYPE(T_LONG_DOUBLE);
else
-@@ -1451,9 +1887,19 @@
+@@ -1451,9 +1887,19 @@ reswitch: switch (ch) {
flags |= LONGINT;
/*FALLTHROUGH*/
case 'o':
ADDTYPE(TP_VOID);
break;
case 'S':
-@@ -1471,6 +1917,11 @@
+@@ -1471,6 +1917,11 @@ reswitch: switch (ch) {
case 'u':
case 'X':
case 'x':
ADDUARG();
break;
default: /* "%?" prints ?, unless ? is NUL */
-@@ -1537,7 +1988,7 @@
+@@ -1537,7 +1988,7 @@ done:
(*argtable) [n].sizearg = va_arg (ap, size_t);
break;
case TP_SIZET:
break;
case T_INTMAXT:
(*argtable) [n].intmaxarg = va_arg (ap, intmax_t);
-@@ -1556,6 +2007,11 @@
+@@ -1556,6 +2007,11 @@ done:
(*argtable) [n].longdoublearg = va_arg (ap, long double);
break;
#endif
case TP_CHAR:
(*argtable) [n].pchararg = va_arg (ap, char *);
break;
+@@ -1590,12 +2046,12 @@ __grow_type_table (int nextarg, enum typ
+ newsize = nextarg + 1;
+ if (oldsize == STATIC_ARG_TBL_SIZE) {
+ if ((newtable = malloc(newsize * sizeof(enum typeid))) == NULL)
+- abort(); /* XXX handle better */
++ LIBC_ABORT("malloc: %s", strerror(errno)); /* XXX handle better */
+ bcopy(oldtable, newtable, oldsize * sizeof(enum typeid));
+ } else {
+ newtable = reallocf(oldtable, newsize * sizeof(enum typeid));
+ if (newtable == NULL)
+- abort(); /* XXX handle better */
++ LIBC_ABORT("reallocf: %s", strerror(errno)); /* XXX handle better */
+ }
+ for (n = oldsize; n < newsize; n++)
+ newtable[n] = T_UNUSED;
---- vfscanf.c.orig 2004-11-25 11:38:35.000000000 -0800
-+++ vfscanf.c 2005-05-20 00:46:37.000000000 -0700
-@@ -40,6 +40,8 @@
+--- vfscanf.c.orig 2009-02-15 03:11:22.000000000 -0800
++++ vfscanf.c 2009-02-16 00:22:06.000000000 -0800
+@@ -40,6 +40,8 @@ static char sccsid[] = "@(#)vfscanf.c 8.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/stdio/vfscanf.c,v 1.37 2004/05/02 10:55:05 das Exp $");
#include "namespace.h"
#include <ctype.h>
#include <inttypes.h>
-@@ -97,10 +99,21 @@
+@@ -50,6 +52,7 @@ __FBSDID("$FreeBSD: src/lib/libc/stdio/v
+ #include <string.h>
+ #include <wchar.h>
+ #include <wctype.h>
++#include <pthread.h>
+ #include "un-namespace.h"
+
+ #include "collate.h"
+@@ -97,10 +100,21 @@ __FBSDID("$FreeBSD: src/lib/libc/stdio/v
#define CT_INT 3 /* %[dioupxX] conversion */
#define CT_FLOAT 4 /* %[efgEFG] conversion */
__weak_reference(__vfscanf, vfscanf);
-@@ -108,12 +121,24 @@
+@@ -108,12 +122,24 @@ __weak_reference(__vfscanf, vfscanf);
* __vfscanf - MT-safe version
*/
int
-__vfscanf(FILE *fp, char const *fmt0, va_list ap)
+__vfscanf(FILE * __restrict fp, char const * __restrict fmt0, va_list ap)
-+{
-+ int ret;
-+
-+ FLOCKFILE(fp);
+ {
+ int ret;
+
+ FLOCKFILE(fp);
+- ret = __svfscanf(fp, fmt0, ap);
+ ret = __svfscanf_l(fp, __current_locale(), fmt0, ap);
+ FUNLOCKFILE(fp);
+ return (ret);
+
+int
+vfscanf_l(FILE * __restrict fp, locale_t loc, char const * __restrict fmt0, va_list ap)
- {
- int ret;
-
++{
++ int ret;
++
+ NORMALIZE_LOCALE(loc);
- FLOCKFILE(fp);
-- ret = __svfscanf(fp, fmt0, ap);
++ FLOCKFILE(fp);
+ ret = __svfscanf_l(fp, loc, fmt0, ap);
FUNLOCKFILE(fp);
return (ret);
}
-@@ -121,8 +146,8 @@
+@@ -121,8 +147,8 @@ __vfscanf(FILE *fp, char const *fmt0, va
/*
* __svfscanf - non-MT-safe version of __vfscanf
*/
{
const u_char *fmt = (const u_char *)fmt0;
int c; /* character from format, or conversion */
-@@ -132,7 +157,6 @@
+@@ -132,7 +158,6 @@ __svfscanf(FILE *fp, const char *fmt0, v
int flags; /* flags as defined above */
char *p0; /* saves original value of p when necessary */
int nassigned; /* number of fields assigned */
int nread; /* number of characters consumed from fp */
int base; /* base argument to conversion function */
char ccltab[256]; /* character class table for %[...] */
-@@ -140,24 +164,29 @@
+@@ -140,29 +165,37 @@ __svfscanf(FILE *fp, const char *fmt0, v
wchar_t *wcp; /* handy wide character pointer */
wchar_t *wcp0; /* saves original value of wcp */
size_t nconv; /* length of multibyte sequence converted */
nread++, fp->_r--, fp->_p++;
continue;
}
-@@ -181,6 +210,18 @@
+- if (c != '%')
++ if (c != '%') {
++ if (fp->_r <= 0 && __srefill(fp))
++ goto input_failure;
+ goto literal;
++ }
+ width = 0;
+ flags = 0;
+ /*
+@@ -172,15 +205,35 @@ __svfscanf(FILE *fp, const char *fmt0, v
+ again: c = *fmt++;
+ switch (c) {
+ case '%':
++ /* Consume leading white space */
++ for(;;) {
++ if (fp->_r <= 0 && __srefill(fp))
++ goto input_failure;
++ if (!isspace_l(*fp->_p, loc))
++ break;
++ nread++;
++ fp->_r--;
++ fp->_p++;
++ }
+ literal:
+- if (fp->_r <= 0 && __srefill(fp))
+- goto input_failure;
+ if (*fp->_p != c)
+ goto match_failure;
+ fp->_r--, fp->_p++;
nread++;
continue;
case '*':
flags |= SUPPRESS;
goto again;
-@@ -267,7 +308,7 @@
+@@ -267,7 +320,7 @@ literal:
break;
case '[':
flags |= NOSKIP;
c = CT_CCL;
break;
-@@ -288,7 +329,6 @@
+@@ -288,7 +341,6 @@ literal:
break;
case 'n':
if (flags & SUPPRESS) /* ??? */
continue;
if (flags & SHORTSHORT)
-@@ -330,7 +370,7 @@
+@@ -330,7 +382,7 @@ literal:
* that suppress this.
*/
if ((flags & NOSKIP) == 0) {
nread++;
if (--fp->_r > 0)
fp->_p++;
-@@ -360,7 +400,7 @@
+@@ -360,7 +412,7 @@ literal:
wcp = NULL;
n = 0;
while (width != 0) {
fp->_flags |= __SERR;
goto input_failure;
}
-@@ -368,7 +408,7 @@
+@@ -368,7 +420,7 @@ literal:
fp->_p++;
fp->_r--;
mbs = initial;
if (nconv == (size_t)-1) {
fp->_flags |= __SERR;
goto input_failure;
-@@ -421,7 +461,6 @@
+@@ -421,7 +473,6 @@ literal:
nread += r;
nassigned++;
}
break;
case CT_CCL:
-@@ -440,7 +479,7 @@
+@@ -440,7 +491,7 @@ literal:
n = 0;
nchars = 0;
while (width != 0) {
fp->_flags |= __SERR;
goto input_failure;
}
-@@ -448,7 +487,7 @@
+@@ -448,7 +499,7 @@ literal:
fp->_p++;
fp->_r--;
mbs = initial;
if (nconv == (size_t)-1) {
fp->_flags |= __SERR;
goto input_failure;
-@@ -456,8 +495,8 @@
+@@ -456,8 +507,8 @@ literal:
if (nconv == 0)
*wcp = L'\0';
if (nconv != (size_t)-2) {
while (n != 0) {
n--;
__ungetc(buf[n],
-@@ -525,7 +564,6 @@
+@@ -525,7 +576,6 @@ literal:
nassigned++;
}
nread += n;
break;
case CT_STRING:
-@@ -540,8 +578,8 @@
+@@ -540,8 +590,8 @@ literal:
else
wcp = &twc;
n = 0;
fp->_flags |= __SERR;
goto input_failure;
}
-@@ -549,7 +587,7 @@
+@@ -549,7 +599,7 @@ literal:
fp->_p++;
fp->_r--;
mbs = initial;
if (nconv == (size_t)-1) {
fp->_flags |= __SERR;
goto input_failure;
-@@ -557,7 +595,7 @@
+@@ -557,7 +607,7 @@ literal:
if (nconv == 0)
*wcp = L'\0';
if (nconv != (size_t)-2) {
while (n != 0) {
n--;
__ungetc(buf[n],
-@@ -585,7 +623,7 @@
+@@ -585,7 +635,7 @@ literal:
}
} else if (flags & SUPPRESS) {
n = 0;
n++, fp->_r--, fp->_p++;
if (--width == 0)
break;
-@@ -595,7 +633,7 @@
+@@ -595,7 +645,7 @@ literal:
nread += n;
} else {
p0 = p = va_arg(ap, char *);
fp->_r--;
*p++ = *fp->_p++;
if (--width == 0)
-@@ -607,7 +645,6 @@
+@@ -607,7 +657,6 @@ literal:
nread += p - p0;
nassigned++;
}
continue;
case CT_INT:
-@@ -738,9 +775,9 @@
+@@ -738,9 +787,9 @@ literal:
*p = 0;
if ((flags & UNSIGNED) == 0)
if (flags & POINTER)
*va_arg(ap, void **) =
(void *)(uintptr_t)res;
-@@ -763,43 +800,48 @@
+@@ -763,43 +812,48 @@ literal:
nassigned++;
}
nread += p - buf;
*va_arg(ap, float *) = res;
}
- if (__scanfdebug && p - buf != width)
+- abort();
+ if (__scanfdebug && p - pbuf != width)
- abort();
++ LIBC_ABORT("p - pbuf %ld != width %ld", (long)(p - pbuf), width);
nassigned++;
}
nread += width;
/*
* Fill in the given table from the scanset at the given format
* (just after `['). Return a pointer to the character past the
-@@ -807,9 +849,10 @@
+@@ -807,9 +861,10 @@ match_failure:
* considered part of the scanset.
*/
static const u_char *
{
int c, n, v, i;
-@@ -845,6 +888,7 @@
+@@ -845,6 +900,7 @@ doswitch:
return (fmt - 1);
case '-':
/*
* A scanset of the form
* [01+-]
-@@ -865,8 +909,8 @@
+@@ -865,8 +921,8 @@ doswitch:
*/
n = *fmt;
if (n == ']'
)
) {
c = '-';
-@@ -874,14 +918,14 @@
+@@ -874,14 +930,14 @@ doswitch:
}
fmt++;
/* fill in the range */
)
tab[i] = v;
}
-@@ -901,7 +945,7 @@
+@@ -901,7 +957,7 @@ doswitch:
return (fmt);
#endif
break;
case ']': /* end of scanset */
return (fmt);
-@@ -915,18 +959,42 @@
+@@ -914,19 +970,75 @@ doswitch:
+ }
#ifndef NO_FLOATING_POINT
++/*
++ * Maintain a per-thread parsefloat buffer, shared by __svfscanf_l and
++ * __vfwscanf.
++ */
++#ifdef BUILDING_VARIANT
++extern char *__parsefloat_buf(size_t s);
++#else /* !BUILDING_VARIANT */
++__private_extern__ char *
++__parsefloat_buf(size_t s)
++{
++ char *b;
++ static pthread_key_t parsefloat_tsd_key = (pthread_key_t)-1;
++ static pthread_mutex_t parsefloat_tsd_lock = PTHREAD_MUTEX_INITIALIZER;
++ static size_t bsiz = 0;
++
++ if (parsefloat_tsd_key == (pthread_key_t)-1) {
++ pthread_mutex_lock(&parsefloat_tsd_lock);
++ if (parsefloat_tsd_key == (pthread_key_t)-1) {
++ parsefloat_tsd_key = __LIBC_PTHREAD_KEY_PARSEFLOAT;
++ pthread_key_init_np(parsefloat_tsd_key, free);
++ }
++ pthread_mutex_unlock(&parsefloat_tsd_lock);
++ }
++ if ((b = (char *)pthread_getspecific(parsefloat_tsd_key)) == NULL) {
++ bsiz = s > BUF ? s : BUF;
++ b = (char *)malloc(bsiz);
++ if (b == NULL) {
++ bsiz = 0;
++ return NULL;
++ }
++ pthread_setspecific(parsefloat_tsd_key, b);
++ return b;
++ }
++ if (s > bsiz) {
++ b = (char *)reallocf(b, s);
++ pthread_setspecific(parsefloat_tsd_key, b);
++ if (b == NULL) {
++ bsiz = 0;
++ return NULL;
++ }
++ bsiz = s;
++ }
++ return b;
++}
++#endif /* BUILDING_VARIANT */
++
static int
-parsefloat(FILE *fp, char *buf, char *end)
+parsefloat(FILE *fp, char **buf, size_t width, locale_t loc)
+ char *decpt_start;
_Bool gotmantdig = 0, ishex = 0;
-
-+ static char *b = NULL;
-+ static size_t bsiz = 0;
++ char *b;
+ char *e;
+ size_t s;
+
-+ if (bsiz = 0) {
-+ b = (char *)malloc(BUF);
-+ if (b == NULL) {
-+ *buf = NULL;
-+ return 0;
-+ }
-+ bsiz = BUF;
-+ }
+ s = (width == 0 ? BUF : (width + 1));
-+ if (s > bsiz) {
-+ b = (char *)reallocf(b, s);
-+ if (b == NULL) {
-+ bsiz = 0;
-+ *buf = NULL;
-+ return 0;
-+ }
-+ bsiz = s;
++ if ((b = __parsefloat_buf(s)) == NULL) {
++ *buf = NULL;
++ return 0;
+ }
+ e = b + (s - 1);
/*
* We set commit = p whenever the string we have read so far
* constitutes a valid representation of a floating point
-@@ -936,8 +1004,8 @@
+@@ -936,8 +1048,8 @@ parsefloat(FILE *fp, char *buf, char *en
* always necessary to read at least one character that doesn't
* match; thus, we can't short-circuit "infinity" or "nan(...)".
*/
c = *fp->_p;
reswitch:
switch (state) {
-@@ -997,7 +1065,7 @@
+@@ -997,7 +1109,7 @@ reswitch:
if (c == ')') {
commit = p;
infnanpos = -2;
goto parsedone;
break;
}
-@@ -1013,16 +1081,33 @@
+@@ -1013,16 +1125,33 @@ reswitch:
goto reswitch;
}
case S_DIGITS:
case S_FRAC:
if (((c == 'E' || c == 'e') && !ishex) ||
((c == 'P' || c == 'p') && ishex)) {
-@@ -1030,7 +1115,7 @@
+@@ -1030,7 +1159,7 @@ reswitch:
goto parsedone;
else
state = S_EXP;
commit = p;
gotmantdig = 1;
} else
-@@ -1043,7 +1128,7 @@
+@@ -1043,13 +1172,26 @@ reswitch:
else
goto reswitch;
case S_EXPDIGITS:
commit = p;
else
goto parsedone;
-@@ -1051,6 +1136,21 @@
+ break;
default:
- abort();
- }
+- abort();
++ LIBC_ABORT("unknown state %d", state);
++ }
+ if (p >= e) {
+ ssize_t diff = (p - b);
+ ssize_t com = (commit - b);
+ s += BUF;
-+ b = (char *)reallocf(b, s);
++ b = __parsefloat_buf(s);
+ if (b == NULL) {
-+ bsiz = 0;
+ *buf = NULL;
+ return 0;
+ }
-+ bsiz = s;
+ e = b + (s - 1);
+ p = b + diff;
+ commit = b + com;
-+ }
+ }
*p++ = c;
if (--fp->_r > 0)
- fp->_p++;
-@@ -1062,6 +1162,7 @@
+@@ -1062,6 +1204,7 @@ parsedone:
while (commit < --p)
__ungetc(*(u_char *)p, fp);
*++commit = '\0';
---- vfwprintf.c.orig 2006-10-01 00:03:16.000000000 -0700
-+++ vfwprintf.c 2006-10-01 00:21:40.000000000 -0700
-@@ -42,6 +42,8 @@
+--- vfwprintf.c.orig 2008-09-07 11:37:54.000000000 -0700
++++ vfwprintf.c 2008-09-07 17:47:18.000000000 -0700
+@@ -42,6 +42,8 @@ static char sccsid[] = "@(#)vfprintf.c 8
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/stdio/vfwprintf.c,v 1.23 2004/08/26 06:25:28 des Exp $");
/*
* Actual wprintf innards.
*
-@@ -63,12 +65,20 @@
+@@ -63,12 +65,20 @@ __FBSDID("$FreeBSD: src/lib/libc/stdio/v
#include <string.h>
#include <wchar.h>
#include <wctype.h>
union arg {
int intarg;
u_int uintarg;
-@@ -96,6 +106,21 @@
+@@ -96,6 +106,21 @@ union arg {
#endif
wint_t wintarg;
wchar_t *pwchararg;
};
/*
-@@ -106,16 +131,20 @@
+@@ -106,16 +131,20 @@ enum typeid {
T_LONG, T_U_LONG, TP_LONG, T_LLONG, T_U_LLONG, TP_LLONG,
T_PTRDIFFT, TP_PTRDIFFT, T_SIZET, TP_SIZET,
T_INTMAXT, T_UINTMAXT, TP_INTMAXT, TP_VOID, TP_CHAR, TP_SCHAR,
static void __find_arguments(const wchar_t *, va_list, union arg **);
static void __grow_type_table(int, enum typeid **, int *);
-@@ -125,7 +154,7 @@
+@@ -125,7 +154,7 @@ static void __grow_type_table(int, enum
* worries about ungetc buffers and so forth.
*/
static int
{
int ret;
FILE fake;
-@@ -144,7 +173,7 @@
+@@ -144,7 +173,7 @@ __sbprintf(FILE *fp, const wchar_t *fmt,
fake._lbfsize = 0; /* not actually used, but Just In Case */
/* do the work, then copy any error status */
if (ret >= 0 && __fflush(&fake))
ret = WEOF;
if (fake._flags & __SERR)
-@@ -157,7 +186,7 @@
+@@ -157,7 +186,7 @@ __sbprintf(FILE *fp, const wchar_t *fmt,
* File must already be locked.
*/
static wint_t
{
static const mbstate_t initial;
mbstate_t mbs;
-@@ -167,10 +196,10 @@
+@@ -167,10 +196,10 @@ __xfputwc(wchar_t wc, FILE *fp)
size_t len;
if ((fp->_flags & __SSTR) == 0)
fp->_flags |= __SERR;
return (WEOF);
}
-@@ -350,13 +379,14 @@
+@@ -266,7 +295,7 @@ __ultoa(u_long val, wchar_t *endp, int b
+ break;
+
+ default: /* oops */
+- abort();
++ LIBC_ABORT("base = %d", base);
+ }
+ return (cp);
+ }
+@@ -338,7 +367,7 @@ __ujtoa(uintmax_t val, wchar_t *endp, in
+ break;
+
+ default:
+- abort();
++ LIBC_ABORT("base = %d", base);
+ }
+ return (cp);
+ }
+@@ -350,13 +379,14 @@ __ujtoa(uintmax_t val, wchar_t *endp, in
* that the multibyte char. string ends in a null character.
*/
static wchar_t *
if (mbsarg == NULL)
return (NULL);
-@@ -374,7 +404,7 @@
+@@ -374,7 +404,7 @@ __mbsconv(char *mbsarg, int prec)
insize = nchars = 0;
mbs = initial;
while (nchars != (size_t)prec) {
if (nconv == 0 || nconv == (size_t)-1 ||
nconv == (size_t)-2)
break;
-@@ -399,7 +429,7 @@
+@@ -399,7 +429,7 @@ __mbsconv(char *mbsarg, int prec)
p = mbsarg;
mbs = initial;
while (insize != 0) {
if (nconv == 0 || nconv == (size_t)-1 || nconv == (size_t)-2)
break;
wcp++;
-@@ -418,6 +448,8 @@
+@@ -418,6 +448,8 @@ __mbsconv(char *mbsarg, int prec)
/*
* MT-safe version
*/
int
vfwprintf(FILE * __restrict fp, const wchar_t * __restrict fmt0, va_list ap)
-@@ -425,7 +457,21 @@
+@@ -425,7 +457,21 @@ vfwprintf(FILE * __restrict fp, const wc
int ret;
FLOCKFILE(fp);
FUNLOCKFILE(fp);
return (ret);
}
-@@ -474,12 +520,15 @@
+@@ -474,12 +520,15 @@ static int exponent(wchar_t *, int, wcha
#define PTRDIFFT 0x800 /* ptrdiff_t */
#define INTMAXT 0x1000 /* intmax_t */
#define CHARINT 0x2000 /* print char using int format */
{
wchar_t *fmt; /* format string */
wchar_t ch; /* character from fmt */
-@@ -507,7 +556,8 @@
+@@ -507,7 +556,8 @@ __vfwprintf(FILE *fp, const wchar_t *fmt
* D: expchar holds this character; '\0' if no exponent, e.g. %f
* F: at least two digits for decimal, at least one digit for hex
*/
int signflag; /* true if float is negative */
union { /* floating point arguments %[aAeEfFgG] */
double dbl;
-@@ -524,6 +574,11 @@
+@@ -524,6 +574,11 @@ __vfwprintf(FILE *fp, const wchar_t *fmt
int nseps; /* number of group separators with ' */
int nrepeats; /* number of repeats of the last group */
#endif
u_long ulval; /* integer arguments %[diouxX] */
uintmax_t ujval; /* %j, %ll, %q, %t, %z integers */
int base; /* base for [diouxX] conversion */
-@@ -560,7 +615,7 @@
+@@ -560,7 +615,7 @@ __vfwprintf(FILE *fp, const wchar_t *fmt
*/
#define PRINT(ptr, len) do { \
for (n3 = 0; n3 < (len); n3++) \
} while (0)
#define PAD(howmany, with) do { \
if ((n = (howmany)) > 0) { \
-@@ -606,7 +661,7 @@
+@@ -606,13 +661,13 @@ __vfwprintf(FILE *fp, const wchar_t *fmt
#define INTMAX_SIZE (INTMAXT|SIZET|PTRDIFFT|LLONGINT)
#define SJARG() \
(flags&INTMAXT ? GETARG(intmax_t) : \
flags&PTRDIFFT ? (intmax_t)GETARG(ptrdiff_t) : \
(intmax_t)GETARG(long long))
#define UJARG() \
-@@ -640,21 +695,24 @@
+ (flags&INTMAXT ? GETARG(uintmax_t) : \
+ flags&SIZET ? (uintmax_t)GETARG(size_t) : \
+- flags&PTRDIFFT ? (uintmax_t)GETARG(ptrdiff_t) : \
++ flags&PTRDIFFT ? (uintmax_t)(unsigned)GETARG(ptrdiff_t) : \
+ (uintmax_t)GETARG(unsigned long long))
+
+ /*
+@@ -640,21 +695,24 @@ __vfwprintf(FILE *fp, const wchar_t *fmt
val = GETARG (int); \
}
fmt = (wchar_t *)fmt0;
argtable = NULL;
-@@ -678,6 +736,9 @@
+@@ -678,6 +736,9 @@ __vfwprintf(FILE *fp, const wchar_t *fmt
}
if (ch == '\0')
goto done;
fmt++; /* skip over '%' */
flags = 0;
-@@ -686,6 +747,9 @@
+@@ -686,6 +747,9 @@ __vfwprintf(FILE *fp, const wchar_t *fmt
prec = -1;
sign = '\0';
ox[1] = '\0';
rflag: ch = *fmt++;
reswitch: switch (ch) {
-@@ -701,6 +765,11 @@
+@@ -701,6 +765,11 @@ reswitch: switch (ch) {
case '#':
flags |= ALT;
goto rflag;
case '*':
/*-
* ``A negative field width argument is taken as a
-@@ -721,8 +790,8 @@
+@@ -721,8 +790,8 @@ reswitch: switch (ch) {
goto rflag;
case '\'':
flags |= GROUPING;
goto rflag;
case '.':
if ((ch = *fmt++) == '*') {
-@@ -796,10 +865,14 @@
+@@ -796,10 +865,14 @@ reswitch: switch (ch) {
flags |= LONGINT;
/*FALLTHROUGH*/
case 'c':
size = 1;
sign = '\0';
break;
-@@ -808,6 +881,10 @@
+@@ -808,6 +881,10 @@ reswitch: switch (ch) {
/*FALLTHROUGH*/
case 'd':
case 'i':
if (flags & INTMAX_SIZE) {
ujval = SJARG();
if ((intmax_t)ujval < 0) {
-@@ -826,6 +903,12 @@
+@@ -826,6 +903,12 @@ reswitch: switch (ch) {
#ifndef NO_FLOATING_POINT
case 'a':
case 'A':
if (ch == 'a') {
ox[1] = 'x';
xdigs = xdigs_lower;
-@@ -837,6 +920,12 @@
+@@ -837,6 +920,12 @@ reswitch: switch (ch) {
}
if (prec >= 0)
prec++;
if (flags & LONGDBL) {
fparg.ldbl = GETARG(long double);
dtoaresult =
-@@ -848,6 +937,7 @@
+@@ -848,6 +937,7 @@ reswitch: switch (ch) {
__hdtoa(fparg.dbl, xdigs, prec,
&expt, &signflag, &dtoaend);
}
if (prec < 0)
prec = dtoaend - dtoaresult;
if (expt == INT_MAX)
-@@ -855,11 +945,17 @@
+@@ -855,11 +945,17 @@ reswitch: switch (ch) {
if (convbuf != NULL)
free(convbuf);
ndig = dtoaend - dtoaresult;
expchar = ch;
if (prec < 0) /* account for digit before decpt */
prec = DEFPREC + 1;
-@@ -868,10 +964,22 @@
+@@ -868,10 +964,22 @@ reswitch: switch (ch) {
goto fp_begin;
case 'f':
case 'F':
expchar = ch - ('g' - 'e');
if (prec == 0)
prec = 1;
-@@ -880,6 +988,14 @@
+@@ -880,6 +988,14 @@ fp_begin:
prec = DEFPREC;
if (convbuf != NULL)
free(convbuf);
if (flags & LONGDBL) {
fparg.ldbl = GETARG(long double);
dtoaresult =
-@@ -893,8 +1009,9 @@
+@@ -893,8 +1009,9 @@ fp_begin:
if (expt == 9999)
expt = INT_MAX;
}
freedtoa(dtoaresult);
fp_common:
if (signflag)
-@@ -989,6 +1106,10 @@
+@@ -989,6 +1106,10 @@ fp_common:
flags |= LONGINT;
/*FALLTHROUGH*/
case 'o':
if (flags & INTMAX_SIZE)
ujval = UJARG();
else
-@@ -1003,6 +1124,10 @@
+@@ -1003,6 +1124,10 @@ fp_common:
* defined manner.''
* -- ANSI X3J11
*/
ujval = (uintmax_t)(uintptr_t)GETARG(void *);
base = 16;
xdigs = xdigs_lower;
-@@ -1024,7 +1149,7 @@
+@@ -1024,7 +1149,7 @@ fp_common:
if ((mbp = GETARG(char *)) == NULL)
cp = L"(null)";
else {
if (convbuf == NULL) {
fp->_flags |= __SERR;
goto error;
-@@ -1055,6 +1180,10 @@
+@@ -1055,6 +1180,10 @@ fp_common:
flags |= LONGINT;
/*FALLTHROUGH*/
case 'u':
if (flags & INTMAX_SIZE)
ujval = UJARG();
else
-@@ -1067,6 +1196,10 @@
+@@ -1067,6 +1196,10 @@ fp_common:
case 'x':
xdigs = xdigs_lower;
hex:
if (flags & INTMAX_SIZE)
ujval = UJARG();
else
-@@ -1092,6 +1225,7 @@
+@@ -1092,6 +1225,7 @@ number: if ((dprec = prec) >= 0)
* ``The result of converting a zero value with an
* explicit precision of zero is no characters.''
* -- ANSI X3J11
*/
cp = buf + BUF;
if (flags & INTMAX_SIZE) {
-@@ -1101,7 +1235,7 @@
+@@ -1101,7 +1235,7 @@ number: if ((dprec = prec) >= 0)
flags & GROUPING, thousands_sep,
grouping);
} else {
cp = __ultoa(ulval, cp, base,
flags & ALT, xdigs,
flags & GROUPING, thousands_sep,
-@@ -1111,6 +1245,11 @@
+@@ -1109,8 +1243,13 @@ number: if ((dprec = prec) >= 0)
+ }
+ size = buf + BUF - cp;
if (size > BUF) /* should never happen */
- abort();
+- abort();
++ LIBC_ABORT("size %d > BUF %d", size, BUF);
break;
+#ifdef VECTORS
+ case 'v':
default: /* "%?" prints ?, unless ? is NUL */
if (ch == '\0')
goto done;
-@@ -1122,6 +1261,288 @@
+@@ -1122,6 +1261,288 @@ number: if ((dprec = prec) >= 0)
break;
}
/*
* All reasonable formats wind up here. At this point, `cp'
* points to a string which (if not flags&LADJUST) should be
-@@ -1177,7 +1598,7 @@
+@@ -1177,7 +1598,7 @@ number: if ((dprec = prec) >= 0)
if (expt <= 0) {
PRINT(zeroes, 1);
if (prec || flags & ALT)
PAD(-expt, zeroes);
/* already handled initial 0's */
prec += expt;
-@@ -1203,15 +1624,14 @@
+@@ -1203,15 +1624,14 @@ number: if ((dprec = prec) >= 0)
cp = convbuf + ndig;
}
if (prec || flags & ALT) {
PRINT(buf, 2);
PRINT(cp, ndig-1);
PAD(prec - ndig, zeroes);
-@@ -1401,6 +1821,11 @@
+@@ -1401,6 +1821,11 @@ reswitch: switch (ch) {
if (flags & LONGINT)
ADDTYPE(T_WINT);
else
ADDTYPE(T_INT);
break;
case 'D':
-@@ -1408,6 +1833,11 @@
+@@ -1408,6 +1833,11 @@ reswitch: switch (ch) {
/*FALLTHROUGH*/
case 'd':
case 'i':
ADDSARG();
break;
#ifndef NO_FLOATING_POINT
-@@ -1416,8 +1846,14 @@
+@@ -1416,8 +1846,14 @@ reswitch: switch (ch) {
case 'e':
case 'E':
case 'f':
if (flags & LONGDBL)
ADDTYPE(T_LONG_DOUBLE);
else
-@@ -1446,9 +1882,19 @@
+@@ -1446,9 +1882,19 @@ reswitch: switch (ch) {
flags |= LONGINT;
/*FALLTHROUGH*/
case 'o':
ADDTYPE(TP_VOID);
break;
case 'S':
-@@ -1466,6 +1912,11 @@
+@@ -1466,6 +1912,11 @@ reswitch: switch (ch) {
case 'u':
case 'X':
case 'x':
ADDUARG();
break;
default: /* "%?" prints ?, unless ? is NUL */
-@@ -1532,7 +1983,7 @@
+@@ -1532,7 +1983,7 @@ done:
(*argtable) [n].sizearg = va_arg (ap, size_t);
break;
case TP_SIZET:
break;
case T_INTMAXT:
(*argtable) [n].intmaxarg = va_arg (ap, intmax_t);
-@@ -1551,6 +2002,11 @@
+@@ -1551,6 +2002,11 @@ done:
(*argtable) [n].longdoublearg = va_arg (ap, long double);
break;
#endif
case TP_CHAR:
(*argtable) [n].pchararg = va_arg (ap, char *);
break;
+@@ -1585,12 +2041,12 @@ __grow_type_table (int nextarg, enum typ
+ newsize = nextarg + 1;
+ if (oldsize == STATIC_ARG_TBL_SIZE) {
+ if ((newtable = malloc(newsize * sizeof(enum typeid))) == NULL)
+- abort(); /* XXX handle better */
++ LIBC_ABORT("malloc: %s", strerror(errno)); /* XXX handle better */
+ bcopy(oldtable, newtable, oldsize * sizeof(enum typeid));
+ } else {
+ newtable = reallocf(oldtable, newsize * sizeof(enum typeid));
+ if (newtable == NULL)
+- abort(); /* XXX handle better */
++ LIBC_ABORT("reallocf: %s", strerror(errno)); /* XXX handle better */
+ }
+ for (n = oldsize; n < newsize; n++)
+ newtable[n] = T_UNUSED;
---- vfwscanf.c.orig 2004-11-25 11:38:36.000000000 -0800
-+++ vfwscanf.c 2005-05-20 00:24:42.000000000 -0700
-@@ -42,6 +42,8 @@
+--- vfwscanf.c.orig 2009-02-15 03:11:22.000000000 -0800
++++ vfwscanf.c 2009-02-16 00:10:06.000000000 -0800
+@@ -42,6 +42,8 @@ static char sccsid[] = "@(#)vfscanf.c 8.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/stdio/vfwscanf.c,v 1.12 2004/05/02 20:13:29 obrien Exp $");
#include "namespace.h"
#include <ctype.h>
#include <inttypes.h>
-@@ -98,7 +100,9 @@
+@@ -98,7 +100,9 @@ __FBSDID("$FreeBSD: src/lib/libc/stdio/v
#define CT_INT 3 /* %[dioupxX] conversion */
#define CT_FLOAT 4 /* %[efgEFG] conversion */
extern int __scanfdebug;
-@@ -116,7 +120,21 @@
+@@ -116,7 +120,21 @@ vfwscanf(FILE * __restrict fp, const wch
FLOCKFILE(fp);
ORIENT(fp, 1);
FUNLOCKFILE(fp);
return (ret);
}
-@@ -124,8 +142,9 @@
+@@ -124,8 +142,9 @@ vfwscanf(FILE * __restrict fp, const wch
/*
* Non-MT-safe version.
*/
{
wint_t c; /* character from format, or conversion */
size_t width; /* field width, or 0 */
-@@ -134,7 +153,6 @@
+@@ -134,7 +153,6 @@ __vfwscanf(FILE * __restrict fp, const w
int flags; /* flags as defined above */
wchar_t *p0; /* saves original value of p when necessary */
int nassigned; /* number of fields assigned */
int nread; /* number of characters consumed from fp */
int base; /* base argument to conversion function */
wchar_t buf[BUF]; /* buffer for numeric conversions */
-@@ -145,27 +163,30 @@
+@@ -145,31 +163,37 @@ __vfwscanf(FILE * __restrict fp, const w
char *mbp; /* multibyte string pointer for %c %s %[ */
size_t nconv; /* number of bytes in mb. conversion */
char mbbuf[MB_LEN_MAX]; /* temporary mb. character buffer */
+ __ungetwc(c, fp, loc);
continue;
}
- if (c != '%')
-@@ -180,15 +201,27 @@
+- if (c != '%')
++ if (c != '%') {
++ if ((wi = __fgetwc(fp, loc)) == WEOF)
++ goto input_failure;
+ goto literal;
++ }
+ width = 0;
+ flags = 0;
+ /*
+@@ -179,16 +203,34 @@ __vfwscanf(FILE * __restrict fp, const w
+ again: c = *fmt++;
switch (c) {
case '%':
++ /* Consume leading white space */
++ for(;;) {
++ if ((wi = __fgetwc(fp, loc)) == WEOF)
++ goto input_failure;
++ if (!iswspace_l(wi, loc))
++ break;
++ nread++;
++ }
literal:
- if ((wi = __fgetwc(fp)) == WEOF)
-+ if ((wi = __fgetwc(fp, loc)) == WEOF)
- goto input_failure;
+- goto input_failure;
if (wi != c) {
- __ungetwc(wi, fp);
- goto input_failure;
case '*':
flags |= SUPPRESS;
goto again;
-@@ -307,7 +340,6 @@
+@@ -307,7 +349,6 @@ literal:
break;
case 'n':
if (flags & SUPPRESS) /* ??? */
continue;
if (flags & SHORTSHORT)
-@@ -343,11 +375,11 @@
+@@ -343,11 +384,11 @@ literal:
* that suppress this.
*/
if ((flags & NOSKIP) == 0) {
}
/*
-@@ -364,7 +396,7 @@
+@@ -364,7 +405,7 @@ literal:
p = va_arg(ap, wchar_t *);
n = 0;
while (width-- != 0 &&
if (!(flags & SUPPRESS))
*p++ = (wchar_t)wi;
n++;
-@@ -380,19 +412,19 @@
+@@ -380,19 +421,19 @@ literal:
n = 0;
mbs = initial;
while (width != 0 &&
break;
}
if (!(flags & SUPPRESS))
-@@ -410,7 +442,6 @@
+@@ -410,7 +451,6 @@ literal:
if (!(flags & SUPPRESS))
nassigned++;
}
break;
case CT_CCL:
-@@ -420,20 +451,20 @@
+@@ -420,20 +460,20 @@ literal:
/* take only those things in the class */
if ((flags & SUPPRESS) && (flags & LONG)) {
n = 0;
n = p - p0;
if (n == 0)
goto match_failure;
-@@ -444,16 +475,16 @@
+@@ -444,16 +484,16 @@ literal:
mbp = va_arg(ap, char *);
n = 0;
mbs = initial;
if (nconv == (size_t)-1)
goto input_failure;
if (nconv > width)
-@@ -468,14 +499,15 @@
+@@ -468,14 +508,15 @@ literal:
n++;
}
if (wi != WEOF)
break;
case CT_STRING:
-@@ -483,39 +515,39 @@
+@@ -483,39 +524,39 @@ literal:
if (width == 0)
width = (size_t)~0;
if ((flags & SUPPRESS) && (flags & LONG)) {
if (nconv == (size_t)-1)
goto input_failure;
if (nconv > width)
-@@ -530,13 +562,12 @@
+@@ -530,13 +571,12 @@ literal:
nread++;
}
if (wi != WEOF)
continue;
case CT_INT:
-@@ -546,7 +577,7 @@
+@@ -546,7 +586,7 @@ literal:
width = sizeof(buf) / sizeof(*buf) - 1;
flags |= SIGNOK | NDIGITS | NZDIGITS;
for (p = buf; width; width--) {
/*
* Switch on the character; `goto ok'
* if we accept it as a part of number.
-@@ -630,7 +661,7 @@
+@@ -630,7 +670,7 @@ literal:
* for a number. Stop accumulating digits.
*/
if (c != WEOF)
break;
ok:
/*
-@@ -646,22 +677,22 @@
+@@ -646,22 +686,22 @@ literal:
*/
if (flags & NDIGITS) {
if (p > buf)
if (flags & POINTER)
*va_arg(ap, void **) =
(void *)(uintptr_t)res;
-@@ -684,47 +715,45 @@
+@@ -684,47 +724,47 @@ literal:
nassigned++;
}
nread += p - buf;
*va_arg(ap, float *) = res;
}
- if (__scanfdebug && p - buf != width)
+- abort();
+ if (__scanfdebug && p - pbuf != width)
- abort();
++ LIBC_ABORT("p - pbuf %ld != width %ld", (long)(p - pbuf), width);
nassigned++;
}
nread += width;
}
#ifndef NO_FLOATING_POINT
++extern char *__parsefloat_buf(size_t s); /* see vfscanf-fbsd.c */
++
static int
-parsefloat(FILE *fp, wchar_t *buf, wchar_t *end)
+parsefloat(FILE *fp, wchar_t **buf, size_t width, locale_t loc)
{
wchar_t *commit, *p;
int infnanpos = 0;
-@@ -733,9 +762,33 @@
+@@ -733,9 +773,19 @@ parsefloat(FILE *fp, wchar_t *buf, wchar
S_DIGITS, S_FRAC, S_EXP, S_EXPDIGITS
} state = S_START;
wchar_t c;
+ wchar_t decpt;
_Bool gotmantdig = 0, ishex = 0;
-
-+ static wchar_t *b = NULL;
-+ static size_t bsiz = 0;
++ wchar_t *b;
+ wchar_t *e;
+ size_t s;
+
-+ if (bsiz == 0) {
-+ b = (wchar_t *)malloc(BUF * sizeof(wchar_t));
-+ if (b == NULL) {
-+ *buf = NULL;
-+ return 0;
-+ }
-+ bsiz = BUF;
-+ }
-+ s = (width == 0 ? bsiz : (width + 1));
-+ if (s > bsiz) {
-+ b = (wchar_t *)reallocf(b, s * sizeof(wchar_t));
-+ if (b == NULL) {
-+ bsiz = 0;
-+ *buf = NULL;
-+ return 0;
-+ }
-+ bsiz = s;
++ s = (width == 0 ? BUF : (width + 1));
++ if ((b = (wchar_t *)__parsefloat_buf(s * sizeof(wchar_t))) == NULL) {
++ *buf = NULL;
++ return 0;
+ }
+ e = b + (s - 1);
/*
* We set commit = p whenever the string we have read so far
* constitutes a valid representation of a floating point
-@@ -745,10 +798,12 @@
+@@ -745,10 +795,12 @@ parsefloat(FILE *fp, wchar_t *buf, wchar
* always necessary to read at least one character that doesn't
* match; thus, we can't short-circuit "infinity" or "nan(...)".
*/
break;
reswitch:
switch (state) {
-@@ -808,7 +863,7 @@
+@@ -808,7 +860,7 @@ reswitch:
if (c == ')') {
commit = p;
infnanpos = -2;
goto parsedone;
break;
}
-@@ -824,7 +879,7 @@
+@@ -824,7 +876,7 @@ reswitch:
goto reswitch;
}
case S_DIGITS:
gotmantdig = 1;
else {
state = S_FRAC;
-@@ -841,7 +896,7 @@
+@@ -841,7 +893,7 @@ reswitch:
goto parsedone;
else
state = S_EXP;
commit = p;
gotmantdig = 1;
} else
-@@ -854,7 +909,7 @@
+@@ -854,13 +906,26 @@ reswitch:
else
goto reswitch;
case S_EXPDIGITS:
commit = p;
else
goto parsedone;
-@@ -862,16 +917,32 @@
+ break;
default:
- abort();
- }
+- abort();
++ LIBC_ABORT("unknown state %d", state);
++ }
+ if (p >= e) {
+ ssize_t diff = (p - b);
+ ssize_t com = (commit - b);
+ s += BUF;
-+ b = (wchar_t *)reallocf(b, s * sizeof(wchar_t));
++ b = (wchar_t *)__parsefloat_buf(s * sizeof(wchar_t));
+ if (b == NULL) {
-+ bsiz = 0;
+ *buf = NULL;
+ return 0;
+ }
-+ bsiz = s;
+ e = b + (s - 1);
+ p = b + diff;
+ commit = b + com;
-+ }
+ }
*p++ = c;
c = WEOF;
- }
+@@ -868,10 +933,11 @@ reswitch:
parsedone:
if (c != WEOF)
CFLAGS-${_src} += -fshort-enums -DVECTORS
.endfor
-LEGACYSRCS+= fputs.c freopen.c fwrite.c tempnam.c
+LEGACYSRCS+= fdopen.c fopen.c fputs.c freopen.c fwrite.c tempnam.c
+DARWINEXTSNSRCS+= fdopen.c fopen.c
# set the LIBC_ALIAS_* macros so we can decorate the symbol independent
# of other macro settings
+CFLAGS-fdopen-fbsd.c += -DLIBC_ALIAS_FDOPEN
+CFLAGS-fopen-fbsd.c += -DLIBC_ALIAS_FOPEN
CFLAGS-fputs-fbsd.c += -DLIBC_ALIAS_FPUTS
CFLAGS-freopen-fbsd.c += -DLIBC_ALIAS_FREOPEN
CFLAGS-fwrite-fbsd.c += -DLIBC_ALIAS_FWRITE
CFLAGS-tempnam-fbsd.c += -DLIBC_ALIAS_TEMPNAM
+CFLAGS-tmpfile-fbsd.c += -D_DARWIN_UNLIMITED_STREAMS
.if ${LIB} == "c"
MAN3+= getwc_l.3 putwc_l.3 printf_l.3 scanf_l.3 wprintf_l.3 wscanf_l.3
{
int r;
+ if (!__sdidinit)
+ __sinit();
+
if (fp == NULL) {
errno = EFAULT;
return (EOF);
FREELB(fp);
fp->_file = -1;
fp->_r = fp->_w = 0; /* Mess up if reaccessed. */
- fp->_flags = 0; /* Release this FILE for reuse. */
+ __sfprelease(fp); /* Release this FILE for reuse. */
FUNLOCKFILE(fp);
return (r);
}
+++ /dev/null
-./fdopen.c
\ No newline at end of file
--- /dev/null
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef VARIANT_DARWINEXTSN
+#define _DARWIN_UNLIMITED_STREAMS
+#define COUNT 0
+#elif defined(VARIANT_LEGACY)
+#define COUNT 0
+#else
+#define COUNT 1
+#endif
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)fdopen.c 8.1 (Berkeley) 6/4/93";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/stdio/fdopen.c,v 1.7 2002/03/22 21:53:04 obrien Exp $");
+
+#include "namespace.h"
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include "un-namespace.h"
+#include "local.h"
+
+FILE *
+fdopen(fd, mode)
+ int fd;
+ const char *mode;
+{
+ FILE *fp;
+ static int nofile;
+ int flags, oflags, fdflags, tmp;
+
+ if (nofile == 0)
+ nofile = getdtablesize();
+
+ if ((flags = __sflags(mode, &oflags)) == 0)
+ return (NULL);
+
+ /* Make sure the mode the user wants is a subset of the actual mode. */
+ if ((fdflags = _fcntl(fd, F_GETFL, 0)) < 0)
+ return (NULL);
+ tmp = fdflags & O_ACCMODE;
+ if (tmp != O_RDWR && (tmp != (oflags & O_ACCMODE))) {
+ errno = EINVAL;
+ return (NULL);
+ }
+
+ if ((fp = __sfp(COUNT)) == NULL)
+ return (NULL);
+ fp->_flags = flags;
+ /*
+ * If opened for appending, but underlying descriptor does not have
+ * O_APPEND bit set, assert __SAPP so that __swrite() caller
+ * will _sseek() to the end before write.
+ */
+ if ((oflags & O_APPEND) && !(fdflags & O_APPEND))
+ fp->_flags |= __SAPP;
+ fp->_file = fd;
+ fp->_cookie = fp;
+ fp->_read = __sread;
+ fp->_write = __swrite;
+ fp->_seek = __sseek;
+ fp->_close = __sclose;
+ return (fp);
+}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <libkern/OSAtomic.h>
+#include <errno.h>
#include <pthread.h>
#include <spinlock.h>
/* p r w flags file _bf z cookie close read seek write */
/* _ub _extra */
#define __sFXInit {0, PTHREAD_MUTEX_INITIALIZER}
+ /* set counted */
+#define __sFXInit3 {0, PTHREAD_MUTEX_INITIALIZER, 0, 0, 0, 1}
/* the usual - (stdin + stdout + stderr) */
+
+static int __scounted; /* streams counted against STREAM_MAX */
+static int __stream_max;
+
static FILE usual[FOPEN_MAX - 3];
static struct __sFILEX usual_extra[FOPEN_MAX - 3];
static struct glue uglue = { NULL, FOPEN_MAX - 3, usual };
-static struct __sFILEX __sFX[3] = {__sFXInit, __sFXInit, __sFXInit};
+static struct __sFILEX __sFX[3] = {__sFXInit3, __sFXInit3, __sFXInit3};
/*
* We can't make this 'static' until 6.0-current due to binary
* Find a free FILE for fopen et al.
*/
FILE *
-__sfp()
+__sfp(int count)
{
FILE *fp;
int n;
if (!__sdidinit)
__sinit();
+
+ if (count) {
+ if (__scounted >= __stream_max) {
+ THREAD_UNLOCK();
+ errno = EMFILE;
+ return NULL;
+ }
+ OSAtomicIncrement32(&__scounted);
+ }
/*
* The list must be locked because a FILE may be updated.
*/
/* fp->_lock = NULL; */ /* once set always set (reused) */
fp->_extra->fl_mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
fp->_extra->orientation = 0;
+ fp->_extra->counted = count ? 1 : 0;
memset(&fp->_extra->mbstate, 0, sizeof(mbstate_t));
return (fp);
}
+/*
+ * Mark as free and update count as needed
+ */
+__private_extern__ void
+__sfprelease(FILE *fp)
+{
+ if (fp->_extra->counted) {
+ OSAtomicDecrement32(&__scounted);
+ fp->_extra->counted = 0;
+ }
+ fp->_flags = 0;
+}
+
/*
* XXX. Force immediate allocation of internal memory. Not used by stdio,
* but documented historically for certain applications. Bad applications.
/* Make sure we clean up on exit. */
__cleanup = _cleanup; /* conservative */
__sdidinit = 1;
+ __stream_max = sysconf(_SC_STREAM_MAX);
+ __scounted = 3; /* std{in,out,err} already exists */
}
THREAD_UNLOCK();
}
+++ /dev/null
-./fopen.c
\ No newline at end of file
--- /dev/null
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef VARIANT_DARWINEXTSN
+#define _DARWIN_UNLIMITED_STREAMS
+#define COUNT 0
+#elif defined(VARIANT_LEGACY)
+#define COUNT 0
+#else
+#define COUNT 1
+#endif
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)fopen.c 8.1 (Berkeley) 6/4/93";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/stdio/fopen.c,v 1.10 2002/10/12 16:13:37 mike Exp $");
+
+#include "namespace.h"
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <errno.h>
+#include "un-namespace.h"
+
+#include "local.h"
+
+FILE *
+fopen(file, mode)
+ const char * __restrict file;
+ const char * __restrict mode;
+{
+ FILE *fp;
+ int f;
+ int flags, oflags;
+
+ if ((flags = __sflags(mode, &oflags)) == 0)
+ return (NULL);
+ if ((fp = __sfp(COUNT)) == NULL)
+ return (NULL);
+ if ((f = _open(file, oflags, DEFFILEMODE)) < 0) {
+ __sfprelease(fp); /* release */
+ return (NULL);
+ }
+ fp->_file = f;
+ fp->_flags = flags;
+ fp->_cookie = fp;
+ fp->_read = __sread;
+ fp->_write = __swrite;
+ fp->_seek = __sseek;
+ fp->_close = __sclose;
+ /*
+ * When opening in append mode, even though we use O_APPEND,
+ * we need to seek to the end so that ftell() gets the right
+ * answer. If the user then alters the seek pointer, or
+ * the file extends, this will fail, but there is not much
+ * we can do about this. (We could set __SAPP and check in
+ * fseek and ftell.)
+ */
+ if (oflags & O_APPEND)
+ (void)_sseek(fp, (fpos_t)0, SEEK_END);
+ return (fp);
+}
+++ /dev/null
-./fread.c
\ No newline at end of file
--- /dev/null
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)fread.c 8.2 (Berkeley) 12/11/93";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/stdio/fread.c,v 1.12 2002/10/12 16:13:37 mike Exp $");
+
+#include "namespace.h"
+#include <stdio.h>
+#include <string.h>
+#include "un-namespace.h"
+#include "local.h"
+#include "libc_private.h"
+
+size_t
+fread(buf, size, count, fp)
+ void * __restrict buf;
+ size_t size, count;
+ FILE * __restrict fp;
+{
+ size_t resid;
+ char *p;
+ int r, ret;
+ size_t total;
+
+ /*
+ * The ANSI standard requires a return value of 0 for a count
+ * or a size of 0. Peculiarily, it imposes no such requirements
+ * on fwrite; it only requires fread to be broken.
+ */
+ if ((resid = count * size) == 0)
+ return (0);
+ FLOCKFILE(fp);
+ ORIENT(fp, -1);
+ if (fp->_r < 0)
+ fp->_r = 0;
+ total = resid;
+ p = buf;
+ /* first deal with anything left in buffer, plus any ungetc buffers */
+ while (resid > (r = fp->_r)) {
+ (void)memcpy((void *)p, (void *)fp->_p, (size_t)r);
+ fp->_p += r;
+ /* fp->_r = 0 ... done in __srefill */
+ p += r;
+ resid -= r;
+ if ((ret = __srefill0(fp)) > 0)
+ break;
+ else if (ret) {
+ /* no more input: return partial result */
+ FUNLOCKFILE(fp);
+ return ((total - resid) / size);
+ }
+ }
+ /*
+ * 5980080: don't use optimization if __SMBF not set (meaning setvbuf
+ * was called, and the buffer belongs to the user).
+ * 6180417: but for unbuffered (__SMBF is not set), so specifically
+ * test for it.
+ */
+ if ((fp->_flags & (__SMBF | __SNBF)) && resid > fp->_bf._size) {
+ struct __sbuf save;
+ size_t n;
+
+ save = fp->_bf;
+ fp->_bf._base = p;
+ fp->_bf._size = resid;
+ while (fp->_bf._size > 0) {
+ if ((ret = __srefill1(fp)) != 0) {
+ /* no more input: return partial result */
+ resid = fp->_bf._size;
+ fp->_bf = save;
+ fp->_p = fp->_bf._base;
+ /* fp->_r = 0; already set in __srefill1 */
+ FUNLOCKFILE(fp);
+ return ((total - resid) / size);
+ }
+ fp->_bf._base += fp->_r;
+ fp->_bf._size -= fp->_r;
+ }
+ fp->_bf = save;
+ n = fp->_bf._size * ((resid - 1) / fp->_bf._size);
+ r = resid - n;
+ (void)memcpy((void *)fp->_bf._base, (void *)(p + n), (size_t)r);
+ fp->_p = fp->_bf._base + r;
+ fp->_r = 0;
+ } else {
+ while (resid > (r = fp->_r)) {
+ (void)memcpy((void *)p, (void *)fp->_p, (size_t)r);
+ fp->_p += r;
+ /* fp->_r = 0 ... done in __srefill */
+ p += r;
+ resid -= r;
+ if (__srefill1(fp)) {
+ /* no more input: return partial result */
+ FUNLOCKFILE(fp);
+ return ((total - resid) / size);
+ }
+ }
+ (void)memcpy((void *)p, (void *)fp->_p, resid);
+ fp->_r -= resid;
+ fp->_p += resid;
+ }
+ FUNLOCKFILE(fp);
+ return (count);
+}
memset(&fp->_extra->mbstate, 0, sizeof(mbstate_t));
if (f < 0) { /* did not get it after all */
- fp->_flags = 0; /* set it free */
+ __sfprelease(fp); /* set it free */
errno = sverrno; /* restore in case _close clobbered */
FUNLOCKFILE(fp);
return (NULL);
+++ /dev/null
-./funopen.c
\ No newline at end of file
--- /dev/null
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)funopen.c 8.1 (Berkeley) 6/4/93";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/stdio/funopen.c,v 1.5 2002/05/28 16:59:39 alfred Exp $");
+
+#include <stdio.h>
+#include <errno.h>
+
+#include "local.h"
+
+FILE *
+funopen(cookie, readfn, writefn, seekfn, closefn)
+ const void *cookie;
+ int (*readfn)(), (*writefn)();
+ fpos_t (*seekfn)(void *cookie, fpos_t off, int whence);
+ int (*closefn)();
+{
+ FILE *fp;
+ int flags;
+
+ if (readfn == NULL) {
+ if (writefn == NULL) { /* illegal */
+ errno = EINVAL;
+ return (NULL);
+ } else
+ flags = __SWR; /* write only */
+ } else {
+ if (writefn == NULL)
+ flags = __SRD; /* read only */
+ else
+ flags = __SRW; /* read-write */
+ }
+ /* funopen in not covered in SUSv3, so never count the streams */
+ if ((fp = __sfp(0)) == NULL)
+ return (NULL);
+ fp->_flags = flags;
+ fp->_file = -1;
+ fp->_cookie = (void *)cookie;
+ fp->_read = readfn;
+ fp->_write = writefn;
+ fp->_seek = seekfn;
+ fp->_close = closefn;
+ return (fp);
+}
extern wint_t __fgetwc(FILE *, locale_t);
extern wint_t __fputwc(wchar_t, FILE *, locale_t);
extern int __sflush(FILE *);
-extern FILE *__sfp(void);
+extern FILE *__sfp(int); /* arg is whether to count against STREAM_MAX or not */
+extern void __sfprelease(FILE *); /* mark free and update count as needed */
extern int __slbexpand(FILE *, size_t);
extern int __srefill(FILE *);
+extern int __srefill0(FILE *);
+extern int __srefill1(FILE *);
extern int __sread(void *, char *, int);
extern int __swrite(void *, char const *, int);
extern fpos_t __sseek(void *, fpos_t, int);
pthread_mutex_t fl_mutex; /* used for MT-safety */
pthread_t fl_owner; /* current owner */
int fl_count; /* recursive lock count */
- int orientation; /* orientation for fwide() */
+ int orientation:2; /* orientation for fwide() */
+ int counted:1; /* stream counted against STREAM_MAX */
mbstate_t mbstate; /* multibyte conversion state */
};
#include "local.h"
#include "un-namespace.h"
+#define MAXBUFSIZE (1 << 16)
#define TTYBUFSIZE 4096
/*
* __sseek is mainly paranoia.) It is safe to set _blksize
* unconditionally; it will only be used if __SOPT is also set.
*/
- *bufsize = st.st_blksize;
- fp->_blksize = st.st_blksize;
+ fp->_blksize = *bufsize = st.st_blksize > MAXBUFSIZE ? MAXBUFSIZE : st.st_blksize;
return ((st.st_mode & S_IFMT) == S_IFREG && fp->_seek == __sseek ?
__SOPT : __SNPT);
}
This will ensure that the program does not continue blindly
in the event that an attacker has already created the file
with the intention of manipulating or reading its contents.
-.Pp
-The implementation of these functions calls
-.Xr arc4random 3 ,
-which is not reentrant.
-You must provide your own locking around this and other consumers of the
-.Xr arc4random 3
-API.
.Sh LEGACY SYNOPSIS
.Fd #include <unistd.h>
.Pp
effectively assume an infinite
.Fa n .
.Pp
+For those routines that write to a user-provided character string,
+that string and the format strings should not overlap, as the
+behavior is undefined.
+.Pp
The format string is composed of zero or more directives:
ordinary
.\" multibyte
+++ /dev/null
-./refill.c
\ No newline at end of file
--- /dev/null
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)refill.c 8.1 (Berkeley) 6/4/93";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/stdio/refill.c,v 1.18 2002/08/13 09:30:41 tjr Exp $");
+
+#include "namespace.h"
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include "un-namespace.h"
+
+#include "libc_private.h"
+#include "local.h"
+
+static int lflush(FILE *);
+
+static int
+lflush(FILE *fp)
+{
+ int ret = 0;
+
+ if ((fp->_flags & (__SLBF|__SWR)) == (__SLBF|__SWR)) {
+ FLOCKFILE(fp);
+ ret = __sflush(fp);
+ FUNLOCKFILE(fp);
+ }
+ return (ret);
+}
+
+/*
+ * Refill a stdio buffer.
+ * Return EOF on eof or error, 0 otherwise.
+ */
+__private_extern__ int
+__srefill0(FILE *fp)
+{
+
+ /* make sure stdio is set up */
+ if (!__sdidinit)
+ __sinit();
+
+ ORIENT(fp, -1);
+
+ fp->_r = 0; /* largely a convenience for callers */
+
+ /* SysV does not make this test; take it out for compatibility */
+ if (fp->_flags & __SEOF)
+ return (EOF);
+
+ /* if not already reading, have to be reading and writing */
+ if ((fp->_flags & __SRD) == 0) {
+ if ((fp->_flags & __SRW) == 0) {
+ errno = EBADF;
+ fp->_flags |= __SERR;
+ return (EOF);
+ }
+ /* switch to reading */
+ if (fp->_flags & __SWR) {
+ if (__sflush(fp))
+ return (EOF);
+ fp->_flags &= ~__SWR;
+ fp->_w = 0;
+ fp->_lbfsize = 0;
+ }
+ fp->_flags |= __SRD;
+ } else {
+ /*
+ * We were reading. If there is an ungetc buffer,
+ * we must have been reading from that. Drop it,
+ * restoring the previous buffer (if any). If there
+ * is anything in that buffer, return.
+ */
+ if (HASUB(fp)) {
+ FREEUB(fp);
+ if ((fp->_r = fp->_ur) != 0) {
+ fp->_p = fp->_extra->_up;
+ return (0);
+ }
+ }
+ }
+
+ if (fp->_bf._base == NULL)
+ __smakebuf(fp);
+
+ /*
+ * Before reading from a line buffered or unbuffered file,
+ * flush all line buffered output files, per the ANSI C
+ * standard.
+ */
+ if (fp->_flags & (__SLBF|__SNBF)) {
+ /* Ignore this file in _fwalk to avoid potential deadlock. */
+ fp->_flags |= __SIGN;
+ (void) _fwalk(lflush);
+ fp->_flags &= ~__SIGN;
+
+ /* Now flush this file without locking it. */
+ if ((fp->_flags & (__SLBF|__SWR)) == (__SLBF|__SWR))
+ __sflush(fp);
+ }
+ return (1);
+}
+
+__private_extern__ int
+__srefill1(FILE *fp)
+{
+
+ fp->_p = fp->_bf._base;
+ fp->_r = _sread(fp, (char *)fp->_p, fp->_bf._size);
+ fp->_flags &= ~__SMOD; /* buffer contents are again pristine */
+ if (fp->_r <= 0) {
+ if (fp->_r == 0)
+ fp->_flags |= __SEOF;
+ else {
+ fp->_r = 0;
+ fp->_flags |= __SERR;
+ }
+ return (EOF);
+ }
+ return (0);
+}
+
+int
+__srefill(FILE *fp)
+{
+ int ret;
+
+ if ((ret = __srefill0(fp)) <= 0)
+ return ret;
+ return __srefill1(fp);
+}
in
.Xr printf 3 ) ;
the next pointer must be a pointer to
-.Vt void .
+.Vt "void *"
+(or other pointer type).
.It Cm n
Nothing is expected;
instead, the number of characters consumed thus far from the input
__FBSDID("$FreeBSD: src/lib/libc/stdio/tempnam.c,v 1.10 2002/03/22 21:53:04 obrien Exp $");
#include <sys/param.h>
-#include <sys/stat.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
int sverrno;
char *f, *name;
-#if __DARWIN_UNIX03
- struct stat sb;
-#endif /* __DARWIN_UNIX03 */
if (!(name = malloc(MAXPATHLEN))) {
return(NULL);
}
#endif /* !__DARWIN_UNIX03 */
if ((f = (char *)dir)) {
#if __DARWIN_UNIX03
- if (!access(dir, W_OK)) {
+ if (access(dir, W_OK) == 0) {
#endif /* __DARWIN_UNIX03 */
(void)snprintf(name, MAXPATHLEN, "%s%s%sXXXXXX", f,
*(f + strlen(f) - 1) == '/'? "": "/", pfx);
return(f);
}
#if __DARWIN_UNIX03
- }
+ }
#endif /* __DARWIN_UNIX03 */
}
f = P_tmpdir;
#if __DARWIN_UNIX03
- if (stat(f, &sb) == 0) { /* directory accessible? */
+ if (access(f, W_OK) == 0) { /* directory accessible? */
#endif /* __DARWIN_UNIX03 */
(void)snprintf(name, MAXPATHLEN, "%s%sXXXXXX", f, pfx);
if ((f = _mktemp(name))) {
#if __DARWIN_UNIX03
}
+ if (issetugid() == 0 && (f = getenv("TMPDIR")) && access(f, W_OK) == 0) {
+ (void)snprintf(name, MAXPATHLEN, "%s%s%sXXXXXX", f,
+ *(f + strlen(f) - 1) == '/'? "": "/", pfx);
+ if ((f = _mktemp(name))) {
+ return(f);
+ }
+ }
#endif /* __DARWIN_UNIX03 */
f = _PATH_TMP;
#if __DARWIN_UNIX03
- if (stat(f, &sb) < 0) {
+ if (access(f, W_OK) < 0) {
f = "./"; /* directory inaccessible */
+ if (access(f, W_OK) < 0) {
+ return(NULL);
+ }
}
#endif /* __DARWIN_UNIX03 */
(void)snprintf(name, MAXPATHLEN, "%s%sXXXXXX", f, pfx);
.\" @(#)tmpnam.3 8.2 (Berkeley) 11/17/93
.\" $FreeBSD: src/lib/libc/stdio/tmpnam.3,v 1.16 2004/06/21 19:38:25 mpp Exp $
.\"
-.Dd November 17, 1993
+.Dd November 12, 2008
.Dt TMPFILE 3
.Os
.Sh NAME
but provides the ability to specify the directory which will
contain the temporary file and the file name prefix.
.Pp
-The environment variable
-.Ev TMPDIR
-(if set), the argument
+The argument
.Fa dir
(if
.Pf non- Dv NULL ) ,
the directory
.Dv P_tmpdir ,
-and the directory
+the environment variable
+.Ev TMPDIR
+(if set),
+the directory
.Pa /tmp
+and finally, the current directory,
are tried, in the listed order, as directories in which to store the
temporary file.
.Pp
interface should not be used in software expected to be used on other systems
if there is any possibility that the user does not wish the temporary file to
be publicly readable and writable.
+.Sh LEGACY DESCRIPTION
+In legacy mode, the order directories are tried by the
+.Fn tempnam
+function is different; the environment variable
+.Ev TMPDIR
+(if defined) is used first.
.Sh SEE ALSO
.Xr mkstemp 3 ,
.Xr mktemp 3
break;
default: /* oops */
- abort();
+ LIBC_ABORT("base = %d", base);
}
return (cp);
}
break;
default:
- abort();
+ LIBC_ABORT("base = %d", base);
}
return (cp);
}
#define UJARG() \
(flags&INTMAXT ? GETARG(uintmax_t) : \
flags&SIZET ? (uintmax_t)GETARG(size_t) : \
- flags&PTRDIFFT ? (uintmax_t)GETARG(ptrdiff_t) : \
+ flags&PTRDIFFT ? (uintmax_t)(unsigned)GETARG(ptrdiff_t) : \
(uintmax_t)GETARG(unsigned long long))
/*
}
size = buf + BUF - cp;
if (size > BUF) /* should never happen */
- abort();
+ LIBC_ABORT("size %d > BUF %d", size, BUF);
break;
#ifdef VECTORS
case 'v':
newsize = nextarg + 1;
if (oldsize == STATIC_ARG_TBL_SIZE) {
if ((newtable = malloc(newsize * sizeof(enum typeid))) == NULL)
- abort(); /* XXX handle better */
+ LIBC_ABORT("malloc: %s", strerror(errno)); /* XXX handle better */
bcopy(oldtable, newtable, oldsize * sizeof(enum typeid));
} else {
newtable = reallocf(oldtable, newsize * sizeof(enum typeid));
if (newtable == NULL)
- abort(); /* XXX handle better */
+ LIBC_ABORT("reallocf: %s", strerror(errno)); /* XXX handle better */
}
for (n = oldsize; n < newsize; n++)
newtable[n] = T_UNUSED;
#include <string.h>
#include <wchar.h>
#include <wctype.h>
+#include <pthread.h>
#include "un-namespace.h"
#include "collate.h"
nread++, fp->_r--, fp->_p++;
continue;
}
- if (c != '%')
+ if (c != '%') {
+ if (fp->_r <= 0 && __srefill(fp))
+ goto input_failure;
goto literal;
+ }
width = 0;
flags = 0;
/*
again: c = *fmt++;
switch (c) {
case '%':
+ /* Consume leading white space */
+ for(;;) {
+ if (fp->_r <= 0 && __srefill(fp))
+ goto input_failure;
+ if (!isspace_l(*fp->_p, loc))
+ break;
+ nread++;
+ fp->_r--;
+ fp->_p++;
+ }
literal:
- if (fp->_r <= 0 && __srefill(fp))
- goto input_failure;
if (*fp->_p != c)
goto match_failure;
fp->_r--, fp->_p++;
*va_arg(ap, float *) = res;
}
if (__scanfdebug && p - pbuf != width)
- abort();
+ LIBC_ABORT("p - pbuf %ld != width %ld", (long)(p - pbuf), width);
nassigned++;
}
nread += width;
}
#ifndef NO_FLOATING_POINT
+/*
+ * Maintain a per-thread parsefloat buffer, shared by __svfscanf_l and
+ * __vfwscanf.
+ */
+#ifdef BUILDING_VARIANT
+extern char *__parsefloat_buf(size_t s);
+#else /* !BUILDING_VARIANT */
+__private_extern__ char *
+__parsefloat_buf(size_t s)
+{
+ char *b;
+ static pthread_key_t parsefloat_tsd_key = (pthread_key_t)-1;
+ static pthread_mutex_t parsefloat_tsd_lock = PTHREAD_MUTEX_INITIALIZER;
+ static size_t bsiz = 0;
+
+ if (parsefloat_tsd_key == (pthread_key_t)-1) {
+ pthread_mutex_lock(&parsefloat_tsd_lock);
+ if (parsefloat_tsd_key == (pthread_key_t)-1) {
+ parsefloat_tsd_key = __LIBC_PTHREAD_KEY_PARSEFLOAT;
+ pthread_key_init_np(parsefloat_tsd_key, free);
+ }
+ pthread_mutex_unlock(&parsefloat_tsd_lock);
+ }
+ if ((b = (char *)pthread_getspecific(parsefloat_tsd_key)) == NULL) {
+ bsiz = s > BUF ? s : BUF;
+ b = (char *)malloc(bsiz);
+ if (b == NULL) {
+ bsiz = 0;
+ return NULL;
+ }
+ pthread_setspecific(parsefloat_tsd_key, b);
+ return b;
+ }
+ if (s > bsiz) {
+ b = (char *)reallocf(b, s);
+ pthread_setspecific(parsefloat_tsd_key, b);
+ if (b == NULL) {
+ bsiz = 0;
+ return NULL;
+ }
+ bsiz = s;
+ }
+ return b;
+}
+#endif /* BUILDING_VARIANT */
+
static int
parsefloat(FILE *fp, char **buf, size_t width, locale_t loc)
{
unsigned char *decpt = (unsigned char *)localeconv_l(loc)->decimal_point;
char *decpt_start;
_Bool gotmantdig = 0, ishex = 0;
- static char *b = NULL;
- static size_t bsiz = 0;
+ char *b;
char *e;
size_t s;
- if (bsiz = 0) {
- b = (char *)malloc(BUF);
- if (b == NULL) {
- *buf = NULL;
- return 0;
- }
- bsiz = BUF;
- }
s = (width == 0 ? BUF : (width + 1));
- if (s > bsiz) {
- b = (char *)reallocf(b, s);
- if (b == NULL) {
- bsiz = 0;
- *buf = NULL;
- return 0;
- }
- bsiz = s;
+ if ((b = __parsefloat_buf(s)) == NULL) {
+ *buf = NULL;
+ return 0;
}
e = b + (s - 1);
/*
goto parsedone;
break;
default:
- abort();
+ LIBC_ABORT("unknown state %d", state);
}
if (p >= e) {
ssize_t diff = (p - b);
ssize_t com = (commit - b);
s += BUF;
- b = (char *)reallocf(b, s);
+ b = __parsefloat_buf(s);
if (b == NULL) {
- bsiz = 0;
*buf = NULL;
return 0;
}
- bsiz = s;
e = b + (s - 1);
p = b + diff;
commit = b + com;
break;
default: /* oops */
- abort();
+ LIBC_ABORT("base = %d", base);
}
return (cp);
}
break;
default:
- abort();
+ LIBC_ABORT("base = %d", base);
}
return (cp);
}
#define UJARG() \
(flags&INTMAXT ? GETARG(uintmax_t) : \
flags&SIZET ? (uintmax_t)GETARG(size_t) : \
- flags&PTRDIFFT ? (uintmax_t)GETARG(ptrdiff_t) : \
+ flags&PTRDIFFT ? (uintmax_t)(unsigned)GETARG(ptrdiff_t) : \
(uintmax_t)GETARG(unsigned long long))
/*
}
size = buf + BUF - cp;
if (size > BUF) /* should never happen */
- abort();
+ LIBC_ABORT("size %d > BUF %d", size, BUF);
break;
#ifdef VECTORS
case 'v':
newsize = nextarg + 1;
if (oldsize == STATIC_ARG_TBL_SIZE) {
if ((newtable = malloc(newsize * sizeof(enum typeid))) == NULL)
- abort(); /* XXX handle better */
+ LIBC_ABORT("malloc: %s", strerror(errno)); /* XXX handle better */
bcopy(oldtable, newtable, oldsize * sizeof(enum typeid));
} else {
newtable = reallocf(oldtable, newsize * sizeof(enum typeid));
if (newtable == NULL)
- abort(); /* XXX handle better */
+ LIBC_ABORT("reallocf: %s", strerror(errno)); /* XXX handle better */
}
for (n = oldsize; n < newsize; n++)
newtable[n] = T_UNUSED;
__ungetwc(c, fp, loc);
continue;
}
- if (c != '%')
+ if (c != '%') {
+ if ((wi = __fgetwc(fp, loc)) == WEOF)
+ goto input_failure;
goto literal;
+ }
width = 0;
flags = 0;
/*
again: c = *fmt++;
switch (c) {
case '%':
+ /* Consume leading white space */
+ for(;;) {
+ if ((wi = __fgetwc(fp, loc)) == WEOF)
+ goto input_failure;
+ if (!iswspace_l(wi, loc))
+ break;
+ nread++;
+ }
literal:
- if ((wi = __fgetwc(fp, loc)) == WEOF)
- goto input_failure;
if (wi != c) {
__ungetwc(wi, fp, loc);
goto match_failure;
*va_arg(ap, float *) = res;
}
if (__scanfdebug && p - pbuf != width)
- abort();
+ LIBC_ABORT("p - pbuf %ld != width %ld", (long)(p - pbuf), width);
nassigned++;
}
nread += width;
}
#ifndef NO_FLOATING_POINT
+extern char *__parsefloat_buf(size_t s); /* see vfscanf-fbsd.c */
+
static int
parsefloat(FILE *fp, wchar_t **buf, size_t width, locale_t loc)
{
char *decimal_point;
wchar_t decpt;
_Bool gotmantdig = 0, ishex = 0;
- static wchar_t *b = NULL;
- static size_t bsiz = 0;
+ wchar_t *b;
wchar_t *e;
size_t s;
- if (bsiz == 0) {
- b = (wchar_t *)malloc(BUF * sizeof(wchar_t));
- if (b == NULL) {
- *buf = NULL;
- return 0;
- }
- bsiz = BUF;
- }
- s = (width == 0 ? bsiz : (width + 1));
- if (s > bsiz) {
- b = (wchar_t *)reallocf(b, s * sizeof(wchar_t));
- if (b == NULL) {
- bsiz = 0;
- *buf = NULL;
- return 0;
- }
- bsiz = s;
+ s = (width == 0 ? BUF : (width + 1));
+ if ((b = (wchar_t *)__parsefloat_buf(s * sizeof(wchar_t))) == NULL) {
+ *buf = NULL;
+ return 0;
}
e = b + (s - 1);
/*
goto parsedone;
break;
default:
- abort();
+ LIBC_ABORT("unknown state %d", state);
}
if (p >= e) {
ssize_t diff = (p - b);
ssize_t com = (commit - b);
s += BUF;
- b = (wchar_t *)reallocf(b, s * sizeof(wchar_t));
+ b = (wchar_t *)__parsefloat_buf(s * sizeof(wchar_t));
if (b == NULL) {
- bsiz = 0;
*buf = NULL;
return 0;
}
- bsiz = s;
e = b + (s - 1);
p = b + diff;
commit = b + com;
---- abort.c.orig 2007-04-17 01:31:46.000000000 -0700
-+++ abort.c 2007-04-17 01:36:22.000000000 -0700
-@@ -45,7 +45,10 @@
+--- abort.c.orig 2008-09-07 11:37:51.000000000 -0700
++++ abort.c 2008-09-07 11:56:01.000000000 -0700
+@@ -39,19 +39,26 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/
+
+ #include "namespace.h"
+ #include <signal.h>
++#include <stdarg.h>
+ #include <stdlib.h>
+ #include <stddef.h>
+ #include <unistd.h>
#include <pthread.h>
#include "un-namespace.h"
-void (*__cleanup)();
+extern void (*__cleanup)();
+extern void __abort(void) __dead2;
++extern const char *__crashreporter_info__;
+
+#define TIMEOUT 10000 /* 10 milliseconds */
void
abort()
-@@ -67,11 +70,20 @@
+ {
+ struct sigaction act;
+
++ if (!__crashreporter_info__)
++ __crashreporter_info__ = "abort() called";
+ /*
+ * POSIX requires we flush stdio buffers on abort.
+ * XXX ISO C requires that abort() be async-signal-safe.
+@@ -67,11 +74,22 @@ abort()
sigdelset(&act.sa_mask, SIGABRT);
(void)_sigprocmask(SIG_SETMASK, &act.sa_mask, NULL);
(void)raise(SIGABRT);
+{
+ struct sigaction act;
+
++ if (!__crashreporter_info__)
++ __crashreporter_info__ = "__abort() called";
act.sa_handler = SIG_DFL;
act.sa_flags = 0;
sigfillset(&act.sa_mask);
-@@ -79,5 +91,6 @@
+@@ -79,5 +97,19 @@ abort()
sigdelset(&act.sa_mask, SIGABRT);
(void)_sigprocmask(SIG_SETMASK, &act.sa_mask, NULL);
(void)raise(SIGABRT);
- exit(1);
+ usleep(TIMEOUT); /* give time for signal to happen */
+ __builtin_trap(); /* never exit normally */
++}
++
++__private_extern__ void
++abort_report_np(const char *fmt, ...)
++{
++ char *str;
++ va_list ap;
++
++ va_start(ap, fmt);
++ vasprintf(&str, fmt, ap);
++ va_end(ap);
++ __crashreporter_info__ = str ? str : fmt;
++ abort();
}
---- _SB/Libc/stdlib/FreeBSD/atexit.3 2003-05-20 15:23:24.000000000 -0700
-+++ _SB/Libc/stdlib/FreeBSD/atexit.3.edit 2006-06-28 16:55:52.000000000 -0700
-@@ -47,13 +47,13 @@
+--- atexit.3.orig 2009-05-12 11:21:33.000000000 -0700
++++ atexit.3 2009-05-20 14:13:00.000000000 -0700
+@@ -36,46 +36,69 @@
+ .\" @(#)atexit.3 8.1 (Berkeley) 6/4/93
+ .\" $FreeBSD: src/lib/libc/stdlib/atexit.3,v 1.10 2002/12/18 13:33:03 ru Exp $
+ .\"
+-.Dd September 6, 2002
++.Dd May 20, 2008
+ .Dt ATEXIT 3
+ .Os
+ .Sh NAME
+ .Nm atexit
+ .Nd register a function to be called on exit
+-.Sh LIBRARY
+-.Lb libc
.Sh SYNOPSIS
.In stdlib.h
.Ft int
-.Fn atexit "void (*function)(void)"
+.Fn atexit "void (*func)(void)"
++#ifdef UNIFDEF_BLOCKS
++.Ft int
++.Fn atexit_b "void (^block)(void)"
++#endif
.Sh DESCRIPTION
The
.Fn atexit
to be called at program exit, whether via
.Xr exit 3
or via return from the program's
-@@ -71,8 +71,8 @@
+ .Fn main .
+ Functions so registered are called in reverse order;
+ no arguments are passed.
++#ifdef UNIFDEF_BLOCKS
++.Pp
++The
++.Fn atexit_b
++function is like
++.Fn atexit
++except the callback is a block pointer instead of a function pointer.
++.Bd -ragged -offset indent
++Note: The
++.Fn Block_copy
++function (defined in
++.In Blocks.h )
++is used by
++.Fn atexit_b
++to make a copy of the block, especially for the case when a stack-based
++block might go out of scope when the subroutine returns.
++.Ed
++#endif
+ .Pp
+-These functions must not call
++These callbacks must not call
+ .Fn exit ;
+ if it should be necessary to terminate the process while in such a
+ function, the
+ .Xr _exit 2
+ function should be used.
+-(Alternatively, the function may cause abnormal
++(Alternatively, the callbacks may cause abnormal
process termination, for example by calling
.Xr abort 3 . )
.Pp
-At least 32 functions can always be registered,
-and more are allowed as long as sufficient memory can be allocated.
-+At least 32 functions can always be registered;
++At least 32 callbacks can always be registered;
+more are allowed as long as sufficient memory can be allocated.
.\" XXX {ATEXIT_MAX} is not implemented yet
.Sh RETURN VALUES
- .Rv -std atexit
+-.Rv -std atexit
++#ifdef UNIFDEF_BLOCKS
++.ds ATEXIT_B atexit_b
++#endif
++.Rv -std atexit \*[ATEXIT_B]
+ .Sh ERRORS
+ .Bl -tag -width Er
+ .It Bq Er ENOMEM
---- atexit.c.orig 2008-02-01 22:43:20.000000000 -0800
-+++ atexit.c 2008-02-01 22:47:49.000000000 -0800
-@@ -45,6 +45,9 @@
+--- atexit.c.orig 2009-05-12 11:21:33.000000000 -0700
++++ atexit.c 2009-05-23 13:46:33.000000000 -0700
+@@ -45,14 +45,23 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/
#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>
#include "atexit.h"
#include "un-namespace.h"
-@@ -74,6 +77,7 @@
++#ifdef __BLOCKS__
++#include <Block.h>
++#endif /* __BLOCKS__ */
+ #include "libc_private.h"
+
+ #define ATEXIT_FN_EMPTY 0
+ #define ATEXIT_FN_STD 1
+ #define ATEXIT_FN_CXA 2
++#ifdef __BLOCKS__
++#define ATEXIT_FN_BLK 3
++#endif /* __BLOCKS__ */
+
+ static pthread_mutex_t atexit_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+@@ -67,6 +76,9 @@ struct atexit {
+ union {
+ void (*std_func)(void);
+ void (*cxa_func)(void *);
++#ifdef __BLOCKS__
++ void (^block)(void);
++#endif /* __BLOCKS__ */
+ } fn_ptr; /* function pointer */
+ void *fn_arg; /* argument for CXA callback */
+ void *fn_dso; /* shared module handle */
+@@ -74,6 +86,7 @@ struct atexit {
};
static struct atexit *__atexit; /* points to head of LIFO stack */
/*
* Register the function described by 'fptr' to be called at application
-@@ -109,6 +113,7 @@
+@@ -109,6 +122,7 @@ atexit_register(struct atexit_fn *fptr)
__atexit = p;
}
p->fns[p->ind++] = *fptr;
_MUTEX_UNLOCK(&atexit_mutex);
return 0;
}
-@@ -120,12 +125,20 @@
+@@ -120,17 +134,50 @@ int
atexit(void (*func)(void))
{
struct atexit_fn fn;
int error;
fn.fn_type = ATEXIT_FN_STD;
- fn.fn_ptr.std_func = func;;
+- fn.fn_ptr.std_func = func;;
++ fn.fn_ptr.std_func = func;
fn.fn_arg = NULL;
+#if defined(__DYNAMIC__)
+ if ( dladdr(func, &info) )
error = atexit_register(&fn);
return (error);
-@@ -156,13 +169,14 @@
+ }
+
++#ifdef __BLOCKS__
++int
++atexit_b(void (^block)(void))
++{
++ struct atexit_fn fn;
++ struct dl_info info;
++ int error;
++
++ fn.fn_type = ATEXIT_FN_BLK;
++ fn.fn_ptr.block = Block_copy(block);
++ fn.fn_arg = NULL;
++#if defined(__DYNAMIC__)
++ if ( dladdr(block, &info) )
++ fn.fn_dso = info.dli_fbase;
++ else
++ fn.fn_dso = NULL;
++#else /* ! defined(__DYNAMIC__) */
++ fn.fn_dso = NULL;
++#endif /* defined(__DYNAMIC__) */
++
++ error = atexit_register(&fn);
++ return (error);
++}
++#endif /* __BLOCKS__ */
++
+ /*
+ * Register a function to be performed at exit or when an shared object
+ * with given dso handle is unloaded dynamically.
+@@ -156,13 +203,14 @@ __cxa_atexit(void (*func)(void *), void
* handlers are called.
*/
void
for (p = __atexit; p; p = p->next) {
for (n = p->ind; --n >= 0;) {
if (p->fns[n].fn_type == ATEXIT_FN_EMPTY)
-@@ -175,6 +189,7 @@
+@@ -175,6 +223,7 @@ __cxa_finalize(void *dso)
has already been called.
*/
p->fns[n].fn_type = ATEXIT_FN_EMPTY;
_MUTEX_UNLOCK(&atexit_mutex);
/* Call the function of correct type. */
-@@ -183,6 +198,8 @@
+@@ -182,7 +231,13 @@ __cxa_finalize(void *dso)
+ fn.fn_ptr.cxa_func(fn.fn_arg);
else if (fn.fn_type == ATEXIT_FN_STD)
fn.fn_ptr.std_func();
++#ifdef __BLOCKS__
++ else if (fn.fn_type == ATEXIT_FN_BLK)
++ fn.fn_ptr.block();
++#endif /* __BLOCKS__ */
_MUTEX_LOCK(&atexit_mutex);
+ if (new_registration)
+ goto restart;
---- _SB/Libc/stdlib/FreeBSD/bsearch.3 2003-05-20 15:23:24.000000000 -0700
-+++ _SB/Libc/stdlib/FreeBSD/bsearch.3.edit 2006-06-28 16:55:52.000000000 -0700
-@@ -47,19 +47,19 @@
+--- bsearch.3.orig 2009-05-12 11:21:33.000000000 -0700
++++ bsearch.3 2009-05-20 14:53:48.000000000 -0700
+@@ -36,30 +36,37 @@
+ .\" @(#)bsearch.3 8.3 (Berkeley) 4/19/94
+ .\" $FreeBSD: src/lib/libc/stdlib/bsearch.3,v 1.8 2001/09/07 14:46:35 asmodai Exp $
+ .\"
+-.Dd April 19, 1994
++.Dd May 20, 2008
+ .Dt BSEARCH 3
+ .Os
+ .Sh NAME
++#ifdef UNIFDEF_BLOCKS
++.Nm bsearch ,
++.Nm bsearch_b
++#else
+ .Nm bsearch
++#endif
+ .Nd binary search of a sorted table
+-.Sh LIBRARY
+-.Lb libc
.Sh SYNOPSIS
.In stdlib.h
.Ft void *
-.Fn bsearch "const void *key" "const void *base" "size_t nmemb" "size_t size" "int (*compar) (const void *, const void *)"
+.Fn bsearch "const void *key" "const void *base" "size_t nel" "size_t width" "int (*compar) (const void *, const void *)"
++#ifdef UNIFDEF_BLOCKS
++.Ft void *
++.Fn bsearch_b "const void *key" "const void *base" "size_t nel" "size_t width" "int (^compar) (const void *, const void *)"
++#endif
.Sh DESCRIPTION
The
.Fn bsearch
.Pp
The contents of the array should be in ascending sorted order according
to the comparison function referenced by
-@@ -70,7 +70,8 @@
+@@ -70,15 +77,33 @@
is expected to have
two arguments which point to the
.Fa key
less than, equal to, or greater than zero if the
.Fa key
object is found, respectively, to be less than, to match, or be
+ greater than the array member.
++#ifdef UNIFDEF_BLOCKS
++.Pp
++The
++.Fn bsearch_b
++function is like
++.Fn bsearch
++except the callback
++.Fa compar
++is a block pointer instead of a function pointer.
++#endif
+ .Sh RETURN VALUES
+ The
+ .Fn bsearch
+-function returns a pointer to a matching member of the array, or a null
++#ifdef UNIFDEF_BLOCKS
++and
++.Fn bsearch_b
++functions
++#else
++function
++#endif
++returns a pointer to a matching member of the array, or a null
+ pointer if no match is found.
+ If two members compare as equal, which member is matched is unspecified.
+ .Sh SEE ALSO
--- /dev/null
+--- bsearch.c.orig 2009-05-12 11:21:33.000000000 -0700
++++ bsearch.c 2009-05-20 13:11:05.000000000 -0700
+@@ -81,3 +81,31 @@ bsearch(key, base0, nmemb, size, compar)
+ }
+ return (NULL);
+ }
++
++#ifdef __BLOCKS__
++void *
++bsearch_b(key, base0, nmemb, size, compar)
++ const void *key;
++ const void *base0;
++ size_t nmemb;
++ size_t size;
++ int (^compar)(const void *, const void *);
++{
++ const char *base = base0;
++ size_t lim;
++ int cmp;
++ const void *p;
++
++ for (lim = nmemb; lim != 0; lim >>= 1) {
++ p = base + (lim >> 1) * size;
++ cmp = compar(key, p);
++ if (cmp == 0)
++ return ((void *)p);
++ if (cmp > 0) { /* key > p: move right */
++ base = (char *)p + size;
++ lim--;
++ } /* else move left */
++ }
++ return (NULL);
++}
++#endif /* __BLOCKS__ */
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
static char sccsid[] = "@(#)heapsort.c 8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/stdlib/heapsort.c,v 1.4 2002/03/21 22:48:41 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/heapsort.c,v 1.6 2008/01/13 02:11:10 das Exp $");
#include <errno.h>
#include <stddef.h>
size_t nmemb, size;
int (*compar)(const void *, const void *);
{
- int cnt, i, j, l;
+ size_t cnt, i, j, l;
char tmp, *tmp1, *tmp2;
char *base, *k, *p, *t;
--- /dev/null
+--- heapsort_b.c.orig 2008-09-24 13:48:45.000000000 -0700
++++ heapsort_b.c 2008-09-24 13:48:56.000000000 -0700
+@@ -136,10 +136,10 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/
+ * only advantage over quicksort is that it requires little additional memory.
+ */
+ int
+-heapsort(vbase, nmemb, size, compar)
++heapsort_b(vbase, nmemb, size, compar)
+ void *vbase;
+ size_t nmemb, size;
+- int (*compar)(const void *, const void *);
++ int (^compar)(const void *, const void *);
+ {
+ size_t cnt, i, j, l;
+ char tmp, *tmp1, *tmp2;
--- /dev/null
+--- heapsort_r.c.orig 2008-09-24 13:48:45.000000000 -0700
++++ heapsort_r.c 2008-09-24 13:59:08.000000000 -0700
+@@ -77,12 +77,12 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/
+ for (par_i = initval; (child_i = par_i * 2) <= nmemb; \
+ par_i = child_i) { \
+ child = base + child_i * size; \
+- if (child_i < nmemb && compar(child, child + size) < 0) { \
++ if (child_i < nmemb && compar(thunk, child, child + size) < 0) { \
+ child += size; \
+ ++child_i; \
+ } \
+ par = base + par_i * size; \
+- if (compar(child, par) <= 0) \
++ if (compar(thunk, child, par) <= 0) \
+ break; \
+ SWAP(par, child, count, size, tmp); \
+ } \
+@@ -108,7 +108,7 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/
+ #define SELECT(par_i, child_i, nmemb, par, child, size, k, count, tmp1, tmp2) { \
+ for (par_i = 1; (child_i = par_i * 2) <= nmemb; par_i = child_i) { \
+ child = base + child_i * size; \
+- if (child_i < nmemb && compar(child, child + size) < 0) { \
++ if (child_i < nmemb && compar(thunk, child, child + size) < 0) { \
+ child += size; \
+ ++child_i; \
+ } \
+@@ -120,7 +120,7 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/
+ par_i = child_i / 2; \
+ child = base + child_i * size; \
+ par = base + par_i * size; \
+- if (child_i == 1 || compar(k, par) < 0) { \
++ if (child_i == 1 || compar(thunk, k, par) < 0) { \
+ COPY(child, k, count, size, tmp1, tmp2); \
+ break; \
+ } \
+@@ -135,11 +135,12 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/
+ * a data set that will trigger the worst case is nonexistent. Heapsort's
+ * only advantage over quicksort is that it requires little additional memory.
+ */
+-int
+-heapsort(vbase, nmemb, size, compar)
++__private_extern__ int
++__heapsort_r(vbase, nmemb, size, thunk, compar)
+ void *vbase;
+ size_t nmemb, size;
+- int (*compar)(const void *, const void *);
++ void *thunk;
++ int (*compar)(void *, const void *, const void *);
+ {
+ size_t cnt, i, j, l;
+ char tmp, *tmp1, *tmp2;
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
static char sccsid[] = "@(#)merge.c 8.2 (Berkeley) 2/14/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/stdlib/merge.c,v 1.6 2002/03/21 22:48:42 obrien Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/merge.c,v 1.8 2007/01/09 00:28:10 imp Exp $");
/*
* Hybrid exponential search/linear search merge sort with hybrid
#include <stdlib.h>
#include <string.h>
-static void setup(u_char *, u_char *, size_t, size_t, int (*)());
-static void insertionsort(u_char *, size_t, size_t, int (*)());
+static void setup(u_char *, u_char *, size_t, size_t,
+ int (*)(const void *, const void *));
+static void insertionsort(u_char *, size_t, size_t,
+ int (*)(const void *, const void *));
#define ISIZE sizeof(int)
#define PSIZE sizeof(u_char *)
size_t size;
int (*cmp)(const void *, const void *);
{
- int i, sense;
+ size_t i;
+ int sense;
int big, iflag;
u_char *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
u_char *list2, *list1, *p2, *p, *last, **p1;
--- /dev/null
+--- merge.c.orig 2008-02-17 16:11:51.000000000 -0800
++++ merge.c 2008-02-17 20:26:24.000000000 -0800
+@@ -264,7 +264,8 @@ setup(list1, list2, n, size, cmp)
+ int (*cmp)(const void *, const void *);
+ u_char *list1, *list2;
+ {
+- int i, length, size2, tmp, sense;
++ size_t i, size2;
++ int length, tmp, sense;
+ u_char *f1, *f2, *s, *l2, *last, *p2;
+
+ size2 = size*2;
--- /dev/null
+--- merge_b.c.orig 2008-05-21 02:51:18.000000000 -0700
++++ merge_b.c 2008-05-21 02:54:59.000000000 -0700
+@@ -57,9 +57,9 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/
+ #include <string.h>
+
+ static void setup(u_char *, u_char *, size_t, size_t,
+- int (*)(const void *, const void *));
++ int (^)(const void *, const void *));
+ static void insertionsort(u_char *, size_t, size_t,
+- int (*)(const void *, const void *));
++ int (^)(const void *, const void *));
+
+ #define ISIZE sizeof(int)
+ #define PSIZE sizeof(u_char *)
+@@ -95,11 +95,11 @@ static void insertionsort(u_char *, size
+ * Arguments are as for qsort.
+ */
+ int
+-mergesort(base, nmemb, size, cmp)
++mergesort_b(base, nmemb, size, cmp)
+ void *base;
+ size_t nmemb;
+ size_t size;
+- int (*cmp)(const void *, const void *);
++ int (^cmp)(const void *, const void *);
+ {
+ size_t i;
+ int sense;
+@@ -141,7 +141,7 @@ mergesort(base, nmemb, size, cmp)
+ p2 = *EVAL(p2);
+ l2 = list1 + (p2 - list2);
+ while (f1 < l1 && f2 < l2) {
+- if ((*cmp)(f1, f2) <= 0) {
++ if (cmp(f1, f2) <= 0) {
+ q = f2;
+ b = f1, t = l1;
+ sense = -1;
+@@ -160,12 +160,12 @@ mergesort(base, nmemb, size, cmp)
+ EXPONENTIAL: for (i = size; ; i <<= 1)
+ if ((p = (b + i)) >= t) {
+ if ((p = t - size) > b &&
+- (*cmp)(q, p) <= sense)
++ cmp(q, p) <= sense)
+ t = p;
+ else
+ b = p;
+ break;
+- } else if ((*cmp)(q, p) <= sense) {
++ } else if (cmp(q, p) <= sense) {
+ t = p;
+ if (i == size)
+ big = 0;
+@@ -174,14 +174,14 @@ EXPONENTIAL: for (i = size; ; i <<
+ b = p;
+ while (t > b+size) {
+ i = (((t - b) / size) >> 1) * size;
+- if ((*cmp)(q, p = b + i) <= sense)
++ if (cmp(q, p = b + i) <= sense)
+ t = p;
+ else
+ b = p;
+ }
+ goto COPY;
+ FASTCASE: while (i > size)
+- if ((*cmp)(q,
++ if (cmp(q,
+ p = b + (i >>= 1)) <= sense)
+ t = p;
+ else
+@@ -261,10 +261,11 @@ COPY: b = t;
+ void
+ setup(list1, list2, n, size, cmp)
+ size_t n, size;
+- int (*cmp)(const void *, const void *);
++ int (^cmp)(const void *, const void *);
+ u_char *list1, *list2;
+ {
+- int i, length, size2, tmp, sense;
++ size_t i, size2;
++ int length, tmp, sense;
+ u_char *f1, *f2, *s, *l2, *last, *p2;
+
+ size2 = size*2;
+@@ -336,7 +337,7 @@ static void
+ insertionsort(a, n, size, cmp)
+ u_char *a;
+ size_t n, size;
+- int (*cmp)(const void *, const void *);
++ int (^cmp)(const void *, const void *);
+ {
+ u_char *ai, *s, *t, *u, tmp;
+ int i;
---- _SB/Libc/stdlib/FreeBSD/qsort.3 2004-11-25 11:38:42.000000000 -0800
-+++ _SB/Libc/stdlib/FreeBSD/qsort.3.edit 2006-06-28 16:55:53.000000000 -0700
-@@ -40,41 +40,44 @@
+--- qsort.3.orig 2009-05-12 11:21:33.000000000 -0700
++++ qsort.3 2009-05-20 15:00:21.000000000 -0700
+@@ -40,41 +40,78 @@
.Dt QSORT 3
.Os
.Sh NAME
-.Nm qsort , qsort_r , heapsort , mergesort
+.Nm heapsort ,
++#ifdef UNIFDEF_BLOCKS
++.Nm heapsort_b ,
++#endif
+.Nm mergesort ,
++#ifdef UNIFDEF_BLOCKS
++.Nm mergesort_b ,
++#endif
+.Nm qsort ,
++#ifdef UNIFDEF_BLOCKS
++.Nm qsort_b ,
++#endif
+.Nm qsort_r
.Nd sort functions
- .Sh LIBRARY
- .Lb libc
+-.Sh LIBRARY
+-.Lb libc
.Sh SYNOPSIS
.In stdlib.h
+-.Ft void
+-.Fo qsort
+.Ft int
+.Fo heapsort
-+.Fa "void *base"
-+.Fa "size_t nel"
-+.Fa "size_t width"
-+.Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]"
-+.Fc
-+.Ft int
-+.Fo mergesort
-+.Fa "void *base"
-+.Fa "size_t nel"
-+.Fa "size_t width"
-+.Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]"
-+.Fc
- .Ft void
- .Fo qsort
.Fa "void *base"
-.Fa "size_t nmemb"
-.Fa "size_t size"
+.Fa "size_t width"
.Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]"
.Fc
- .Ft void
- .Fo qsort_r
+-.Ft void
+-.Fo qsort_r
++#ifdef UNIFDEF_BLOCKS
++.Ft int
++.Fo heapsort_b
.Fa "void *base"
-.Fa "size_t nmemb"
-.Fa "size_t size"
+-.Fa "void *thunk"
+-.Fa "int \*[lp]*compar\*[rp]\*[lp]void *, const void *, const void *\*[rp]"
+.Fa "size_t nel"
+.Fa "size_t width"
- .Fa "void *thunk"
- .Fa "int \*[lp]*compar\*[rp]\*[lp]void *, const void *, const void *\*[rp]"
++.Fa "int \*[lp]^compar\*[rp]\*[lp]const void *, const void *\*[rp]"
.Fc
--.Ft int
++#endif
+ .Ft int
-.Fo heapsort
--.Fa "void *base"
++.Fo mergesort
+ .Fa "void *base"
-.Fa "size_t nmemb"
-.Fa "size_t size"
--.Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]"
--.Fc
--.Ft int
++.Fa "size_t nel"
++.Fa "size_t width"
+ .Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]"
+ .Fc
++#ifdef UNIFDEF_BLOCKS
+ .Ft int
-.Fo mergesort
--.Fa "void *base"
++.Fo mergesort_b
+ .Fa "void *base"
-.Fa "size_t nmemb"
-.Fa "size_t size"
--.Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]"
--.Fc
++.Fa "size_t nel"
++.Fa "size_t width"
++.Fa "int \*[lp]^compar\*[rp]\*[lp]const void *, const void *\*[rp]"
++.Fc
++#endif
++.Ft void
++.Fo qsort
++.Fa "void *base"
++.Fa "size_t nel"
++.Fa "size_t width"
+ .Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]"
+ .Fc
++#ifdef UNIFDEF_BLOCKS
++.Ft void
++.Fo qsort_b
++.Fa "void *base"
++.Fa "size_t nel"
++.Fa "size_t width"
++.Fa "int \*[lp]^compar\*[rp]\*[lp]const void *, const void *\*[rp]"
++.Fc
++#endif
++.Ft void
++.Fo qsort_r
++.Fa "void *base"
++.Fa "size_t nel"
++.Fa "size_t width"
++.Fa "void *thunk"
++.Fa "int \*[lp]*compar\*[rp]\*[lp]void *, const void *, const void *\*[rp]"
++.Fc
.Sh DESCRIPTION
The
.Fn qsort
-@@ -84,7 +87,7 @@
+@@ -84,7 +121,7 @@
function is a modified selection sort.
The
.Fn mergesort
intended for sorting data with pre-existing order.
.Pp
The
-@@ -92,18 +95,18 @@
+@@ -92,19 +129,19 @@
and
.Fn heapsort
functions sort an array of
.Em requires
that
-.Fa size
+-be greater than
+.Fa width
- be greater than
++be greater than or equal to
.Dq "sizeof(void *) / 2" .
.Pp
-@@ -139,7 +142,7 @@
+ The contents of the array
+@@ -139,7 +176,7 @@
.Fn heapsort
are
.Em not
the sorted array is undefined.
The
.Fn mergesort
-@@ -183,8 +186,8 @@
+@@ -183,8 +220,8 @@
The function
.Fn mergesort
requires additional memory of size
bytes; it should be used only when space is not at a premium.
The
.Fn mergesort
-@@ -195,8 +198,8 @@
+@@ -195,42 +232,83 @@
Normally,
.Fn qsort
is faster than
.Fn heapsort .
Memory availability and pre-existing order in the data can make this
untrue.
-@@ -218,10 +221,10 @@
++#ifdef UNIFDEF_BLOCKS
++.Pp
++The
++.Fn heapsort_b ,
++.Fn mergesort_b ,
++and
++.Fn qsort_b
++routines are like the corresponding routines without the _b suffix, expect
++that the
++.Fa compar
++callback is a block pointer instead of a function pointer.
++#endif
+ .Sh RETURN VALUES
+ The
++#ifdef UNIFDEF_BLOCKS
++.Fn qsort ,
++.Fn qsort_b
++#else
+ .Fn qsort
++#endif
+ and
+ .Fn qsort_r
+ functions
+ return no value.
+ .Pp
+-.Rv -std heapsort mergesort
++#ifdef UNIFDEF_BLOCKS
++.ds HEAPSORT_B heapsort_b
++.ds MERGESORT_B mergesort_b
++#endif
++.Rv -std heapsort \*[HEAPSORT_B] mergesort \*[MERGESORT_B]
+ .Sh ERRORS
+ The
++#ifdef UNIFDEF_BLOCKS
++.Fn heapsort ,
++.Fn heapsort_b ,
++.Fn mergesort
++and
++.Fn mergesort_b
++#else
+ .Fn heapsort
+ and
+ .Fn mergesort
++#endif
+ functions succeed unless:
.Bl -tag -width Er
.It Bq Er EINVAL
The
+.Fa width
argument to
.Fn mergesort
++#ifdef UNIFDEF_BLOCKS
++or
++.Fn mergesort_b
++#endif
is less than
+ .Dq "sizeof(void *) / 2" .
+ .It Bq Er ENOMEM
+ The
++#ifdef UNIFDEF_BLOCKS
++.Fn heapsort ,
++.Fn heapsort_b ,
++.Fn mergesort
++and
++.Fn mergesort_b
++#else
+ .Fn heapsort
+-or
++and
+ .Fn mergesort
++#endif
+ functions
+ were unable to allocate memory.
+ .El
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
static char sccsid[] = "@(#)qsort.c 8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/stdlib/qsort.c,v 1.12 2002/09/10 02:04:49 wollman Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/qsort.c,v 1.15 2008/01/14 09:21:34 das Exp $");
#include <stdlib.h>
#endif
{
char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
- int d, r, swaptype, swap_cnt;
+ size_t d, r;
+ int cmp_result;
+ int swaptype, swap_cnt;
loop: SWAPINIT(a, es);
swap_cnt = 0;
pc = pd = (char *)a + (n - 1) * es;
for (;;) {
- while (pb <= pc && (r = CMP(thunk, pb, a)) <= 0) {
- if (r == 0) {
+ while (pb <= pc && (cmp_result = CMP(thunk, pb, a)) <= 0) {
+ if (cmp_result == 0) {
swap_cnt = 1;
swap(pa, pb);
pa += es;
}
pb += es;
}
- while (pb <= pc && (r = CMP(thunk, pc, a)) >= 0) {
- if (r == 0) {
+ while (pb <= pc && (cmp_result = CMP(thunk, pc, a)) >= 0) {
+ if (cmp_result == 0) {
swap_cnt = 1;
swap(pc, pd);
pd -= es;
---- qsort.c.orig 2004-12-01 20:08:48.000000000 -0800
-+++ qsort.c 2004-12-01 20:10:50.000000000 -0800
-@@ -44,8 +44,8 @@
+--- qsort.c.orig 2008-09-24 19:55:30.000000000 -0700
++++ qsort.c 2008-09-25 12:28:18.000000000 -0700
+@@ -34,14 +34,19 @@ static char sccsid[] = "@(#)qsort.c 8.1
+ __FBSDID("$FreeBSD: src/lib/libc/stdlib/qsort.c,v 1.15 2008/01/14 09:21:34 das Exp $");
+
+ #include <stdlib.h>
++#include <string.h>
+
+ #ifdef I_AM_QSORT_R
+ typedef int cmp_t(void *, const void *, const void *);
#else
typedef int cmp_t(const void *, const void *);
#endif
-static inline char *med3(char *, char *, char *, cmp_t *, void *);
-static inline void swapfunc(char *, char *, int, int);
++#ifdef I_AM_QSORT_B
++static inline char *med3(char *, char *, char *, cmp_t ^, void *) __attribute__((always_inline));
++#else
+static inline char *med3(char *, char *, char *, cmp_t *, void *) __attribute__((always_inline));
++#endif
+static inline void swapfunc(char *, char *, int, int) __attribute__((always_inline));
#define min(a, b) (a) < (b) ? a : b
+@@ -90,7 +95,13 @@ swapfunc(a, b, n, swaptype)
+ #endif
+
+ static inline char *
+-med3(char *a, char *b, char *c, cmp_t *cmp, void *thunk
++med3(char *a, char *b, char *c,
++#ifdef I_AM_QSORT_B
++cmp_t ^cmp,
++#else
++cmp_t *cmp,
++#endif
++void *thunk
+ #ifndef I_AM_QSORT_R
+ __unused
+ #endif
+@@ -101,21 +112,47 @@ __unused
+ :(CMP(thunk, b, c) > 0 ? b : (CMP(thunk, a, c) < 0 ? a : c ));
+ }
+
++#ifdef __LP64__
++#define DEPTH(x) (2 * (flsl((long)(x)) - 1))
++#else /* !__LP64__ */
++#define DEPTH(x) (2 * (fls((int)(x)) - 1))
++#endif /* __LP64__ */
++
+ #ifdef I_AM_QSORT_R
+-void
+-qsort_r(void *a, size_t n, size_t es, void *thunk, cmp_t *cmp)
++int __heapsort_r(void *, size_t, size_t, void *, int (*)(void *, const void *, const void *));
++#endif
++
++static void
++_qsort(void *a, size_t n, size_t es,
++#ifdef I_AM_QSORT_R
++void *thunk,
+ #else
+-#define thunk NULL
+-void
+-qsort(void *a, size_t n, size_t es, cmp_t *cmp)
++#define thunk NULL
++#endif
++#ifdef I_AM_QSORT_B
++cmp_t ^cmp,
++#else
++cmp_t *cmp,
+ #endif
++int depth_limit)
+ {
+ char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
+ size_t d, r;
+ int cmp_result;
+ int swaptype, swap_cnt;
+
+-loop: SWAPINIT(a, es);
++loop:
++ if (depth_limit-- <= 0) {
++#ifdef I_AM_QSORT_B
++ heapsort_b(a, n, es, cmp);
++#elif defined(I_AM_QSORT_R)
++ __heapsort_r(a, n, es, thunk, cmp);
++#else
++ heapsort(a, n, es, cmp);
++#endif
++ return;
++ }
++ SWAPINIT(a, es);
+ swap_cnt = 0;
+ if (n < 7) {
+ for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
+@@ -165,25 +202,31 @@ loop: SWAPINIT(a, es);
+ pb += es;
+ pc -= es;
+ }
++
++ pn = (char *)a + n * es;
++ r = min(pa - (char *)a, pb - pa);
++ vecswap(a, pb - r, r);
++ r = min(pd - pc, pn - pd - es);
++ vecswap(pb, pn - r, r);
++
+ if (swap_cnt == 0) { /* Switch to insertion sort */
++ r = 1 + n / 4; /* n >= 7, so r >= 2 */
+ for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
+ for (pl = pm;
+ pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
+- pl -= es)
++ pl -= es) {
+ swap(pl, pl - es);
++ if (++swap_cnt > r) goto nevermind;
++ }
+ return;
+ }
+
+- pn = (char *)a + n * es;
+- r = min(pa - (char *)a, pb - pa);
+- vecswap(a, pb - r, r);
+- r = min(pd - pc, pn - pd - es);
+- vecswap(pb, pn - r, r);
++nevermind:
+ if ((r = pb - pa) > es)
+ #ifdef I_AM_QSORT_R
+- qsort_r(a, r / es, es, thunk, cmp);
++ _qsort(a, r / es, es, thunk, cmp, depth_limit);
+ #else
+- qsort(a, r / es, es, cmp);
++ _qsort(a, r / es, es, cmp, depth_limit);
+ #endif
+ if ((r = pd - pc) > es) {
+ /* Iterate rather than recurse to save stack space */
+@@ -193,3 +236,19 @@ loop: SWAPINIT(a, es);
+ }
+ /* qsort(pn - r, r / es, es, cmp);*/
+ }
++
++void
++#ifdef I_AM_QSORT_R
++qsort_r(void *a, size_t n, size_t es, void *thunk, cmp_t *cmp)
++#elif defined(I_AM_QSORT_B)
++qsort_b(void *a, size_t n, size_t es, cmp_t ^cmp)
++#else
++qsort(void *a, size_t n, size_t es, cmp_t *cmp)
++#endif
++{
++ _qsort(a, n, es,
++#ifdef I_AM_QSORT_R
++ thunk,
++#endif
++ cmp, DEPTH(n));
++}
---- realpath.3 2003-05-20 15:23:25.000000000 -0700
-+++ realpath.3.edit 2006-09-06 15:43:17.000000000 -0700
-@@ -44,26 +44,28 @@
- .Sh LIBRARY
- .Lb libc
+--- realpath.3.orig 2008-04-05 00:03:06.000000000 -0700
++++ realpath.3 2008-04-05 17:42:41.000000000 -0700
+@@ -35,63 +35,73 @@
+ .\" @(#)realpath.3 8.2 (Berkeley) 2/16/94
+ .\" $FreeBSD: src/lib/libc/stdlib/realpath.3,v 1.13 2003/03/27 20:48:53 fjoe Exp $
+ .\"
+-.Dd February 16, 1994
++.Dd April 5, 2008
+ .Dt REALPATH 3
+ .Os
+ .Sh NAME
+ .Nm realpath
+ .Nd returns the canonicalized absolute pathname
+-.Sh LIBRARY
+-.Lb libc
++.\" .Sh LIBRARY
++.\" .Lb libc
.Sh SYNOPSIS
-.In sys/param.h
.In stdlib.h
.Pa /../
in
-.Fa pathname ,
-+.Fa file_name ,
- and copies the resulting absolute pathname into
- the memory referenced by
+-and copies the resulting absolute pathname into
+-the memory referenced by
-.Fa resolved_path .
-+.Fa resolved_name .
- The
+-The
-.Fa resolved_path
++.Fa file_name .
++If the
+.Fa resolved_name
argument
++is non-NULL, the resulting absolute pathname is copied there (it
.Em must
refer to a buffer capable of storing at least
-@@ -74,9 +76,9 @@
+ .Dv PATH_MAX
+-characters.
++characters).
++.Pp
++As a permitted extension to the standard, if
++.Fa resolved_name
++is NULL,
++memory is allocated for the resulting absolute pathname, and is returned by
++.Fn realpath .
++This memory should be freed by a call to
++.Xr free 3
++when no longer needed.
+ .Pp
+ The
.Fn realpath
function will resolve both absolute and relative paths
and return the absolute pathname corresponding to
must exist when
.Fn realpath
is called.
-@@ -84,14 +86,14 @@
- The
+ .Sh "RETURN VALUES"
+-The
++On success, the
.Fn realpath
- function returns
+-function returns
-.Fa resolved_path
+-on success.
++function returns the address of the resulting absolute pathname, which is
+.Fa resolved_name
- on success.
++if it was non-NULL, or the address of newly allocated memory.
If an error occurs,
.Fn realpath
returns
-.Dv NULL ,
-+.Dv NULL
- and
+-and
-.Fa resolved_path
++.Dv NULL .
++If
+.Fa resolved_name
++was non-NULL, it will
contains the pathname which caused the problem.
.Sh ERRORS
The function
-@@ -100,7 +102,7 @@
+@@ -99,24 +109,44 @@
+ may fail and set the external variable
.Va errno
for any of the errors specified for the library functions
++.Xr alloca 3 ,
++.Xr getattrlist 2 ,
++.Xr getcwd 3 ,
.Xr lstat 2 ,
-.Xr readlink 2
+.Xr readlink 2 ,
++.Xr stat 2 ,
and
- .Xr getcwd 3 .
- .Sh CAVEATS
-@@ -112,11 +114,26 @@
- version always returns absolute pathnames,
- whereas the Solaris implementation will,
- under certain circumstances, return a relative
--.Fa resolved_path
-+.Fa resolved_name
- when given a relative
--.Fa pathname .
-+.Fa file_name .
+-.Xr getcwd 3 .
+-.Sh CAVEATS
+-This implementation of
++.Xr strdup 3 .
++.\" .Sh CAVEATS
++.\" This implementation of
++.\" .Fn realpath
++.\" differs slightly from the Solaris implementation.
++.\" The
++.\" .Bx 4.4
++.\" version always returns absolute pathnames,
++.\" whereas the Solaris implementation will,
++.\" under certain circumstances, return a relative
++.\" .Fa resolved_name
++.\" when given a relative
++.\" .Fa file_name .
+.Sh LEGACY SYNOPSIS
+.Fd #include <sys/param.h>
+.Fd #include <stdlib.h>
+the last component of
+.Fa file_name
+does not need to exist when
-+.Fn realpath
+ .Fn realpath
+-differs slightly from the Solaris implementation.
+-The
+-.Bx 4.4
+-version always returns absolute pathnames,
+-whereas the Solaris implementation will,
+-under certain circumstances, return a relative
+-.Fa resolved_path
+-when given a relative
+-.Fa pathname .
+is called.
.Sh "SEE ALSO"
-.Xr getcwd 3
++.Xr free 3 ,
+.Xr getcwd 3 ,
+.Xr compat 5
.Sh HISTORY
---- realpath.c.orig 2006-09-16 19:12:28.000000000 -0700
-+++ realpath.c 2006-09-16 20:18:25.000000000 -0700
-@@ -35,13 +35,41 @@
+--- realpath.c.orig 2008-04-04 14:39:39.000000000 -0700
++++ realpath.c 2008-04-04 19:59:19.000000000 -0700
+@@ -35,13 +35,41 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/
#include "namespace.h"
#include <sys/param.h>
#include <sys/stat.h>
/*
* char *realpath(const char *path, char resolved[PATH_MAX]);
*
-@@ -52,24 +80,55 @@
+@@ -50,26 +78,67 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/
+ * in which case the path which caused trouble is left in (resolved).
+ */
char *
- realpath(const char *path, char resolved[PATH_MAX])
+-realpath(const char *path, char resolved[PATH_MAX])
++realpath(const char *path, char inresolved[PATH_MAX])
{
+ struct attrs attrs;
struct stat sb;
+ static dev_t rootdev;
+ static int rootdev_inited = 0;
+ ino_t inode;
++ char *resolved;
+ if (path == NULL) {
+ errno = EINVAL;
+ return (NULL);
+ }
+#endif /* __DARWIN_UNIX03 */
++ /*
++ * Extension to the standard; if inresolved == NULL, allocate memory
++ * (first on the stack, then use strdup())
++ */
++ if (!inresolved) {
++ if ((resolved = alloca(PATH_MAX)) == NULL) return (NULL);
++ } else {
++ resolved = inresolved;
++ }
+ if (!rootdev_inited) {
+ rootdev_inited = 1;
+ if (stat("/", &sb) < 0) {
strlcpy(resolved, ".", PATH_MAX);
return (NULL);
}
-@@ -80,6 +139,13 @@
+@@ -80,6 +149,13 @@ realpath(const char *path, char resolved
errno = ENAMETOOLONG;
return (NULL);
}
/*
* Iterate over path components in `left'.
-@@ -127,6 +193,13 @@
+@@ -127,6 +203,13 @@ realpath(const char *path, char resolved
}
/*
* Append the next path component and lstat() it. If
* lstat() fails we still can return successfully if
* there are no more path components left.
-@@ -136,25 +209,87 @@
+@@ -136,25 +219,87 @@ realpath(const char *path, char resolved
errno = ENAMETOOLONG;
return (NULL);
}
+ * that each component of the mountpoint
+ * is a directory (and not a symlink)
+ */
-+ char temp[MNAMELEN];
++ char temp[sizeof(sfs.f_mntonname)];
+ char *cp;
+ int ok = 1;
+
} else if (resolved_len > 1) {
/* Strip the last path component. */
resolved[resolved_len - 1] = '\0';
-@@ -184,7 +319,30 @@
+@@ -184,7 +329,30 @@ realpath(const char *path, char resolved
}
}
left_len = strlcpy(left, symlink, sizeof(left));
}
/*
+@@ -193,5 +361,6 @@ realpath(const char *path, char resolved
+ */
+ if (resolved_len > 1 && resolved[resolved_len - 1] == '/')
+ resolved[resolved_len - 1] = '\0';
++ if (!inresolved) resolved = strdup(resolved);
+ return (resolved);
+ }
---- setenv.c.orig 2006-12-12 18:14:46.000000000 -0800
-+++ setenv.c 2006-12-12 18:22:12.000000000 -0800
-@@ -40,32 +40,60 @@
+Index: setenv.c
+===================================================================
+--- setenv.c (revision 41051)
++++ setenv.c (working copy)
+@@ -40,32 +40,79 @@
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
- * setenv --
- * Set the value of the environmental variable "name" to be
- * "value". If rewrite is set, replace any current value.
++ * Create the environment malloc zone and give it a recognizable name.
+ */
+-int
+-setenv(name, value, rewrite)
++__private_extern__ int
++init__zone0(int should_set_errno)
++{
++ if (__zone0) return (0);
++
++ __zone0 = malloc_create_zone(0, 0);
++ if (!__zone0) {
++ if (should_set_errno) {
++ errno = ENOMEM;
++ }
++ return (-1);
++ }
++ malloc_set_zone_name(__zone0, "environ");
++ return (0);
++}
++
++/*
+ * The copy flag may have 3 values:
+ * 1 - make a copy of the name/value pair
+ * 0 - take the name as a user-supplied name=value string
+ * -1 - like 0, except we copy of the name=value string in name
- */
--int
--setenv(name, value, rewrite)
++ */
+__private_extern__ int
+__setenv(name, value, rewrite, copy, environp, envz)
const char *name;
while ( (*c++ = *value++) );
return (0);
}
-@@ -73,48 +101,250 @@
+@@ -73,48 +120,225 @@
int cnt;
char **p;
+}
+
+/****************************************************************************/
-+/*
+ /*
+ * _allocenvstate -- SPI that creates a new state (opaque)
+ */
+void *
+_allocenvstate(void)
+{
-+ return (void *)malloc_create_zone(1000 /* unused */, 0 /* unused */);
++ malloc_zone_t *zone;
++ zone = malloc_create_zone(1000 /* unused */, 0 /* unused */);
++ if (zone) {
++ malloc_set_zone_name(zone, "environ");
++ }
++ return (void *)zone;
+}
+
+/*
+int
+_setenvp(const char *name, const char *value, int rewrite, char ***envp, void *state)
+{
-+ /* insure __zone0 is set up */
-+ if (!__zone0) {
-+ __zone0 = malloc_create_zone(0, 0);
-+ if (!__zone0) {
-+ errno = ENOMEM;
-+ return (-1);
-+ }
-+ }
++ if (init__zone0(1)) return (-1);
+ return (__setenv(name, value, rewrite, 1, envp, (state ? (malloc_zone_t *)state : __zone0)));
+}
+
+int
+_unsetenvp(const char *name, char ***envp, void *state)
+{
-+ /* insure __zone0 is set up */
-+ if (!__zone0) {
-+ __zone0 = malloc_create_zone(0, 0);
-+ if (!__zone0) {
-+ errno = ENOMEM;
-+ return (-1);
-+ }
-+ }
++ if (init__zone0(1)) return (-1);
+ __unsetenv(name, *envp, (state ? (malloc_zone_t *)state : __zone0));
+ return 0;
+}
+ if (*value == '=') /* no `=' in value */
+ ++value;
+ /* insure __zone0 is set up before calling __malloc_check_env_name */
-+ if (!__zone0) {
-+ __zone0 = malloc_create_zone(0, 0);
-+ if (!__zone0) {
-+ errno = ENOMEM;
-+ return (-1);
-+ }
-+ }
++ if (init__zone0(1)) return (-1);
+ __malloc_check_env_name(name); /* see if we are changing a malloc environment variable */
+ return (__setenv(name, value, rewrite, 1, _NSGetEnviron(), __zone0));
+}
+
- /*
++/*
* unsetenv(name) --
* Delete environmental variable "name".
*/
+ return (-1);
+ }
+ /* insure __zone0 is set up before calling __malloc_check_env_name */
-+ if (!__zone0) {
-+ __zone0 = malloc_create_zone(0, 0);
-+ if (!__zone0) {
-+ errno = ENOMEM;
-+ return (-1);
-+ }
-+ }
++ if (init__zone0(1)) return (-1);
+#else /* !__DARWIN_UNIX03 */
+ /* no null ptr or empty str */
+ if(name == NULL || *name == 0)
+ return;
+ /* insure __zone0 is set up before calling __malloc_check_env_name */
-+ if (!__zone0) {
-+ __zone0 = malloc_create_zone(0, 0);
-+ if (!__zone0)
-+ return;
-+ }
++ if (init__zone0(0)) return;
+#endif /* __DARWIN_UNIX03 */
+ __malloc_check_env_name(name); /* see if we are changing a malloc environment variable */
+ __unsetenv(name, *_NSGetEnviron(), __zone0);
---- system.c.orig 2003-05-20 15:23:25.000000000 -0700
-+++ system.c 2006-06-24 18:48:47.000000000 -0700
-@@ -49,6 +49,16 @@
+--- system.c.orig 2008-08-28 02:12:39.000000000 -0700
++++ system.c 2008-08-28 02:15:08.000000000 -0700
+@@ -44,23 +44,61 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/
+ #include <stdlib.h>
+ #include <stddef.h>
+ #include <unistd.h>
++#include <spawn.h>
+ #include <paths.h>
+ #include <errno.h>
#include "un-namespace.h"
#include "libc_private.h"
++#include <crt_externs.h>
++#define environ (*_NSGetEnviron())
++
+#if __DARWIN_UNIX03
+#include <pthread.h>
+
int
__system(command)
const char *command;
-@@ -58,9 +68,24 @@
+ {
+ pid_t pid, savedpid;
+- int pstat;
++ int pstat, err;
struct sigaction ign, intact, quitact;
- sigset_t newsigblock, oldsigblock;
-
-- if (!command) /* just checking... */
-- return(1);
+- sigset_t newsigblock, oldsigblock;
++ sigset_t newsigblock, oldsigblock, defaultsig;
++ posix_spawnattr_t attr;
++ short flags = POSIX_SPAWN_SETSIGMASK;
++ const char *argv[] = {"sh", "-c", command, NULL};
++
+#if __DARWIN_UNIX03
+ if (__unix_conforming == 0)
+ __unix_conforming = 1;
+ return(1);
+ }
+- if (!command) /* just checking... */
+- return(1);
++ if ((err = posix_spawnattr_init(&attr)) != 0) {
++ errno = err;
++ return -1;
++ }
++ (void)sigemptyset(&defaultsig);
+
+#if __DARWIN_UNIX03
+ pthread_mutex_lock(&__systemfn_mutex);
+#endif /* __DARWIN_UNIX03 */
/*
* Ignore SIGINT and SIGQUIT, block SIGCHLD. Remember to save
* existing signal dispositions.
-@@ -83,6 +108,9 @@
- (void)_sigaction(SIGINT, &intact, NULL);
- (void)_sigaction(SIGQUIT, &quitact, NULL);
- (void)_sigprocmask(SIG_SETMASK, &oldsigblock, NULL);
-+#if __DARWIN_UNIX03
-+ pthread_mutex_unlock(&__systemfn_mutex);
-+#endif /* __DARWIN_UNIX03 */
- execl(_PATH_BSHELL, "sh", "-c", command, (char *)NULL);
- _exit(127);
- default: /* parent */
-@@ -95,6 +123,9 @@
+@@ -69,33 +107,45 @@ __system(command)
+ (void)sigemptyset(&ign.sa_mask);
+ ign.sa_flags = 0;
+ (void)_sigaction(SIGINT, &ign, &intact);
++ if (intact.sa_handler != SIG_IGN) {
++ sigaddset(&defaultsig, SIGINT);
++ flags |= POSIX_SPAWN_SETSIGDEF;
++ }
+ (void)_sigaction(SIGQUIT, &ign, &quitact);
++ if (quitact.sa_handler != SIG_IGN) {
++ sigaddset(&defaultsig, SIGQUIT);
++ flags |= POSIX_SPAWN_SETSIGDEF;
++ }
+ (void)sigemptyset(&newsigblock);
+ (void)sigaddset(&newsigblock, SIGCHLD);
+ (void)_sigprocmask(SIG_BLOCK, &newsigblock, &oldsigblock);
+- switch(pid = fork()) {
+- case -1: /* error */
+- break;
+- case 0: /* child */
+- /*
+- * Restore original signal dispositions and exec the command.
+- */
+- (void)_sigaction(SIGINT, &intact, NULL);
+- (void)_sigaction(SIGQUIT, &quitact, NULL);
+- (void)_sigprocmask(SIG_SETMASK, &oldsigblock, NULL);
+- execl(_PATH_BSHELL, "sh", "-c", command, (char *)NULL);
+- _exit(127);
+- default: /* parent */
++ (void)posix_spawnattr_setsigmask(&attr, &oldsigblock);
++ if (flags & POSIX_SPAWN_SETSIGDEF) {
++ (void)posix_spawnattr_setsigdefault(&attr, &defaultsig);
++ }
++ (void)posix_spawnattr_setflags(&attr, flags);
++
++ err = posix_spawn(&pid, _PATH_BSHELL, NULL, &attr, (char *const *)argv, environ);
++ (void)posix_spawnattr_destroy(&attr);
++ if (err == 0) {
+ savedpid = pid;
+ do {
+ pid = _wait4(savedpid, &pstat, 0, (struct rusage *)0);
+ } while (pid == -1 && errno == EINTR);
+- break;
++ if (pid == -1) pstat = -1;
++ } else if (err == ENOMEM || err == EAGAIN) { /* as if fork failed */
++ pstat = -1;
++ } else {
++ pstat = W_EXITCODE(127, 0); /* couldn't exec shell */
+ }
++
(void)_sigaction(SIGINT, &intact, NULL);
(void)_sigaction(SIGQUIT, &quitact, NULL);
(void)_sigprocmask(SIG_SETMASK, &oldsigblock, NULL);
+- return(pid == -1 ? -1 : pstat);
+#if __DARWIN_UNIX03
+ pthread_mutex_unlock(&__systemfn_mutex);
+#endif /* __DARWIN_UNIX03 */
- return(pid == -1 ? -1 : pstat);
++ return(pstat);
}
+ __weak_reference(__system, system);
CWD := ${.CURDIR}/stdlib
MISRCS+=a64l.c grantpt.c l64a.c
+.ifdef FEATURE_BLOCKS
+MISRCS+=qsort_b.c
+.endif # FEATURE_BLOCKS
.include "Makefile.fbsd_begin"
FBSDMISRCS=_Exit_.c abort.c abs.c atexit.c atof.c atoi.c atol.c atoll.c \
FBSDHDRS= atexit.h
.include "Makefile.fbsd_end"
+# special cases: heapsort_b-fbsd.c, merge_b-fbsd.c
+.for _file in heapsort merge
+.ifmake autopatch
+.for _cwd in ${CWD} # This .for statement forces evaluation of ${CWD}
+AUTOPATCHSRCS+= ${_cwd}/${_file}_b-fbsd.c
+${_cwd}/${_file}_b-fbsd.c: ${_cwd}/FreeBSD/${_file}.c
+ ${CP} ${.ALLSRC} ${.TARGET}
+ ${PATCH} ${.TARGET} ${.ALLSRC:S/${_file}/${_file}_b/}.patch
+.endfor # _cwd
+.else # !autopatch
+.ifdef FEATURE_BLOCKS
+MISRCS+= ${_file}_b.c
+.endif # FEATURE_BLOCKS
+.endif # autopatch
+.endfor # _file
+
+# special cases: heapsort_r-fbsd.c
+.for _file in heapsort
+.ifmake autopatch
+.for _cwd in ${CWD} # This .for statement forces evaluation of ${CWD}
+AUTOPATCHSRCS+= ${_cwd}/${_file}_r-fbsd.c
+${_cwd}/${_file}_r-fbsd.c: ${_cwd}/FreeBSD/${_file}.c
+ ${CP} ${.ALLSRC} ${.TARGET}
+ ${PATCH} ${.TARGET} ${.ALLSRC:S/${_file}/${_file}_r/}.patch
+.endfor # _cwd
+.else # !autopatch
+MISRCS+= ${_file}_r.c
+.endif # autopatch
+.endfor # _file
+
+# special cases: psort{,_b,_r}-fbsd.c psort.3
+.for _cwd in ${CWD} # This .for statement forces evaluation of ${CWD}
+.ifmake autopatch
+AUTOPATCHSRCS+= ${_cwd}/psort-fbsd.c
+${_cwd}/psort-fbsd.c: ${_cwd}/qsort-fbsd.c
+ ${CP} ${.ALLSRC} ${.TARGET}
+ ${PATCH} ${.TARGET} ${_cwd}/psort.c.patch
+.for _file in psort_b psort_r
+AUTOPATCHSRCS+= ${_cwd}/${_file}-fbsd.c
+${_cwd}/${_file}-fbsd.c: ${_cwd}/psort-fbsd.c
+ ${LN} ${.ALLSRC} ${.TARGET}
+.endfor # _file
+AUTOPATCHMAN+= ${_cwd}/psort.3
+${_cwd}/psort.3: ${_cwd}/qsort.3
+ ${CP} ${.ALLSRC} ${.TARGET}
+ ${PATCH} ${.TARGET} ${_cwd}/psort.3.patch
+.else # !autopatch
+MISRCS+= psort.c psort_r.c
+CFLAGS-psort_r-fbsd.c += -DI_AM_PSORT_R
+.ifdef FEATURE_BLOCKS
+MISRCS+= psort_b.c
+CFLAGS-psort_b-fbsd.c += -DI_AM_PSORT_B
+.endif # FEATURE_BLOCKS
+.endif # autopatch
+.endfor # _cwd
+
.include "Makefile.nbsd_begin"
NBSDMISRCS = strfmon.c
.include "Makefile.nbsd_end"
CFLAGS-system-fbsd.c += -DLIBC_ALIAS_SYSTEM
.if ${LIB} == "c"
-MAN3+= a64l.3 grantpt.3
+MAN3+= a64l.3 grantpt.3 psort.3
MAN3+= strtod_l.3 strtol_l.3
.include "Makefile.fbsd_begin"
MLINKS+= a64l.3 l64a.3
+.ifdef FEATURE_BLOCKS
+MLINKS+= atexit.3 atexit_b.3
+.endif # FEATURE_BLOCKS
+
MLINKS+= atof.3 atof_l.3
MLINKS+= atoi.3 atoi_l.3
atol.3 atoll.3 \
atol.3 atoll_l.3
+.ifdef FEATURE_BLOCKS
+MLINKS+= bsearch.3 bsearch_b.3
+.endif # FEATURE_BLOCKS
+
MLINKS+= ecvt.3 fcvt.3 \
ecvt.3 gcvt.3
MLINKS+= lsearch.3 lfind.3
+MLINKS+= psort.3 psort_r.3
+.ifdef FEATURE_BLOCKS
+MLINKS+= psort.3 psort_b.3
+.endif # FEATURE_BLOCKS
+
MLINKS+= qsort.3 heapsort.3 \
qsort.3 mergesort.3 \
qsort.3 qsort_r.3
+.ifdef FEATURE_BLOCKS
+MLINKS+= qsort.3 heapsort_b.3 \
+ qsort.3 mergesort_b.3 \
+ qsort.3 qsort_b.3
+.endif # FEATURE_BLOCKS
MLINKS+= radixsort.3 sradixsort.3
#include "namespace.h"
#include <signal.h>
+#include <stdarg.h>
#include <stdlib.h>
#include <stddef.h>
#include <unistd.h>
extern void (*__cleanup)();
extern void __abort(void) __dead2;
+extern const char *__crashreporter_info__;
#define TIMEOUT 10000 /* 10 milliseconds */
{
struct sigaction act;
+ if (!__crashreporter_info__)
+ __crashreporter_info__ = "abort() called";
/*
* POSIX requires we flush stdio buffers on abort.
* XXX ISO C requires that abort() be async-signal-safe.
{
struct sigaction act;
+ if (!__crashreporter_info__)
+ __crashreporter_info__ = "__abort() called";
act.sa_handler = SIG_DFL;
act.sa_flags = 0;
sigfillset(&act.sa_mask);
usleep(TIMEOUT); /* give time for signal to happen */
__builtin_trap(); /* never exit normally */
}
+
+__private_extern__ void
+abort_report_np(const char *fmt, ...)
+{
+ char *str;
+ va_list ap;
+
+ va_start(ap, fmt);
+ vasprintf(&str, fmt, ap);
+ va_end(ap);
+ __crashreporter_info__ = str ? str : fmt;
+ abort();
+}
#include "atexit.h"
#include "un-namespace.h"
+#ifdef __BLOCKS__
+#include <Block.h>
+#endif /* __BLOCKS__ */
#include "libc_private.h"
#define ATEXIT_FN_EMPTY 0
#define ATEXIT_FN_STD 1
#define ATEXIT_FN_CXA 2
+#ifdef __BLOCKS__
+#define ATEXIT_FN_BLK 3
+#endif /* __BLOCKS__ */
static pthread_mutex_t atexit_mutex = PTHREAD_MUTEX_INITIALIZER;
union {
void (*std_func)(void);
void (*cxa_func)(void *);
+#ifdef __BLOCKS__
+ void (^block)(void);
+#endif /* __BLOCKS__ */
} fn_ptr; /* function pointer */
void *fn_arg; /* argument for CXA callback */
void *fn_dso; /* shared module handle */
int error;
fn.fn_type = ATEXIT_FN_STD;
- fn.fn_ptr.std_func = func;;
+ fn.fn_ptr.std_func = func;
fn.fn_arg = NULL;
#if defined(__DYNAMIC__)
if ( dladdr(func, &info) )
return (error);
}
+#ifdef __BLOCKS__
+int
+atexit_b(void (^block)(void))
+{
+ struct atexit_fn fn;
+ struct dl_info info;
+ int error;
+
+ fn.fn_type = ATEXIT_FN_BLK;
+ fn.fn_ptr.block = Block_copy(block);
+ fn.fn_arg = NULL;
+#if defined(__DYNAMIC__)
+ if ( dladdr(block, &info) )
+ fn.fn_dso = info.dli_fbase;
+ else
+ fn.fn_dso = NULL;
+#else /* ! defined(__DYNAMIC__) */
+ fn.fn_dso = NULL;
+#endif /* defined(__DYNAMIC__) */
+
+ error = atexit_register(&fn);
+ return (error);
+}
+#endif /* __BLOCKS__ */
+
/*
* Register a function to be performed at exit or when an shared object
* with given dso handle is unloaded dynamically.
fn.fn_ptr.cxa_func(fn.fn_arg);
else if (fn.fn_type == ATEXIT_FN_STD)
fn.fn_ptr.std_func();
+#ifdef __BLOCKS__
+ else if (fn.fn_type == ATEXIT_FN_BLK)
+ fn.fn_ptr.block();
+#endif /* __BLOCKS__ */
_MUTEX_LOCK(&atexit_mutex);
if (new_registration)
goto restart;
.\" @(#)atexit.3 8.1 (Berkeley) 6/4/93
.\" $FreeBSD: src/lib/libc/stdlib/atexit.3,v 1.10 2002/12/18 13:33:03 ru Exp $
.\"
-.Dd September 6, 2002
+.Dd May 20, 2008
.Dt ATEXIT 3
.Os
.Sh NAME
.Nm atexit
.Nd register a function to be called on exit
-.Sh LIBRARY
-.Lb libc
.Sh SYNOPSIS
.In stdlib.h
.Ft int
.Fn atexit "void (*func)(void)"
+#ifdef UNIFDEF_BLOCKS
+.Ft int
+.Fn atexit_b "void (^block)(void)"
+#endif
.Sh DESCRIPTION
The
.Fn atexit
.Fn main .
Functions so registered are called in reverse order;
no arguments are passed.
+#ifdef UNIFDEF_BLOCKS
+.Pp
+The
+.Fn atexit_b
+function is like
+.Fn atexit
+except the callback is a block pointer instead of a function pointer.
+.Bd -ragged -offset indent
+Note: The
+.Fn Block_copy
+function (defined in
+.In Blocks.h )
+is used by
+.Fn atexit_b
+to make a copy of the block, especially for the case when a stack-based
+block might go out of scope when the subroutine returns.
+.Ed
+#endif
.Pp
-These functions must not call
+These callbacks must not call
.Fn exit ;
if it should be necessary to terminate the process while in such a
function, the
.Xr _exit 2
function should be used.
-(Alternatively, the function may cause abnormal
+(Alternatively, the callbacks may cause abnormal
process termination, for example by calling
.Xr abort 3 . )
.Pp
-At least 32 functions can always be registered;
+At least 32 callbacks can always be registered;
more are allowed as long as sufficient memory can be allocated.
.\" XXX {ATEXIT_MAX} is not implemented yet
.Sh RETURN VALUES
-.Rv -std atexit
+#ifdef UNIFDEF_BLOCKS
+.ds ATEXIT_B atexit_b
+#endif
+.Rv -std atexit \*[ATEXIT_B]
.Sh ERRORS
.Bl -tag -width Er
.It Bq Er ENOMEM
+++ /dev/null
-./bsearch.c
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)bsearch.c 8.1 (Berkeley) 6/4/93";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/bsearch.c,v 1.3 2002/03/21 22:48:41 obrien Exp $");
+
+#include <stddef.h>
+#include <stdlib.h>
+
+/*
+ * Perform a binary search.
+ *
+ * The code below is a bit sneaky. After a comparison fails, we
+ * divide the work in half by moving either left or right. If lim
+ * is odd, moving left simply involves halving lim: e.g., when lim
+ * is 5 we look at item 2, so we change lim to 2 so that we will
+ * look at items 0 & 1. If lim is even, the same applies. If lim
+ * is odd, moving right again involves halving lim, this time moving
+ * the base up one item past p: e.g., when lim is 5 we change base
+ * to item 3 and make lim 2 so that we will look at items 3 and 4.
+ * If lim is even, however, we have to shrink it by one before
+ * halving: e.g., when lim is 4, we still looked at item 2, so we
+ * have to make lim 3, then halve, obtaining 1, so that we will only
+ * look at item 3.
+ */
+void *
+bsearch(key, base0, nmemb, size, compar)
+ const void *key;
+ const void *base0;
+ size_t nmemb;
+ size_t size;
+ int (*compar)(const void *, const void *);
+{
+ const char *base = base0;
+ size_t lim;
+ int cmp;
+ const void *p;
+
+ for (lim = nmemb; lim != 0; lim >>= 1) {
+ p = base + (lim >> 1) * size;
+ cmp = (*compar)(key, p);
+ if (cmp == 0)
+ return ((void *)p);
+ if (cmp > 0) { /* key > p: move right */
+ base = (char *)p + size;
+ lim--;
+ } /* else move left */
+ }
+ return (NULL);
+}
+
+#ifdef __BLOCKS__
+void *
+bsearch_b(key, base0, nmemb, size, compar)
+ const void *key;
+ const void *base0;
+ size_t nmemb;
+ size_t size;
+ int (^compar)(const void *, const void *);
+{
+ const char *base = base0;
+ size_t lim;
+ int cmp;
+ const void *p;
+
+ for (lim = nmemb; lim != 0; lim >>= 1) {
+ p = base + (lim >> 1) * size;
+ cmp = compar(key, p);
+ if (cmp == 0)
+ return ((void *)p);
+ if (cmp > 0) { /* key > p: move right */
+ base = (char *)p + size;
+ lim--;
+ } /* else move left */
+ }
+ return (NULL);
+}
+#endif /* __BLOCKS__ */
.\" @(#)bsearch.3 8.3 (Berkeley) 4/19/94
.\" $FreeBSD: src/lib/libc/stdlib/bsearch.3,v 1.8 2001/09/07 14:46:35 asmodai Exp $
.\"
-.Dd April 19, 1994
+.Dd May 20, 2008
.Dt BSEARCH 3
.Os
.Sh NAME
+#ifdef UNIFDEF_BLOCKS
+.Nm bsearch ,
+.Nm bsearch_b
+#else
.Nm bsearch
+#endif
.Nd binary search of a sorted table
-.Sh LIBRARY
-.Lb libc
.Sh SYNOPSIS
.In stdlib.h
.Ft void *
.Fn bsearch "const void *key" "const void *base" "size_t nel" "size_t width" "int (*compar) (const void *, const void *)"
+#ifdef UNIFDEF_BLOCKS
+.Ft void *
+.Fn bsearch_b "const void *key" "const void *base" "size_t nel" "size_t width" "int (^compar) (const void *, const void *)"
+#endif
.Sh DESCRIPTION
The
.Fn bsearch
.Fa key
object is found, respectively, to be less than, to match, or be
greater than the array member.
+#ifdef UNIFDEF_BLOCKS
+.Pp
+The
+.Fn bsearch_b
+function is like
+.Fn bsearch
+except the callback
+.Fa compar
+is a block pointer instead of a function pointer.
+#endif
.Sh RETURN VALUES
The
.Fn bsearch
-function returns a pointer to a matching member of the array, or a null
+#ifdef UNIFDEF_BLOCKS
+and
+.Fn bsearch_b
+functions
+#else
+function
+#endif
+returns a pointer to a matching member of the array, or a null
pointer if no match is found.
If two members compare as equal, which member is matched is unspecified.
.Sh SEE ALSO
--- /dev/null
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ronnie Kon at Mindcraft Inc., Kevin Lew and Elmer Yglesias.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)heapsort.c 8.1 (Berkeley) 6/4/93";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/heapsort.c,v 1.6 2008/01/13 02:11:10 das Exp $");
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+/*
+ * Swap two areas of size number of bytes. Although qsort(3) permits random
+ * blocks of memory to be sorted, sorting pointers is almost certainly the
+ * common case (and, were it not, could easily be made so). Regardless, it
+ * isn't worth optimizing; the SWAP's get sped up by the cache, and pointer
+ * arithmetic gets lost in the time required for comparison function calls.
+ */
+#define SWAP(a, b, count, size, tmp) { \
+ count = size; \
+ do { \
+ tmp = *a; \
+ *a++ = *b; \
+ *b++ = tmp; \
+ } while (--count); \
+}
+
+/* Copy one block of size size to another. */
+#define COPY(a, b, count, size, tmp1, tmp2) { \
+ count = size; \
+ tmp1 = a; \
+ tmp2 = b; \
+ do { \
+ *tmp1++ = *tmp2++; \
+ } while (--count); \
+}
+
+/*
+ * Build the list into a heap, where a heap is defined such that for
+ * the records K1 ... KN, Kj/2 >= Kj for 1 <= j/2 <= j <= N.
+ *
+ * There are two cases.  If j == nmemb, select largest of Ki and Kj.  If
+ * j < nmemb, select largest of Ki, Kj and Kj+1.
+ */
+#define CREATE(initval, nmemb, par_i, child_i, par, child, size, count, tmp) { \
+ for (par_i = initval; (child_i = par_i * 2) <= nmemb; \
+ par_i = child_i) { \
+ child = base + child_i * size; \
+ if (child_i < nmemb && compar(child, child + size) < 0) { \
+ child += size; \
+ ++child_i; \
+ } \
+ par = base + par_i * size; \
+ if (compar(child, par) <= 0) \
+ break; \
+ SWAP(par, child, count, size, tmp); \
+ } \
+}
+
+/*
+ * Select the top of the heap and 'heapify'. Since by far the most expensive
+ * action is the call to the compar function, a considerable optimization
+ * in the average case can be achieved due to the fact that k, the displaced
+ * element, is usually quite small, so it would be preferable to first
+ * heapify, always maintaining the invariant that the larger child is copied
+ * over its parent's record.
+ *
+ * Then, starting from the *bottom* of the heap, finding k's correct place,
+ * again maintaining the invariant.  As a result of the invariant no element
+ * is 'lost' when k is assigned its correct place in the heap.
+ *
+ * The time savings from this optimization are on the order of 15-20% for the
+ * average case. See Knuth, Vol. 3, page 158, problem 18.
+ *
+ * XXX Don't break the #define SELECT line, below. Reiser cpp gets upset.
+ */
+#define SELECT(par_i, child_i, nmemb, par, child, size, k, count, tmp1, tmp2) { \
+ for (par_i = 1; (child_i = par_i * 2) <= nmemb; par_i = child_i) { \
+ child = base + child_i * size; \
+ if (child_i < nmemb && compar(child, child + size) < 0) { \
+ child += size; \
+ ++child_i; \
+ } \
+ par = base + par_i * size; \
+ COPY(par, child, count, size, tmp1, tmp2); \
+ } \
+ for (;;) { \
+ child_i = par_i; \
+ par_i = child_i / 2; \
+ child = base + child_i * size; \
+ par = base + par_i * size; \
+ if (child_i == 1 || compar(k, par) < 0) { \
+ COPY(child, k, count, size, tmp1, tmp2); \
+ break; \
+ } \
+ COPY(child, par, count, size, tmp1, tmp2); \
+ } \
+}
+
+/*
+ * Heapsort -- Knuth, Vol. 3, page 145. Runs in O (N lg N), both average
+ * and worst. While heapsort is faster than the worst case of quicksort,
+ * the BSD quicksort does median selection so that the chance of finding
+ * a data set that will trigger the worst case is nonexistent. Heapsort's
+ * only advantage over quicksort is that it requires little additional memory.
+ */
+int
+heapsort_b(vbase, nmemb, size, compar)
+ void *vbase;
+ size_t nmemb, size;
+ int (^compar)(const void *, const void *);
+{
+ size_t cnt, i, j, l;
+ char tmp, *tmp1, *tmp2;
+ char *base, *k, *p, *t;
+
+ if (nmemb <= 1)
+ return (0);
+
+ if (!size) {
+ errno = EINVAL;
+ return (-1);
+ }
+
+ if ((k = malloc(size)) == NULL)
+ return (-1);
+
+ /*
+ * Items are numbered from 1 to nmemb, so offset from size bytes
+ * below the starting address.
+ */
+ base = (char *)vbase - size;
+
+ for (l = nmemb / 2 + 1; --l;)
+ CREATE(l, nmemb, i, j, t, p, size, cnt, tmp);
+
+ /*
+ * For each element of the heap, save the largest element into its
+ * final slot, save the displaced element (k), then recreate the
+ * heap.
+ */
+ while (nmemb > 1) {
+ COPY(k, base + nmemb * size, cnt, size, tmp1, tmp2);
+ COPY(base + nmemb * size, base + size, cnt, size, tmp1, tmp2);
+ --nmemb;
+ SELECT(i, j, nmemb, t, p, size, k, cnt, tmp1, tmp2);
+ }
+ free(k);
+ return (0);
+}
--- /dev/null
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ronnie Kon at Mindcraft Inc., Kevin Lew and Elmer Yglesias.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)heapsort.c 8.1 (Berkeley) 6/4/93";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/heapsort.c,v 1.6 2008/01/13 02:11:10 das Exp $");
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+/*
+ * Swap two areas of size number of bytes. Although qsort(3) permits random
+ * blocks of memory to be sorted, sorting pointers is almost certainly the
+ * common case (and, were it not, could easily be made so). Regardless, it
+ * isn't worth optimizing; the SWAP's get sped up by the cache, and pointer
+ * arithmetic gets lost in the time required for comparison function calls.
+ */
+#define SWAP(a, b, count, size, tmp) { \
+ count = size; \
+ do { \
+ tmp = *a; \
+ *a++ = *b; \
+ *b++ = tmp; \
+ } while (--count); \
+}
+
+/* Copy one block of size size to another. */
+#define COPY(a, b, count, size, tmp1, tmp2) { \
+ count = size; \
+ tmp1 = a; \
+ tmp2 = b; \
+ do { \
+ *tmp1++ = *tmp2++; \
+ } while (--count); \
+}
+
+/*
+ * Build the list into a heap, where a heap is defined such that for
+ * the records K1 ... KN, Kj/2 >= Kj for 1 <= j/2 <= j <= N.
+ *
+ * There are two cases.  If j == nmemb, select largest of Ki and Kj.  If
+ * j < nmemb, select largest of Ki, Kj and Kj+1.
+ */
+#define CREATE(initval, nmemb, par_i, child_i, par, child, size, count, tmp) { \
+ for (par_i = initval; (child_i = par_i * 2) <= nmemb; \
+ par_i = child_i) { \
+ child = base + child_i * size; \
+ if (child_i < nmemb && compar(thunk, child, child + size) < 0) { \
+ child += size; \
+ ++child_i; \
+ } \
+ par = base + par_i * size; \
+ if (compar(thunk, child, par) <= 0) \
+ break; \
+ SWAP(par, child, count, size, tmp); \
+ } \
+}
+
+/*
+ * Select the top of the heap and 'heapify'. Since by far the most expensive
+ * action is the call to the compar function, a considerable optimization
+ * in the average case can be achieved due to the fact that k, the displaced
+ * element, is usually quite small, so it would be preferable to first
+ * heapify, always maintaining the invariant that the larger child is copied
+ * over its parent's record.
+ *
+ * Then, starting from the *bottom* of the heap, finding k's correct place,
+ * again maintaining the invariant.  As a result of the invariant no element
+ * is 'lost' when k is assigned its correct place in the heap.
+ *
+ * The time savings from this optimization are on the order of 15-20% for the
+ * average case. See Knuth, Vol. 3, page 158, problem 18.
+ *
+ * XXX Don't break the #define SELECT line, below. Reiser cpp gets upset.
+ */
+#define SELECT(par_i, child_i, nmemb, par, child, size, k, count, tmp1, tmp2) { \
+ for (par_i = 1; (child_i = par_i * 2) <= nmemb; par_i = child_i) { \
+ child = base + child_i * size; \
+ if (child_i < nmemb && compar(thunk, child, child + size) < 0) { \
+ child += size; \
+ ++child_i; \
+ } \
+ par = base + par_i * size; \
+ COPY(par, child, count, size, tmp1, tmp2); \
+ } \
+ for (;;) { \
+ child_i = par_i; \
+ par_i = child_i / 2; \
+ child = base + child_i * size; \
+ par = base + par_i * size; \
+ if (child_i == 1 || compar(thunk, k, par) < 0) { \
+ COPY(child, k, count, size, tmp1, tmp2); \
+ break; \
+ } \
+ COPY(child, par, count, size, tmp1, tmp2); \
+ } \
+}
+
+/*
+ * Heapsort -- Knuth, Vol. 3, page 145. Runs in O (N lg N), both average
+ * and worst. While heapsort is faster than the worst case of quicksort,
+ * the BSD quicksort does median selection so that the chance of finding
+ * a data set that will trigger the worst case is nonexistent. Heapsort's
+ * only advantage over quicksort is that it requires little additional memory.
+ */
+__private_extern__ int
+__heapsort_r(vbase, nmemb, size, thunk, compar)
+ void *vbase;
+ size_t nmemb, size;
+ void *thunk;
+ int (*compar)(void *, const void *, const void *);
+{
+ size_t cnt, i, j, l;
+ char tmp, *tmp1, *tmp2;
+ char *base, *k, *p, *t;
+
+ if (nmemb <= 1)
+ return (0);
+
+ if (!size) {
+ errno = EINVAL;
+ return (-1);
+ }
+
+ if ((k = malloc(size)) == NULL)
+ return (-1);
+
+ /*
+ * Items are numbered from 1 to nmemb, so offset from size bytes
+ * below the starting address.
+ */
+ base = (char *)vbase - size;
+
+ for (l = nmemb / 2 + 1; --l;)
+ CREATE(l, nmemb, i, j, t, p, size, cnt, tmp);
+
+ /*
+ * For each element of the heap, save the largest element into its
+ * final slot, save the displaced element (k), then recreate the
+ * heap.
+ */
+ while (nmemb > 1) {
+ COPY(k, base + nmemb * size, cnt, size, tmp1, tmp2);
+ COPY(base + nmemb * size, base + size, cnt, size, tmp1, tmp2);
+ --nmemb;
+ SELECT(i, j, nmemb, t, p, size, k, cnt, tmp1, tmp2);
+ }
+ free(k);
+ return (0);
+}
+++ /dev/null
-./merge.c
\ No newline at end of file
--- /dev/null
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Peter McIlroy.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)merge.c 8.2 (Berkeley) 2/14/94";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/merge.c,v 1.8 2007/01/09 00:28:10 imp Exp $");
+
+/*
+ * Hybrid exponential search/linear search merge sort with hybrid
+ * natural/pairwise first pass. Requires about .3% more comparisons
+ * for random data than LSMS with pairwise first pass alone.
+ * It works for objects as small as two bytes.
+ */
+
+#define NATURAL
+#define THRESHOLD 16 /* Best choice for natural merge cut-off. */
+
+/* #define NATURAL to get hybrid natural merge.
+ * (The default is pairwise merging.)
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+static void setup(u_char *, u_char *, size_t, size_t,
+ int (*)(const void *, const void *));
+static void insertionsort(u_char *, size_t, size_t,
+ int (*)(const void *, const void *));
+
+#define ISIZE sizeof(int)
+#define PSIZE sizeof(u_char *)
+#define ICOPY_LIST(src, dst, last) \
+ do \
+ *(int*)dst = *(int*)src, src += ISIZE, dst += ISIZE; \
+ while(src < last)
+#define ICOPY_ELT(src, dst, i) \
+ do \
+ *(int*) dst = *(int*) src, src += ISIZE, dst += ISIZE; \
+ while (i -= ISIZE)
+
+#define CCOPY_LIST(src, dst, last) \
+ do \
+ *dst++ = *src++; \
+ while (src < last)
+#define CCOPY_ELT(src, dst, i) \
+ do \
+ *dst++ = *src++; \
+ while (i -= 1)
+
+/*
+ * Find the next possible pointer head. (Trickery for forcing an array
+ * to do double duty as a linked list when objects do not align with word
+ * boundaries.)
+ */
+/* Assumption: PSIZE is a power of 2. */
+#define EVAL(p) (u_char **) \
+ ((u_char *)0 + \
+ (((u_char *)p + PSIZE - 1 - (u_char *) 0) & ~(PSIZE - 1)))
+
+/*
+ * Arguments are as for qsort.
+ */
+int
+mergesort(base, nmemb, size, cmp)
+ void *base;
+ size_t nmemb;
+ size_t size;
+ int (*cmp)(const void *, const void *);
+{
+ size_t i;
+ int sense;
+ int big, iflag;
+ u_char *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
+ u_char *list2, *list1, *p2, *p, *last, **p1;
+
+ if (size < PSIZE / 2) { /* Pointers must fit into 2 * size. */
+ errno = EINVAL;
+ return (-1);
+ }
+
+ if (nmemb == 0)
+ return (0);
+
+ /*
+ * XXX
+ * Stupid subtraction for the Cray.
+ */
+ iflag = 0;
+ if (!(size % ISIZE) && !(((char *)base - (char *)0) % ISIZE))
+ iflag = 1;
+
+ if ((list2 = malloc(nmemb * size + PSIZE)) == NULL)
+ return (-1);
+
+ list1 = base;
+ setup(list1, list2, nmemb, size, cmp);
+ last = list2 + nmemb * size;
+ i = big = 0;
+ while (*EVAL(list2) != last) {
+ l2 = list1;
+ p1 = EVAL(list1);
+ for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
+ p2 = *EVAL(p2);
+ f1 = l2;
+ f2 = l1 = list1 + (p2 - list2);
+ if (p2 != last)
+ p2 = *EVAL(p2);
+ l2 = list1 + (p2 - list2);
+ while (f1 < l1 && f2 < l2) {
+ if ((*cmp)(f1, f2) <= 0) {
+ q = f2;
+ b = f1, t = l1;
+ sense = -1;
+ } else {
+ q = f1;
+ b = f2, t = l2;
+ sense = 0;
+ }
+ if (!big) { /* here i = 0 */
+ while ((b += size) < t && cmp(q, b) >sense)
+ if (++i == 6) {
+ big = 1;
+ goto EXPONENTIAL;
+ }
+ } else {
+EXPONENTIAL: for (i = size; ; i <<= 1)
+ if ((p = (b + i)) >= t) {
+ if ((p = t - size) > b &&
+ (*cmp)(q, p) <= sense)
+ t = p;
+ else
+ b = p;
+ break;
+ } else if ((*cmp)(q, p) <= sense) {
+ t = p;
+ if (i == size)
+ big = 0;
+ goto FASTCASE;
+ } else
+ b = p;
+ while (t > b+size) {
+ i = (((t - b) / size) >> 1) * size;
+ if ((*cmp)(q, p = b + i) <= sense)
+ t = p;
+ else
+ b = p;
+ }
+ goto COPY;
+FASTCASE: while (i > size)
+ if ((*cmp)(q,
+ p = b + (i >>= 1)) <= sense)
+ t = p;
+ else
+ b = p;
+COPY: b = t;
+ }
+ i = size;
+ if (q == f1) {
+ if (iflag) {
+ ICOPY_LIST(f2, tp2, b);
+ ICOPY_ELT(f1, tp2, i);
+ } else {
+ CCOPY_LIST(f2, tp2, b);
+ CCOPY_ELT(f1, tp2, i);
+ }
+ } else {
+ if (iflag) {
+ ICOPY_LIST(f1, tp2, b);
+ ICOPY_ELT(f2, tp2, i);
+ } else {
+ CCOPY_LIST(f1, tp2, b);
+ CCOPY_ELT(f2, tp2, i);
+ }
+ }
+ }
+ if (f2 < l2) {
+ if (iflag)
+ ICOPY_LIST(f2, tp2, l2);
+ else
+ CCOPY_LIST(f2, tp2, l2);
+ } else if (f1 < l1) {
+ if (iflag)
+ ICOPY_LIST(f1, tp2, l1);
+ else
+ CCOPY_LIST(f1, tp2, l1);
+ }
+ *p1 = l2;
+ }
+ tp2 = list1; /* swap list1, list2 */
+ list1 = list2;
+ list2 = tp2;
+ last = list2 + nmemb*size;
+ }
+ if (base == list2) {
+ memmove(list2, list1, nmemb*size);
+ list2 = list1;
+ }
+ free(list2);
+ return (0);
+}
+
+#define swap(a, b) { \
+ s = b; \
+ i = size; \
+ do { \
+ tmp = *a; *a++ = *s; *s++ = tmp; \
+ } while (--i); \
+ a -= size; \
+ }
+#define reverse(bot, top) { \
+ s = top; \
+ do { \
+ i = size; \
+ do { \
+ tmp = *bot; *bot++ = *s; *s++ = tmp; \
+ } while (--i); \
+ s -= size2; \
+ } while(bot < s); \
+}
+
+/*
+ * Optional hybrid natural/pairwise first pass. Eats up list1 in runs of
+ * increasing order, list2 in a corresponding linked list. Checks for runs
+ * when THRESHOLD/2 pairs compare with same sense. (Only used when NATURAL
+ * is defined. Otherwise simple pairwise merging is used.)
+ */
+void
+setup(list1, list2, n, size, cmp)
+ size_t n, size;
+ int (*cmp)(const void *, const void *);
+ u_char *list1, *list2;
+{
+ size_t i, size2;
+ int length, tmp, sense;
+ u_char *f1, *f2, *s, *l2, *last, *p2;
+
+ size2 = size*2;
+ if (n <= 5) {
+ insertionsort(list1, n, size, cmp);
+ *EVAL(list2) = (u_char*) list2 + n*size;
+ return;
+ }
+ /*
+ * Avoid running pointers out of bounds; limit n to evens
+ * for simplicity.
+ */
+ i = 4 + (n & 1);
+ insertionsort(list1 + (n - i) * size, i, size, cmp);
+ last = list1 + size * (n - i);
+ *EVAL(list2 + (last - list1)) = list2 + n * size;
+
+#ifdef NATURAL
+ p2 = list2;
+ f1 = list1;
+ sense = (cmp(f1, f1 + size) > 0);
+ for (; f1 < last; sense = !sense) {
+ length = 2;
+ /* Find pairs with same sense. */
+ for (f2 = f1 + size2; f2 < last; f2 += size2) {
+ if ((cmp(f2, f2+ size) > 0) != sense)
+ break;
+ length += 2;
+ }
+ if (length < THRESHOLD) { /* Pairwise merge */
+ do {
+ p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
+ if (sense > 0)
+ swap (f1, f1 + size);
+ } while ((f1 += size2) < f2);
+ } else { /* Natural merge */
+ l2 = f2;
+ for (f2 = f1 + size2; f2 < l2; f2 += size2) {
+ if ((cmp(f2-size, f2) > 0) != sense) {
+ p2 = *EVAL(p2) = f2 - list1 + list2;
+ if (sense > 0)
+ reverse(f1, f2-size);
+ f1 = f2;
+ }
+ }
+ if (sense > 0)
+ reverse (f1, f2-size);
+ f1 = f2;
+ if (f2 < last || cmp(f2 - size, f2) > 0)
+ p2 = *EVAL(p2) = f2 - list1 + list2;
+ else
+ p2 = *EVAL(p2) = list2 + n*size;
+ }
+ }
+#else /* pairwise merge only. */
+ for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
+ p2 = *EVAL(p2) = p2 + size2;
+ if (cmp (f1, f1 + size) > 0)
+ swap(f1, f1 + size);
+ }
+#endif /* NATURAL */
+}
+
+/*
+ * This is to avoid out-of-bounds addresses in sorting the
+ * last 4 elements.
+ */
+static void
+insertionsort(a, n, size, cmp)
+ u_char *a;
+ size_t n, size;
+ int (*cmp)(const void *, const void *);
+{
+ u_char *ai, *s, *t, *u, tmp;
+ int i;
+
+ for (ai = a+size; --n >= 1; ai += size)
+ for (t = ai; t > a; t -= size) {
+ u = t - size;
+ if (cmp(u, t) <= 0)
+ break;
+ swap(u, t);
+ }
+}
--- /dev/null
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Peter McIlroy.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)merge.c 8.2 (Berkeley) 2/14/94";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/merge.c,v 1.8 2007/01/09 00:28:10 imp Exp $");
+
+/*
+ * Hybrid exponential search/linear search merge sort with hybrid
+ * natural/pairwise first pass. Requires about .3% more comparisons
+ * for random data than LSMS with pairwise first pass alone.
+ * It works for objects as small as two bytes.
+ */
+
+#define NATURAL
+#define THRESHOLD 16 /* Best choice for natural merge cut-off. */
+
+/* #define NATURAL to get hybrid natural merge.
+ * (The default is pairwise merging.)
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+static void setup(u_char *, u_char *, size_t, size_t,
+ int (^)(const void *, const void *));
+static void insertionsort(u_char *, size_t, size_t,
+ int (^)(const void *, const void *));
+
+#define ISIZE sizeof(int)
+#define PSIZE sizeof(u_char *)
+#define ICOPY_LIST(src, dst, last) \
+ do \
+ *(int*)dst = *(int*)src, src += ISIZE, dst += ISIZE; \
+ while(src < last)
+#define ICOPY_ELT(src, dst, i) \
+ do \
+ *(int*) dst = *(int*) src, src += ISIZE, dst += ISIZE; \
+ while (i -= ISIZE)
+
+#define CCOPY_LIST(src, dst, last) \
+ do \
+ *dst++ = *src++; \
+ while (src < last)
+#define CCOPY_ELT(src, dst, i) \
+ do \
+ *dst++ = *src++; \
+ while (i -= 1)
+
+/*
+ * Find the next possible pointer head. (Trickery for forcing an array
+ * to do double duty as a linked list when objects do not align with word
+ * boundaries.)
+ */
+/* Assumption: PSIZE is a power of 2. */
+#define EVAL(p) (u_char **) \
+ ((u_char *)0 + \
+ (((u_char *)p + PSIZE - 1 - (u_char *) 0) & ~(PSIZE - 1)))
+
+/*
+ * Arguments are as for qsort.
+ */
+int
+mergesort_b(base, nmemb, size, cmp)
+ void *base;
+ size_t nmemb;
+ size_t size;
+ int (^cmp)(const void *, const void *);
+{
+ size_t i;
+ int sense;
+ int big, iflag;
+ u_char *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
+ u_char *list2, *list1, *p2, *p, *last, **p1;
+
+ if (size < PSIZE / 2) { /* Pointers must fit into 2 * size. */
+ errno = EINVAL;
+ return (-1);
+ }
+
+ if (nmemb == 0)
+ return (0);
+
+ /*
+ * XXX
+ * Stupid subtraction for the Cray.
+ */
+ iflag = 0;
+ if (!(size % ISIZE) && !(((char *)base - (char *)0) % ISIZE))
+ iflag = 1;
+
+ if ((list2 = malloc(nmemb * size + PSIZE)) == NULL)
+ return (-1);
+
+ list1 = base;
+ setup(list1, list2, nmemb, size, cmp);
+ last = list2 + nmemb * size;
+ i = big = 0;
+ while (*EVAL(list2) != last) {
+ l2 = list1;
+ p1 = EVAL(list1);
+ for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
+ p2 = *EVAL(p2);
+ f1 = l2;
+ f2 = l1 = list1 + (p2 - list2);
+ if (p2 != last)
+ p2 = *EVAL(p2);
+ l2 = list1 + (p2 - list2);
+ while (f1 < l1 && f2 < l2) {
+ if (cmp(f1, f2) <= 0) {
+ q = f2;
+ b = f1, t = l1;
+ sense = -1;
+ } else {
+ q = f1;
+ b = f2, t = l2;
+ sense = 0;
+ }
+ if (!big) { /* here i = 0 */
+ while ((b += size) < t && cmp(q, b) >sense)
+ if (++i == 6) {
+ big = 1;
+ goto EXPONENTIAL;
+ }
+ } else {
+EXPONENTIAL: for (i = size; ; i <<= 1)
+ if ((p = (b + i)) >= t) {
+ if ((p = t - size) > b &&
+ cmp(q, p) <= sense)
+ t = p;
+ else
+ b = p;
+ break;
+ } else if (cmp(q, p) <= sense) {
+ t = p;
+ if (i == size)
+ big = 0;
+ goto FASTCASE;
+ } else
+ b = p;
+ while (t > b+size) {
+ i = (((t - b) / size) >> 1) * size;
+ if (cmp(q, p = b + i) <= sense)
+ t = p;
+ else
+ b = p;
+ }
+ goto COPY;
+FASTCASE: while (i > size)
+ if (cmp(q,
+ p = b + (i >>= 1)) <= sense)
+ t = p;
+ else
+ b = p;
+COPY: b = t;
+ }
+ i = size;
+ if (q == f1) {
+ if (iflag) {
+ ICOPY_LIST(f2, tp2, b);
+ ICOPY_ELT(f1, tp2, i);
+ } else {
+ CCOPY_LIST(f2, tp2, b);
+ CCOPY_ELT(f1, tp2, i);
+ }
+ } else {
+ if (iflag) {
+ ICOPY_LIST(f1, tp2, b);
+ ICOPY_ELT(f2, tp2, i);
+ } else {
+ CCOPY_LIST(f1, tp2, b);
+ CCOPY_ELT(f2, tp2, i);
+ }
+ }
+ }
+ if (f2 < l2) {
+ if (iflag)
+ ICOPY_LIST(f2, tp2, l2);
+ else
+ CCOPY_LIST(f2, tp2, l2);
+ } else if (f1 < l1) {
+ if (iflag)
+ ICOPY_LIST(f1, tp2, l1);
+ else
+ CCOPY_LIST(f1, tp2, l1);
+ }
+ *p1 = l2;
+ }
+ tp2 = list1; /* swap list1, list2 */
+ list1 = list2;
+ list2 = tp2;
+ last = list2 + nmemb*size;
+ }
+ if (base == list2) {
+ memmove(list2, list1, nmemb*size);
+ list2 = list1;
+ }
+ free(list2);
+ return (0);
+}
+
+#define swap(a, b) { \
+ s = b; \
+ i = size; \
+ do { \
+ tmp = *a; *a++ = *s; *s++ = tmp; \
+ } while (--i); \
+ a -= size; \
+ }
+#define reverse(bot, top) { \
+ s = top; \
+ do { \
+ i = size; \
+ do { \
+ tmp = *bot; *bot++ = *s; *s++ = tmp; \
+ } while (--i); \
+ s -= size2; \
+ } while(bot < s); \
+}
+
+/*
+ * Optional hybrid natural/pairwise first pass. Eats up list1 in runs of
+ * increasing order, list2 in a corresponding linked list. Checks for runs
+ * when THRESHOLD/2 pairs compare with same sense. (Only used when NATURAL
+ * is defined. Otherwise simple pairwise merging is used.)
+ */
+void
+setup(list1, list2, n, size, cmp)
+ size_t n, size;
+ int (^cmp)(const void *, const void *);
+ u_char *list1, *list2;
+{
+ size_t i, size2;
+ int length, tmp, sense;
+ u_char *f1, *f2, *s, *l2, *last, *p2;
+
+ size2 = size*2;
+ if (n <= 5) {
+ insertionsort(list1, n, size, cmp);
+ *EVAL(list2) = (u_char*) list2 + n*size;
+ return;
+ }
+ /*
+ * Avoid running pointers out of bounds; limit n to evens
+ * for simplicity.
+ */
+ i = 4 + (n & 1);
+ insertionsort(list1 + (n - i) * size, i, size, cmp);
+ last = list1 + size * (n - i);
+ *EVAL(list2 + (last - list1)) = list2 + n * size;
+
+#ifdef NATURAL
+ p2 = list2;
+ f1 = list1;
+ sense = (cmp(f1, f1 + size) > 0);
+ for (; f1 < last; sense = !sense) {
+ length = 2;
+ /* Find pairs with same sense. */
+ for (f2 = f1 + size2; f2 < last; f2 += size2) {
+ if ((cmp(f2, f2+ size) > 0) != sense)
+ break;
+ length += 2;
+ }
+ if (length < THRESHOLD) { /* Pairwise merge */
+ do {
+ p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
+ if (sense > 0)
+ swap (f1, f1 + size);
+ } while ((f1 += size2) < f2);
+ } else { /* Natural merge */
+ l2 = f2;
+ for (f2 = f1 + size2; f2 < l2; f2 += size2) {
+ if ((cmp(f2-size, f2) > 0) != sense) {
+ p2 = *EVAL(p2) = f2 - list1 + list2;
+ if (sense > 0)
+ reverse(f1, f2-size);
+ f1 = f2;
+ }
+ }
+ if (sense > 0)
+ reverse (f1, f2-size);
+ f1 = f2;
+ if (f2 < last || cmp(f2 - size, f2) > 0)
+ p2 = *EVAL(p2) = f2 - list1 + list2;
+ else
+ p2 = *EVAL(p2) = list2 + n*size;
+ }
+ }
+#else /* pairwise merge only. */
+ for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
+ p2 = *EVAL(p2) = p2 + size2;
+ if (cmp (f1, f1 + size) > 0)
+ swap(f1, f1 + size);
+ }
+#endif /* NATURAL */
+}
+
+/*
+ * This is to avoid out-of-bounds addresses in sorting the
+ * last 4 elements.
+ */
+static void
+insertionsort(a, n, size, cmp)
+ u_char *a;
+ size_t n, size;
+ int (^cmp)(const void *, const void *);
+{
+ u_char *ai, *s, *t, *u, tmp;
+ int i;
+
+ for (ai = a+size; --n >= 1; ai += size)
+ for (t = ai; t > a; t -= size) {
+ u = t - size;
+ if (cmp(u, t) <= 0)
+ break;
+ swap(u, t);
+ }
+}
--- /dev/null
+/****************************************************************************/
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)qsort.c 8.1 (Berkeley) 6/4/93";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/qsort.c,v 1.15 2008/01/14 09:21:34 das Exp $");
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <dispatch/dispatch.h>
+#include <stddef.h>
+#include <string.h>
+#include <libkern/OSAtomic.h>
+#include <sys/mman.h>
+#include <errno.h>
+#define __APPLE_API_PRIVATE
+#include <machine/cpu_capabilities.h>
+
+#ifdef I_AM_PSORT_R
+typedef int cmp_t(void *, const void *, const void *);
+#else
+typedef int cmp_t(const void *, const void *);
+#endif
+#ifdef I_AM_PSORT_B
+static inline char *med3(char *, char *, char *, cmp_t ^, void *) __attribute__((always_inline));
+#else
+static inline char *med3(char *, char *, char *, cmp_t *, void *) __attribute__((always_inline));
+#endif
+static inline void swapfunc(char *, char *, int, int) __attribute__((always_inline));
+
+#define min(a, b) (a) < (b) ? a : b
+
+#define NARGS ((PAGESIZE - offsetof(struct page, args)) / sizeof(union args))
+#define PAGESIZE 4096
+#define PARALLEL_MIN_SIZE 2000 /* determine heuristically */
+
+struct shared; /* forward reference */
+union args {
+ union args *next;
+ struct {
+ struct shared *shared;
+ void *a;
+ size_t n;
+ int depth_limit;
+ } /* anonymous */;
+};
+
+struct page {
+ struct page *next;
+ union args args[0];
+};
+
+struct shared {
+ char *who;
+ union args *freelist;
+ struct page *pagelist;
+#ifdef I_AM_PSORT_R
+ void *thunk;
+#endif
+#ifdef I_AM_PSORT_B
+ cmp_t ^cmp;
+#else
+ cmp_t *cmp;
+#endif
+ size_t es;
+ size_t turnoff;
+ dispatch_queue_t queue;
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ OSSpinLock sharedlock;
+ int count;
+};
+
+static union args *
+getargs(struct shared *shared)
+{
+ union args *args;
+
+ OSSpinLockLock(&shared->sharedlock);
+ if(!shared->freelist) {
+ struct page *page;
+ union args *prev;
+ int i;
+ if((page = (struct page *)mmap(NULL, PAGESIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0)) == NULL)
+ return NULL;
+ page->next = shared->pagelist;
+ shared->pagelist = page;
+ prev = NULL;
+ for(args = page->args, i = NARGS; i > 0; args++, i--) {
+ args->next = prev;
+ prev = args;
+ }
+ shared->freelist = prev;
+ }
+ args = shared->freelist;
+ shared->freelist = args->next;
+ OSSpinLockUnlock(&shared->sharedlock);
+ return args;
+}
+
+static void
+returnargs(struct shared *shared, union args *args)
+{
+ OSSpinLockLock(&shared->sharedlock);
+ args->next = shared->freelist;
+ shared->freelist = args;
+ OSSpinLockUnlock(&shared->sharedlock);
+}
+
+/*
+ * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
+ */
+#define swapcode(TYPE, parmi, parmj, n) { \
+ long i = (n) / sizeof (TYPE); \
+ TYPE *pi = (TYPE *) (parmi); \
+ TYPE *pj = (TYPE *) (parmj); \
+ do { \
+ TYPE t = *pi; \
+ *pi++ = *pj; \
+ *pj++ = t; \
+ } while (--i > 0); \
+}
+
+#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
+ es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;
+
+static inline void
+swapfunc(a, b, n, swaptype)
+ char *a, *b;
+ int n, swaptype;
+{
+ if(swaptype <= 1)
+ swapcode(long, a, b, n)
+ else
+ swapcode(char, a, b, n)
+}
+
+#define swap(a, b) \
+ if (swaptype == 0) { \
+ long t = *(long *)(a); \
+ *(long *)(a) = *(long *)(b); \
+ *(long *)(b) = t; \
+ } else \
+ swapfunc(a, b, es, swaptype)
+
+#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype)
+
+#ifdef I_AM_PSORT_R
+#define CMP(t, x, y) (cmp((t), (x), (y)))
+#else
+#define CMP(t, x, y) (cmp((x), (y)))
+#endif
+
+static inline char *
+med3(char *a, char *b, char *c,
+#ifdef I_AM_PSORT_B
+cmp_t ^cmp,
+#else
+cmp_t *cmp,
+#endif
+void *thunk
+#ifndef I_AM_PSORT_R
+__unused
+#endif
+)
+{
+ return CMP(thunk, a, b) < 0 ?
+ (CMP(thunk, b, c) < 0 ? b : (CMP(thunk, a, c) < 0 ? c : a ))
+ :(CMP(thunk, b, c) > 0 ? b : (CMP(thunk, a, c) < 0 ? a : c ));
+}
+
+#ifdef __LP64__
+#define DEPTH(x) (2 * (flsl((long)(x)) - 1))
+#else /* !__LP64__ */
+#define DEPTH(x) (2 * (fls((int)(x)) - 1))
+#endif /* __LP64__ */
+
+#ifdef I_AM_PSORT_R
+int __heapsort_r(void *, size_t, size_t, void *, int (*)(void *, const void *, const void *));
+#endif
+
+static void _psort_parallel(void *x);
+
+static void
+_psort(void *a, size_t n, size_t es,
+#ifdef I_AM_PSORT_R
+void *thunk,
+#else
+#define thunk NULL
+#endif
+#ifdef I_AM_PSORT_B
+cmp_t ^cmp,
+#else
+cmp_t *cmp,
+#endif
+int depth_limit, struct shared *shared)
+{
+ char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
+ size_t d, r;
+ int cmp_result;
+ int swaptype, swap_cnt;
+
+loop:
+ if (depth_limit-- <= 0) {
+#ifdef I_AM_PSORT_B
+ heapsort_b(a, n, es, cmp);
+#elif defined(I_AM_PSORT_R)
+ __heapsort_r(a, n, es, thunk, cmp);
+#else
+ heapsort(a, n, es, cmp);
+#endif
+ return;
+ }
+ SWAPINIT(a, es);
+ swap_cnt = 0;
+ if (n < 7) {
+ for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
+ for (pl = pm;
+ pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
+ pl -= es)
+ swap(pl, pl - es);
+ return;
+ }
+ pm = (char *)a + (n / 2) * es;
+ if (n > 7) {
+ pl = a;
+ pn = (char *)a + (n - 1) * es;
+ if (n > 40) {
+ d = (n / 8) * es;
+ pl = med3(pl, pl + d, pl + 2 * d, cmp, thunk);
+ pm = med3(pm - d, pm, pm + d, cmp, thunk);
+ pn = med3(pn - 2 * d, pn - d, pn, cmp, thunk);
+ }
+ pm = med3(pl, pm, pn, cmp, thunk);
+ }
+ swap(a, pm);
+ pa = pb = (char *)a + es;
+
+ pc = pd = (char *)a + (n - 1) * es;
+ for (;;) {
+ while (pb <= pc && (cmp_result = CMP(thunk, pb, a)) <= 0) {
+ if (cmp_result == 0) {
+ swap_cnt = 1;
+ swap(pa, pb);
+ pa += es;
+ }
+ pb += es;
+ }
+ while (pb <= pc && (cmp_result = CMP(thunk, pc, a)) >= 0) {
+ if (cmp_result == 0) {
+ swap_cnt = 1;
+ swap(pc, pd);
+ pd -= es;
+ }
+ pc -= es;
+ }
+ if (pb > pc)
+ break;
+ swap(pb, pc);
+ swap_cnt = 1;
+ pb += es;
+ pc -= es;
+ }
+
+ pn = (char *)a + n * es;
+ r = min(pa - (char *)a, pb - pa);
+ vecswap(a, pb - r, r);
+ r = min(pd - pc, pn - pd - es);
+ vecswap(pb, pn - r, r);
+
+ if (swap_cnt == 0) { /* Switch to insertion sort */
+ r = 1 + n / 4; /* n >= 7, so r >= 2 */
+ for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
+ for (pl = pm;
+ pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
+ pl -= es) {
+ swap(pl, pl - es);
+ if (++swap_cnt > r) goto nevermind;
+ }
+ return;
+ }
+
+nevermind:
+ if ((r = pb - pa) > es) {
+ r /= es;
+ if (shared && r > shared->turnoff) {
+ union args *args = getargs(shared);
+
+ if (args == NULL)
+ LIBC_ABORT("%s: getargs: %s", shared->who, strerror(errno));
+ args->shared = shared;
+ args->a = a;
+ args->n = r;
+ args->depth_limit = depth_limit;
+ OSAtomicIncrement32(&shared->count);
+ dispatch_async_f(shared->queue, args, _psort_parallel);
+ } else {
+#ifdef I_AM_PSORT_R
+ _psort(a, r, es, thunk, cmp, depth_limit, NULL);
+#else
+ _psort(a, r, es, cmp, depth_limit, NULL);
+#endif
+ }
+ }
+ if ((r = pd - pc) > es) {
+ /* Iterate rather than recurse to save stack space */
+ a = pn - r;
+ n = r / es;
+ goto loop;
+ }
+/* psort(pn - r, r / es, es, cmp);*/
+}
+
+static void
+_psort_parallel(void *x)
+{
+ union args *args = (union args *)x;
+ struct shared *shared = args->shared;
+
+ _psort(args->a, args->n, shared->es,
+#ifdef I_AM_PSORT_R
+ shared->thunk,
+#endif
+ shared->cmp, args->depth_limit, shared);
+ returnargs(shared, args);
+ if(OSAtomicDecrement32(&shared->count) <= 0) {
+ pthread_mutex_lock(&shared->mutex);
+ pthread_cond_signal(&shared->cond);
+ pthread_mutex_unlock(&shared->mutex);
+ }
+}
+
+/* fast, approximate integer square root */
+static size_t
+isqrt(size_t x)
+{
+ size_t s = 1L << (flsl(x) / 2);
+ return (s + x / s) / 2;
+}
+
+void
+#ifdef I_AM_PSORT_R
+psort_r(void *a, size_t n, size_t es, void *thunk, cmp_t *cmp)
+#elif defined(I_AM_PSORT_B)
+psort_b(void *a, size_t n, size_t es, cmp_t ^cmp)
+#else
+psort(void *a, size_t n, size_t es, cmp_t *cmp)
+#endif
+{
+ if (n >= PARALLEL_MIN_SIZE && _NumCPUs() > 1) {
+ struct shared shared;
+ union args *args;
+
+ bzero(&shared, sizeof(shared));
+ shared.sharedlock = OS_SPINLOCK_INIT;
+ if ((args = getargs(&shared)) != NULL) {
+ struct page *p, *pp;
+#ifdef I_AM_PSORT_R
+ shared.who = "psort_r";
+ shared.thunk = thunk;
+#elif defined(I_AM_PSORT_B)
+ shared.who = "psort_b";
+#else
+ shared.who = "psort";
+#endif
+ shared.cmp = cmp;
+ shared.es = es;
+ shared.queue = dispatch_get_concurrent_queue(0);
+ shared.cond = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
+ shared.mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
+ args->a = a;
+ args->n = n;
+ args->depth_limit = DEPTH(n);
+ args->shared = &shared;
+ /*
+ * The turnoff value is the size of a partition that,
+ * below which, we stop doing in parallel, and just do
+ * in the current thread. The value of sqrt(n) was
+ * determined heuristically. There is a smaller
+ * dependence on the slowness of the comparison
+ * function, and there might be a dependence on the
+ * number of processors, but the algorithm has not been
+ * determined. Because the sensitivity to the turnoff
+ * value is relatively low, we use a fast, approximate
+ * integer square root routine that is good enough for
+ * this purpose.
+ */
+ shared.turnoff = isqrt(n);
+ OSAtomicIncrement32(&shared.count);
+ _psort_parallel(args);
+
+ /* wait for queue to drain */
+ pthread_mutex_lock(&shared.mutex);
+ while(shared.count > 0)
+ pthread_cond_wait(&shared.cond, &shared.mutex);
+
+ pthread_mutex_unlock(&shared.mutex);
+ pthread_mutex_destroy(&shared.mutex);
+ pthread_cond_destroy(&shared.cond);
+ for(p = shared.pagelist; p; p = pp) {
+ pp = p->next;
+ munmap(p, PAGESIZE);
+ }
+ return;
+ }
+ }
+ /* Just call qsort */
+#ifdef I_AM_PSORT_R
+ qsort_r(a, n, es, thunk, cmp);
+#elif defined(I_AM_PSORT_B)
+ qsort_b(a, n, es, cmp);
+#else
+ qsort(a, n, es, cmp);
+#endif
+}
--- /dev/null
+.\" Copyright (c) 1990, 1991, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software contributed to Berkeley by
+.\" the American National Standards Committee X3, on Information
+.\" Processing Systems.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. All advertising materials mentioning features or use of this software
+.\" must display the following acknowledgement:
+.\" This product includes software developed by the University of
+.\" California, Berkeley and its contributors.
+.\" 4. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" @(#)qsort.3 8.1 (Berkeley) 6/4/93
+.\" $FreeBSD: src/lib/libc/stdlib/qsort.3,v 1.15 2004/07/02 23:52:12 ru Exp $
+.\"
+.Dd Nov 25, 2008
+.Dt PSORT 3
+.Os "Mac OS X"
+.Sh NAME
+.Nm psort ,
+#ifdef UNIFDEF_BLOCKS
+.Nm psort_b ,
+#endif
+.Nm psort_r
+.Nd parallel sort functions
+.Sh SYNOPSIS
+.In stdlib.h
+.Ft void
+.Fo psort
+.Fa "void *base"
+.Fa "size_t nel"
+.Fa "size_t width"
+.Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]"
+.Fc
+#ifdef UNIFDEF_BLOCKS
+.Ft void
+.Fo psort_b
+.Fa "void *base"
+.Fa "size_t nel"
+.Fa "size_t width"
+.Fa "int \*[lp]^compar\*[rp]\*[lp]const void *, const void *\*[rp]"
+.Fc
+#endif
+.Ft void
+.Fo psort_r
+.Fa "void *base"
+.Fa "size_t nel"
+.Fa "size_t width"
+.Fa "void *thunk"
+.Fa "int \*[lp]*compar\*[rp]\*[lp]void *, const void *, const void *\*[rp]"
+.Fc
+.Sh DESCRIPTION
+The
+#ifdef UNIFDEF_BLOCKS
+.Fn psort ,
+.Fn psort_b ,
+#else
+.Fn psort
+#endif
+and
+.Fn psort_r
+functions are parallel sort routines that are drop-in compatible with the
+corresponding
+.Fn qsort
+function (see
+.Xr qsort 3
+for a description of the arguments).
+On multiprocessor machines, multiple threads may be created to simultaneously
+perform the sort calculations, resulting in an overall faster sort result.
+Overhead in managing the threads limits the maximum speed improvement to
+somewhat less than the number of processors available.
+For example, on a 4-processor machine, a typical sort on a large array might
+result in 3.2 times faster sorting than a regular
+.Fn qsort .
+.Sh RESTRICTIONS
+Because of the multi-threaded nature of the sort, the comparison function
+is expected to perform its own synchronization that might be required for
+data physically
+.Em outside
+the two objects passed to the comparison function.
+However, no synchronization is required for the two
+objects themselves, unless some third party is also accessing those objects.
+.Pp
+Additional memory is temporarily allocated to deal with the parallel nature
+of the computation.
+.Pp
+Because of the overhead of maintaining multiple threads, the
+.Fn psort
+family of routines may choose to just call
+.Xr qsort 3
+when there is no advantage to parallelizing (for example, when the number of
+objects in the array is too small, or only one processor is available).
+.Pp
+Like
+.Xr qsort 3 ,
+the sort is not stable.
+.Sh RETURN VALUES
+The
+#ifdef UNIFDEF_BLOCKS
+.Fn psort ,
+.Fn psort_b
+#else
+.Fn psort
+#endif
+and
+.Fn psort_r
+functions
+return no value.
+.Sh SEE ALSO
+.Xr qsort 3
--- /dev/null
+--- psort.3.orig 2009-05-20 15:59:00.000000000 -0700
++++ psort.3 2009-05-20 16:08:34.000000000 -0700
+@@ -36,60 +36,20 @@
+ .\" @(#)qsort.3 8.1 (Berkeley) 6/4/93
+ .\" $FreeBSD: src/lib/libc/stdlib/qsort.3,v 1.15 2004/07/02 23:52:12 ru Exp $
+ .\"
+-.Dd September 30, 2003
+-.Dt QSORT 3
+-.Os
++.Dd Nov 25, 2008
++.Dt PSORT 3
++.Os "Mac OS X"
+ .Sh NAME
+-.Nm heapsort ,
++.Nm psort ,
+ #ifdef UNIFDEF_BLOCKS
+-.Nm heapsort_b ,
++.Nm psort_b ,
+ #endif
+-.Nm mergesort ,
+-#ifdef UNIFDEF_BLOCKS
+-.Nm mergesort_b ,
+-#endif
+-.Nm qsort ,
+-#ifdef UNIFDEF_BLOCKS
+-.Nm qsort_b ,
+-#endif
+-.Nm qsort_r
+-.Nd sort functions
++.Nm psort_r
++.Nd parallel sort functions
+ .Sh SYNOPSIS
+ .In stdlib.h
+-.Ft int
+-.Fo heapsort
+-.Fa "void *base"
+-.Fa "size_t nel"
+-.Fa "size_t width"
+-.Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]"
+-.Fc
+-#ifdef UNIFDEF_BLOCKS
+-.Ft int
+-.Fo heapsort_b
+-.Fa "void *base"
+-.Fa "size_t nel"
+-.Fa "size_t width"
+-.Fa "int \*[lp]^compar\*[rp]\*[lp]const void *, const void *\*[rp]"
+-.Fc
+-#endif
+-.Ft int
+-.Fo mergesort
+-.Fa "void *base"
+-.Fa "size_t nel"
+-.Fa "size_t width"
+-.Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]"
+-.Fc
+-#ifdef UNIFDEF_BLOCKS
+-.Ft int
+-.Fo mergesort_b
+-.Fa "void *base"
+-.Fa "size_t nel"
+-.Fa "size_t width"
+-.Fa "int \*[lp]^compar\*[rp]\*[lp]const void *, const void *\*[rp]"
+-.Fc
+-#endif
+ .Ft void
+-.Fo qsort
++.Fo psort
+ .Fa "void *base"
+ .Fa "size_t nel"
+ .Fa "size_t width"
+@@ -97,7 +57,7 @@
+ .Fc
+ #ifdef UNIFDEF_BLOCKS
+ .Ft void
+-.Fo qsort_b
++.Fo psort_b
+ .Fa "void *base"
+ .Fa "size_t nel"
+ .Fa "size_t width"
+@@ -105,7 +65,7 @@
+ .Fc
+ #endif
+ .Ft void
+-.Fo qsort_r
++.Fo psort_r
+ .Fa "void *base"
+ .Fa "size_t nel"
+ .Fa "size_t width"
+@@ -114,255 +74,60 @@
+ .Fc
+ .Sh DESCRIPTION
+ The
+-.Fn qsort
+-function is a modified partition-exchange sort, or quicksort.
+-The
+-.Fn heapsort
+-function is a modified selection sort.
+-The
+-.Fn mergesort
+-function is a modified merge sort with exponential search,
+-intended for sorting data with pre-existing order.
+-.Pp
+-The
+-.Fn qsort
+-and
+-.Fn heapsort
+-functions sort an array of
+-.Fa nel
+-objects, the initial member of which is pointed to by
+-.Fa base .
+-The size of each object is specified by
+-.Fa width .
+-The
+-.Fn mergesort
+-function
+-behaves similarly, but
+-.Em requires
+-that
+-.Fa width
+-be greater than or equal to
+-.Dq "sizeof(void *) / 2" .
+-.Pp
+-The contents of the array
+-.Fa base
+-are sorted in ascending order according to
+-a comparison function pointed to by
+-.Fa compar ,
+-which requires two arguments pointing to the objects being
+-compared.
+-.Pp
+-The comparison function must return an integer less than, equal to, or
+-greater than zero if the first argument is considered to be respectively
+-less than, equal to, or greater than the second.
+-.Pp
+-The
+-.Fn qsort_r
+-function behaves identically to
+-.Fn qsort ,
+-except that it takes an additional argument,
+-.Fa thunk ,
+-which is passed unchanged as the first argument to function pointed to
+-.Fa compar .
+-This allows the comparison function to access additional
+-data without using global variables, and thus
+-.Fn qsort_r
+-is suitable for use in functions which must be reentrant.
+-.Pp
+-The algorithms implemented by
+-.Fn qsort ,
+-.Fn qsort_r ,
+-and
+-.Fn heapsort
+-are
+-.Em not
+-stable; that is, if two members compare as equal, their order in
+-the sorted array is undefined.
+-The
+-.Fn mergesort
+-algorithm is stable.
+-.Pp
+-The
+-.Fn qsort
+-and
+-.Fn qsort_r
+-functions are an implementation of C.A.R.
+-Hoare's
+-.Dq quicksort
+-algorithm,
+-a variant of partition-exchange sorting; in particular, see
+-.An D.E. Knuth Ns 's
+-.%T "Algorithm Q" .
+-.Sy Quicksort
+-takes O N lg N average time.
+-This implementation uses median selection to avoid its
+-O N**2 worst-case behavior.
+-.Pp
+-The
+-.Fn heapsort
+-function is an implementation of
+-.An "J.W.J. William" Ns 's
+-.Dq heapsort
+-algorithm,
+-a variant of selection sorting; in particular, see
+-.An "D.E. Knuth" Ns 's
+-.%T "Algorithm H" .
+-.Sy Heapsort
+-takes O N lg N worst-case time.
+-Its
+-.Em only
+-advantage over
+-.Fn qsort
+-is that it uses almost no additional memory; while
+-.Fn qsort
+-does not allocate memory, it is implemented using recursion.
+-.Pp
+-The function
+-.Fn mergesort
+-requires additional memory of size
+-.Fa nel *
+-.Fa width
+-bytes; it should be used only when space is not at a premium.
+-The
+-.Fn mergesort
+-function
+-is optimized for data with pre-existing order; its worst case
+-time is O N lg N; its best case is O N.
+-.Pp
+-Normally,
+-.Fn qsort
+-is faster than
+-.Fn mergesort ,
+-which is faster than
+-.Fn heapsort .
+-Memory availability and pre-existing order in the data can make this
+-untrue.
+-#ifdef UNIFDEF_BLOCKS
+-.Pp
+-The
+-.Fn heapsort_b ,
+-.Fn mergesort_b ,
+-and
+-.Fn qsort_b
+-routines are like the corresponding routines without the _b suffix, expect
+-that the
+-.Fa compar
+-callback is a block pointer instead of a function pointer.
+-#endif
+-.Sh RETURN VALUES
+-The
+ #ifdef UNIFDEF_BLOCKS
+-.Fn qsort ,
+-.Fn qsort_b
++.Fn psort ,
++.Fn psort_b ,
+ #else
+-.Fn qsort
++.Fn psort
+ #endif
+ and
+-.Fn qsort_r
+-functions
+-return no value.
+-.Pp
+-#ifdef UNIFDEF_BLOCKS
+-.ds HEAPSORT_B heapsort_b
+-.ds MERGESORT_B mergesort_b
+-#endif
+-.Rv -std heapsort \*[HEAPSORT_B] mergesort \*[MERGESORT_B]
+-.Sh ERRORS
++.Fn psort_r
++functions are parallel sort routines that are drop-in compatible with the
++corresponding
++.Fn qsort
++function (see
++.Xr qsort 3
++for a description of the arguments).
++On multiprocessor machines, multiple threads may be created to simultaneously
++perform the sort calculations, resulting in an overall faster sort result.
++Overhead in managing the threads limits the maximum speed improvement to
++somewhat less than the number of processors available.
++For example, on a 4-processor machine, a typical sort on a large array might
++result in 3.2 times faster sorting than a regular
++.Fn qsort .
++.Sh RESTRICTIONS
++Because of the multi-threaded nature of the sort, the comparison function
++is expected to perform its own synchronization that might be required for
++data physically
++.Em outside
++the two objects passed to the comparison function.
++However, no synchronization is required for the two
++objects themselves, unless some third party is also accessing those objects.
++.Pp
++Additional memory is temporarily allocated to deal with the parallel nature
++of the computation.
++.Pp
++Because of the overhead of maintaining multiple threads, the
++.Fn psort
++family of routines may choose to just call
++.Xr qsort 3
++when there is no advantage to parallelizing (for example, when the number of
++objects in the array is too small, or only one processor is available).
++.Pp
++Like
++.Xr qsort 3 ,
++the sort is not stable.
++.Sh RETURN VALUES
+ The
+ #ifdef UNIFDEF_BLOCKS
+-.Fn heapsort ,
+-.Fn heapsort_b ,
+-.Fn mergesort
+-and
+-.Fn mergesort_b
++.Fn psort ,
++.Fn psort_b
+ #else
+-.Fn heapsort
+-and
+-.Fn mergesort
+-#endif
+-functions succeed unless:
+-.Bl -tag -width Er
+-.It Bq Er EINVAL
+-The
+-.Fa width
+-argument is zero, or,
+-the
+-.Fa width
+-argument to
+-.Fn mergesort
+-#ifdef UNIFDEF_BLOCKS
+-or
+-.Fn mergesort_b
++.Fn psort
+ #endif
+-is less than
+-.Dq "sizeof(void *) / 2" .
+-.It Bq Er ENOMEM
+-The
+-#ifdef UNIFDEF_BLOCKS
+-.Fn heapsort ,
+-.Fn heapsort_b ,
+-.Fn mergesort
+-and
+-.Fn mergesort_b
+-#else
+-.Fn heapsort
+ and
+-.Fn mergesort
+-#endif
++.Fn psort_r
+ functions
+-were unable to allocate memory.
+-.El
+-.Sh COMPATIBILITY
+-Previous versions of
+-.Fn qsort
+-did not permit the comparison routine itself to call
+-.Fn qsort 3 .
+-This is no longer true.
++return no value.
+ .Sh SEE ALSO
+-.Xr sort 1 ,
+-.Xr radixsort 3
+-.Rs
+-.%A Hoare, C.A.R.
+-.%D 1962
+-.%T "Quicksort"
+-.%J "The Computer Journal"
+-.%V 5:1
+-.%P pp. 10-15
+-.Re
+-.Rs
+-.%A Williams, J.W.J
+-.%D 1964
+-.%T "Heapsort"
+-.%J "Communications of the ACM"
+-.%V 7:1
+-.%P pp. 347-348
+-.Re
+-.Rs
+-.%A Knuth, D.E.
+-.%D 1968
+-.%B "The Art of Computer Programming"
+-.%V Vol. 3
+-.%T "Sorting and Searching"
+-.%P pp. 114-123, 145-149
+-.Re
+-.Rs
+-.%A McIlroy, P.M.
+-.%T "Optimistic Sorting and Information Theoretic Complexity"
+-.%J "Fourth Annual ACM-SIAM Symposium on Discrete Algorithms"
+-.%V January 1992
+-.Re
+-.Rs
+-.%A Bentley, J.L.
+-.%A McIlroy, M.D.
+-.%T "Engineering a Sort Function"
+-.%J "Software--Practice and Experience"
+-.%V Vol. 23(11)
+-.%P pp. 1249-1265
+-.%D November\ 1993
+-.Re
+-.Sh STANDARDS
+-The
+-.Fn qsort
+-function
+-conforms to
+-.St -isoC .
++.Xr qsort 3
--- /dev/null
+--- psort.c.orig 2008-11-24 17:01:07.000000000 -0800
++++ psort.c 2008-11-24 22:02:57.000000000 -0800
+@@ -1,3 +1,4 @@
++/****************************************************************************/
+ /*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+@@ -34,14 +35,22 @@ static char sccsid[] = "@(#)qsort.c 8.1
+ __FBSDID("$FreeBSD: src/lib/libc/stdlib/qsort.c,v 1.15 2008/01/14 09:21:34 das Exp $");
+
+ #include <stdlib.h>
++#include <pthread.h>
++#include <dispatch/dispatch.h>
++#include <stddef.h>
+ #include <string.h>
++#include <libkern/OSAtomic.h>
++#include <sys/mman.h>
++#include <errno.h>
++#define __APPLE_API_PRIVATE
++#include <machine/cpu_capabilities.h>
+
+-#ifdef I_AM_QSORT_R
++#ifdef I_AM_PSORT_R
+ typedef int cmp_t(void *, const void *, const void *);
+ #else
+ typedef int cmp_t(const void *, const void *);
+ #endif
+-#ifdef I_AM_QSORT_B
++#ifdef I_AM_PSORT_B
+ static inline char *med3(char *, char *, char *, cmp_t ^, void *) __attribute__((always_inline));
+ #else
+ static inline char *med3(char *, char *, char *, cmp_t *, void *) __attribute__((always_inline));
+@@ -50,6 +59,83 @@ static inline void swapfunc(char *, cha
+
+ #define min(a, b) (a) < (b) ? a : b
+
++#define NARGS ((PAGESIZE - offsetof(struct page, args)) / sizeof(union args))
++#define PAGESIZE 4096
++#define PARALLEL_MIN_SIZE 2000 /* determine heuristically */
++
++struct shared; /* forward reference */
++union args {
++ union args *next;
++ struct {
++ struct shared *shared;
++ void *a;
++ size_t n;
++ int depth_limit;
++ } /* anonymous */;
++};
++
++struct page {
++ struct page *next;
++ union args args[0];
++};
++
++struct shared {
++ char *who;
++ union args *freelist;
++ struct page *pagelist;
++#ifdef I_AM_PSORT_R
++ void *thunk;
++#endif
++#ifdef I_AM_PSORT_B
++ cmp_t ^cmp;
++#else
++ cmp_t *cmp;
++#endif
++ size_t es;
++ size_t turnoff;
++ dispatch_queue_t queue;
++ pthread_cond_t cond;
++ pthread_mutex_t mutex;
++ OSSpinLock sharedlock;
++ int count;
++};
++
++static union args *
++getargs(struct shared *shared)
++{
++ union args *args;
++
++ OSSpinLockLock(&shared->sharedlock);
++ if(!shared->freelist) {
++ struct page *page;
++ union args *prev;
++ int i;
++	if((page = (struct page *)mmap(NULL, PAGESIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED)
++ return NULL;
++ page->next = shared->pagelist;
++ shared->pagelist = page;
++ prev = NULL;
++ for(args = page->args, i = NARGS; i > 0; args++, i--) {
++ args->next = prev;
++ prev = args;
++ }
++ shared->freelist = prev;
++ }
++ args = shared->freelist;
++ shared->freelist = args->next;
++ OSSpinLockUnlock(&shared->sharedlock);
++ return args;
++}
++
++static void
++returnargs(struct shared *shared, union args *args)
++{
++ OSSpinLockLock(&shared->sharedlock);
++ args->next = shared->freelist;
++ shared->freelist = args;
++ OSSpinLockUnlock(&shared->sharedlock);
++}
++
+ /*
+ * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
+ */
+@@ -88,7 +174,7 @@ swapfunc(a, b, n, swaptype)
+
+ #define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype)
+
+-#ifdef I_AM_QSORT_R
++#ifdef I_AM_PSORT_R
+ #define CMP(t, x, y) (cmp((t), (x), (y)))
+ #else
+ #define CMP(t, x, y) (cmp((x), (y)))
+@@ -96,13 +182,13 @@ swapfunc(a, b, n, swaptype)
+
+ static inline char *
+ med3(char *a, char *b, char *c,
+-#ifdef I_AM_QSORT_B
++#ifdef I_AM_PSORT_B
+ cmp_t ^cmp,
+ #else
+ cmp_t *cmp,
+ #endif
+ void *thunk
+-#ifndef I_AM_QSORT_R
++#ifndef I_AM_PSORT_R
+ __unused
+ #endif
+ )
+@@ -118,23 +204,25 @@ __unused
+ #define DEPTH(x) (2 * (fls((int)(x)) - 1))
+ #endif /* __LP64__ */
+
+-#ifdef I_AM_QSORT_R
++#ifdef I_AM_PSORT_R
+ int __heapsort_r(void *, size_t, size_t, void *, int (*)(void *, const void *, const void *));
+ #endif
+
++static void _psort_parallel(void *x);
++
+ static void
+-_qsort(void *a, size_t n, size_t es,
+-#ifdef I_AM_QSORT_R
++_psort(void *a, size_t n, size_t es,
++#ifdef I_AM_PSORT_R
+ void *thunk,
+ #else
+ #define thunk NULL
+ #endif
+-#ifdef I_AM_QSORT_B
++#ifdef I_AM_PSORT_B
+ cmp_t ^cmp,
+ #else
+ cmp_t *cmp,
+ #endif
+-int depth_limit)
++int depth_limit, struct shared *shared)
+ {
+ char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
+ size_t d, r;
+@@ -143,9 +231,9 @@ int depth_limit)
+
+ loop:
+ if (depth_limit-- <= 0) {
+-#ifdef I_AM_QSORT_B
++#ifdef I_AM_PSORT_B
+ heapsort_b(a, n, es, cmp);
+-#elif defined(I_AM_QSORT_R)
++#elif defined(I_AM_PSORT_R)
+ __heapsort_r(a, n, es, thunk, cmp);
+ #else
+ heapsort(a, n, es, cmp);
+@@ -222,33 +310,135 @@ loop:
+ }
+
+ nevermind:
+- if ((r = pb - pa) > es)
+-#ifdef I_AM_QSORT_R
+- _qsort(a, r / es, es, thunk, cmp, depth_limit);
++ if ((r = pb - pa) > es) {
++ r /= es;
++ if (shared && r > shared->turnoff) {
++ union args *args = getargs(shared);
++
++ if (args == NULL)
++ LIBC_ABORT("%s: getargs: %s", shared->who, strerror(errno));
++ args->shared = shared;
++ args->a = a;
++ args->n = r;
++ args->depth_limit = depth_limit;
++ OSAtomicIncrement32(&shared->count);
++ dispatch_async_f(shared->queue, args, _psort_parallel);
++ } else {
++#ifdef I_AM_PSORT_R
++ _psort(a, r, es, thunk, cmp, depth_limit, NULL);
+ #else
+- _qsort(a, r / es, es, cmp, depth_limit);
++ _psort(a, r, es, cmp, depth_limit, NULL);
+ #endif
++ }
++ }
+ if ((r = pd - pc) > es) {
+ /* Iterate rather than recurse to save stack space */
+ a = pn - r;
+ n = r / es;
+ goto loop;
+ }
+-/* qsort(pn - r, r / es, es, cmp);*/
++/* psort(pn - r, r / es, es, cmp);*/
++}
++
++static void
++_psort_parallel(void *x)
++{
++ union args *args = (union args *)x;
++ struct shared *shared = args->shared;
++
++ _psort(args->a, args->n, shared->es,
++#ifdef I_AM_PSORT_R
++ shared->thunk,
++#endif
++ shared->cmp, args->depth_limit, shared);
++ returnargs(shared, args);
++ if(OSAtomicDecrement32(&shared->count) <= 0) {
++ pthread_mutex_lock(&shared->mutex);
++ pthread_cond_signal(&shared->cond);
++ pthread_mutex_unlock(&shared->mutex);
++ }
++}
++
++/* fast, approximate integer square root */
++static size_t
++isqrt(size_t x)
++{
++ size_t s = 1L << (flsl(x) / 2);
++ return (s + x / s) / 2;
+ }
+
+ void
+-#ifdef I_AM_QSORT_R
+-qsort_r(void *a, size_t n, size_t es, void *thunk, cmp_t *cmp)
+-#elif defined(I_AM_QSORT_B)
+-qsort_b(void *a, size_t n, size_t es, cmp_t ^cmp)
++#ifdef I_AM_PSORT_R
++psort_r(void *a, size_t n, size_t es, void *thunk, cmp_t *cmp)
++#elif defined(I_AM_PSORT_B)
++psort_b(void *a, size_t n, size_t es, cmp_t ^cmp)
+ #else
+-qsort(void *a, size_t n, size_t es, cmp_t *cmp)
++psort(void *a, size_t n, size_t es, cmp_t *cmp)
+ #endif
+ {
+- _qsort(a, n, es,
+-#ifdef I_AM_QSORT_R
+- thunk,
++ if (n >= PARALLEL_MIN_SIZE && _NumCPUs() > 1) {
++ struct shared shared;
++ union args *args;
++
++ bzero(&shared, sizeof(shared));
++ shared.sharedlock = OS_SPINLOCK_INIT;
++ if ((args = getargs(&shared)) != NULL) {
++ struct page *p, *pp;
++#ifdef I_AM_PSORT_R
++ shared.who = "psort_r";
++ shared.thunk = thunk;
++#elif defined(I_AM_PSORT_B)
++ shared.who = "psort_b";
++#else
++ shared.who = "psort";
++#endif
++ shared.cmp = cmp;
++ shared.es = es;
++ shared.queue = dispatch_get_concurrent_queue(0);
++ shared.cond = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
++ shared.mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
++ args->a = a;
++ args->n = n;
++ args->depth_limit = DEPTH(n);
++ args->shared = &shared;
++ /*
++ * The turnoff value is the size of a partition that,
++ * below which, we stop doing in parallel, and just do
++ * in the current thread. The value of sqrt(n) was
++ * determined heuristically. There is a smaller
++ * dependence on the slowness of the comparison
++ * function, and there might be a dependence on the
++ * number of processors, but the algorithm has not been
++ * determined. Because the sensitivity to the turnoff
++ * value is relatively low, we use a fast, approximate
++ * integer square root routine that is good enough for
++ * this purpose.
++ */
++ shared.turnoff = isqrt(n);
++ OSAtomicIncrement32(&shared.count);
++ _psort_parallel(args);
++
++ /* wait for queue to drain */
++ pthread_mutex_lock(&shared.mutex);
++ while(shared.count > 0)
++ pthread_cond_wait(&shared.cond, &shared.mutex);
++
++ pthread_mutex_unlock(&shared.mutex);
++ pthread_mutex_destroy(&shared.mutex);
++ pthread_cond_destroy(&shared.cond);
++ for(p = shared.pagelist; p; p = pp) {
++ pp = p->next;
++ munmap(p, PAGESIZE);
++ }
++ return;
++ }
++ }
++ /* Just call qsort */
++#ifdef I_AM_PSORT_R
++ qsort_r(a, n, es, thunk, cmp);
++#elif defined(I_AM_PSORT_B)
++ qsort_b(a, n, es, cmp);
++#else
++ qsort(a, n, es, cmp);
+ #endif
+- cmp, DEPTH(n));
+ }
--- /dev/null
+/****************************************************************************/
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)qsort.c 8.1 (Berkeley) 6/4/93";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/qsort.c,v 1.15 2008/01/14 09:21:34 das Exp $");
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <dispatch/dispatch.h>
+#include <stddef.h>
+#include <string.h>
+#include <libkern/OSAtomic.h>
+#include <sys/mman.h>
+#include <errno.h>
+#define __APPLE_API_PRIVATE
+#include <machine/cpu_capabilities.h>
+
+#ifdef I_AM_PSORT_R
+typedef int cmp_t(void *, const void *, const void *);
+#else
+typedef int cmp_t(const void *, const void *);
+#endif
+#ifdef I_AM_PSORT_B
+static inline char *med3(char *, char *, char *, cmp_t ^, void *) __attribute__((always_inline));
+#else
+static inline char *med3(char *, char *, char *, cmp_t *, void *) __attribute__((always_inline));
+#endif
+static inline void swapfunc(char *, char *, int, int) __attribute__((always_inline));
+
+#define min(a, b) (a) < (b) ? a : b
+
+#define NARGS ((PAGESIZE - offsetof(struct page, args)) / sizeof(union args))
+#define PAGESIZE 4096
+#define PARALLEL_MIN_SIZE 2000 /* determine heuristically */
+
+struct shared; /* forward reference */
+union args {
+ union args *next;
+ struct {
+ struct shared *shared;
+ void *a;
+ size_t n;
+ int depth_limit;
+ } /* anonymous */;
+};
+
+struct page {
+ struct page *next;
+ union args args[0];
+};
+
+struct shared {
+ char *who;
+ union args *freelist;
+ struct page *pagelist;
+#ifdef I_AM_PSORT_R
+ void *thunk;
+#endif
+#ifdef I_AM_PSORT_B
+ cmp_t ^cmp;
+#else
+ cmp_t *cmp;
+#endif
+ size_t es;
+ size_t turnoff;
+ dispatch_queue_t queue;
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ OSSpinLock sharedlock;
+ int count;
+};
+
+static union args *
+getargs(struct shared *shared)
+{
+	union args *args;
+
+	OSSpinLockLock(&shared->sharedlock);
+	if(!shared->freelist) {
+		struct page *page;
+		union args *prev;
+		int i;
+		if((page = (struct page *)mmap(NULL, PAGESIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED) { /* mmap fails with MAP_FAILED, not NULL */
+			OSSpinLockUnlock(&shared->sharedlock); return NULL; } /* don't leak the spinlock on failure */
+		page->next = shared->pagelist;
+		shared->pagelist = page;
+		prev = NULL;
+		for(args = page->args, i = NARGS; i > 0; args++, i--) {
+			args->next = prev;
+			prev = args;
+		}
+		shared->freelist = prev;
+	}
+	args = shared->freelist;
+	shared->freelist = args->next;
+	OSSpinLockUnlock(&shared->sharedlock);
+	return args;
+}
+
+static void
+returnargs(struct shared *shared, union args *args)
+{
+ OSSpinLockLock(&shared->sharedlock);
+ args->next = shared->freelist;
+ shared->freelist = args;
+ OSSpinLockUnlock(&shared->sharedlock);
+}
+
+/*
+ * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
+ */
+#define swapcode(TYPE, parmi, parmj, n) { \
+ long i = (n) / sizeof (TYPE); \
+ TYPE *pi = (TYPE *) (parmi); \
+ TYPE *pj = (TYPE *) (parmj); \
+ do { \
+ TYPE t = *pi; \
+ *pi++ = *pj; \
+ *pj++ = t; \
+ } while (--i > 0); \
+}
+
+#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
+ es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;
+
+static inline void
+swapfunc(a, b, n, swaptype)
+ char *a, *b;
+ int n, swaptype;
+{
+ if(swaptype <= 1)
+ swapcode(long, a, b, n)
+ else
+ swapcode(char, a, b, n)
+}
+
+#define swap(a, b) \
+ if (swaptype == 0) { \
+ long t = *(long *)(a); \
+ *(long *)(a) = *(long *)(b); \
+ *(long *)(b) = t; \
+ } else \
+ swapfunc(a, b, es, swaptype)
+
+#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype)
+
+#ifdef I_AM_PSORT_R
+#define CMP(t, x, y) (cmp((t), (x), (y)))
+#else
+#define CMP(t, x, y) (cmp((x), (y)))
+#endif
+
+static inline char *
+med3(char *a, char *b, char *c,
+#ifdef I_AM_PSORT_B
+cmp_t ^cmp,
+#else
+cmp_t *cmp,
+#endif
+void *thunk
+#ifndef I_AM_PSORT_R
+__unused
+#endif
+)
+{
+ return CMP(thunk, a, b) < 0 ?
+ (CMP(thunk, b, c) < 0 ? b : (CMP(thunk, a, c) < 0 ? c : a ))
+ :(CMP(thunk, b, c) > 0 ? b : (CMP(thunk, a, c) < 0 ? a : c ));
+}
+
+#ifdef __LP64__
+#define DEPTH(x) (2 * (flsl((long)(x)) - 1))
+#else /* !__LP64__ */
+#define DEPTH(x) (2 * (fls((int)(x)) - 1))
+#endif /* __LP64__ */
+
+#ifdef I_AM_PSORT_R
+int __heapsort_r(void *, size_t, size_t, void *, int (*)(void *, const void *, const void *));
+#endif
+
+static void _psort_parallel(void *x);
+
+static void
+_psort(void *a, size_t n, size_t es,
+#ifdef I_AM_PSORT_R
+void *thunk,
+#else
+#define thunk NULL
+#endif
+#ifdef I_AM_PSORT_B
+cmp_t ^cmp,
+#else
+cmp_t *cmp,
+#endif
+int depth_limit, struct shared *shared)
+{
+ char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
+ size_t d, r;
+ int cmp_result;
+ int swaptype, swap_cnt;
+
+loop:
+ if (depth_limit-- <= 0) {
+#ifdef I_AM_PSORT_B
+ heapsort_b(a, n, es, cmp);
+#elif defined(I_AM_PSORT_R)
+ __heapsort_r(a, n, es, thunk, cmp);
+#else
+ heapsort(a, n, es, cmp);
+#endif
+ return;
+ }
+ SWAPINIT(a, es);
+ swap_cnt = 0;
+ if (n < 7) {
+ for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
+ for (pl = pm;
+ pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
+ pl -= es)
+ swap(pl, pl - es);
+ return;
+ }
+ pm = (char *)a + (n / 2) * es;
+ if (n > 7) {
+ pl = a;
+ pn = (char *)a + (n - 1) * es;
+ if (n > 40) {
+ d = (n / 8) * es;
+ pl = med3(pl, pl + d, pl + 2 * d, cmp, thunk);
+ pm = med3(pm - d, pm, pm + d, cmp, thunk);
+ pn = med3(pn - 2 * d, pn - d, pn, cmp, thunk);
+ }
+ pm = med3(pl, pm, pn, cmp, thunk);
+ }
+ swap(a, pm);
+ pa = pb = (char *)a + es;
+
+ pc = pd = (char *)a + (n - 1) * es;
+ for (;;) {
+ while (pb <= pc && (cmp_result = CMP(thunk, pb, a)) <= 0) {
+ if (cmp_result == 0) {
+ swap_cnt = 1;
+ swap(pa, pb);
+ pa += es;
+ }
+ pb += es;
+ }
+ while (pb <= pc && (cmp_result = CMP(thunk, pc, a)) >= 0) {
+ if (cmp_result == 0) {
+ swap_cnt = 1;
+ swap(pc, pd);
+ pd -= es;
+ }
+ pc -= es;
+ }
+ if (pb > pc)
+ break;
+ swap(pb, pc);
+ swap_cnt = 1;
+ pb += es;
+ pc -= es;
+ }
+
+ pn = (char *)a + n * es;
+ r = min(pa - (char *)a, pb - pa);
+ vecswap(a, pb - r, r);
+ r = min(pd - pc, pn - pd - es);
+ vecswap(pb, pn - r, r);
+
+ if (swap_cnt == 0) { /* Switch to insertion sort */
+ r = 1 + n / 4; /* n >= 7, so r >= 2 */
+ for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
+ for (pl = pm;
+ pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
+ pl -= es) {
+ swap(pl, pl - es);
+ if (++swap_cnt > r) goto nevermind;
+ }
+ return;
+ }
+
+nevermind:
+ if ((r = pb - pa) > es) {
+ r /= es;
+ if (shared && r > shared->turnoff) {
+ union args *args = getargs(shared);
+
+ if (args == NULL)
+ LIBC_ABORT("%s: getargs: %s", shared->who, strerror(errno));
+ args->shared = shared;
+ args->a = a;
+ args->n = r;
+ args->depth_limit = depth_limit;
+ OSAtomicIncrement32(&shared->count);
+ dispatch_async_f(shared->queue, args, _psort_parallel);
+ } else {
+#ifdef I_AM_PSORT_R
+ _psort(a, r, es, thunk, cmp, depth_limit, NULL);
+#else
+ _psort(a, r, es, cmp, depth_limit, NULL);
+#endif
+ }
+ }
+ if ((r = pd - pc) > es) {
+ /* Iterate rather than recurse to save stack space */
+ a = pn - r;
+ n = r / es;
+ goto loop;
+ }
+/* psort(pn - r, r / es, es, cmp);*/
+}
+
+static void
+_psort_parallel(void *x)
+{
+ union args *args = (union args *)x;
+ struct shared *shared = args->shared;
+
+ _psort(args->a, args->n, shared->es,
+#ifdef I_AM_PSORT_R
+ shared->thunk,
+#endif
+ shared->cmp, args->depth_limit, shared);
+ returnargs(shared, args);
+ if(OSAtomicDecrement32(&shared->count) <= 0) {
+ pthread_mutex_lock(&shared->mutex);
+ pthread_cond_signal(&shared->cond);
+ pthread_mutex_unlock(&shared->mutex);
+ }
+}
+
+/* fast, approximate integer square root */
+static size_t
+isqrt(size_t x)
+{
+ size_t s = 1L << (flsl(x) / 2);
+ return (s + x / s) / 2;
+}
+
+void
+#ifdef I_AM_PSORT_R
+psort_r(void *a, size_t n, size_t es, void *thunk, cmp_t *cmp)
+#elif defined(I_AM_PSORT_B)
+psort_b(void *a, size_t n, size_t es, cmp_t ^cmp)
+#else
+psort(void *a, size_t n, size_t es, cmp_t *cmp)
+#endif
+{
+ if (n >= PARALLEL_MIN_SIZE && _NumCPUs() > 1) {
+ struct shared shared;
+ union args *args;
+
+ bzero(&shared, sizeof(shared));
+ shared.sharedlock = OS_SPINLOCK_INIT;
+ if ((args = getargs(&shared)) != NULL) {
+ struct page *p, *pp;
+#ifdef I_AM_PSORT_R
+ shared.who = "psort_r";
+ shared.thunk = thunk;
+#elif defined(I_AM_PSORT_B)
+ shared.who = "psort_b";
+#else
+ shared.who = "psort";
+#endif
+ shared.cmp = cmp;
+ shared.es = es;
+ shared.queue = dispatch_get_concurrent_queue(0);
+ shared.cond = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
+ shared.mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
+ args->a = a;
+ args->n = n;
+ args->depth_limit = DEPTH(n);
+ args->shared = &shared;
+ /*
+ * The turnoff value is the size of a partition that,
+ * below which, we stop doing in parallel, and just do
+ * in the current thread. The value of sqrt(n) was
+ * determined heuristically. There is a smaller
+ * dependence on the slowness of the comparison
+ * function, and there might be a dependence on the
+ * number of processors, but the algorithm has not been
+ * determined. Because the sensitivity to the turnoff
+ * value is relatively low, we use a fast, approximate
+ * integer square root routine that is good enough for
+ * this purpose.
+ */
+ shared.turnoff = isqrt(n);
+ OSAtomicIncrement32(&shared.count);
+ _psort_parallel(args);
+
+ /* wait for queue to drain */
+ pthread_mutex_lock(&shared.mutex);
+ while(shared.count > 0)
+ pthread_cond_wait(&shared.cond, &shared.mutex);
+
+ pthread_mutex_unlock(&shared.mutex);
+ pthread_mutex_destroy(&shared.mutex);
+ pthread_cond_destroy(&shared.cond);
+ for(p = shared.pagelist; p; p = pp) {
+ pp = p->next;
+ munmap(p, PAGESIZE);
+ }
+ return;
+ }
+ }
+ /* Just call qsort */
+#ifdef I_AM_PSORT_R
+ qsort_r(a, n, es, thunk, cmp);
+#elif defined(I_AM_PSORT_B)
+ qsort_b(a, n, es, cmp);
+#else
+ qsort(a, n, es, cmp);
+#endif
+}
--- /dev/null
+/****************************************************************************/
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)qsort.c 8.1 (Berkeley) 6/4/93";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/qsort.c,v 1.15 2008/01/14 09:21:34 das Exp $");
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <dispatch/dispatch.h>
+#include <stddef.h>
+#include <string.h>
+#include <libkern/OSAtomic.h>
+#include <sys/mman.h>
+#include <errno.h>
+#define __APPLE_API_PRIVATE
+#include <machine/cpu_capabilities.h>
+
+#ifdef I_AM_PSORT_R
+typedef int cmp_t(void *, const void *, const void *);
+#else
+typedef int cmp_t(const void *, const void *);
+#endif
+#ifdef I_AM_PSORT_B
+static inline char *med3(char *, char *, char *, cmp_t ^, void *) __attribute__((always_inline));
+#else
+static inline char *med3(char *, char *, char *, cmp_t *, void *) __attribute__((always_inline));
+#endif
+static inline void swapfunc(char *, char *, int, int) __attribute__((always_inline));
+
+#define min(a, b) (a) < (b) ? a : b
+
+#define NARGS ((PAGESIZE - offsetof(struct page, args)) / sizeof(union args))
+#define PAGESIZE 4096
+#define PARALLEL_MIN_SIZE 2000 /* determine heuristically */
+
+struct shared; /* forward reference */
+union args {
+ union args *next;
+ struct {
+ struct shared *shared;
+ void *a;
+ size_t n;
+ int depth_limit;
+ } /* anonymous */;
+};
+
+struct page {
+ struct page *next;
+ union args args[0];
+};
+
+struct shared {
+ char *who;
+ union args *freelist;
+ struct page *pagelist;
+#ifdef I_AM_PSORT_R
+ void *thunk;
+#endif
+#ifdef I_AM_PSORT_B
+ cmp_t ^cmp;
+#else
+ cmp_t *cmp;
+#endif
+ size_t es;
+ size_t turnoff;
+ dispatch_queue_t queue;
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ OSSpinLock sharedlock;
+ int count;
+};
+
+static union args *
+getargs(struct shared *shared)
+{
+	union args *args;
+
+	OSSpinLockLock(&shared->sharedlock);
+	if(!shared->freelist) {
+		struct page *page;
+		union args *prev;
+		int i;
+		if((page = (struct page *)mmap(NULL, PAGESIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED) { /* mmap fails with MAP_FAILED, not NULL */
+			OSSpinLockUnlock(&shared->sharedlock); return NULL; } /* don't leak the spinlock on failure */
+		page->next = shared->pagelist;
+		shared->pagelist = page;
+		prev = NULL;
+		for(args = page->args, i = NARGS; i > 0; args++, i--) {
+			args->next = prev;
+			prev = args;
+		}
+		shared->freelist = prev;
+	}
+	args = shared->freelist;
+	shared->freelist = args->next;
+	OSSpinLockUnlock(&shared->sharedlock);
+	return args;
+}
+
+static void
+returnargs(struct shared *shared, union args *args)
+{
+ OSSpinLockLock(&shared->sharedlock);
+ args->next = shared->freelist;
+ shared->freelist = args;
+ OSSpinLockUnlock(&shared->sharedlock);
+}
+
+/*
+ * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
+ */
+#define swapcode(TYPE, parmi, parmj, n) { \
+ long i = (n) / sizeof (TYPE); \
+ TYPE *pi = (TYPE *) (parmi); \
+ TYPE *pj = (TYPE *) (parmj); \
+ do { \
+ TYPE t = *pi; \
+ *pi++ = *pj; \
+ *pj++ = t; \
+ } while (--i > 0); \
+}
+
+#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
+ es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;
+
+static inline void
+swapfunc(a, b, n, swaptype)
+ char *a, *b;
+ int n, swaptype;
+{
+ if(swaptype <= 1)
+ swapcode(long, a, b, n)
+ else
+ swapcode(char, a, b, n)
+}
+
+#define swap(a, b) \
+ if (swaptype == 0) { \
+ long t = *(long *)(a); \
+ *(long *)(a) = *(long *)(b); \
+ *(long *)(b) = t; \
+ } else \
+ swapfunc(a, b, es, swaptype)
+
+#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype)
+
+#ifdef I_AM_PSORT_R
+#define CMP(t, x, y) (cmp((t), (x), (y)))
+#else
+#define CMP(t, x, y) (cmp((x), (y)))
+#endif
+
+static inline char *
+med3(char *a, char *b, char *c,
+#ifdef I_AM_PSORT_B
+cmp_t ^cmp,
+#else
+cmp_t *cmp,
+#endif
+void *thunk
+#ifndef I_AM_PSORT_R
+__unused
+#endif
+)
+{
+ return CMP(thunk, a, b) < 0 ?
+ (CMP(thunk, b, c) < 0 ? b : (CMP(thunk, a, c) < 0 ? c : a ))
+ :(CMP(thunk, b, c) > 0 ? b : (CMP(thunk, a, c) < 0 ? a : c ));
+}
+
+#ifdef __LP64__
+#define DEPTH(x) (2 * (flsl((long)(x)) - 1))
+#else /* !__LP64__ */
+#define DEPTH(x) (2 * (fls((int)(x)) - 1))
+#endif /* __LP64__ */
+
+#ifdef I_AM_PSORT_R
+int __heapsort_r(void *, size_t, size_t, void *, int (*)(void *, const void *, const void *));
+#endif
+
+static void _psort_parallel(void *x);
+
+static void
+_psort(void *a, size_t n, size_t es,
+#ifdef I_AM_PSORT_R
+void *thunk,
+#else
+#define thunk NULL
+#endif
+#ifdef I_AM_PSORT_B
+cmp_t ^cmp,
+#else
+cmp_t *cmp,
+#endif
+int depth_limit, struct shared *shared)
+{
+ char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
+ size_t d, r;
+ int cmp_result;
+ int swaptype, swap_cnt;
+
+loop:
+ if (depth_limit-- <= 0) {
+#ifdef I_AM_PSORT_B
+ heapsort_b(a, n, es, cmp);
+#elif defined(I_AM_PSORT_R)
+ __heapsort_r(a, n, es, thunk, cmp);
+#else
+ heapsort(a, n, es, cmp);
+#endif
+ return;
+ }
+ SWAPINIT(a, es);
+ swap_cnt = 0;
+ if (n < 7) {
+ for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
+ for (pl = pm;
+ pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
+ pl -= es)
+ swap(pl, pl - es);
+ return;
+ }
+ pm = (char *)a + (n / 2) * es;
+ if (n > 7) {
+ pl = a;
+ pn = (char *)a + (n - 1) * es;
+ if (n > 40) {
+ d = (n / 8) * es;
+ pl = med3(pl, pl + d, pl + 2 * d, cmp, thunk);
+ pm = med3(pm - d, pm, pm + d, cmp, thunk);
+ pn = med3(pn - 2 * d, pn - d, pn, cmp, thunk);
+ }
+ pm = med3(pl, pm, pn, cmp, thunk);
+ }
+ swap(a, pm);
+ pa = pb = (char *)a + es;
+
+ pc = pd = (char *)a + (n - 1) * es;
+ for (;;) {
+ while (pb <= pc && (cmp_result = CMP(thunk, pb, a)) <= 0) {
+ if (cmp_result == 0) {
+ swap_cnt = 1;
+ swap(pa, pb);
+ pa += es;
+ }
+ pb += es;
+ }
+ while (pb <= pc && (cmp_result = CMP(thunk, pc, a)) >= 0) {
+ if (cmp_result == 0) {
+ swap_cnt = 1;
+ swap(pc, pd);
+ pd -= es;
+ }
+ pc -= es;
+ }
+ if (pb > pc)
+ break;
+ swap(pb, pc);
+ swap_cnt = 1;
+ pb += es;
+ pc -= es;
+ }
+
+ pn = (char *)a + n * es;
+ r = min(pa - (char *)a, pb - pa);
+ vecswap(a, pb - r, r);
+ r = min(pd - pc, pn - pd - es);
+ vecswap(pb, pn - r, r);
+
+ if (swap_cnt == 0) { /* Switch to insertion sort */
+ r = 1 + n / 4; /* n >= 7, so r >= 2 */
+ for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
+ for (pl = pm;
+ pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
+ pl -= es) {
+ swap(pl, pl - es);
+ if (++swap_cnt > r) goto nevermind;
+ }
+ return;
+ }
+
+nevermind:
+ if ((r = pb - pa) > es) {
+ r /= es;
+ if (shared && r > shared->turnoff) {
+ union args *args = getargs(shared);
+
+ if (args == NULL)
+ LIBC_ABORT("%s: getargs: %s", shared->who, strerror(errno));
+ args->shared = shared;
+ args->a = a;
+ args->n = r;
+ args->depth_limit = depth_limit;
+ OSAtomicIncrement32(&shared->count);
+ dispatch_async_f(shared->queue, args, _psort_parallel);
+ } else {
+#ifdef I_AM_PSORT_R
+ _psort(a, r, es, thunk, cmp, depth_limit, NULL);
+#else
+ _psort(a, r, es, cmp, depth_limit, NULL);
+#endif
+ }
+ }
+ if ((r = pd - pc) > es) {
+ /* Iterate rather than recurse to save stack space */
+ a = pn - r;
+ n = r / es;
+ goto loop;
+ }
+/* psort(pn - r, r / es, es, cmp);*/
+}
+
+static void
+_psort_parallel(void *x)
+{
+ union args *args = (union args *)x;
+ struct shared *shared = args->shared;
+
+ _psort(args->a, args->n, shared->es,
+#ifdef I_AM_PSORT_R
+ shared->thunk,
+#endif
+ shared->cmp, args->depth_limit, shared);
+ returnargs(shared, args);
+ if(OSAtomicDecrement32(&shared->count) <= 0) {
+ pthread_mutex_lock(&shared->mutex);
+ pthread_cond_signal(&shared->cond);
+ pthread_mutex_unlock(&shared->mutex);
+ }
+}
+
+/* fast, approximate integer square root */
+static size_t
+isqrt(size_t x)
+{
+ size_t s = 1L << (flsl(x) / 2);
+ return (s + x / s) / 2;
+}
+
+void
+#ifdef I_AM_PSORT_R
+psort_r(void *a, size_t n, size_t es, void *thunk, cmp_t *cmp)
+#elif defined(I_AM_PSORT_B)
+psort_b(void *a, size_t n, size_t es, cmp_t ^cmp)
+#else
+psort(void *a, size_t n, size_t es, cmp_t *cmp)
+#endif
+{
+ if (n >= PARALLEL_MIN_SIZE && _NumCPUs() > 1) {
+ struct shared shared;
+ union args *args;
+
+ bzero(&shared, sizeof(shared));
+ shared.sharedlock = OS_SPINLOCK_INIT;
+ if ((args = getargs(&shared)) != NULL) {
+ struct page *p, *pp;
+#ifdef I_AM_PSORT_R
+ shared.who = "psort_r";
+ shared.thunk = thunk;
+#elif defined(I_AM_PSORT_B)
+ shared.who = "psort_b";
+#else
+ shared.who = "psort";
+#endif
+ shared.cmp = cmp;
+ shared.es = es;
+ shared.queue = dispatch_get_concurrent_queue(0);
+ shared.cond = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
+ shared.mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
+ args->a = a;
+ args->n = n;
+ args->depth_limit = DEPTH(n);
+ args->shared = &shared;
+ /*
+ * The turnoff value is the size of a partition that,
+ * below which, we stop doing in parallel, and just do
+ * in the current thread. The value of sqrt(n) was
+ * determined heuristically. There is a smaller
+ * dependence on the slowness of the comparison
+ * function, and there might be a dependence on the
+ * number of processors, but the algorithm has not been
+ * determined. Because the sensitivity to the turnoff
+ * value is relatively low, we use a fast, approximate
+ * integer square root routine that is good enough for
+ * this purpose.
+ */
+ shared.turnoff = isqrt(n);
+ OSAtomicIncrement32(&shared.count);
+ _psort_parallel(args);
+
+ /* wait for queue to drain */
+ pthread_mutex_lock(&shared.mutex);
+ while(shared.count > 0)
+ pthread_cond_wait(&shared.cond, &shared.mutex);
+
+ pthread_mutex_unlock(&shared.mutex);
+ pthread_mutex_destroy(&shared.mutex);
+ pthread_cond_destroy(&shared.cond);
+ for(p = shared.pagelist; p; p = pp) {
+ pp = p->next;
+ munmap(p, PAGESIZE);
+ }
+ return;
+ }
+ }
+ /* Just call qsort */
+#ifdef I_AM_PSORT_R
+ qsort_r(a, n, es, thunk, cmp);
+#elif defined(I_AM_PSORT_B)
+ qsort_b(a, n, es, cmp);
+#else
+ qsort(a, n, es, cmp);
+#endif
+}
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
static char sccsid[] = "@(#)qsort.c 8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/stdlib/qsort.c,v 1.12 2002/09/10 02:04:49 wollman Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/qsort.c,v 1.15 2008/01/14 09:21:34 das Exp $");
#include <stdlib.h>
+#include <string.h>
#ifdef I_AM_QSORT_R
typedef int cmp_t(void *, const void *, const void *);
#else
typedef int cmp_t(const void *, const void *);
#endif
+#ifdef I_AM_QSORT_B
+static inline char *med3(char *, char *, char *, cmp_t ^, void *) __attribute__((always_inline));
+#else
static inline char *med3(char *, char *, char *, cmp_t *, void *) __attribute__((always_inline));
+#endif
static inline void swapfunc(char *, char *, int, int) __attribute__((always_inline));
#define min(a, b) (a) < (b) ? a : b
#endif
static inline char *
-med3(char *a, char *b, char *c, cmp_t *cmp, void *thunk
+med3(char *a, char *b, char *c,
+#ifdef I_AM_QSORT_B
+cmp_t ^cmp,
+#else
+cmp_t *cmp,
+#endif
+void *thunk
#ifndef I_AM_QSORT_R
__unused
#endif
:(CMP(thunk, b, c) > 0 ? b : (CMP(thunk, a, c) < 0 ? a : c ));
}
+#ifdef __LP64__
+#define DEPTH(x) (2 * (flsl((long)(x)) - 1))
+#else /* !__LP64__ */
+#define DEPTH(x) (2 * (fls((int)(x)) - 1))
+#endif /* __LP64__ */
+
#ifdef I_AM_QSORT_R
-void
-qsort_r(void *a, size_t n, size_t es, void *thunk, cmp_t *cmp)
+int __heapsort_r(void *, size_t, size_t, void *, int (*)(void *, const void *, const void *));
+#endif
+
+static void
+_qsort(void *a, size_t n, size_t es,
+#ifdef I_AM_QSORT_R
+void *thunk,
#else
-#define thunk NULL
-void
-qsort(void *a, size_t n, size_t es, cmp_t *cmp)
+#define thunk NULL
+#endif
+#ifdef I_AM_QSORT_B
+cmp_t ^cmp,
+#else
+cmp_t *cmp,
#endif
+int depth_limit)
{
char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
- int d, r, swaptype, swap_cnt;
+ size_t d, r;
+ int cmp_result;
+ int swaptype, swap_cnt;
-loop: SWAPINIT(a, es);
+loop:
+ if (depth_limit-- <= 0) {
+#ifdef I_AM_QSORT_B
+ heapsort_b(a, n, es, cmp);
+#elif defined(I_AM_QSORT_R)
+ __heapsort_r(a, n, es, thunk, cmp);
+#else
+ heapsort(a, n, es, cmp);
+#endif
+ return;
+ }
+ SWAPINIT(a, es);
swap_cnt = 0;
if (n < 7) {
for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
pc = pd = (char *)a + (n - 1) * es;
for (;;) {
- while (pb <= pc && (r = CMP(thunk, pb, a)) <= 0) {
- if (r == 0) {
+ while (pb <= pc && (cmp_result = CMP(thunk, pb, a)) <= 0) {
+ if (cmp_result == 0) {
swap_cnt = 1;
swap(pa, pb);
pa += es;
}
pb += es;
}
- while (pb <= pc && (r = CMP(thunk, pc, a)) >= 0) {
- if (r == 0) {
+ while (pb <= pc && (cmp_result = CMP(thunk, pc, a)) >= 0) {
+ if (cmp_result == 0) {
swap_cnt = 1;
swap(pc, pd);
pd -= es;
pb += es;
pc -= es;
}
+
+ pn = (char *)a + n * es;
+ r = min(pa - (char *)a, pb - pa);
+ vecswap(a, pb - r, r);
+ r = min(pd - pc, pn - pd - es);
+ vecswap(pb, pn - r, r);
+
if (swap_cnt == 0) { /* Switch to insertion sort */
+ r = 1 + n / 4; /* n >= 7, so r >= 2 */
for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
for (pl = pm;
pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
- pl -= es)
+ pl -= es) {
swap(pl, pl - es);
+ if (++swap_cnt > r) goto nevermind;
+ }
return;
}
- pn = (char *)a + n * es;
- r = min(pa - (char *)a, pb - pa);
- vecswap(a, pb - r, r);
- r = min(pd - pc, pn - pd - es);
- vecswap(pb, pn - r, r);
+nevermind:
if ((r = pb - pa) > es)
#ifdef I_AM_QSORT_R
- qsort_r(a, r / es, es, thunk, cmp);
+ _qsort(a, r / es, es, thunk, cmp, depth_limit);
#else
- qsort(a, r / es, es, cmp);
+ _qsort(a, r / es, es, cmp, depth_limit);
#endif
if ((r = pd - pc) > es) {
/* Iterate rather than recurse to save stack space */
}
/* qsort(pn - r, r / es, es, cmp);*/
}
+
+void
+#ifdef I_AM_QSORT_R
+qsort_r(void *a, size_t n, size_t es, void *thunk, cmp_t *cmp)
+#elif defined(I_AM_QSORT_B)
+qsort_b(void *a, size_t n, size_t es, cmp_t ^cmp)
+#else
+qsort(void *a, size_t n, size_t es, cmp_t *cmp)
+#endif
+{
+ _qsort(a, n, es,
+#ifdef I_AM_QSORT_R
+ thunk,
+#endif
+ cmp, DEPTH(n));
+}
.Os
.Sh NAME
.Nm heapsort ,
+#ifdef UNIFDEF_BLOCKS
+.Nm heapsort_b ,
+#endif
.Nm mergesort ,
+#ifdef UNIFDEF_BLOCKS
+.Nm mergesort_b ,
+#endif
.Nm qsort ,
+#ifdef UNIFDEF_BLOCKS
+.Nm qsort_b ,
+#endif
.Nm qsort_r
.Nd sort functions
-.Sh LIBRARY
-.Lb libc
.Sh SYNOPSIS
.In stdlib.h
.Ft int
.Fa "size_t width"
.Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]"
.Fc
+#ifdef UNIFDEF_BLOCKS
+.Ft int
+.Fo heapsort_b
+.Fa "void *base"
+.Fa "size_t nel"
+.Fa "size_t width"
+.Fa "int \*[lp]^compar\*[rp]\*[lp]const void *, const void *\*[rp]"
+.Fc
+#endif
.Ft int
.Fo mergesort
.Fa "void *base"
.Fa "size_t width"
.Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]"
.Fc
+#ifdef UNIFDEF_BLOCKS
+.Ft int
+.Fo mergesort_b
+.Fa "void *base"
+.Fa "size_t nel"
+.Fa "size_t width"
+.Fa "int \*[lp]^compar\*[rp]\*[lp]const void *, const void *\*[rp]"
+.Fc
+#endif
.Ft void
.Fo qsort
.Fa "void *base"
.Fa "size_t width"
.Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]"
.Fc
+#ifdef UNIFDEF_BLOCKS
+.Ft void
+.Fo qsort_b
+.Fa "void *base"
+.Fa "size_t nel"
+.Fa "size_t width"
+.Fa "int \*[lp]^compar\*[rp]\*[lp]const void *, const void *\*[rp]"
+.Fc
+#endif
.Ft void
.Fo qsort_r
.Fa "void *base"
.Em requires
that
.Fa width
-be greater than
+be greater than or equal to
.Dq "sizeof(void *) / 2" .
.Pp
The contents of the array
.Fn heapsort .
Memory availability and pre-existing order in the data can make this
untrue.
+#ifdef UNIFDEF_BLOCKS
+.Pp
+The
+.Fn heapsort_b ,
+.Fn mergesort_b ,
+and
+.Fn qsort_b
+routines are like the corresponding routines without the _b suffix, except
+that the
+.Fa compar
+callback is a block pointer instead of a function pointer.
+#endif
.Sh RETURN VALUES
The
+#ifdef UNIFDEF_BLOCKS
+.Fn qsort ,
+.Fn qsort_b
+#else
.Fn qsort
+#endif
and
.Fn qsort_r
functions
return no value.
.Pp
-.Rv -std heapsort mergesort
+#ifdef UNIFDEF_BLOCKS
+.ds HEAPSORT_B heapsort_b
+.ds MERGESORT_B mergesort_b
+#endif
+.Rv -std heapsort \*[HEAPSORT_B] mergesort \*[MERGESORT_B]
.Sh ERRORS
The
+#ifdef UNIFDEF_BLOCKS
+.Fn heapsort ,
+.Fn heapsort_b ,
+.Fn mergesort
+and
+.Fn mergesort_b
+#else
.Fn heapsort
and
.Fn mergesort
+#endif
functions succeed unless:
.Bl -tag -width Er
.It Bq Er EINVAL
.Fa width
argument to
.Fn mergesort
+#ifdef UNIFDEF_BLOCKS
+or
+.Fn mergesort_b
+#endif
is less than
.Dq "sizeof(void *) / 2" .
.It Bq Er ENOMEM
The
+#ifdef UNIFDEF_BLOCKS
+.Fn heapsort ,
+.Fn heapsort_b ,
+.Fn mergesort
+and
+.Fn mergesort_b
+#else
.Fn heapsort
-or
+and
.Fn mergesort
+#endif
functions
were unable to allocate memory.
.El
--- /dev/null
+/*
+ * This file is in the public domain. Originally written by Garrett
+ * A. Wollman.
+ *
+ * $FreeBSD: src/lib/libc/stdlib/qsort_r.c,v 1.1 2002/09/10 02:04:49 wollman Exp $
+ */
+#define I_AM_QSORT_B
+#include "qsort-fbsd.c"
* in which case the path which caused trouble is left in (resolved).
*/
char *
-realpath(const char *path, char resolved[PATH_MAX])
+realpath(const char *path, char inresolved[PATH_MAX])
{
struct attrs attrs;
struct stat sb;
static dev_t rootdev;
static int rootdev_inited = 0;
ino_t inode;
+ char *resolved;
if (path == NULL) {
errno = EINVAL;
return (NULL);
}
#endif /* __DARWIN_UNIX03 */
+ /*
+ * Extension to the standard; if inresolved == NULL, allocate memory
+ * (first on the stack, then use strdup())
+ */
+ if (!inresolved) {
+ if ((resolved = alloca(PATH_MAX)) == NULL) return (NULL);
+ } else {
+ resolved = inresolved;
+ }
if (!rootdev_inited) {
rootdev_inited = 1;
if (stat("/", &sb) < 0) {
* that each component of the mountpoint
* is a directory (and not a symlink)
*/
- char temp[MNAMELEN];
+ char temp[sizeof(sfs.f_mntonname)];
char *cp;
int ok = 1;
*/
if (resolved_len > 1 && resolved[resolved_len - 1] == '/')
resolved[resolved_len - 1] = '\0';
+ if (!inresolved) resolved = strdup(resolved);
return (resolved);
}
.\" @(#)realpath.3 8.2 (Berkeley) 2/16/94
.\" $FreeBSD: src/lib/libc/stdlib/realpath.3,v 1.13 2003/03/27 20:48:53 fjoe Exp $
.\"
-.Dd February 16, 1994
+.Dd April 5, 2008
.Dt REALPATH 3
.Os
.Sh NAME
.Nm realpath
.Nd returns the canonicalized absolute pathname
-.Sh LIBRARY
-.Lb libc
+.\" .Sh LIBRARY
+.\" .Lb libc
.Sh SYNOPSIS
.In stdlib.h
.Ft "char *"
and
.Pa /../
in
-.Fa file_name ,
-and copies the resulting absolute pathname into
-the memory referenced by
-.Fa resolved_name .
-The
+.Fa file_name .
+If the
.Fa resolved_name
argument
+is non-NULL, the resulting absolute pathname is copied there (it
.Em must
refer to a buffer capable of storing at least
.Dv PATH_MAX
-characters.
+characters).
+.Pp
+As a permitted extension to the standard, if
+.Fa resolved_name
+is NULL,
+memory is allocated for the resulting absolute pathname, and is returned by
+.Fn realpath .
+This memory should be freed by a call to
+.Xr free 3
+when no longer needed.
.Pp
The
.Fn realpath
.Fn realpath
is called.
.Sh "RETURN VALUES"
-The
+On success, the
.Fn realpath
-function returns
+function returns the address of the resulting absolute pathname, which is
.Fa resolved_name
-on success.
+if it was non-NULL, or the address of newly allocated memory.
If an error occurs,
.Fn realpath
returns
-.Dv NULL
-and
+.Dv NULL .
+If
.Fa resolved_name
+was non-NULL, it will
-contains the pathname which caused the problem.
+contain the pathname which caused the problem.
.Sh ERRORS
The function
may fail and set the external variable
.Va errno
for any of the errors specified for the library functions
+.Xr alloca 3 ,
+.Xr getattrlist 2 ,
+.Xr getcwd 3 ,
.Xr lstat 2 ,
.Xr readlink 2 ,
+.Xr stat 2 ,
and
-.Xr getcwd 3 .
-.Sh CAVEATS
-This implementation of
-.Fn realpath
-differs slightly from the Solaris implementation.
-The
-.Bx 4.4
-version always returns absolute pathnames,
-whereas the Solaris implementation will,
-under certain circumstances, return a relative
-.Fa resolved_name
-when given a relative
-.Fa file_name .
+.Xr strdup 3 .
+.\" .Sh CAVEATS
+.\" This implementation of
+.\" .Fn realpath
+.\" differs slightly from the Solaris implementation.
+.\" The
+.\" .Bx 4.4
+.\" version always returns absolute pathnames,
+.\" whereas the Solaris implementation will,
+.\" under certain circumstances, return a relative
+.\" .Fa resolved_name
+.\" when given a relative
+.\" .Fa file_name .
.Sh LEGACY SYNOPSIS
.Fd #include <sys/param.h>
.Fd #include <stdlib.h>
.Fn realpath
is called.
.Sh "SEE ALSO"
+.Xr free 3 ,
.Xr getcwd 3 ,
.Xr compat 5
.Sh HISTORY
__private_extern__ void __unsetenv(const char *, char **, malloc_zone_t *);
#ifndef BUILDING_VARIANT
+/*
+ * Create the environment malloc zone and give it a recognizable name.
+ */
+__private_extern__ int
+init__zone0(int should_set_errno)
+{
+ if (__zone0) return (0);
+
+ __zone0 = malloc_create_zone(0, 0);
+ if (!__zone0) {
+ if (should_set_errno) {
+ errno = ENOMEM;
+ }
+ return (-1);
+ }
+ malloc_set_zone_name(__zone0, "environ");
+ return (0);
+}
+
/*
* The copy flag may have 3 values:
* 1 - make a copy of the name/value pair
void *
_allocenvstate(void)
{
- return (void *)malloc_create_zone(1000 /* unused */, 0 /* unused */);
+ malloc_zone_t *zone;
+ zone = malloc_create_zone(1000 /* unused */, 0 /* unused */);
+ if (zone) {
+ malloc_set_zone_name(zone, "environ");
+ }
+ return (void *)zone;
}
/*
int
_setenvp(const char *name, const char *value, int rewrite, char ***envp, void *state)
{
- /* insure __zone0 is set up */
- if (!__zone0) {
- __zone0 = malloc_create_zone(0, 0);
- if (!__zone0) {
- errno = ENOMEM;
- return (-1);
- }
- }
+ if (init__zone0(1)) return (-1);
return (__setenv(name, value, rewrite, 1, envp, (state ? (malloc_zone_t *)state : __zone0)));
}
int
_unsetenvp(const char *name, char ***envp, void *state)
{
- /* insure __zone0 is set up */
- if (!__zone0) {
- __zone0 = malloc_create_zone(0, 0);
- if (!__zone0) {
- errno = ENOMEM;
- return (-1);
- }
- }
+ if (init__zone0(1)) return (-1);
__unsetenv(name, *envp, (state ? (malloc_zone_t *)state : __zone0));
return 0;
}
if (*value == '=') /* no `=' in value */
++value;
/* insure __zone0 is set up before calling __malloc_check_env_name */
- if (!__zone0) {
- __zone0 = malloc_create_zone(0, 0);
- if (!__zone0) {
- errno = ENOMEM;
- return (-1);
- }
- }
+ if (init__zone0(1)) return (-1);
__malloc_check_env_name(name); /* see if we are changing a malloc environment variable */
return (__setenv(name, value, rewrite, 1, _NSGetEnviron(), __zone0));
}
return (-1);
}
/* insure __zone0 is set up before calling __malloc_check_env_name */
- if (!__zone0) {
- __zone0 = malloc_create_zone(0, 0);
- if (!__zone0) {
- errno = ENOMEM;
- return (-1);
- }
- }
+ if (init__zone0(1)) return (-1);
#else /* !__DARWIN_UNIX03 */
/* no null ptr or empty str */
if(name == NULL || *name == 0)
return;
/* insure __zone0 is set up before calling __malloc_check_env_name */
- if (!__zone0) {
- __zone0 = malloc_create_zone(0, 0);
- if (!__zone0)
- return;
- }
+ if (init__zone0(0)) return;
#endif /* __DARWIN_UNIX03 */
__malloc_check_env_name(name); /* see if we are changing a malloc environment variable */
__unsetenv(name, *_NSGetEnviron(), __zone0);
#include <stdlib.h>
#include <stddef.h>
#include <unistd.h>
+#include <spawn.h>
#include <paths.h>
#include <errno.h>
#include "un-namespace.h"
#include "libc_private.h"
+#include <crt_externs.h>
+#define environ (*_NSGetEnviron())
+
#if __DARWIN_UNIX03
#include <pthread.h>
const char *command;
{
pid_t pid, savedpid;
- int pstat;
+ int pstat, err;
struct sigaction ign, intact, quitact;
- sigset_t newsigblock, oldsigblock;
+ sigset_t newsigblock, oldsigblock, defaultsig;
+ posix_spawnattr_t attr;
+ short flags = POSIX_SPAWN_SETSIGMASK;
+ const char *argv[] = {"sh", "-c", command, NULL};
#if __DARWIN_UNIX03
if (__unix_conforming == 0)
return(1);
}
+ if ((err = posix_spawnattr_init(&attr)) != 0) {
+ errno = err;
+ return -1;
+ }
+ (void)sigemptyset(&defaultsig);
+
#if __DARWIN_UNIX03
pthread_mutex_lock(&__systemfn_mutex);
#endif /* __DARWIN_UNIX03 */
(void)sigemptyset(&ign.sa_mask);
ign.sa_flags = 0;
(void)_sigaction(SIGINT, &ign, &intact);
+ if (intact.sa_handler != SIG_IGN) {
+ sigaddset(&defaultsig, SIGINT);
+ flags |= POSIX_SPAWN_SETSIGDEF;
+ }
(void)_sigaction(SIGQUIT, &ign, &quitact);
+ if (quitact.sa_handler != SIG_IGN) {
+ sigaddset(&defaultsig, SIGQUIT);
+ flags |= POSIX_SPAWN_SETSIGDEF;
+ }
(void)sigemptyset(&newsigblock);
(void)sigaddset(&newsigblock, SIGCHLD);
(void)_sigprocmask(SIG_BLOCK, &newsigblock, &oldsigblock);
- switch(pid = fork()) {
- case -1: /* error */
- break;
- case 0: /* child */
- /*
- * Restore original signal dispositions and exec the command.
- */
- (void)_sigaction(SIGINT, &intact, NULL);
- (void)_sigaction(SIGQUIT, &quitact, NULL);
- (void)_sigprocmask(SIG_SETMASK, &oldsigblock, NULL);
-#if __DARWIN_UNIX03
- pthread_mutex_unlock(&__systemfn_mutex);
-#endif /* __DARWIN_UNIX03 */
- execl(_PATH_BSHELL, "sh", "-c", command, (char *)NULL);
- _exit(127);
- default: /* parent */
+ (void)posix_spawnattr_setsigmask(&attr, &oldsigblock);
+ if (flags & POSIX_SPAWN_SETSIGDEF) {
+ (void)posix_spawnattr_setsigdefault(&attr, &defaultsig);
+ }
+ (void)posix_spawnattr_setflags(&attr, flags);
+
+ err = posix_spawn(&pid, _PATH_BSHELL, NULL, &attr, (char *const *)argv, environ);
+ (void)posix_spawnattr_destroy(&attr);
+ if (err == 0) {
savedpid = pid;
do {
pid = _wait4(savedpid, &pstat, 0, (struct rusage *)0);
} while (pid == -1 && errno == EINTR);
- break;
+ if (pid == -1) pstat = -1;
+ } else if (err == ENOMEM || err == EAGAIN) { /* as if fork failed */
+ pstat = -1;
+ } else {
+ pstat = W_EXITCODE(127, 0); /* couldn't exec shell */
}
+
(void)_sigaction(SIGINT, &intact, NULL);
(void)_sigaction(SIGQUIT, &quitact, NULL);
(void)_sigprocmask(SIG_SETMASK, &oldsigblock, NULL);
#if __DARWIN_UNIX03
pthread_mutex_unlock(&__systemfn_mutex);
#endif /* __DARWIN_UNIX03 */
- return(pid == -1 ? -1 : pstat);
+ return(pstat);
}
__weak_reference(__system, system);
---- asctime.c.orig 2004-11-25 11:38:44.000000000 -0800
-+++ asctime.c 2004-12-07 23:48:08.000000000 -0800
-@@ -23,9 +23,7 @@
+--- asctime.c.orig 2008-12-15 11:41:07.000000000 -0800
++++ asctime.c 2009-01-21 17:09:27.000000000 -0800
+@@ -22,10 +22,10 @@ __FBSDID("$FreeBSD: src/lib/libc/stdtime
+ ** A la ISO/IEC 9945-1, ANSI/IEEE Std 1003.1, Second Edition, 1996-07-12.
*/
++#define EXPECTEDLEN 26
++
char *
-asctime_r(timeptr, buf)
-const struct tm * timeptr;
{
static const char wday_name[][3] = {
"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
+@@ -36,6 +36,8 @@ char * buf;
+ };
+ const char * wn;
+ const char * mn;
++ int len;
++ char tmp[EXPECTEDLEN];
+
+ if (timeptr->tm_wday < 0 || timeptr->tm_wday >= DAYSPERWEEK)
+ wn = "???";
+@@ -48,31 +50,28 @@ char * buf;
+ ** "%.3s %.3s%3d %02.2d:%02.2d:%02.2d %d\n"
+ ** Since the .2 in 02.2d is ignored, we drop it.
+ */
+- (void) sprintf(buf, "%.3s %.3s%3d %02d:%02d:%02d %d\n",
++ /*
++ ** Because various values in the tm structure may cause the
++ ** resulting string to be longer than the 26-bytes that is
++ ** specified in the spec, we should return NULL rather than
++ ** possibly overwrite beyond the string.
++ */
++ len = snprintf(tmp, EXPECTEDLEN, "%.3s %.3s%3d %02d:%02d:%02d %d\n",
+ wn, mn,
+ timeptr->tm_mday, timeptr->tm_hour,
+ timeptr->tm_min, timeptr->tm_sec,
+ TM_YEAR_BASE + timeptr->tm_year);
++ if (len >= EXPECTEDLEN)
++ return NULL;
++ strcpy(buf, tmp);
+ return buf;
+ }
+
+-/*
+-** A la X3J11, with core dump avoidance.
+-*/
+-
+ char *
+ asctime(timeptr)
+ const struct tm * timeptr;
+ {
+- /*
+- ** Big enough for something such as
+- ** ??? ???-2147483648 -2147483648:-2147483648:-2147483648 -2147483648\n
+- ** (two three-character abbreviations, five strings denoting integers,
+- ** three explicit spaces, two explicit colons, a newline,
+- ** and a trailing ASCII nul).
+- */
+- static char result[3 * 2 + 5 * INT_STRLEN_MAXIMUM(int) +
+- 3 + 2 + 1 + 1];
++ static char result[EXPECTEDLEN];
+
+ return asctime_r(timeptr, result);
+ }
---- localtime.c.orig 2007-05-23 18:18:18.000000000 -0700
-+++ localtime.c 2007-05-23 18:20:52.000000000 -0700
-@@ -22,8 +22,22 @@
+--- localtime.c.orig 2008-12-15 11:41:07.000000000 -0800
++++ localtime.c 2009-01-21 15:43:59.000000000 -0800
+@@ -22,8 +22,22 @@ __FBSDID("$FreeBSD: src/lib/libc/stdtime
#include "namespace.h"
#include <sys/types.h>
#include <sys/stat.h>
#include "private.h"
#include "un-namespace.h"
-@@ -33,6 +47,7 @@
-
- #define _MUTEX_LOCK(x) if (__isthreaded) _pthread_mutex_lock(x)
- #define _MUTEX_UNLOCK(x) if (__isthreaded) _pthread_mutex_unlock(x)
-+extern int __pthread_tsd_first;
-
- /*
- ** SunOS 4.1.1 headers lack O_BINARY.
-@@ -135,40 +150,96 @@
+@@ -135,40 +149,96 @@ struct rule {
#define DAY_OF_YEAR 1 /* n - day of year */
#define MONTH_NTH_DAY_OF_WEEK 2 /* Mm.n.d - month, week, day of week */
+ int unix03);
+__private_extern__
+void tzset_basic(void);
-+
-+#define lcl_mutex _st_lcl_mutex
++#define lcl_mutex _st_lcl_mutex
++
+#if !BUILDING_VARIANT
static long detzcode(const char * codep);
-static const char * getzname(const char * strp);
static int tmcomp(const struct tm * atmp,
const struct tm * btmp);
static time_t transtime(time_t janfirst, int year,
-@@ -194,10 +265,15 @@
+@@ -194,10 +264,15 @@ static struct state gmtmem;
#endif /* !defined TZ_STRLEN_MAX */
static char lcl_TZname[TZ_STRLEN_MAX + 1];
char * tzname[2] = {
wildabbr,
-@@ -214,15 +290,62 @@
+@@ -214,15 +289,62 @@ char * tzname[2] = {
static struct tm tm;
static long
detzcode(codep)
const char * const codep;
-@@ -246,14 +369,14 @@
+@@ -246,14 +368,14 @@ settzname(void)
tzname[1] = wildabbr;
#ifdef USG_COMPAT
daylight = 0;
return;
}
#endif /* defined ALL_STATE */
-@@ -266,7 +389,7 @@
+@@ -266,7 +388,7 @@ settzname(void)
if (ttisp->tt_isdst)
daylight = 1;
if (i == 0 || !ttisp->tt_isdst)
#endif /* defined USG_COMPAT */
#ifdef ALTZONE
if (i == 0 || ttisp->tt_isdst)
-@@ -286,6 +409,119 @@
+@@ -286,6 +408,119 @@ settzname(void)
}
}
static int
tzload(name, sp)
const char * name;
-@@ -295,6 +531,9 @@
+@@ -295,6 +530,9 @@ struct state * const sp;
int i;
int fid;
/* XXX The following is from OpenBSD, and I'm not sure it is correct */
if (name != NULL && issetugid() != 0)
if ((name[0] == ':' && name[1] == '/') ||
-@@ -312,7 +551,15 @@
+@@ -312,7 +550,15 @@ struct state * const sp;
** to hold the longest file name string that the implementation
** guarantees can be opened."
*/
if (name[0] == ':')
++name;
-@@ -320,7 +567,11 @@
+@@ -320,7 +566,11 @@ struct state * const sp;
if (!doaccess) {
if ((p = TZDIR) == NULL)
return -1;
return -1;
(void) strcpy(fullname, p);
(void) strcat(fullname, "/");
-@@ -332,6 +583,10 @@
+@@ -332,6 +582,10 @@ struct state * const sp;
doaccess = TRUE;
name = fullname;
}
if (doaccess && access(name, R_OK) != 0)
return -1;
if ((fid = _open(name, OPEN_MODE)) == -1)
-@@ -350,6 +605,9 @@
+@@ -350,6 +604,9 @@ struct state * const sp;
int ttisstdcnt;
int ttisgmtcnt;
i = _read(fid, u.buf, sizeof u.buf);
if (_close(fid) != 0)
return -1;
-@@ -456,14 +714,24 @@
+@@ -456,14 +713,24 @@ static const int year_lengths[2] = {
*/
static const char *
return strp;
}
-@@ -743,16 +1011,15 @@
+@@ -743,16 +1010,15 @@ const int lastditch;
int load_result;
INITIALIZE(dstname);
if (stdlen < 3)
return -1;
if (*name == '\0')
-@@ -764,12 +1031,14 @@
+@@ -764,12 +1030,14 @@ const int lastditch;
}
}
load_result = tzload(TZDEFRULES, sp);
if (dstlen < 3)
return -1;
if (*name != '\0' && *name != ',' && *name != ';') {
-@@ -951,8 +1220,19 @@
+@@ -951,8 +1219,19 @@ struct state * const sp;
static void
tzsetwall_basic(void)
{
lcl_is_set = -1;
#ifdef ALL_STATE
-@@ -966,18 +1246,24 @@
+@@ -966,18 +1245,24 @@ tzsetwall_basic(void)
#endif /* defined ALL_STATE */
if (tzload((char *) NULL, lclptr) != 0)
gmtload(lclptr);
tzset_basic(void)
{
const char * name;
-@@ -988,8 +1274,18 @@
+@@ -988,8 +1273,18 @@ tzset_basic(void)
return;
}
lcl_is_set = strlen(name) < sizeof lcl_TZname;
if (lcl_is_set)
(void) strcpy(lcl_TZname, name);
-@@ -1014,15 +1310,25 @@
+@@ -1014,15 +1309,25 @@ tzset_basic(void)
lclptr->ttis[0].tt_gmtoff = 0;
lclptr->ttis[0].tt_abbrind = 0;
(void) strcpy(lclptr->chars, gmt);
_MUTEX_LOCK(&lcl_mutex);
tzset_basic();
_MUTEX_UNLOCK(&lcl_mutex);
-@@ -1038,7 +1344,11 @@
+@@ -1038,7 +1343,11 @@ tzset(void)
*/
/*ARGSUSED*/
localsub(timep, offset, tmp)
const time_t * const timep;
const long offset;
-@@ -1049,11 +1359,18 @@
+@@ -1049,11 +1358,18 @@ struct tm * const tmp;
int i;
const time_t t = *timep;
}
#endif /* defined ALL_STATE */
if (sp->timecnt == 0 || t < sp->ats[0]) {
-@@ -1076,12 +1393,20 @@
+@@ -1076,12 +1392,20 @@ struct tm * const tmp;
** t += ttisp->tt_gmtoff;
** timesub(&t, 0L, sp, tmp);
*/
}
struct tm *
-@@ -1094,8 +1419,9 @@
+@@ -1094,8 +1418,9 @@ const time_t * const timep;
if (__isthreaded != 0) {
_pthread_mutex_lock(&localtime_mutex);
- if (localtime_key < 0) {
- if (_pthread_key_create(&localtime_key, free) < 0) {
+ if (localtime_key == (pthread_key_t)-1) {
-+ localtime_key = __pthread_tsd_first + 2;
++ localtime_key = __LIBC_PTHREAD_KEY_LOCALTIME;
+ if (pthread_key_init_np(localtime_key, free) < 0) {
_pthread_mutex_unlock(&localtime_mutex);
return(NULL);
}
-@@ -1110,13 +1436,21 @@
+@@ -1110,13 +1435,21 @@ const time_t * const timep;
}
_pthread_mutex_lock(&lcl_mutex);
tzset_basic();
}
}
-@@ -1125,13 +1459,15 @@
+@@ -1125,13 +1458,15 @@ const time_t * const timep;
*/
struct tm *
_MUTEX_UNLOCK(&lcl_mutex);
return tm;
}
-@@ -1140,23 +1476,48 @@
+@@ -1140,23 +1475,48 @@ struct tm * tm;
** gmtsub is to gmtime as localsub is to localtime.
*/
#ifdef TM_ZONE
/*
** Could get fancy here and deliver something such as
-@@ -1168,7 +1529,7 @@
+@@ -1168,7 +1528,7 @@ struct tm * const tmp;
else {
#ifdef ALL_STATE
if (gmtptr == NULL)
else tmp->TM_ZONE = gmtptr->chars;
#endif /* defined ALL_STATE */
#ifndef ALL_STATE
-@@ -1176,6 +1537,9 @@
+@@ -1176,6 +1536,9 @@ struct tm * const tmp;
#endif /* State Farm */
}
#endif /* defined TM_ZONE */
}
struct tm *
-@@ -1186,10 +1550,12 @@
+@@ -1186,10 +1549,12 @@ const time_t * const timep;
static pthread_key_t gmtime_key = -1;
struct tm *p_tm;
- if (gmtime_key < 0) {
- if (_pthread_key_create(&gmtime_key, free) < 0) {
+ if (gmtime_key == (pthread_key_t)-1) {
-+ gmtime_key = __pthread_tsd_first + 3;
++ gmtime_key = __LIBC_PTHREAD_KEY_GMTIME;
+ if (pthread_key_init_np(gmtime_key, free) < 0) {
_pthread_mutex_unlock(&gmtime_mutex);
return(NULL);
}
-@@ -1206,12 +1572,20 @@
+@@ -1206,12 +1571,20 @@ const time_t * const timep;
}
_pthread_setspecific(gmtime_key, p_tm);
}
}
}
-@@ -1224,8 +1598,13 @@
+@@ -1224,8 +1597,13 @@ gmtime_r(timep, tm)
const time_t * const timep;
struct tm * tm;
{
}
#ifdef STD_INSPIRED
-@@ -1235,13 +1614,21 @@
+@@ -1235,13 +1613,21 @@ offtime(timep, offset)
const time_t * const timep;
const long offset;
{
timesub(timep, offset, sp, tmp)
const time_t * const timep;
const long offset;
-@@ -1330,7 +1717,16 @@
+@@ -1330,7 +1716,16 @@ struct tm * const tmp;
LEAPS_THRU_END_OF(y - 1);
y = newy;
}
tmp->tm_yday = (int) days;
ip = mon_lengths[yleap];
for (tmp->tm_mon = 0; days >= (long) ip[tmp->tm_mon]; ++(tmp->tm_mon))
-@@ -1340,6 +1736,9 @@
+@@ -1340,6 +1735,9 @@ struct tm * const tmp;
#ifdef TM_GMTOFF
tmp->TM_GMTOFF = offset;
#endif /* defined TM_GMTOFF */
}
char *
-@@ -1427,12 +1826,17 @@
+@@ -1352,7 +1750,20 @@ const time_t * const timep;
+ ** to local time in the form of a string. It is equivalent to
+ ** asctime(localtime(timer))
+ */
++#ifdef __LP64__
++ /*
++ * In 64-bit, the timep value may produce a time value with a year
++ * that exceeds 32-bits in size (won't fit in struct tm), so localtime
++ * will return NULL.
++ */
++ struct tm *tm = localtime(timep);
++
++ if (tm == NULL)
++ return NULL;
++ return asctime(tm);
++#else /* !__LP64__ */
+ return asctime(localtime(timep));
++#endif /* __LP64__ */
+ }
+
+ char *
+@@ -1362,7 +1773,18 @@ char * buf;
+ {
+ struct tm tm;
+
++#ifdef __LP64__
++ /*
++ * In 64-bit, the timep value may produce a time value with a year
++ * that exceeds 32-bits in size (won't fit in struct tm), so localtime_r
++ * will return NULL.
++ */
++ if (localtime_r(timep, &tm) == NULL)
++ return NULL;
++ return asctime_r(&tm, buf);
++#else /* !__LP64__ */
+ return asctime_r(localtime_r(timep, &tm), buf);
++#endif /* __LP64__ */
+ }
+
+ /*
+@@ -1427,12 +1849,17 @@ const struct tm * const btmp;
}
static time_t
{
const struct state * sp;
int dir;
-@@ -1442,6 +1846,9 @@
+@@ -1442,6 +1869,9 @@ const int do_norm_secs;
time_t newt;
time_t t;
struct tm yourtm, mytm;
*okayp = FALSE;
yourtm = *tmp;
-@@ -1460,33 +1867,64 @@
+@@ -1460,33 +1890,64 @@ const int do_norm_secs;
** Turn yourtm.tm_year into an actual year number for now.
** It is converted back to an offset from TM_YEAR_BASE later.
*/
/* Don't go below 1900 for POLA */
if (yourtm.tm_year < 0)
return WRONG;
-@@ -1527,8 +1965,19 @@
+@@ -1513,7 +1974,13 @@ const int do_norm_secs;
+ ** Divide the search space in half
+ ** (this works whether time_t is signed or unsigned).
+ */
++#ifdef __LP64__
++ /* optimization: see if the value is 31-bit (signed) */
++ t = (((time_t) 1) << (TYPE_BIT(int) - 1)) - 1;
++ bits = ((*funcp)(&t, offset, &mytm) == NULL || tmcomp(&mytm, &yourtm) < 0) ? TYPE_BIT(time_t) - 1 : TYPE_BIT(int) - 1;
++#else /* !__LP64__ */
+ bits = TYPE_BIT(time_t) - 1;
++#endif /* __LP64__ */
+ /*
+ ** If we have more than this, we will overflow tm_year for tmcomp().
+ ** We should really return an error if we cannot represent it.
+@@ -1527,8 +1994,19 @@ const int do_norm_secs;
*/
t = TYPE_SIGNED(time_t) ? 0 : (((time_t) 1) << bits);
for ( ; ; ) {
if (dir != 0) {
if (bits-- < 0)
return WRONG;
-@@ -1539,6 +1988,9 @@
+@@ -1539,6 +2017,9 @@ const int do_norm_secs;
else t += ((time_t) 1) << bits;
continue;
}
if (yourtm.tm_isdst < 0 || mytm.tm_isdst == yourtm.tm_isdst)
break;
/*
-@@ -1547,7 +1999,6 @@
+@@ -1547,7 +2028,6 @@ const int do_norm_secs;
** It's okay to guess wrong since the guess
** gets checked.
*/
#ifdef ALL_STATE
if (sp == NULL)
return WRONG;
-@@ -1560,7 +2011,12 @@
+@@ -1560,7 +2040,12 @@ const int do_norm_secs;
continue;
newt = t + sp->ttis[j].tt_gmtoff -
sp->ttis[i].tt_gmtoff;
if (tmcomp(&mytm, &yourtm) != 0)
continue;
if (mytm.tm_isdst != yourtm.tm_isdst)
-@@ -1579,17 +2035,27 @@
+@@ -1579,17 +2064,27 @@ label:
if ((newt < t) != (saved_seconds < 0))
return WRONG;
t = newt;
{
time_t t;
-@@ -1598,15 +2064,20 @@
+@@ -1598,15 +2093,20 @@ int * const okayp;
** (in case tm_sec contains a value associated with a leap second).
** If that fails, try with normalization of seconds.
*/
{
time_t t;
const struct state * sp;
-@@ -1620,7 +2091,7 @@
+@@ -1620,7 +2120,7 @@ const long offset;
if (tmp->tm_isdst > 1)
tmp->tm_isdst = 1;
#ifdef PCTS
/*
** PCTS code courtesy Grant Sullivan (grant@osf.org).
-@@ -1664,7 +2135,7 @@
+@@ -1664,7 +2164,7 @@ const long offset;
tmp->tm_sec += sp->ttis[otheri].tt_gmtoff -
sp->ttis[samei].tt_gmtoff;
tmp->tm_isdst = !tmp->tm_isdst;
if (okay)
return t;
tmp->tm_sec -= sp->ttis[otheri].tt_gmtoff -
-@@ -1674,19 +2145,25 @@
+@@ -1674,19 +2174,25 @@ const long offset;
}
return WRONG;
}
#ifdef STD_INSPIRED
time_t
-@@ -1702,7 +2179,7 @@
+@@ -1702,7 +2208,7 @@ timegm(tmp)
struct tm * const tmp;
{
tmp->tm_isdst = 0;
}
time_t
-@@ -1711,7 +2188,7 @@
+@@ -1711,7 +2217,7 @@ struct tm * const tmp;
const long offset;
{
tmp->tm_isdst = 0;
}
#endif /* defined STD_INSPIRED */
-@@ -1811,3 +2288,4 @@
+@@ -1811,3 +2317,4 @@ time_t t;
}
#endif /* defined STD_INSPIRED */
---- strptime.c.orig 2008-04-24 01:10:36.000000000 -0700
-+++ strptime.c 2008-04-24 02:01:31.000000000 -0700
-@@ -61,10 +61,13 @@ static char sccsid[] __unused = "@(#)str
+--- strptime.c.orig 2009-03-04 16:49:20.000000000 -0800
++++ strptime.c 2009-05-13 16:39:36.000000000 -0700
+@@ -61,41 +61,56 @@ static char sccsid[] __unused = "@(#)str
#endif /* not lint */
__FBSDID("$FreeBSD: src/lib/libc/stdtime/strptime.c,v 1.35 2003/11/17 04:19:15 nectar Exp $");
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
-@@ -72,30 +75,41 @@ __FBSDID("$FreeBSD: src/lib/libc/stdtime
++#include <stdint.h>
++#include <limits.h>
+ #include "un-namespace.h"
#include "libc_private.h"
#include "timelocal.h"
#define asizeof(a) (sizeof (a) / sizeof ((a)[0]))
+enum {CONVERT_NONE, CONVERT_GMT, CONVERT_ZONE};
++
++#define _strptime(b,f,t,c,l) _strptime0(b,f,t,c,l,-1,0,-1)
+
static char *
-_strptime(const char *buf, const char *fmt, struct tm *tm, int *GMTp)
-+_strptime(const char *buf, const char *fmt, struct tm *tm, int *convp, locale_t loc)
++_strptime0(const char *buf, const char *fmt, struct tm *tm, int *convp, locale_t loc, int year, int yday, int wday)
{
char c;
const char *ptr;
int i,
-+ year = -1,
-+ yday = 0,
-+ wday = -1,
len;
int Ealternative, Oalternative;
- struct lc_time_T *tptr = __get_current_time_locale();
buf++;
else if (c != *buf++)
return 0;
-@@ -114,18 +128,18 @@ label:
+@@ -114,18 +129,18 @@ label:
break;
case '+':
i *= 10;
i += *buf - '0';
len--;
-@@ -133,17 +147,21 @@ label:
+@@ -133,17 +148,21 @@ label:
if (i < 19)
return 0;
if (buf == 0)
return 0;
break;
-@@ -161,47 +179,55 @@ label:
+@@ -161,47 +180,55 @@ label:
goto label;
case 'F':
i *= 10;
i += *buf - '0';
len--;
-@@ -209,19 +235,19 @@ label:
+@@ -209,19 +236,19 @@ label:
if (i < 1 || i > 366)
return 0;
i *= 10;
i += *buf - '0';
len--;
-@@ -237,8 +263,8 @@ label:
+@@ -237,8 +264,8 @@ label:
tm->tm_sec = i;
}
ptr++;
break;
-@@ -254,11 +280,11 @@ label:
+@@ -254,11 +281,11 @@ label:
* XXX The %l specifier may gobble one too many
* digits if used incorrectly.
*/
i *= 10;
i += *buf - '0';
len--;
-@@ -271,8 +297,8 @@ label:
+@@ -271,8 +298,8 @@ label:
tm->tm_hour = i;
ptr++;
break;
-@@ -282,7 +308,7 @@ label:
+@@ -282,7 +309,7 @@ label:
* specifiers.
*/
len = strlen(tptr->am);
if (tm->tm_hour > 12)
return 0;
if (tm->tm_hour == 12)
-@@ -292,7 +318,7 @@ label:
+@@ -292,7 +319,7 @@ label:
}
len = strlen(tptr->pm);
if (tm->tm_hour > 12)
return 0;
if (tm->tm_hour != 12)
-@@ -307,34 +333,28 @@ label:
+@@ -307,34 +334,28 @@ label:
case 'a':
for (i = 0; i < asizeof(tptr->weekday); i++) {
len = strlen(tptr->weekday[i]);
i *= 10;
i += *buf - '0';
len--;
-@@ -342,23 +362,46 @@ label:
+@@ -342,23 +363,46 @@ label:
if (i > 53)
return 0;
ptr++;
break;
-@@ -372,11 +415,18 @@ label:
+@@ -372,11 +416,18 @@ label:
* XXX The %e specifier may gobble one too many
* digits if used incorrectly.
*/
i *= 10;
i += *buf - '0';
len--;
-@@ -386,8 +436,8 @@ label:
+@@ -386,8 +437,8 @@ label:
tm->tm_mday = i;
ptr++;
break;
-@@ -398,19 +448,19 @@ label:
+@@ -398,19 +449,19 @@ label:
if (Oalternative) {
if (c == 'B') {
len = strlen(tptr->alt_month[i]);
break;
}
}
-@@ -422,11 +472,11 @@ label:
+@@ -422,11 +473,11 @@ label:
break;
case 'm':
i *= 10;
i += *buf - '0';
len--;
-@@ -436,8 +486,8 @@ label:
+@@ -436,8 +487,8 @@ label:
tm->tm_mon = i - 1;
ptr++;
break;
-@@ -450,7 +500,7 @@ label:
+@@ -450,7 +501,7 @@ label:
sverrno = errno;
errno = 0;
if (errno == ERANGE || (long)(t = n) != n) {
errno = sverrno;
return 0;
-@@ -458,24 +508,37 @@ label:
+@@ -458,24 +509,82 @@ label:
errno = sverrno;
buf = cp;
gmtime_r(&t, tm);
+#if __DARWIN_UNIX03
+ if (c == 'Y') {
-+ for (i = 0; *buf != 0 && isdigit_l((unsigned char)*buf, loc); buf++) {
-+ i *= 10;
-+ i += *buf - '0';
++ int savei = 0;
++ const char *savebuf = buf;
++ int64_t i64 = 0;
++ int overflow = 0;
++
++ for (len = 0; *buf != 0 && isdigit_l((unsigned char)*buf, loc); buf++) {
++ i64 *= 10;
++ i64 += *buf - '0';
++ if (++len <= 4) {
++ savei = i64;
++ savebuf = buf + 1;
++ }
++ if (i64 > INT_MAX) {
++ overflow++;
++ break;
++ }
++ }
++ /*
++ * Conformance requires %Y to be more than 4
++ * digits. However, there are several cases
++ * where %Y is immediately followed by other
++ * digit values. So we do the conformance
++ * case first (as many digits as possible),
++ * and if we fail, we back up and try just 4
++ * digits for %Y.
++ */
++ if (len > 4 && !overflow) {
++ struct tm savetm = *tm;
++ int saveconv = *convp;
++ const char *saveptr = ptr;
++ char *ret;
++
++ if (i64 < 1900)
++ return 0;
++
++ tm->tm_year = i64 - 1900;
++
++ if (*buf != 0 && isspace_l((unsigned char)*buf, loc))
++ while (*ptr != 0 && !isspace_l((unsigned char)*ptr, loc) && *ptr != '%')
++ ptr++;
++ ret = _strptime0(buf, ptr, tm, convp, loc, tm->tm_year, yday, wday);
++ if (ret) return ret;
++ /* Failed, so try 4-digit year */
++ *tm = savetm;
++ *convp = saveconv;
++ ptr = saveptr;
+ }
++ buf = savebuf;
++ i = savei;
+ } else {
+ len = 2;
+#else /* !__DARWIN_UNIX03 */
if (c == 'Y')
i -= 1900;
if (c == 'y' && i < 69)
-@@ -483,35 +546,58 @@ label:
+@@ -483,35 +592,58 @@ label:
if (i < 0)
return 0;
+ *convp = CONVERT_GMT;
+ buf += len;
+ break;
- }
++ }
+ tzset();
+ tzlen = strlen(tzname[0]);
+ if (len == tzlen && strncmp(buf, tzname[0], tzlen) == 0) {
+ tm->tm_isdst = 1;
+ buf += len;
+ break;
-+ }
+ }
+ return 0;
+ }
+
}
break;
}
-@@ -524,14 +610,39 @@ char *
+@@ -524,14 +656,39 @@ char *
strptime(const char * __restrict buf, const char * __restrict fmt,
struct tm * __restrict tm)
{
# set the LIBC_ALIAS_* macros so we can decorate the symbol independent
# of other macro settings
+CFLAGS-getdate.c += -D_DARWIN_UNLIMITED_STREAMS
CFLAGS-localtime-fbsd.c += -DLIBC_ALIAS_MKTIME
CFLAGS-strftime-fbsd.c += -DLIBC_ALIAS_STRFTIME -DLIBC_ALIAS_STRFTIME_L
CFLAGS-strptime-fbsd.c += -DLIBC_ALIAS_STRPTIME -DLIBC_ALIAS_STRPTIME_L
** A la ISO/IEC 9945-1, ANSI/IEEE Std 1003.1, Second Edition, 1996-07-12.
*/
+#define EXPECTEDLEN 26
+
char *
asctime_r(const struct tm * __restrict timeptr, char * __restrict buf)
{
};
const char * wn;
const char * mn;
+ int len;
+ char tmp[EXPECTEDLEN];
if (timeptr->tm_wday < 0 || timeptr->tm_wday >= DAYSPERWEEK)
wn = "???";
** "%.3s %.3s%3d %02.2d:%02.2d:%02.2d %d\n"
** Since the .2 in 02.2d is ignored, we drop it.
*/
- (void) sprintf(buf, "%.3s %.3s%3d %02d:%02d:%02d %d\n",
+ /*
+ ** Because various values in the tm structure may cause the
+ ** resulting string to be longer than the 26 bytes that are
+ ** specified in the spec, we should return NULL rather than
+ ** possibly overwriting beyond the string.
+ */
+ len = snprintf(tmp, EXPECTEDLEN, "%.3s %.3s%3d %02d:%02d:%02d %d\n",
wn, mn,
timeptr->tm_mday, timeptr->tm_hour,
timeptr->tm_min, timeptr->tm_sec,
TM_YEAR_BASE + timeptr->tm_year);
+ if (len >= EXPECTEDLEN)
+ return NULL;
+ strcpy(buf, tmp);
return buf;
}
-/*
-** A la X3J11, with core dump avoidance.
-*/
-
char *
asctime(timeptr)
const struct tm * timeptr;
{
- /*
- ** Big enough for something such as
- ** ??? ???-2147483648 -2147483648:-2147483648:-2147483648 -2147483648\n
- ** (two three-character abbreviations, five strings denoting integers,
- ** three explicit spaces, two explicit colons, a newline,
- ** and a trailing ASCII nul).
- */
- static char result[3 * 2 + 5 * INT_STRLEN_MAXIMUM(int) +
- 3 + 2 + 1 + 1];
+ static char result[EXPECTEDLEN];
return asctime_r(timeptr, result);
}
#define _MUTEX_LOCK(x) if (__isthreaded) _pthread_mutex_lock(x)
#define _MUTEX_UNLOCK(x) if (__isthreaded) _pthread_mutex_unlock(x)
-extern int __pthread_tsd_first;
/*
** SunOS 4.1.1 headers lack O_BINARY.
if (__isthreaded != 0) {
_pthread_mutex_lock(&localtime_mutex);
if (localtime_key == (pthread_key_t)-1) {
- localtime_key = __pthread_tsd_first + 2;
+ localtime_key = __LIBC_PTHREAD_KEY_LOCALTIME;
if (pthread_key_init_np(localtime_key, free) < 0) {
_pthread_mutex_unlock(&localtime_mutex);
return(NULL);
if (__isthreaded != 0) {
_pthread_mutex_lock(&gmtime_mutex);
if (gmtime_key == (pthread_key_t)-1) {
- gmtime_key = __pthread_tsd_first + 3;
+ gmtime_key = __LIBC_PTHREAD_KEY_GMTIME;
if (pthread_key_init_np(gmtime_key, free) < 0) {
_pthread_mutex_unlock(&gmtime_mutex);
return(NULL);
** to local time in the form of a string. It is equivalent to
** asctime(localtime(timer))
*/
+#ifdef __LP64__
+ /*
+ * In 64-bit, the timep value may produce a time value with a year
+ * that exceeds 32-bits in size (won't fit in struct tm), so localtime
+ * will return NULL.
+ */
+ struct tm *tm = localtime(timep);
+
+ if (tm == NULL)
+ return NULL;
+ return asctime(tm);
+#else /* !__LP64__ */
return asctime(localtime(timep));
+#endif /* __LP64__ */
}
char *
{
struct tm tm;
+#ifdef __LP64__
+ /*
+ * In 64-bit, the timep value may produce a time value with a year
+ * that exceeds 32-bits in size (won't fit in struct tm), so localtime_r
+ * will return NULL.
+ */
+ if (localtime_r(timep, &tm) == NULL)
+ return NULL;
+ return asctime_r(&tm, buf);
+#else /* !__LP64__ */
return asctime_r(localtime_r(timep, &tm), buf);
+#endif /* __LP64__ */
}
/*
** Divide the search space in half
** (this works whether time_t is signed or unsigned).
*/
+#ifdef __LP64__
+ /* optimization: see if the value is 31-bit (signed) */
+ t = (((time_t) 1) << (TYPE_BIT(int) - 1)) - 1;
+ bits = ((*funcp)(&t, offset, &mytm) == NULL || tmcomp(&mytm, &yourtm) < 0) ? TYPE_BIT(time_t) - 1 : TYPE_BIT(int) - 1;
+#else /* !__LP64__ */
bits = TYPE_BIT(time_t) - 1;
+#endif /* __LP64__ */
/*
** If we have more than this, we will overflow tm_year for tmcomp().
** We should really return an error if we cannot represent it.
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
+#include <stdint.h>
+#include <limits.h>
#include "un-namespace.h"
#include "libc_private.h"
#include "timelocal.h"
enum {CONVERT_NONE, CONVERT_GMT, CONVERT_ZONE};
+#define _strptime(b,f,t,c,l) _strptime0(b,f,t,c,l,-1,0,-1)
+
static char *
-_strptime(const char *buf, const char *fmt, struct tm *tm, int *convp, locale_t loc)
+_strptime0(const char *buf, const char *fmt, struct tm *tm, int *convp, locale_t loc, int year, int yday, int wday)
{
char c;
const char *ptr;
int i,
- year = -1,
- yday = 0,
- wday = -1,
len;
int Ealternative, Oalternative;
struct lc_time_T *tptr = __get_current_time_locale(loc);
#if __DARWIN_UNIX03
if (c == 'Y') {
- for (i = 0; *buf != 0 && isdigit_l((unsigned char)*buf, loc); buf++) {
- i *= 10;
- i += *buf - '0';
+ int savei = 0;
+ const char *savebuf = buf;
+ int64_t i64 = 0;
+ int overflow = 0;
+
+ for (len = 0; *buf != 0 && isdigit_l((unsigned char)*buf, loc); buf++) {
+ i64 *= 10;
+ i64 += *buf - '0';
+ if (++len <= 4) {
+ savei = i64;
+ savebuf = buf + 1;
+ }
+ if (i64 > INT_MAX) {
+ overflow++;
+ break;
+ }
+ }
+ /*
+ * Conformance requires %Y to be more then 4
+ * digits. However, there are several cases
+ * where %Y is immediately followed by other
+ * digits values. So we do the conformance
+ * case first (as many digits as possible),
+ * and if we fail, we backup and try just 4
+ * digits for %Y.
+ */
+ if (len > 4 && !overflow) {
+ struct tm savetm = *tm;
+ int saveconv = *convp;
+ const char *saveptr = ptr;
+ char *ret;
+
+ if (i64 < 1900)
+ return 0;
+
+ tm->tm_year = i64 - 1900;
+
+ if (*buf != 0 && isspace_l((unsigned char)*buf, loc))
+ while (*ptr != 0 && !isspace_l((unsigned char)*ptr, loc) && *ptr != '%')
+ ptr++;
+ ret = _strptime0(buf, ptr, tm, convp, loc, tm->tm_year, yday, wday);
+ if (ret) return ret;
+ /* Failed, so try 4-digit year */
+ *tm = savetm;
+ *convp = saveconv;
+ ptr = saveptr;
}
+ buf = savebuf;
+ i = savei;
} else {
len = 2;
#else /* !__DARWIN_UNIX03 */
---- _SB/Libc/string/FreeBSD/memccpy.3 2003-05-20 15:23:54.000000000 -0700
-+++ _SB/Libc/string/FreeBSD/memccpy.3.edit 2006-06-28 16:55:53.000000000 -0700
-@@ -43,26 +43,31 @@
+--- memccpy.3.orig 2008-02-29 10:45:52.000000000 -0800
++++ memccpy.3 2008-02-29 12:03:32.000000000 -0800
+@@ -43,27 +43,35 @@
.Sh SYNOPSIS
.In string.h
.Ft void *
-.Fa len
+.Fa n
bytes are copied, and a NULL pointer is returned.
++.Pp
++The source and destination strings should not overlap, as the
++behavior is undefined.
.Sh SEE ALSO
.Xr bcopy 3 ,
+ .Xr memcpy 3 ,
---- memset.3 2003-05-20 15:23:54.000000000 -0700
-+++ memset.3.edit 2006-06-28 16:55:53.000000000 -0700
-@@ -41,29 +41,34 @@
+--- memset.3.orig 2008-02-29 10:45:51.000000000 -0800
++++ memset.3 2008-02-29 10:59:18.000000000 -0800
+@@ -41,7 +41,7 @@
.Os
.Sh NAME
.Nm memset
-.Nd write a byte to byte string
-+.Nd write a byte to a byte string
++.Nd fill a byte string with a byte value
.Sh LIBRARY
.Lb libc
.Sh SYNOPSIS
- .In string.h
- .Ft void *
--.Fn memset "void *b" "int c" "size_t len"
-+.Fo memset
-+.Fa "void *b"
-+.Fa "int c"
-+.Fa "size_t n"
-+.Fc
- .Sh DESCRIPTION
- The
- .Fn memset
- function
- writes
--.Fa len
-+.Fa n
+@@ -56,7 +56,7 @@
+ .Fa len
bytes of value
.Fa c
- (converted to an unsigned char) to the string
--.Fa b .
-+.Fa s .
+-(converted to an unsigned char) to the string
++(converted to an unsigned char) to the byte string
+ .Fa b .
.Sh RETURN VALUES
The
- .Fn memset
+@@ -64,6 +64,7 @@
function returns its first argument.
.Sh SEE ALSO
.Xr bzero 3 ,
---- _SB/Libc/string/FreeBSD/strcat.3 2003-05-20 15:23:54.000000000 -0700
-+++ _SB/Libc/string/FreeBSD/strcat.3.edit 2006-06-28 16:55:53.000000000 -0700
+--- strcat.3.orig 2008-02-29 10:45:51.000000000 -0800
++++ strcat.3 2008-02-29 12:07:09.000000000 -0800
@@ -40,16 +40,24 @@
.Dt STRCAT 3
.Os
.Sh DESCRIPTION
The
.Fn strcat
-@@ -57,22 +65,22 @@
+@@ -57,24 +65,27 @@
.Fn strncat
functions
append a copy of the null-terminated string
+.Fa s2 ,
and then adds a terminating
.Ql \e0 .
++.Pp
++The source and destination strings should not overlap, as the
++behavior is undefined.
.Sh RETURN VALUES
-@@ -82,7 +90,7 @@
+ The
+ .Fn strcat
+@@ -82,7 +93,7 @@
.Fn strncat
functions
return the pointer
.Sh SECURITY CONSIDERATIONS
The
.Fn strcat
-@@ -114,7 +122,7 @@
+@@ -114,7 +125,7 @@
void
foo(const char *arbitrary_string)
{
#if defined(BAD)
/*
-@@ -149,11 +157,6 @@
+@@ -149,11 +160,6 @@
.Xr strcpy 3 ,
.Xr strlcat 3 ,
.Xr strlcpy 3
---- _SB/Libc/string/FreeBSD/strcpy.3 2003-05-20 15:23:54.000000000 -0700
-+++ _SB/Libc/string/FreeBSD/strcpy.3.edit 2006-06-28 16:55:53.000000000 -0700
+--- strcpy.3.orig 2008-02-29 10:45:51.000000000 -0800
++++ strcpy.3 2008-02-29 12:08:34.000000000 -0800
@@ -40,18 +40,30 @@
.Dt STRCPY 3
.Os
.Sh DESCRIPTION
The
.Fn stpcpy
-@@ -59,33 +71,33 @@
+@@ -59,36 +71,39 @@
.Fn strcpy
functions
copy the string
is
.Em not
terminated.
-@@ -96,13 +108,13 @@
++.Pp
++The source and destination strings should not overlap, as the
++behavior is undefined.
+ .Sh RETURN VALUES
+ The
+ .Fn strcpy
+@@ -96,13 +111,13 @@
.Fn strncpy
functions
return
.Sh EXAMPLES
The following sets
.Va chararray
-@@ -128,7 +140,7 @@
+@@ -128,7 +143,7 @@
.Em not
.Tn NUL
terminate
because the length of the source string is greater than or equal
to the length argument.
.Pp
-@@ -159,7 +171,7 @@
+@@ -159,7 +174,7 @@
.Pp
.Dl "(void)strlcpy(buf, input, sizeof(buf));"
.Pp
.Xr strlcpy 3
is not defined in any standards, it should
only be used when portability is not a concern.
-@@ -179,11 +191,6 @@
+@@ -179,11 +194,6 @@
.Xr memcpy 3 ,
.Xr memmove 3 ,
.Xr strlcpy 3
--- /dev/null
+--- strlcpy.3.orig 2008-02-29 10:45:51.000000000 -0800
++++ strlcpy.3 2008-02-29 12:11:21.000000000 -0800
+@@ -103,6 +103,9 @@
+ It will append at most
+ .Fa size
+ - strlen(dst) - 1 bytes, NUL-terminating the result.
++.Pp
++The source and destination strings should not overlap, as the
++behavior is undefined.
+ .Sh RETURN VALUES
+ The
+ .Fn strlcpy
Otherwise,
.Fa n
bytes are copied, and a NULL pointer is returned.
+.Pp
+The source and destination strings should not overlap, as the
+behavior is undefined.
.Sh SEE ALSO
.Xr bcopy 3 ,
.Xr memcpy 3 ,
.Os
.Sh NAME
.Nm memset
-.Nd write a byte to a byte string
+.Nd fill a byte string with a byte value
.Sh LIBRARY
.Lb libc
.Sh SYNOPSIS
.In string.h
.Ft void *
-.Fo memset
-.Fa "void *b"
-.Fa "int c"
-.Fa "size_t n"
-.Fc
+.Fn memset "void *b" "int c" "size_t len"
.Sh DESCRIPTION
The
.Fn memset
function
writes
-.Fa n
+.Fa len
bytes of value
.Fa c
-(converted to an unsigned char) to the string
-.Fa s .
+(converted to an unsigned char) to the byte string
+.Fa b .
.Sh RETURN VALUES
The
.Fn memset
.Fa s2 ,
and then adds a terminating
.Ql \e0 .
+.Pp
+The source and destination strings should not overlap, as the
+behavior is undefined.
.Sh RETURN VALUES
The
.Fn strcat
is
.Em not
terminated.
+.Pp
+The source and destination strings should not overlap, as the
+behavior is undefined.
.Sh RETURN VALUES
The
.Fn strcpy
+++ /dev/null
-./strlcpy.3
\ No newline at end of file
--- /dev/null
+.\" $OpenBSD: strlcpy.3,v 1.5 1999/06/06 15:17:32 aaron Exp $
+.\"
+.\" Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. The name of the author may not be used to endorse or promote products
+.\" derived from this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+.\" INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+.\" AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+.\" THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+.\" EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+.\" PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+.\" OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+.\" WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+.\" OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+.\" ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" $FreeBSD: src/lib/libc/string/strlcpy.3,v 1.13 2004/07/02 23:52:13 ru Exp $
+.\"
+.Dd June 22, 1998
+.Dt STRLCPY 3
+.Os
+.Sh NAME
+.Nm strlcpy ,
+.Nm strlcat
+.Nd size-bounded string copying and concatenation
+.Sh LIBRARY
+.Lb libc
+.Sh SYNOPSIS
+.In string.h
+.Ft size_t
+.Fn strlcpy "char *dst" "const char *src" "size_t size"
+.Ft size_t
+.Fn strlcat "char *dst" "const char *src" "size_t size"
+.Sh DESCRIPTION
+The
+.Fn strlcpy
+and
+.Fn strlcat
+functions copy and concatenate strings respectively.
+They are designed
+to be safer, more consistent, and less error prone replacements for
+.Xr strncpy 3
+and
+.Xr strncat 3 .
+Unlike those functions,
+.Fn strlcpy
+and
+.Fn strlcat
+take the full size of the buffer (not just the length) and guarantee to
+NUL-terminate the result (as long as
+.Fa size
+is larger than 0 or, in the case of
+.Fn strlcat ,
+as long as there is at least one byte free in
+.Fa dst ) .
+Note that you should include a byte for the NUL in
+.Fa size .
+Also note that
+.Fn strlcpy
+and
+.Fn strlcat
+only operate on true
+.Dq C
+strings.
+This means that for
+.Fn strlcpy
+.Fa src
+must be NUL-terminated and for
+.Fn strlcat
+both
+.Fa src
+and
+.Fa dst
+must be NUL-terminated.
+.Pp
+The
+.Fn strlcpy
+function copies up to
+.Fa size
+- 1 characters from the NUL-terminated string
+.Fa src
+to
+.Fa dst ,
+NUL-terminating the result.
+.Pp
+The
+.Fn strlcat
+function appends the NUL-terminated string
+.Fa src
+to the end of
+.Fa dst .
+It will append at most
+.Fa size
+- strlen(dst) - 1 bytes, NUL-terminating the result.
+.Pp
+The source and destination strings should not overlap, as the
+behavior is undefined.
+.Sh RETURN VALUES
+The
+.Fn strlcpy
+and
+.Fn strlcat
+functions return the total length of the string they tried to
+create.
+For
+.Fn strlcpy
+that means the length of
+.Fa src .
+For
+.Fn strlcat
+that means the initial length of
+.Fa dst
+plus
+the length of
+.Fa src .
+While this may seem somewhat confusing it was done to make
+truncation detection simple.
+.Pp
+Note however, that if
+.Fn strlcat
+traverses
+.Fa size
+characters without finding a NUL, the length of the string is considered
+to be
+.Fa size
+and the destination string will not be NUL-terminated (since there was
+no space for the NUL).
+This keeps
+.Fn strlcat
+from running off the end of a string.
+In practice this should not happen (as it means that either
+.Fa size
+is incorrect or that
+.Fa dst
+is not a proper
+.Dq C
+string).
+The check exists to prevent potential security problems in incorrect code.
+.Sh EXAMPLES
+The following code fragment illustrates the simple case:
+.Bd -literal -offset indent
+char *s, *p, buf[BUFSIZ];
+
+\&...
+
+(void)strlcpy(buf, s, sizeof(buf));
+(void)strlcat(buf, p, sizeof(buf));
+.Ed
+.Pp
+To detect truncation, perhaps while building a pathname, something
+like the following might be used:
+.Bd -literal -offset indent
+char *dir, *file, pname[MAXPATHLEN];
+
+\&...
+
+if (strlcpy(pname, dir, sizeof(pname)) >= sizeof(pname))
+ goto toolong;
+if (strlcat(pname, file, sizeof(pname)) >= sizeof(pname))
+ goto toolong;
+.Ed
+.Pp
+Since we know how many characters we copied the first time, we can
+speed things up a bit by using a copy instead of an append:
+.Bd -literal -offset indent
+char *dir, *file, pname[MAXPATHLEN];
+size_t n;
+
+\&...
+
+n = strlcpy(pname, dir, sizeof(pname));
+if (n >= sizeof(pname))
+ goto toolong;
+if (strlcpy(pname + n, file, sizeof(pname) - n) >= sizeof(pname) - n)
+ goto toolong;
+.Ed
+.Pp
+However, one may question the validity of such optimizations, as they
+defeat the whole purpose of
+.Fn strlcpy
+and
+.Fn strlcat .
+As a matter of fact, the first version of this manual page got it wrong.
+.Sh SEE ALSO
+.Xr snprintf 3 ,
+.Xr strncat 3 ,
+.Xr strncpy 3
+.Sh HISTORY
+The
+.Fn strlcpy
+and
+.Fn strlcat
+functions first appeared in
+.Ox 2.4 ,
+and made their appearance in
+.Fx 3.3 .
# Include machine dependent definitions.
#
-# MDASM names override the default syscall names in MIASM.
-# NOASM will prevent the default syscall code from being generated.
-#
.sinclude "${.CURDIR}/${MACHINE_ARCH}/sys/Makefile.inc"
# sys sources
.PATH: ${.CURDIR}/sys
CWD := ${.CURDIR}/sys
-# Include the generated makefile containing the *complete* list
-# of syscall names in MIASM.
-#.include "${.CURDIR}/../../sys/sys/syscall.mk"
-
# Sources common to both syscall interfaces:
MISRCS += chmod.c chmodx_np.c crt_externs.c \
mmap.c \
openx_np.c \
posix_spawn.c \
- select.c sem_open.c sem_unlink.c semctl.c \
- setrlimit.c \
- shm_open.c shm_unlink.c sigaction.c sigcatch.c sigsuspend.c \
+ remove_counter.c rename.c rmdir.c \
+ select.c setrlimit.c settimeofday.c \
+ sigaction.c sigcatch.c sigsuspend.c \
sigtramp.c statx_np.c \
- umaskx_np.c
+ umaskx_np.c unlink.c
.ifdef FEATURE_MEM_THERM_NOTIFICATION_APIS
MISRCS += OSMemoryNotification.c OSThermalNotification.c
.if (${MACHINE_ARCH} != ppc)
.if (${MACHINE_ARCH} != i386)
+.if (${MACHINE_ARCH} != x86_64)
MISRCS+= context-stubs.c
.endif
.endif
+.endif
.include "Makefile.obsd_begin"
OBSDMISRCS= stack_protector.c
.ifdef FEATURE_PATCH_3375657
# patches for sem_open() sem_unlink() shm_open() shm_unlink()
-MISRCS+= fix-3375657.c
+MISRCS+= fix-3375657.c sem_open.c sem_unlink.c shm_open.c shm_unlink.c
.endif # FEATURE_PATCH_3375657
+DARWINEXTSNSRCS += getgroups.c
+
INODE32SRCS += statx_np.c
CANCELABLESRCS += select.c sigsuspend.c
.if defined(LP64) || (${MACHINE_ARCH} == arm)
CANCELABLESRCS+= fcntl.c
MISRCS+= fcntl.c ioctl.c
+.endif
+.if defined(LP64)
PRE1050SRCS+= select.c
.endif
select.c semctl.c sendmsg.c sendto.c setattrlist.c \
shmctl.c sigsuspend.c socketpair.c
-# we need to create open.h, which just contains a definition for O_NOCTTY
-open.${OBJSUFFIX}: open.h
-open.h :
- ${CC} -E -dD ${CFLAGS} -include fcntl.h -x c /dev/null | grep O_NOCTTY > ${.TARGET}
-
.for _src in msgctl.c semctl.c shmctl.c
CFLAGS-${_src} += -DKERNEL
.endfor
#CFLAGS-msgsnd.c += -DLIBC_ALIAS_MSGSND
CFLAGS-msync.c += -DLIBC_ALIAS_MSYNC
CFLAGS-munmap.c += -DLIBC_ALIAS_MUNMAP
-CFLAGS-open.c += -DLIBC_ALIAS_OPEN
+#CFLAGS-open.c += -DLIBC_ALIAS_OPEN
#CFLAGS-poll.c += -DLIBC_ALIAS_POLL
#CFLAGS-pread.c += -DLIBC_ALIAS_PREAD
#CFLAGS-pwrite.c += -DLIBC_ALIAS_PWRITE
#CFLAGS-write.c += -DLIBC_ALIAS_WRITE
#CFLAGS-writev.c += -DLIBC_ALIAS_WRITEV
-# Add machine dependent asm sources:
-SRCS+=${MDASM}
-
-# Look though the complete list of syscalls (MIASM) for names that are
-# not defined with machine dependent implementations (MDASM) and are
-# not declared for no generation of default code (NOASM). Add each
-# syscall that satisfies these conditions to the ASM list.
-.for _asm in ${MIASM}
-.if (${MDASM:R:M${_asm:R}} == "")
-.if (${NOASM:R:M${_asm:R}} == "")
-ASM+=$(_asm)
-.endif
-.endif
-.endfor
-
-OBJS+= ${ASM} ${PSEUDO}
-
-SASM= ${ASM:S/.o/.S/}
-
-SPSEUDO= ${PSEUDO:S/.o/.S/}
-
-SRCS+= ${SASM} ${SPSEUDO}
-
-# Generated files
-CLEANFILES+= ${SASM} ${SPSEUDO}
-
-${SASM}:
- printf '#include <SYS.h>\nRSYSCALL(${.PREFIX})\n' > ${.TARGET}
-
-${SPSEUDO}:
- printf '#include <SYS.h>\nPSEUDO(${.PREFIX:S/_//})\n' \
- > ${.TARGET}
-
COPYFILES+= ${.CURDIR}/sys/libc.syscall
.if ${LIB} == "c"
the barrier. On a uniprocessor, the barrier operation is typically a nop.
On a multiprocessor, the barrier can be quite expensive.
.Pp
-Most code will want to use the barrier functions to insure that memory shared
+Most code will want to use the barrier functions to ensure that memory shared
between threads is properly synchronized. For example, if you want to initialize
a shared data structure and then atomically increment a variable to indicate
that the initialization is complete, then you must use OSAtomicIncrement32Barrier()
#include <stdlib.h>
#endif
-static int chmodx_syscall(void *obj, uid_t fsowner, gid_t fsgrp, int mode, struct kauth_filesec *fsacl);
-static int fchmodx_syscall(void *obj, uid_t fsowner, gid_t fsgrp, int mode, struct kauth_filesec *fsacl);
+static int chmodx_syscall(void *obj, uid_t fsowner, gid_t fsgrp, int mode, kauth_filesec_t fsacl);
+static int fchmodx_syscall(void *obj, uid_t fsowner, gid_t fsgrp, int mode, kauth_filesec_t fsacl);
static int chmodx1(void *obj,
int (* chmod_syscall)(void *obj, uid_t fsowner, gid_t fsgrp, int mode,
- struct kauth_filesec *fsacl),
+ kauth_filesec_t fsacl),
filesec_t fsec);
/*
/*
* Chmod syscalls.
*/
-extern int __chmod_extended(char *, uid_t, gid_t, int, struct kauth_filesec *);
-extern int __fchmod_extended(int, uid_t, gid_t, int, struct kauth_filesec *);
+extern int __chmod_extended(char *, uid_t, gid_t, int, kauth_filesec_t);
+extern int __fchmod_extended(int, uid_t, gid_t, int, kauth_filesec_t);
static int
-chmodx_syscall(void *obj, uid_t fsowner, gid_t fsgrp, int mode, struct kauth_filesec *fsacl)
+chmodx_syscall(void *obj, uid_t fsowner, gid_t fsgrp, int mode, kauth_filesec_t fsacl)
{
char *path = *(char **)obj;
}
static int
-fchmodx_syscall(void *obj, uid_t fsowner, gid_t fsgrp, int mode, struct kauth_filesec *fsacl)
+fchmodx_syscall(void *obj, uid_t fsowner, gid_t fsgrp, int mode, kauth_filesec_t fsacl)
{
int fd = *(int *)obj;
return(__fchmod_extended(fd, fsowner, fsgrp, mode, fsacl));
static int
chmodx1(void *obj,
- int (chmod_syscall)(void *obj, uid_t fsowner, gid_t fsgrp, int mode, struct kauth_filesec *fsacl),
+ int (chmod_syscall)(void *obj, uid_t fsowner, gid_t fsgrp, int mode, kauth_filesec_t fsacl),
filesec_t fsec)
{
uid_t fsowner = KAUTH_UID_NONE;
size_t size = 0;
int fsacl_used = 0;
int delete_acl = 0;
- struct kauth_filesec *fsacl = NULL;
+ kauth_filesec_t fsacl = KAUTH_FILESEC_NONE;
struct kauth_filesec static_filesec;
if (fsec == NULL) {
}
/* no ACL, use local filesec */
- if (fsacl == NULL) {
+ if (fsacl == KAUTH_FILESEC_NONE) {
bzero(&static_filesec, sizeof(static_filesec));
fsacl = &static_filesec;
fsacl->fsec_magic = KAUTH_FILESEC_MAGIC;
if (delete_acl) {
fsacl = _FILESEC_REMOVE_ACL;
} else {
- fsacl = NULL;
+ fsacl = KAUTH_FILESEC_NONE;
}
}
#if defined(__DYNAMIC__)
#include "mach-o/dyld.h" /* defines _dyld_lookup_and_bind() */
-#define STRINGIFY(a) # a
#define DECLARE_VAR(var, type) \
static type * var ## _pointer = 0
#define DECLARE_PROGNAME(var, type) \
static type * var ## _pointer = 0; \
static type _priv_ ## var = 0
-#define SETUP_VAR(var) \
- if ( var ## _pointer == 0) { \
- _dyld_lookup_and_bind( STRINGIFY(_ ## var), \
- (unsigned long *) & var ## _pointer, 0); \
- }
-#define SETUP_PROGNAME(var) \
- if ( var ## _pointer == 0) { \
- if(NSIsSymbolNameDefined( STRINGIFY(_ ## var) )) \
- _dyld_lookup_and_bind( STRINGIFY(_ ## var), \
- (unsigned long *) & var ## _pointer, 0); \
- else { \
- char *progname = _dyld_get_image_name(0); \
- if(_priv_ ## var = strrchr(progname, '/')) \
- _priv_ ## var ++; \
- else \
- _priv_ ## var = progname; \
- var ## _pointer = & _priv_ ## var; \
- } \
- }
#define USE_VAR(var) (var ## _pointer)
#else
#define DECLARE_VAR(var, type) extern type var
#define DECLARE_PROGNAME(var, type) DECLARE_VAR(var, type)
-#define SETUP_VAR(var)
-#define SETUP_PROGNAME(var) SETUP_VAR(var)
#define USE_VAR(var) (& var)
#endif
DECLARE_PROGNAME(__progname, char *);
char ***_NSGetArgv(void) {
- SETUP_VAR(NXArgv);
return(USE_VAR(NXArgv));
}
int *_NSGetArgc(void) {
- SETUP_VAR(NXArgc);
return(USE_VAR(NXArgc));
}
char ***_NSGetEnviron(void) {
- SETUP_VAR(environ);
return(USE_VAR(environ));
}
char **_NSGetProgname(void) {
- SETUP_PROGNAME(__progname);
return(USE_VAR(__progname));
}
struct mach_header *_NSGetMachExecuteHeader(void) {
- SETUP_VAR(_mh_execute_header);
return(USE_VAR(_mh_execute_header));
}
char** __prognamePtr;
};
+
+#define SUPPORT_PRE_GM_10_5_EXECUTABLES (__ppc__ || __i386__)
+
+
/*
* dyld calls libSystem_initializer() and passes it a ProgramVars struct containing pointers to the
* main executable's NXArg* global variables. libSystem_initializer() calls __libc_init() which calls
*/
void __attribute__((visibility("hidden")))
_program_vars_init(const struct ProgramVars* vars) {
+#if SUPPORT_PRE_GM_10_5_EXECUTABLES
// to support transitional 10.5 main executables that don't have extended __dyld section and instead call _NSSetProgramVars,
// don't overwrite values set by _NSSetProgramVars()
if ( NXArgv_pointer != NULL )
return;
+#endif
NXArgv_pointer = vars->NXArgvPtr;
NXArgc_pointer = vars->NXArgcPtr;
environ_pointer = vars->environPtr;
_mh_execute_header_pointer = vars->mh;
}
+#if SUPPORT_PRE_GM_10_5_EXECUTABLES
/*
* This is only called by main executables built with pre 10-5 GM crt1.10.5.o. In those programs,
* there is no extended __dyld section, dyld cannot tell _program_vars_init() where the real program
__progname_pointer = crt_progname;
_mh_execute_header_pointer = crt_mh;
}
-#endif
+#endif
+#endif /* __DYNAMIC__ */
+#if __ppc__
/*
* Fix for Radar bug 2200596 --
* EH symbol definitions for gcc 2.7.2.x implementation of
/* This is what egcs uses for its global data pointer */
void *__eh_global_dataptr = (void *)0;
+#endif /* __ppc__ */
+
case F_LOG2PHYS:
case F_GETPATH:
case F_PATHPKG_CHECK:
+ case F_OPENFROM:
+ case F_UNLINKFROM:
+ case F_ADDSIGS:
arg = va_arg(ap, void *);
break;
default:
--- /dev/null
+/*
+ * Copyright (c) 2008 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+#define _DARWIN_C_SOURCE
+
+#include <errno.h>
+#include <pwd.h>
+#include <stdint.h>
+#include <unistd.h>
+
+int32_t getgroupcount(const char *name, gid_t basegid);
+
+/*
+ * getgroups extension; not limited by NGROUPS_MAX
+ */
+int
+getgroups(int gidsetsize, gid_t grouplist[])
+{
+ struct passwd *pw;
+ int n;
+
+ if ((pw = getpwuid(getuid())) == NULL) {
+ errno = EINVAL;
+ return -1;
+ }
+ if (gidsetsize == 0) {
+ if ((n = getgroupcount(pw->pw_name, pw->pw_gid)) == 0) {
+ errno = EINVAL;
+ return -1;
+ }
+ return n;
+ }
+ n = gidsetsize;
+ if (getgrouplist(pw->pw_name, pw->pw_gid, (int *)grouplist, &n) < 0) {
+ errno = EINVAL;
+ return -1;
+ }
+ return n;
+}
/*
- * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2006, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <unistd.h>
#include <errno.h>
-int lchown(const char *, uid_t, gid_t);
+int __lchown(const char *, uid_t, gid_t);
/*
* lchown stub, legacy version
+___sandbox_me ___mac_execve
+___sandbox_mm ___mac_mount
+___sandbox_ms ___mac_syscall
+___sandbox_msp ___mac_set_proc
__exit ___exit
_accessx_np ___access_extended
_getsgroups_np ___getsgroups
/*
- * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2005, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
*/
#include <sys/types.h>
-/*
- * We need O_NOCTTY from fcntl.h, but that would also drag in the variadic
- * prototype for open(), and so we'd have to use stdarg.h to get the mode.
- * So open.h just contains O_NOCTTY, which it gets from fcntl.h.
- *
- * This is for legacy only.
- */
-#include "open.h"
+#include <fcntl.h>
+#include <stdarg.h>
int __open_nocancel(const char *path, int flags, mode_t mode);
-int open(const char *path, int flags, mode_t mode) LIBC_ALIAS_C(open);
/*
* open stub: The legacy interface never automatically associated a controlling
* tty, so we always pass O_NOCTTY.
*/
int
-open(const char *path, int flags, mode_t mode)
+open(const char *path, int flags, ...)
{
+ mode_t mode = 0;
+
+ if(flags & O_CREAT) {
+ va_list ap;
+ va_start(ap, flags);
+ // compiler warns to pass int (not mode_t) to va_arg
+ mode = va_arg(ap, int);
+ va_end(ap);
+ }
return(__open_nocancel(path, flags | O_NOCTTY, mode));
}
/*
- * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2006-2008 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <sys/types.h> /* for user_size_t */
#include <spawn.h>
+#include <spawn_private.h>
#include <sys/spawn_internal.h>
#include <stdlib.h>
#include <errno.h>
/* Default is no port actions to take */
(*psattrp)->psa_ports = NULL;
+
+ /*
+ * The default value of this attribute shall be an no
+ * process control on resource starvation
+ */
+ (*psattrp)->psa_pcontrol = 0;
}
return (err);
* NOTIMP: Allowed failures (checking NOT required):
* EINVAL The value specified by attr is invalid.
*/
+int posix_spawn_destroyportactions_np(posix_spawnattr_t *);
+
int
posix_spawnattr_destroy(posix_spawnattr_t *attr)
{
*ocount = i;
return 0;
}
+
+
+/*
+ * posix_spawnattr_getpcontrol_np
+ *
+ * Description: Retrieve the process control property set default according to
+ * the spawn attribute value referenced by 'attr' and place the
+ * result into the memory containing the control referenced by
+ * 'pcontrol'
+ *
+ * Parameters: attr The spawn attributes object whose
+ * signal set for default signals is to
+ * be retrieved
+ * pcontrol A pointer to an int to receive
+ * the process control info
+ *
+ * Returns: 0 Success
+ *
+ * Implicit Returns:
+ * *pcontrol (modified) The signal set of signals to default
+ * from the spawn attributes object
+ */
+int
+posix_spawnattr_getpcontrol_np(const posix_spawnattr_t * __restrict attr,
+ int * __restrict pcontrol)
+{
+ _posix_spawnattr_t psattr;
+
+ if (attr == NULL || *attr == NULL)
+ return EINVAL;
+
+ psattr = *(_posix_spawnattr_t *)attr;
+ *pcontrol = psattr->psa_pcontrol;
+
+ return (0);
+}
+
/*
* posix_spawnattr_setsigdefault
*
return 0;
}
+
+/*
+ * posix_spawnattr_setpcontrol_np
+ *
+ * Description: Set the process control property according to
+ * attribute value referenced by 'attr' from the memory
+ * containing the int value 'pcontrol'
+ *
+ * Parameters: attr The spawn attributes object whose
+ * signal set for default signals is to
+ * be set
+ * pcontrol An int value of the process control info
+ *
+ * Returns: 0 Success
+ */
+int
+posix_spawnattr_setpcontrol_np(posix_spawnattr_t * __restrict attr,
+ const int pcontrol)
+{
+ _posix_spawnattr_t psattr;
+
+ if (attr == NULL || *attr == NULL)
+ return EINVAL;
+
+ psattr = *(_posix_spawnattr_t *)attr;
+ psattr->psa_pcontrol = pcontrol;
+
+ return (0);
+}
/*
* posix_spawn_createportactions_np
* Description: create a new posix_spawn_port_actions struct and link
return err;
}
+/*
+ * posix_spawnattr_setauditsessionport_np
+ *
+ * Description: Set the audit session port rights attribute in the spawned task.
+ * This is used to securely set the audit session information for
+ * the new task.
+ *
+ * Parameters: attr The spawn attributes object for the
+ * new process
+ * au_sessionport The audit session send port right
+ *
+ * Returns: 0 Success
+ */
+int
+posix_spawnattr_setauditsessionport_np(
+ posix_spawnattr_t *attr,
+ mach_port_t au_sessionport)
+{
+ _posix_spawnattr_t psattr;
+ int err = 0;
+ _ps_port_action_t *action;
+ _posix_spawn_port_actions_t ports;
+
+ if (attr == NULL || *attr == NULL)
+ return EINVAL;
+
+ psattr = *(_posix_spawnattr_t *)attr;
+ ports = psattr->psa_ports;
+ /* Have any port actions been created yet? */
+ if (ports == NULL) {
+ err = posix_spawn_createportactions_np(attr);
+ if (err)
+ return err;
+ ports = psattr->psa_ports;
+ }
+
+ /* Is there enough room? */
+ if (ports->pspa_alloc == ports->pspa_count) {
+ err = posix_spawn_growportactions_np(attr);
+ if (err)
+ return err;
+ }
+
+ /* Add this action to next spot in array */
+ action = &ports->pspa_actions[ports->pspa_count];
+ action->port_type = PSPA_AU_SESSION;
+ action->new_port = au_sessionport;
+
+ ports->pspa_count++;
+ return err;
+}
+
/*
* posix_spawn_file_actions_init
--- /dev/null
+/*
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <libkern/OSAtomic.h>
+
+#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__)
+static int64_t __remove_counter = 0;
+#else
+static int32_t __remove_counter = 0;
+#endif
+
+uint64_t
+__get_remove_counter(void) {
+#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__)
+ return (uint64_t)OSAtomicAdd64Barrier(0, &__remove_counter);
+#else
+ return (uint64_t)OSAtomicAdd32Barrier(0, &__remove_counter);
+#endif
+}
+
+__private_extern__ void
+__inc_remove_counter(void)
+{
+#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__)
+ (void)OSAtomicAdd64(1, &__remove_counter);
+#else
+ (void)OSAtomicAdd32(1, &__remove_counter);
+#endif
+}
--- /dev/null
+/*
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <stdio.h>
+
+void __inc_remove_counter(void);
+int __rename(const char *old, const char *new);
+
+int
+rename(const char *old, const char *new)
+{
+ int res = __rename(old, new);
+ if (res == 0) __inc_remove_counter();
+ return res;
+}
--- /dev/null
+/*
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <unistd.h>
+
+void __inc_remove_counter(void);
+int __rmdir(const char *path);
+
+int
+rmdir(const char *path)
+{
+ int res = __rmdir(path);
+ if (res == 0) __inc_remove_counter();
+ return res;
+}
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* @APPLE_LICENSE_HEADER_END@
*/
+#include <sys/cdefs.h>
+
#ifdef __APPLE_PR3375657_HACK__
#include <stdio.h>
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* @APPLE_LICENSE_HEADER_END@
*/
+#include <sys/cdefs.h>
+
#ifdef __APPLE_PR3375657_HACK__
#include <stdio.h>
#include <stdarg.h>
#include <sys/sem.h>
-#if !__DARWIN_UNIX03
#include <errno.h>
/*
* Because KERNEL is defined, including errno.h doesn't define errno, so
*/
extern int * __error(void);
#define errno (*__error())
-#endif /* !__DARWIN_UNIX03 */
/*
- * Stub function to account for the differences in the ipc_perm structure,
+ * Legacy stub to account for the differences in the ipc_perm structure,
* while maintaining binary backward compatibility.
*/
extern int __semctl(int semid, int semnum, int cmd, void *);
va_list ap;
int rv;
int val = 0;
-#if __DARWIN_UNIX03
- struct __semid_ds_new *ds;
-
- va_start(ap, cmd);
- if (cmd == SETVAL) {
- val = va_arg(ap, int);
- rv = __semctl(semid, semnum, cmd, (void *)val);
- } else {
- ds = va_arg(ap, struct __semid_ds_new *);
- rv = __semctl(semid, semnum, cmd, (void *)ds);
- }
- va_end(ap);
-
- return rv;
-#else /* !__DARWIN_UNIX03 */
struct __semid_ds_new ds;
struct __semid_ds_new *ds_new = &ds;
struct __semid_ds_old *ds_old = NULL;
}
return (rv);
-#endif /* !__DARWIN_UNIX03 */
}
/*
- * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2006, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <sys/socket.h>
#include <errno.h>
-ssize_t __sendmsg__nocancel(int, const struct msghdr *, int);
+ssize_t __sendmsg_nocancel(int, const struct msghdr *, int);
/*
* sendmsg stub, legacy version
--- /dev/null
+/*
+ * Copyright (c) 2008 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <sys/time.h>
+#include <notify.h>
+#include <notify_keys.h>
+
+#ifndef kNotifyClockSet
+#define kNotifyClockSet "com.apple.system.clock_set"
+#endif
+
+int __settimeofday(const struct timeval *tp, const struct timezone *tzp);
+
+/*
+ * settimeofday stub, legacy version
+ */
+int
+settimeofday(const struct timeval *tp, const struct timezone *tzp)
+{
+ int ret = __settimeofday(tp, tzp);
+ if (ret == 0) notify_post(kNotifyClockSet);
+
+ return ret;
+}
.\"
.\" @APPLE_LICENSE_HEADER_END@
.\"
-.Dd September 20, 1999
+.Dd August 29, 2008
.Dt SHM_OPEN 2
.Os Darwin
.Sh NAME
it unlinked and all other references are gone. Objects do
not persist across a system reboot.
.Pp
-The new descriptor is set to remain open across
-.Xr execve
-system calls; see
-.Xr close 2
-and
-.Xr fcntl 2 .
-.Pp
The system imposes a limit on the number of file descriptors
open simultaneously by one process.
.Xr Getdtablesize 2
The process has already reached its limit for open file descriptors.
.It Bq Er ENAMETOOLONG
.Fa name
-exceeded
-.Dv SHM_NAME_MAX
-characters.
+exceeded the name size limit.
+This is currently
+.Dv PSHMNAMLEN
+characters (defined in
+.In sys/posix_shm.h ) ,
+but this may change in the future.
.It Bq Er ENFILE
The system file table is full.
.It Bq Er ENOENT
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* @APPLE_LICENSE_HEADER_END@
*/
+#include <sys/cdefs.h>
+
#ifdef __APPLE_PR3375657_HACK__
#include <stdio.h>
Permission is denied to remove the object.
.It Bq Er ENAMETOOLONG
.Fa name
-exceeded
-.Dv SHM_NAME_MAX
-characters.
+exceeded the name size limit.
+This is currently
+.Dv PSHMNAMLEN
+characters (defined in
+.In sys/posix_shm.h ) ,
+but this may change in the future.
.It Bq Er ENOENT
The named object does not exist.
.El
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2009 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* @APPLE_LICENSE_HEADER_END@
*/
+#include <sys/cdefs.h>
+
#ifdef __APPLE_PR3375657_HACK__
#include <stdio.h>
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
/*
* Actually declare the global data.
*/
-sigcatch_t sigcatch[PADDING] = { (void (*)(int, int, struct sigcontext *))0 };
+sigcatch_t sigcatch[PADDING] = { (sigcatch_t)0 };
#include <string.h>
#include <stdio.h>
-#define ACL_MIN_SIZE_HEURISTIC (sizeof(struct kauth_filesec) + 16 * sizeof(struct kauth_ace))
+#define ACL_MIN_SIZE_HEURISTIC (KAUTH_FILESEC_SIZE(16))
static int statx_syscall(void *obj, void *sbptr, void *fsacl, size_t *fsacl_size);
static int fstatx_syscall(void *obj, void *sbptr, void *fsacl, size_t *fsacl_size);
int (* stat_syscall)(void *obj, void *sbptr, void *fsacl, size_t *fsacl_size),
void *sbptr, filesec_t fsec)
{
- struct kauth_filesec *fsacl, *ofsacl;
+ kauth_filesec_t fsacl, ofsacl;
size_t fsacl_size, buffer_size;
int error;
struct stat * sb = (struct stat *)0;
--- /dev/null
+/*
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <unistd.h>
+
+void __inc_remove_counter(void);
+int __unlink(const char *path);
+
+int
+unlink(const char *path)
+{
+ int res = __unlink(path);
+ if (res == 0) __inc_remove_counter();
+ return res;
+}
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999, 2008 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#undef errno
extern int errno;
extern int *__error(void);
+extern int __pthread_canceled(int);
void
cthread_set_errno_self(error)
extern void mig_init();
extern void _pthread_set_self(pthread_t);
+extern void pthread_workqueue_atfork_prepare(void);
+extern void pthread_workqueue_atfork_parent(void);
+extern void pthread_workqueue_atfork_child(void);
/*
* Mach imports:
*/
extern void fork_mach_init();
extern void _cproc_fork_child(), _stack_fork_child();
extern void _lu_fork_child(void);
+extern void _asl_fork_child(void);
extern void _pthread_fork_child(pthread_t);
extern void _notify_fork_child(void);
psaved_self = pthread_self();
_spin_lock(&psaved_self->lock);
_malloc_fork_prepare();
+
+ pthread_workqueue_atfork_prepare();
}
void _cthread_fork_parent()
}
_spin_unlock(&pthread_atfork_lock);
+ pthread_workqueue_atfork_parent();
}
void _cthread_fork_child()
_cproc_fork_child();
_lu_fork_child();
-
+ _asl_fork_child();
_notify_fork_child();
__is_threaded = 0;
mig_init(1); /* enable multi-threaded mig interfaces */
+ pthread_workqueue_atfork_child();
+
TAILQ_FOREACH(e, &pthread_atfork_queue, qentry) {
if (e->child != NULL)
e->child();
mach_port_t _lu_port = MACH_PORT_NULL;
mach_port_t _ds_port = MACH_PORT_NULL;
+mach_port_t _mbr_port = MACH_PORT_NULL;
static name_t LOOKUP_NAME = "lookup daemon v2";
#ifndef kDSStdMachDSLookupPortName
#define kDSStdMachDSLookupPortName "com.apple.system.DirectoryService.libinfo_v1"
+#define kDSStdMachDSMembershipPortName "com.apple.system.DirectoryService.membership_v1"
#endif
mach_port_t
{
_lu_port = MACH_PORT_NULL;
_ds_port = MACH_PORT_NULL;
+ _mbr_port = MACH_PORT_NULL;
}
void
kern_return_t status;
if (_ds_port != MACH_PORT_NULL) return 1;
-
+
status = bootstrap_look_up(bootstrap_port, kDSStdMachDSLookupPortName, &_ds_port);
if ((status != BOOTSTRAP_SUCCESS) && (status != BOOTSTRAP_UNKNOWN_SERVICE)) _ds_port = MACH_PORT_NULL;
+ status = bootstrap_look_up(bootstrap_port, kDSStdMachDSMembershipPortName, &_mbr_port);
+ if ((status != BOOTSTRAP_SUCCESS) && (status != BOOTSTRAP_UNKNOWN_SERVICE)) _mbr_port = MACH_PORT_NULL;
+
return (_ds_port != MACH_PORT_NULL);
}
if (port != MACH_PORT_NULL && port != _task_reply_port) {
LOCK(reply_port_lock);
pself->reply_port = _task_reply_port;
- (void) mach_port_destroy(mach_task_self(), port);
+ (void) mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
pself->reply_port = MACH_PORT_NULL;
UNLOCK(reply_port_lock);
}
if (port != MACH_PORT_NULL && port != _task_reply_port) {
LOCK(reply_port_lock);
self->reply_port = _task_reply_port;
- (void) mach_port_destroy(mach_task_self(), port);
+ (void) mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
self->reply_port = MACH_PORT_NULL;
UNLOCK(reply_port_lock);
}
strncpy(utx.ut_line, line, sizeof(utx.ut_line));
utx.ut_type = UTMPX_AUTOFILL_MASK | UTMPX_DEAD_IF_CORRESPONDING_MASK | DEAD_PROCESS;
(void)gettimeofday(&utx.ut_tv, NULL);
- setutxent();
+ UTMPX_LOCK;
+ _setutxent();
ux = _pututxline(&utx);
- endutxent();
- if (!ux)
+ _endutxent();
+ if (!ux) {
+ UTMPX_UNLOCK;
return 0;
+ }
#ifdef UTMP_COMPAT
if (utfile_system) { /* only if we are using _PATH_UTMPX */
which = _utmp_compat(ux, &u);
_write_utmp(&u, 1);
}
#endif /* UTMP_COMPAT */
+ UTMPX_UNLOCK;
return 1;
}
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd November 4, 1996
+.Dd April 5, 2008
.Dt OPENPTY 3
.Os
.Sh NAME
.Pp
The
.Fn openpty
-function finds an available pseudo-tty and returns file descriptors
+function allocates a pseudo-tty and returns file descriptors
for the master and slave in
.Fa amaster
and
.Fn openpty
will fail if:
.Bl -tag -width Er
-.It Bq Er ENOENT
-There are no available ttys.
+.It Bq Er EAGAIN
+There are no available pseudo-ttys.
.El
.Pp
.Fn login_tty
.Fn fork
fails.
.Sh FILES
-.Bl -tag -width /dev/[pt]ty[pqrstuwxyzPQRST][0123456789abcdef] -compact
-.It Pa /dev/[pt]ty[pqrstuwxyzPQRST][0123456789abcdef]
+.Bl -tag -width /dev/ttys[0-9][0-9][0-9] -compact
+.It Pa /dev/ptmx
+cloning pseudo-tty device
+.It Pa /dev/ttys[0-9][0-9][0-9]
+slave pseudo-tty devices
.El
.Sh SEE ALSO
.Xr fork 2
-.Sh BUGS
-The names of the virtual consoles for the i386 PCVT console driver
-conflict with what would be the seventh group of pseudo-ttys, so
-.Fn openpty
-skips
-.Pa /dev/[pt]tyv[0123456789abcdef]
-while looking for pseudo-ttys.
function converts the supplied UUID
.I uu
from the binary representation into a 36\-byte string (plus trailing '\\0')
-of the form 1b4e28ba\-2fa1\-11d2\-883f\-b9a76 and stores this value in the
+of the form 1b4e28ba\-2fa1\-11d2\-883f\-b9a761bde3fb and stores this value in the
character string pointed to by
.IR out .
The case of the hex digits returned by
function converts the supplied UUID
.I uu
from the binary representation into a 36\-byte string (plus trailing '\\0')
-of the form 1b4e28ba\-2fa1\-11d2\-883f\-b9a76 and stores this value in the
+of the form 1b4e28ba\-2fa1\-11d2\-883f\-b9a761bde3fb and stores this value in the
character string pointed to by
.IR out .
The case of the hex digits returned by
---- uuid_unparse.3.in 2004-06-02 17:18:30.000000000 -0700
-+++ uuid_unparse.3.in.edit 2006-09-07 18:22:58.000000000 -0700
+--- uuid_unparse.3.in.orig 2008-09-25 10:43:40.000000000 -0700
++++ uuid_unparse.3.in 2008-09-25 10:51:45.000000000 -0700
@@ -36,9 +36,18 @@
.nf
.B #include <uuid/uuid.h>
.fi
.SH DESCRIPTION
The
+@@ -46,7 +55,7 @@
+ function converts the supplied UUID
+ .I uu
+ from the binary representation into a 36\-byte string (plus tailing '\\0')
+-of the form 1b4e28ba\-2fa1\-11d2\-883f\-b9a76 and stores this value in the
++of the form 1b4e28ba\-2fa1\-11d2\-883f\-b9a761bde3fb and stores this value in the
+ character string pointed to by
+ .IR out .
+ The case of the hex digits returned by
# searching i386 directory as a fallback to avoid unnecessary code duplication
.PATH: ${.CURDIR}/x86_64/gen ${.CURDIR}/i386/gen
-MDSRCS+= icacheinval.s \
+MDSRCS+= _ctx_start.S \
+ _setcontext.S \
+ getcontext.S \
+ getmcontext.c \
+ icacheinval.s \
+ makecontext.c \
mcount.s \
- setjmperr.c
+ setcontext.c \
+ setjmperr.c \
+ swapcontext.c
+
+.for _src in makecontext.c setcontext.c swapcontext.c
+CFLAGS-${_src} += -fomit-frame-pointer
+# -pg and -fomit-frame-pointer don't work together, so just use -g
+${_src:R}.po: ${_src} _STANDARD_DEBUG
+.endfor
--- /dev/null
+/*
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__x86_64__)
+
+#include <architecture/i386/asm_help.h>
+
+/*
+ * _ctx_start((void *func)(int arg1, ..., argn),
+ * int arg1, ..., argn, ucontext_t *ucp)
+ *
+ * %rdi - func
+ * %rsi - arg1
+ * %rdx - arg2
+ * %rcx - arg3
+ * %r8 - arg4
+ * %r9 - arg5
+ * WRONG!
+ * (8*(n-6))(%rsp) - argn
+ * (8*(n + 1))(%rsp) - ucp, %rbp setup to point here (base of stack)
+ */
+TEXT
+LABEL(__ctx_start)
+ popq %rax /* accounted for in makecontext() */
+ /* makecontext will simulate 6 parameters at least */
+ /* Or it could just set these in the mcontext... */
+ popq %rdi
+ popq %rsi
+ popq %rdx
+ popq %rcx
+ popq %r8
+ popq %r9
+
+ callq *%rax /* call start function */
+ movq %r12, %rsp /*
+ * setup stack for completion routine;
+ * ucp is now at top of stack
+ */
+ movq (%rsp), %rdi
+ CALL_EXTERN(__ctx_done) /* should never return */
+ int $5 /* trap */
+
+#endif /* __x86_64__ */
--- /dev/null
+/*
+ * Copyright (c) 2007,2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#if defined(__x86_64__)
+
+#include <architecture/i386/asm_help.h>
+
+#define MCONTEXT_SS_RAX 16
+#define MCONTEXT_SS_RBX 24
+#define MCONTEXT_SS_RCX 32
+#define MCONTEXT_SS_RDX 40
+#define MCONTEXT_SS_RDI 48
+#define MCONTEXT_SS_RSI 56
+#define MCONTEXT_SS_RBP 64
+#define MCONTEXT_SS_RSP 72
+#define MCONTEXT_SS_R8 80
+#define MCONTEXT_SS_RIP 144
+#define MCONTEXT_SS_RFLAGS 152
+
+TEXT
+LABEL(__setcontext)
+ /* struct mcontext_t * %rdi */
+#if DEBUG
+ movq MCONTEXT_SS_RSI(%rdi), %rsi
+ movq MCONTEXT_SS_RCX(%rdi), %rcx
+ movq MCONTEXT_SS_R8+00(%rdi), %r8
+ movq MCONTEXT_SS_R8+08(%rdi), %r9
+ movq MCONTEXT_SS_R8+16(%rdi), %r10
+ movq MCONTEXT_SS_R8+24(%rdi), %r11
+#endif
+ movq MCONTEXT_SS_RBX(%rdi), %rbx
+ movq MCONTEXT_SS_R8+32(%rdi), %r12
+ movq MCONTEXT_SS_R8+40(%rdi), %r13
+ movq MCONTEXT_SS_R8+48(%rdi), %r14
+ movq MCONTEXT_SS_R8+56(%rdi), %r15
+
+ movq MCONTEXT_SS_RSP(%rdi), %rsp
+ movq MCONTEXT_SS_RBP(%rdi), %rbp
+
+ xorl %eax, %eax /* force x=getcontext(); ... setcontext(); to keep x==0 */
+
+#if DEBUG
+ movq MCONTEXT_SS_RIP(%rdi), %rdx
+ movq MCONTEXT_SS_RDI(%rdi), %rdi
+ jmp *%rdx
+#else
+ jmp *MCONTEXT_SS_RIP(%rdi)
+#endif
+
+#endif /* __x86_64__ */
--- /dev/null
+/*
+ * Copyright (c) 2007,2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#if defined(__x86_64__)
+
+#include <architecture/i386/asm_help.h>
+
+#define MCONTEXT_SS_RAX 16
+#define MCONTEXT_SS_RBX 24
+#define MCONTEXT_SS_RCX 32
+#define MCONTEXT_SS_RDX 40
+#define MCONTEXT_SS_RDI 48
+#define MCONTEXT_SS_RSI 56
+#define MCONTEXT_SS_RBP 64
+#define MCONTEXT_SS_RSP 72
+#define MCONTEXT_SS_R8 80
+#define MCONTEXT_SS_RIP 144
+#define MCONTEXT_SS_RFLAGS 152
+
+TEXT
+LABEL(_getcontext)
+ /* struct ucontext_t * $rdi */
+ push %rbp
+ movq %rsp, %rbp
+ movq %rsp, %rsi
+ CALL_EXTERN(_getmcontext) /* getmcontext(uctx, sp) */
+ pop %rbp
+
+#if DEBUG
+ movq $0, MCONTEXT_SS_RAX(%rax)
+ movq $0, MCONTEXT_SS_RDX(%rax)
+ movq $0, MCONTEXT_SS_RCX(%rax)
+ movq $0, MCONTEXT_SS_RDI(%rax)
+ movq $0, MCONTEXT_SS_RSI(%rax)
+ movq $0, MCONTEXT_SS_R8(%rax)
+ movq $0, MCONTEXT_SS_R8+8(%rax)
+ movq $0, MCONTEXT_SS_R8+16(%rax)
+ movq $0, MCONTEXT_SS_R8+24(%rax)
+ movq $0, MCONTEXT_SS_RFLAGS(%rax)
+#endif
+
+ movq %rbp, MCONTEXT_SS_RBP(%rax)
+ movq %rbx, MCONTEXT_SS_RBX(%rax)
+ movq %r12, MCONTEXT_SS_R8+32(%rax)
+ movq %r13, MCONTEXT_SS_R8+40(%rax)
+ movq %r14, MCONTEXT_SS_R8+48(%rax)
+ movq %r15, MCONTEXT_SS_R8+56(%rax)
+ movq (%rsp), %rcx /* return address */
+ movq %rcx, MCONTEXT_SS_RIP(%rax)
+ leaq 8(%rsp), %rcx
+ movq %rcx, MCONTEXT_SS_RSP(%rax)
+ xorl %eax, %eax
+ ret
+
+#endif /* __x86_64__ */
--- /dev/null
+/*
+ * Copyright (c) 2007, 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#if defined(__x86_64__)
+
+#define _XOPEN_SOURCE 600L
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+#include <signal.h>
+#include <ucontext.h>
+
+extern size_t pthread_get_stacksize_np(pthread_t);
+extern void *pthread_get_stackaddr_np(pthread_t);
+#ifdef __DYNAMIC__
+extern int __in_sigtramp;
+#endif /* __DYNAMIC__ */
+
+__private_extern__ mcontext_t
+getmcontext(ucontext_t *uctx, void *sp)
+{
+ pthread_t self = pthread_self();
+ mcontext_t mctx = (mcontext_t)&uctx->__mcontext_data;
+ size_t stacksize = pthread_get_stacksize_np(self);
+ stack_t stack;
+
+ uctx->uc_stack.ss_sp = sp;
+ uctx->uc_stack.ss_flags = 0;
+
+ if (0 == sigaltstack(NULL, &stack)) {
+ if (stack.ss_flags & SS_ONSTACK) {
+ uctx->uc_stack = stack;
+ stacksize = stack.ss_size;
+ }
+ }
+
+ if (stacksize == 0) { /* main thread doesn't have pthread stack size */
+ struct rlimit rlim;
+ if (0 == getrlimit(RLIMIT_STACK, &rlim))
+ stacksize = rlim.rlim_cur;
+ }
+
+ uctx->uc_stack.ss_size = stacksize;
+
+ if (uctx->uc_mcontext != mctx) {
+ uctx->uc_mcontext = mctx;
+
+#ifdef __DYNAMIC__
+ uctx->uc_link = (ucontext_t*)__in_sigtramp; /* non-zero if in signal handler */
+#else /* !__DYNAMIC__ */
+ uctx->uc_link = 0;
+#endif /* __DYNAMIC__ */
+
+ }
+
+ sigprocmask(0, NULL, &uctx->uc_sigmask);
+ return mctx;
+}
+
+#endif /* __x86_64__ */
--- /dev/null
+/*
+ * Copyright (c) 2007, 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Copyright (c) 2001 Daniel M. Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__x86_64__)
+
+#define _XOPEN_SOURCE
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/signal.h>
+#include <sys/ucontext.h>
+
+#include <errno.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ucontext.h>
+#include <unistd.h>
+
+/* Prototypes */
+extern void _ctx_start(ucontext_t *, int argc, ...);
+
+void
+_ctx_done (ucontext_t *ucp)
+{
+ if (ucp->uc_link == NULL)
+ exit(0);
+ else {
+ /*
+ * Since this context has finished, don't allow it
+ * to be restarted without being reinitialized (via
+ * setcontext or swapcontext).
+ */
+ ucp->uc_mcsize = 0;
+
+ /* Set context to next one in link */
+ /* XXX - what to do for error, abort? */
+ setcontext((const ucontext_t *)ucp->uc_link);
+ LIBC_ABORT("setcontext failed"); /* should never get here */
+ }
+}
+
+void
+makecontext(ucontext_t *ucp, void (*start)(), int argc, ...)
+{
+ va_list ap;
+ char *stack_top;
+ intptr_t *argp;
+ int i;
+
+ if (ucp == NULL)
+ return;
+ else if ((ucp->uc_stack.ss_sp == NULL) ||
+ (ucp->uc_stack.ss_size < MINSIGSTKSZ)) {
+ /*
+ * This should really return -1 with errno set to ENOMEM
+ * or something, but the spec says that makecontext is
+ * a void function. At least make sure that the context
+ * isn't valid so it can't be used without an error.
+ */
+ ucp->uc_mcsize = 0;
+ }
+ /* XXX - Do we want to sanity check argc? */
+ else if ((argc < 0) || (argc > NCARGS)) {
+ ucp->uc_mcsize = 0;
+ }
+ /* Make sure the context is valid. */
+ else {
+ /*
+ * Arrange the stack as follows:
+ *
+ * _ctx_start() - context start wrapper
+ * start() - user start routine
+ * arg1 - first argument, aligned(16)
+ * ...
+ * argn
+ * ucp - this context, %rbp points here
+ *
+ * When the context is started, control will return to
+ * the context start wrapper which will pop the user
+ * start routine from the top of the stack. After that,
+ * the top of the stack will be setup with all arguments
+ * necessary for calling the start routine. When the
+ * start routine returns, the context wrapper then sets
+ * the stack pointer to %rbp which was setup to point to
+ * the base of the stack (and where ucp is stored). It
+ * will then call _ctx_done() to swap in the next context
+ * (uc_link != 0) or exit the program (uc_link == 0).
+ */
+ mcontext_t mc;
+
+ stack_top = (char *)(ucp->uc_stack.ss_sp +
+ ucp->uc_stack.ss_size - sizeof(intptr_t));
+
+
+ /* Give 6 stack slots to _ctx_start */
+ int minargc = 6;
+ if (argc > minargc)
+ minargc = argc;
+
+ /*
+ * Adjust top of stack to allow for 3 pointers (return
+ * address, _ctx_start, and ucp) and argc arguments.
+ * We allow the arguments to be pointers also. The first
+ * argument to the user function must be properly aligned.
+ */
+
+ stack_top = stack_top - (sizeof(intptr_t) * (1 + minargc));
+ stack_top = (char *)((intptr_t)stack_top & ~15);
+ stack_top = stack_top - (2 * sizeof(intptr_t));
+ argp = (intptr_t *)stack_top;
+
+ /*
+ * Setup the top of the stack with the user start routine
+	 * followed by all of its arguments and the pointer to the
+ * ucontext. We need to leave a spare spot at the top of
+ * the stack because setcontext will move rip to the top
+ * of the stack before returning.
+ */
+ *argp = (intptr_t)_ctx_start; /* overwritten with same value */
+ argp++;
+ *argp = (intptr_t)start;
+ argp++;
+
+ /* Add all the arguments: */
+ va_start(ap, argc);
+ for (i = 0; i < argc; i++) {
+ *argp = va_arg(ap, intptr_t);
+ argp++;
+ }
+ va_end(ap);
+
+ /* Always provide space for ctx_start to pop the parameter registers */
+ for (;argc < minargc; argc++) {
+ *argp++ = 0;
+ }
+
+ /* Keep stack aligned */
+ if (argc & 1) {
+ *argp++ = 0;
+ }
+
+ /* The ucontext is placed at the bottom of the stack. */
+ *argp = (intptr_t)ucp;
+
+ /*
+ * Set the machine context to point to the top of the
+ * stack and the program counter to the context start
+ * wrapper. Note that setcontext() pushes the return
+ * address onto the top of the stack, so allow for this
+ * by adjusting the stack downward 1 slot. Also set
+ * %r12 to point to the base of the stack where ucp
+ * is stored.
+ */
+ mc = ucp->uc_mcontext;
+ /* Use callee-save and match _ctx_start implementation */
+ mc->__ss.__r12 = (intptr_t)argp;
+ mc->__ss.__rbp = 0;
+ mc->__ss.__rsp = (intptr_t)stack_top + sizeof(caddr_t);
+ mc->__ss.__rip = (intptr_t)_ctx_start;
+ }
+}
+
+#endif /* __x86_64__ */
--- /dev/null
+/*
+ * Copyright (c) 2007, 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#if defined(__x86_64__)
+
+#define _XOPEN_SOURCE 600L
+#include <ucontext.h>
+#undef _ANSI_SOURCE
+#include <signal.h>
+
+extern int _setcontext(const mcontext_t);
+
+int
+setcontext(const ucontext_t *uctx)
+{
+ mcontext_t mctx = (mcontext_t)&uctx->__mcontext_data;
+ ucontext_t *_uctx = (ucontext_t *)uctx;
+ if (mctx != _uctx->uc_mcontext)
+ _uctx->uc_mcontext = mctx;
+ sigsetmask(uctx->uc_sigmask);
+ return _setcontext(mctx);
+}
+
+#endif /* __x86_64__ */
--- /dev/null
+/*
+ * Copyright (c) 2007, 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Copyright (c) 2001 Daniel M. Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__x86_64__)
+
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/signal.h>
+#include <ucontext.h>
+
+#include <errno.h>
+#include <stddef.h>
+
+#define uc_flags uc_onstack
+#define UCF_SWAPPED 0x80000000
+
+int
+swapcontext(ucontext_t *oucp, const ucontext_t *ucp)
+{
+ int ret;
+
+ if ((oucp == NULL) || (ucp == NULL)) {
+ errno = EINVAL;
+ return (-1);
+ }
+ oucp->uc_flags &= ~UCF_SWAPPED;
+ ret = getcontext(oucp);
+ if ((ret == 0) && !(oucp->uc_flags & UCF_SWAPPED)) {
+ oucp->uc_flags |= UCF_SWAPPED;
+ ret = setcontext(ucp);
+ }
+ return (ret);
+}
+
+#endif /* __x86_64__ */
+++ /dev/null
-
-.PATH: ${.CURDIR}/x86_64/mach
-
-MDSRCS += mach_absolute_time.s
+++ /dev/null
-/*
- * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-#include <machine/cpu_capabilities.h>
-
-
- .text
- .align 2
- .globl _mach_absolute_time
-_mach_absolute_time:
- movq $(_COMM_PAGE_NANOTIME), %rax
- jmp *%rax
MDSRCS += \
init_cpu_capabilities.c \
get_cpu_capabilities.s \
+ pthread_mutex_lock.s \
pthread_set_self.s \
pthread_self.s \
pthread_getspecific.s \
--- /dev/null
+/*
+ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <machine/cpu_capabilities.h>
+
+
+ .text
+ .align 2
+ .globl __commpage_pthread_mutex_lock
+__commpage_pthread_mutex_lock:
+ movq $(_COMM_PAGE_MUTEX_LOCK), %rax
+ jmp *%rax
+
# Long double is 80 bits
-GDTOA_FBSDSRCS+=gdtoa_strtopx.c machdep_ldisx.c
+GDTOA_FBSDSRCS+=gdtoa-strtopx.c machdep_ldisx.c
strncpy.s \
strncmp.s \
memcmp.s \
- bcmp.s \
memset.s \
ffs.s
+
+SUPPRESSSRCS += bcmp.c
+++ /dev/null
-/*
- * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-/*
- * bcmp() is implemented in memcmp.s, as it is equivalent to memcmp() in OSX.
- * (The two symbols, bcmp and memcmp, have the same value.)
- * This empty file is here to prevent the Free BSD machine independent version
- * from building.
- */
LEAF(_bzero,0)
movq $(_COMM_PAGE_BZERO), %rax
jmp *%rax
+
+X_LEAF(___bzero, _bzero)
cmova %rcx,%rax // %eax = max(LHS offset, RHS offset);
movl $4096,%ecx
subl %eax,%ecx // get #bytes to next page crossing
- cmpl %edx,%ecx // will operand run out first?
+ cmpq %rdx,%rcx // will operand run out first?
cmova %edx,%ecx // get min(length remaining, bytes to page end)
movl %ecx,%eax
shrl $4,%ecx // get #chunks till end of operand or page
+++ /dev/null
-/*
- * Copyright (c) 1999-2005 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved
- *
- * File: SYS.h
- *
- * Definition of the user side of the UNIX system call interface
- * for x86-64.
- *
- * HISTORY
- * 12-3-92 Bruce Martin (Bruce_Martin@next.com)
- * Created.
- */
-
-/*
- * Headers
- */
-#include <sys/syscall.h>
-#include <architecture/i386/asm_help.h>
-#include <mach/i386/syscall_sw.h>
-
-#define UNIX_SYSCALL_SYSCALL \
- movq %rcx, %r10 ;\
- syscall
-
-/*
- * This is the same as UNIX_SYSCALL, but it can call an alternate error
- * return function. It's generic to support potential future callers.
- */
-#define UNIX_SYSCALL_ERR(name, nargs,error_ret) \
- .globl error_ret ;\
-LEAF(_##name, 0) ;\
- movl $ SYSCALL_CONSTRUCT_UNIX(SYS_##name), %eax ;\
- UNIX_SYSCALL_SYSCALL ;\
- jnb 2f ;\
- BRANCH_EXTERN(error_ret) ;\
-2:
-
-#define UNIX_SYSCALL(name, nargs) \
- .globl cerror ;\
-LEAF(_##name, 0) ;\
- movl $ SYSCALL_CONSTRUCT_UNIX(SYS_##name), %eax ;\
- UNIX_SYSCALL_SYSCALL ;\
- jnb 2f ;\
- BRANCH_EXTERN(cerror) ;\
-2:
-
-#define UNIX_SYSCALL_NONAME(name, nargs) \
- .globl cerror ;\
- movl $ SYSCALL_CONSTRUCT_UNIX(SYS_##name), %eax ;\
- UNIX_SYSCALL_SYSCALL ;\
- jnb 2f ;\
- BRANCH_EXTERN(cerror) ;\
-2:
-
-#define PSEUDO(pseudo, name, nargs) \
-LEAF(_##pseudo, 0) ;\
- UNIX_SYSCALL_NONAME(name, nargs)
*/
/* Copyright 1998 Apple Computer, Inc. */
-#include <SYS.h>
+#include <architecture/i386/asm_help.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
*/
#include <architecture/i386/asm_help.h>
-#include <SYS.h>
#define JB_RBX 0
#define JB_RBP 8
#define JB_FPCONTROL 76
#define JB_MASK 80
#define JB_SAVEMASK 84 // sigsetjmp/siglongjmp only
+#define JB_ONSTACK 88
+#define STACK_SSFLAGS 16 // offsetof(stack_t, ss_flags)
LEAF(_sigsetjmp, 0)
// %rdi is sigjmp_buf * jmpbuf;
CALL_EXTERN(_sigprocmask)
popq %rax // Save the mask
addq $8, %rsp // Restore the stack to before we align it
- popq %rdi // jmp_buf (struct sigcontext *)
- movq %rax, JB_MASK(%rdi)
+ movq (%rsp), %rdi // jmp_buf (struct sigcontext *). Leave pointer on the stack for _sigaltstack call)
+ movl %eax, JB_MASK(%rdi)
+
+ // Get current sigaltstack status (stack_t)
+ subq $32, %rsp // 24 bytes for a stack_t, + 8 for the jmp_buf pointer, + 8 is correctly aligned
+ movq %rsp, %rsi // oss
+ xorq %rdi, %rdi // ss == NULL
+ CALL_EXTERN(_sigaltstack) // sigaltstack(NULL, oss)
+ movl STACK_SSFLAGS(%rsp), %eax // oss.ss_flags
+ movq 32(%rsp), %rdi // jmpbuf (will be first argument to subsequent call)
+ movl %eax, JB_ONSTACK(%rdi) // Store ss_flags in jmpbuf
+ addq $40, %rsp // restore %rsp
+
L_do__setjmp:
BRANCH_EXTERN(__setjmp)
movq %rsp, %rsi // set = address where we stored the mask
xorq %rdx, %rdx // oset = NULL
CALL_EXTERN_AGAIN(_sigprocmask)
- addq $8, %rsp
- popq %rsi // Restore the value
- popq %rdi // Restore the jmp_buf
+
+ // Restore sigaltstack status
+ movq 16(%rsp), %rdi // Grab jmpbuf but leave it on the stack
+ movl JB_ONSTACK(%rdi), %edi // Pass old state to _sigunaltstack()
+ CALL_EXTERN(__sigunaltstack)
+ addq $8, %rsp // Restore stack
+ popq %rsi
+ popq %rdi // Pass jmpbuf to _longjmp
+
L_do__longjmp:
BRANCH_EXTERN(__longjmp) // else
END(_longjmp)