X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/39236c6e673c41db228275375ab7fdb0f837b292..HEAD:/bsd/dev/unix_startup.c

diff --git a/bsd/dev/unix_startup.c b/bsd/dev/unix_startup.c
index 0f36920f0..474da06aa 100644
--- a/bsd/dev/unix_startup.c
+++ b/bsd/dev/unix_startup.c
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
@@ -34,6 +34,7 @@
 #include 
+#include 
 #include 
 #include 
@@ -41,7 +42,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -52,19 +52,19 @@
 #include 
 #include 
 #include 
+#include 

 extern uint32_t kern_maxvnodes;
 extern vm_map_t mb_map;

-#if INET || INET6
+#if INET
 extern uint32_t tcp_sendspace;
 extern uint32_t tcp_recvspace;
 #endif

 void bsd_bufferinit(void);
-extern void md_prepare_for_shutdown(int, int, char *);
-unsigned int bsd_mbuf_cluster_reserve(boolean_t *); 
+unsigned int bsd_mbuf_cluster_reserve(boolean_t *);
 void bsd_scale_setup(int);
 void bsd_exec_setup(int);

@@ -72,25 +72,27 @@ void bsd_exec_setup(int);
 /*
  * Declare these as initialized data so we can patch them.
  */
-#ifdef NBUF
+#ifdef NBUF
 int max_nbuf_headers = NBUF;
 int niobuf_headers = (NBUF / 2) + 2048;
-int nbuf_hashelements = NBUF;
-int nbuf_headers = NBUF;
+int nbuf_hashelements = NBUF;
+int nbuf_headers = NBUF;
 #else
 int max_nbuf_headers = 0;
 int niobuf_headers = 0;
-int nbuf_hashelements = 0;
-int nbuf_headers = 0;
+int nbuf_hashelements = 0;
+int nbuf_headers = 0;
 #endif

-SYSCTL_INT (_kern, OID_AUTO, nbuf, CTLFLAG_RD | CTLFLAG_LOCKED, &nbuf_headers, 0, "");
-SYSCTL_INT (_kern, OID_AUTO, maxnbuf, CTLFLAG_RW | CTLFLAG_LOCKED, &max_nbuf_headers, 0, "");
+SYSCTL_INT(_kern, OID_AUTO, nbuf, CTLFLAG_RD | CTLFLAG_LOCKED, &nbuf_headers, 0, "");
+SYSCTL_INT(_kern, OID_AUTO, maxnbuf, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_KERN, &max_nbuf_headers, 0, "");

 __private_extern__ int customnbuf = 0;
-int serverperfmode = 0;	/* Flag indicates a server boot when set */
 int ncl = 0;

+/* Indicates a server boot when set */
+TUNABLE(int, serverperfmode, "serverperfmode", 0);
+
 #if SOCKETS
 static unsigned int mbuf_poolsz;
 #endif
@@ -109,91 +111,106 @@ bsd_startupearly(void)
 	kern_return_t ret;

 	/* clip the number of buf headers upto 16k */
-	if (max_nbuf_headers == 0)
-		max_nbuf_headers = atop_kernel(sane_size / 50);	/* Get 2% of ram, but no more than we can map */
-	if ((customnbuf == 0) && (max_nbuf_headers > 16384))
+	if (max_nbuf_headers == 0) {
+		max_nbuf_headers = (int)atop_kernel(sane_size / 50);	/* Get 2% of ram, but no more than we can map */
+	}
+	if ((customnbuf == 0) && ((unsigned int)max_nbuf_headers > 16384)) {
 		max_nbuf_headers = 16384;
-	if (max_nbuf_headers < CONFIG_MIN_NBUF)
+	}
+	if (max_nbuf_headers < CONFIG_MIN_NBUF) {
 		max_nbuf_headers = CONFIG_MIN_NBUF;
+	}

 	/* clip the number of hash elements to 200000 */
-	if ( (customnbuf == 0 ) && nbuf_hashelements == 0) {
-		nbuf_hashelements = atop_kernel(sane_size / 50);
-		if (nbuf_hashelements > 200000)
+	if ((customnbuf == 0) && nbuf_hashelements == 0) {
+		nbuf_hashelements = (int)atop_kernel(sane_size / 50);
+		if ((unsigned int)nbuf_hashelements > 200000) {
 			nbuf_hashelements = 200000;
-	} else
+		}
+	} else {
 		nbuf_hashelements = max_nbuf_headers;
+	}

 	if (niobuf_headers == 0) {
-		if (max_nbuf_headers < 4096)
+		if (max_nbuf_headers < 4096) {
 			niobuf_headers = max_nbuf_headers;
-		else
+		} else {
 			niobuf_headers = (max_nbuf_headers / 2) + 2048;
+		}
 	}
-	if (niobuf_headers < CONFIG_MIN_NIOBUF)
+	if (niobuf_headers < CONFIG_MIN_NIOBUF) {
 		niobuf_headers = CONFIG_MIN_NIOBUF;
+	}

 	size = (max_nbuf_headers + niobuf_headers) * sizeof(struct buf);
 	size = round_page(size);

 	ret = kmem_suballoc(kernel_map,
-			    &firstaddr,
-			    size,
-			    FALSE,
-			    VM_FLAGS_ANYWHERE,
-			    &bufferhdr_map);
-
-	if (ret != KERN_SUCCESS)
+	    &firstaddr,
+	    size,
+	    FALSE,
+	    VM_FLAGS_ANYWHERE,
+	    VM_MAP_KERNEL_FLAGS_NONE,
+	    VM_KERN_MEMORY_FILE,
+	    &bufferhdr_map);
+
+	if (ret != KERN_SUCCESS) {
 		panic("Failed to create bufferhdr_map");
+	}

 	ret = kernel_memory_allocate(bufferhdr_map,
-				     &firstaddr,
-				     size,
-				     0,
-				     KMA_HERE | KMA_KOBJECT);
+	    &firstaddr,
+	    size,
+	    0,
+	    KMA_HERE | KMA_KOBJECT,
+	    VM_KERN_MEMORY_FILE);

-	if (ret != KERN_SUCCESS)
+	if (ret != KERN_SUCCESS) {
 		panic("Failed to allocate bufferhdr_map");
+	}

 	buf_headers = (struct buf *) firstaddr;
 	bzero(buf_headers, size);

 #if SOCKETS
 	{
-		static const unsigned int	maxspace = 128 * 1024;
+		static const unsigned int       maxspace = 128 * 1024;
 		int             scale;

 		nmbclusters = bsd_mbuf_cluster_reserve(NULL) / MCLBYTES;

-#if INET || INET6
+#if INET
 		if ((scale = nmbclusters / NMBCLUSTERS) > 1) {
 			tcp_sendspace *= scale;
 			tcp_recvspace *= scale;

-			if (tcp_sendspace > maxspace)
+			if (tcp_sendspace > maxspace) {
 				tcp_sendspace = maxspace;
-			if (tcp_recvspace > maxspace)
+			}
+			if (tcp_recvspace > maxspace) {
 				tcp_recvspace = maxspace;
+			}
 		}
-#endif /* INET || INET6 */
+#endif /* INET */
 	}
 #endif /* SOCKETS */

 	if (vnodes_sized == 0) {
 		if (!PE_get_default("kern.maxvnodes", &desiredvnodes, sizeof(desiredvnodes))) {
 			/*
-			 * Size vnodes based on memory 
-			 * Number vnodes is (memsize/64k) + 1024 
+			 * Size vnodes based on memory
+			 * Number vnodes is (memsize/64k) + 1024
 			 * This is the calculation that is used by launchd in tiger
-			 * we are clipping the max based on 16G 
+			 * we are clipping the max based on 16G
 			 * ie ((16*1024*1024*1024)/(64 *1024)) + 1024 = 263168;
 			 * CONFIG_VNODES is set to 263168 for "medium" configurations (the default)
-			 * but can be smaller or larger. 
+			 * but can be smaller or larger.
 			 */
-			desiredvnodes = (sane_size/65536) + 1024;
+			desiredvnodes = (int)(sane_size / 65536) + 1024;
 #ifdef CONFIG_VNODES
-			if (desiredvnodes > CONFIG_VNODES)
-				desiredvnodes = CONFIG_VNODES;
+			if (desiredvnodes > CONFIG_VNODES) {
+				desiredvnodes = CONFIG_VNODES;
+			}
 #endif
 		}
 		vnodes_sized = 1;
@@ -215,14 +232,17 @@ bsd_bufferinit(void)

 #if SOCKETS
 	ret = kmem_suballoc(kernel_map,
-			    (vm_offset_t *) & mbutl,
-			    (vm_size_t) (nmbclusters * MCLBYTES),
-			    FALSE,
-			    VM_FLAGS_ANYWHERE,
-			    &mb_map);
-
-	if (ret != KERN_SUCCESS)
+	    (vm_offset_t *) &mbutl,
+	    (vm_size_t) (nmbclusters * MCLBYTES),
+	    FALSE,
+	    VM_FLAGS_ANYWHERE,
+	    VM_MAP_KERNEL_FLAGS_NONE,
+	    VM_KERN_MEMORY_MBUF,
+	    &mb_map);
+
+	if (ret != KERN_SUCCESS) {
 		panic("Failed to allocate mb_map\n");
+	}
 #endif /* SOCKETS */

 	/*
@@ -233,11 +253,11 @@

 /* 512 MB (K32) or 2 GB (K64) hard limit on size of the mbuf pool */
 #if !defined(__LP64__)
-#define MAX_MBUF_POOL	(512 << MBSHIFT)
+#define MAX_MBUF_POOL   (512 << MBSHIFT)
 #else
-#define MAX_MBUF_POOL	(2ULL << GBSHIFT)
+#define MAX_MBUF_POOL   (2ULL << GBSHIFT)
 #endif /* !__LP64__ */
-#define MAX_NCL		(MAX_MBUF_POOL >> MCLSHIFT)
+#define MAX_NCL         (MAX_MBUF_POOL >> MCLSHIFT)

 #if SOCKETS
 /*
@@ -254,8 +274,9 @@ bsd_mbuf_cluster_reserve(boolean_t *overridden)
 	static boolean_t was_overridden = FALSE;

 	/* If called more than once, return the previously calculated size */
-	if (mbuf_poolsz != 0)
+	if (mbuf_poolsz != 0) {
 		goto done;
+	}

 	/*
 	 * Some of these are parsed in parse_bsd_args(), but for x86 we get
@@ -263,50 +284,54 @@ bsd_mbuf_cluster_reserve(boolean_t *overridden)
 	 * to correctly compute the size of the low-memory VM pool. It is
 	 * redundant but rather harmless.
 	 */
-	(void) PE_parse_boot_argn("ncl", &ncl, sizeof (ncl));
-	(void) PE_parse_boot_argn("mbuf_pool", &mbuf_pool, sizeof (mbuf_pool));
+	(void) PE_parse_boot_argn("ncl", &ncl, sizeof(ncl));
+	(void) PE_parse_boot_argn("mbuf_pool", &mbuf_pool, sizeof(mbuf_pool));

 	/*
 	 * Convert "mbuf_pool" from MB to # of 2KB clusters; it is
 	 * equivalent to "ncl", except that it uses different unit.
 	 */
-	if (mbuf_pool != 0)
+	if (mbuf_pool != 0) {
 		ncl = (mbuf_pool << MBSHIFT) >> MCLSHIFT;
+	}

-	if (sane_size > (64 * 1024 * 1024) || ncl != 0) {
-
-		if (ncl || serverperfmode)
+	if (sane_size > (64 * 1024 * 1024) || ncl != 0) {
+		if (ncl || serverperfmode) {
 			was_overridden = TRUE;
+		}

-		if ((nmbclusters = ncl) == 0) {
+		if ((nmbclusters = ncl) == 0) {
 			/* Auto-configure the mbuf pool size */
-			nmbclusters = mbuf_default_ncl(serverperfmode, sane_size);
+			nmbclusters = mbuf_default_ncl(mem_actual);
 		} else {
 			/* Make sure it's not odd in case ncl is manually set */
-			if (nmbclusters & 0x1)
+			if (nmbclusters & 0x1) {
 				--nmbclusters;
+			}

 			/* And obey the upper limit */
-			if (nmbclusters > MAX_NCL)
+			if (nmbclusters > MAX_NCL) {
 				nmbclusters = MAX_NCL;
+			}
 		}

-		/* Round it down to nearest multiple of 4KB clusters */
-		nmbclusters = P2ROUNDDOWN(nmbclusters, NCLPBG);
+		/* Round it down to nearest multiple of PAGE_SIZE */
+		nmbclusters = (unsigned int)P2ROUNDDOWN(nmbclusters, NCLPG);
 	}
 	mbuf_poolsz = nmbclusters << MCLSHIFT;
 done:
-	if (overridden)
+	if (overridden) {
 		*overridden = was_overridden;
+	}

-	return (mbuf_poolsz);
+	return mbuf_poolsz;
 }
 #endif

 #if defined(__LP64__)
 extern int tcp_tcbhashsize;
 extern int max_cached_sock_count;
-#endif 
+#endif

 void
@@ -316,36 +341,47 @@ bsd_scale_setup(int scale)
 	if ((scale > 0) && (serverperfmode == 0)) {
 		maxproc *= scale;
 		maxprocperuid = (maxproc * 2) / 3;
+		if (scale > 2) {
+			maxfiles *= scale;
+			maxfilesperproc = maxfiles / 2;
+		}
 	}
 	/* Apply server scaling rules */
-	if ((scale > 0) && (serverperfmode !=0)) {
+	if ((scale > 0) && (serverperfmode != 0)) {
 		maxproc = 2500 * scale;
 		hard_maxproc = maxproc;
 		/* no fp usage */
-		maxprocperuid = (maxproc*3)/4;
+		maxprocperuid = (maxproc * 3) / 4;
 		maxfiles = (150000 * scale);
-		maxfilesperproc = maxfiles/2;
+		maxfilesperproc = maxfiles / 2;
 		desiredvnodes = maxfiles;
 		vnodes_sized = 1;
+		tcp_tfo_backlog = 100 * scale;
 		if (scale > 4) {
-			/* clip them at 32G level */
+			/* clip somaxconn at 32G level */
 			somaxconn = 2048;
-			/* 64G or more the hash size is 32k */
+			/*
+			 * For scale > 4 (> 32G), clip
+			 * tcp_tcbhashsize to 32K
+			 */
+			tcp_tcbhashsize = 32 * 1024;
+
 			if (scale > 7) {
 				/* clip at 64G level */
-				tcp_tcbhashsize = 16 *1024;
 				max_cached_sock_count = 165000;
 			} else {
-				tcp_tcbhashsize = 32 *1024;
-				max_cached_sock_count = 60000 + ((scale-1) * 15000);
+				max_cached_sock_count = 60000 + ((scale - 1) * 15000);
 			}
 		} else {
-			somaxconn = 512*scale;
-			tcp_tcbhashsize = 4*1024*scale;
-			max_cached_sock_count = 60000 + ((scale-1) * 15000);
+			somaxconn = 512 * scale;
+			tcp_tcbhashsize = 4 * 1024 * scale;
+			max_cached_sock_count = 60000 + ((scale - 1) * 15000);
 		}
 	}
+
+	if (maxproc > hard_maxproc) {
+		hard_maxproc = maxproc;
+	}
 #endif
 	bsd_exec_setup(scale);
 }
-
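Note on the vnode-sizing arithmetic documented in the bsd_startupearly() hunk above: the comment describes the rule "desiredvnodes = (memsize / 64K) + 1024, clipped at CONFIG_VNODES (263168 for the default 'medium' configuration)". The following stand-alone user-space C sketch is not part of xnu; it simply mirrors that arithmetic (with CONFIG_VNODES hard-coded to the 263168 value quoted in the comment) so the clip point is easy to verify.

#include <stdint.h>
#include <stdio.h>

/* Value quoted in the diff comment for the default "medium" configuration. */
#define CONFIG_VNODES 263168

/* Mirror of the documented rule: (memsize / 64K) + 1024, clipped at CONFIG_VNODES. */
static int desired_vnodes(uint64_t mem_bytes)
{
	int n = (int)(mem_bytes / 65536) + 1024;
	if (n > CONFIG_VNODES) {
		n = CONFIG_VNODES;
	}
	return n;
}

int main(void)
{
	/* 8 GiB -> 131072 + 1024 = 132096; 16 GiB -> exactly 263168; anything larger clips. */
	printf("%d %d %d\n",
	    desired_vnodes(8ULL << 30),
	    desired_vnodes(16ULL << 30),
	    desired_vnodes(32ULL << 30));
	return 0;
}

For 16 GiB the formula gives (2^34 / 2^16) + 1024 = 262144 + 1024 = 263168, which is exactly the CONFIG_VNODES ceiling mentioned in the comment; that is why the clip only matters on machines with more than 16 GiB of memory.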