# $Copyright:	$
# Copyright (c) 1984, 1985, 1986, 1987, 1988, 1989, 1990 
# Sequent Computer Systems, Inc.   All rights reserved.
#  
# This software is furnished under a license and may be used
# only in accordance with the terms of that license and with the
# inclusion of the above copyright notice.   This software may not
# be provided or otherwise made available to, or used by, any
# other person.  No title to or ownership of the software is
# hereby transferred.
#
# $Header: config 1.21 90/10/29 $
#
space {
	csysc_space.c
	led_space.c
	mmap_space.c
	panics_space.c
	param_space.c
	stats_space.c
	vec_space.c
	autop_space.c
}

#
#  Kernel configuration parameters and
#  system parameter formulae.
#

declare [10] {				# Declare constants
  K		int {"The value of 1K (1024).  This should NOT be changed."}
}

set {
  K		{1024}			# Must not change
}

#
#  Table sizing heuristics are based on the "maxusers" parameter and a
#  modifier to more closely match a typical "timeshare", "parallel/research",
#  or "commercial" system.
#
#  The values of the parameters declared below can be changed by use of
#  the appropriate "new_default," "adjust," or "set" directives in the
#  site.<configuration> file, to compensate for the heuristics when
#  they aren't optimal for a given system.
#
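
#
#  For example, a site.<configuration> file might override the buffer-cache
#  percentage like this (hypothetical value; the syntax follows the "set"
#  directive used for K above):
#
#	set {
#	  BUFPCT	{15}		# give 15% of free memory to buf-cache
#	}
#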

declare [1] {
  BUFPCT     int {"%free memory given to buffer cache"}
}

declare [3] {
  PROC_MULT  int {"average # processes per user (rough)"}
  FIFO_DIV   int {"average # processes per pipe"}
  INODE_MULT int {"scaling factor for average #of inodes/process (see NINODE)"}
  INODE_DIV  int {"scaling divisor for average #of inodes/process (see NINODE)"}
  FILE_MULT  int {"scaling factor for average #of files/process (see NFILE)"}
  FILE_DIV   int {"scaling divisor for average #of files/process (see NFILE)"}
  MFILE_MULT int {"scaling factor for average #of mapped-file extents/process (see NMFILE)"}
  MFILE_DIV  int {"scaling divisor for average #of mapped-file extents/process (see NMFILE)"}
  FLINO_MULT int {"average #of lockable files per user"}
  FILCK_MULT int {"average #of locks per lockable file"}
  #
  #  Tuneable paging parameters.
#  maxRS and vt_maxRS are adjusted when the system comes up to ensure they fit in memory.
  #
  AVGSZ_PROC int {"average process size(K); used to size Usrptmap[]"}
  DEF_MINRS  int {"size(K) default min Rset"}
}

ifdef (TIMESHARE) {

#
#  Timeshare systems assume a large number of users, each with a medium
#  number of moderate-size processes, working on mostly unrelated files.
#  Little use of file record locking is made, and memory consumption
#  by processes outweighs the buf-cache.
#

default {
  BUFPCT	{25; 5..95}	# %free memory given to buffer cache
  PROC_MULT	{ 8; 1..}	# average # processes per user (rough)
  FIFO_DIV	{ 4; 1..}	# average # processes per pipe
  INODE_MULT	{ 1; 1..}	# INODE_MULT / INODE_DIV average # ...
  INODE_DIV	{ 1; 1..}	#	... inodes per process (rough)
  FILE_MULT	{ 8; 1..}	# FILE_MULT / FILE_DIV average # ...
  FILE_DIV	{ 5; 1..}	#	... files per process (rough)
  MFILE_MULT	{ 1; 1..}	# MFILE_MULT / MFILE_DIV average # ...
  MFILE_DIV	{ 2; 1..}	#	... mapped-file extents per process
  FLINO_MULT	{ 1; 1..}	# average # lockable files per user (rough)
  FILCK_MULT	{ 1; 1..}	# average # locks per lockable file (rough)
  #
  #  Tuneable paging parameters.
#  maxRS and vt_maxRS are adjusted when the system comes up to ensure they fit in memory.
  #
  AVGSZ_PROC	{512; 4..}	# average process size (K); sizes Usrptmap[]
  DEF_MINRS	{64;  4..}	# size(K) default min Rset
}

} elsifdef (PARALLEL) {

#
#  Parallel/research systems assume a relatively small number of users
#  (maxusers <= 32 or so), each potentially running many large processes.
#  The system must be able to execute a 30-process parallel application of
#  nearly full-sized processes without swapping caused by exhausting the
#  page-table mapping resource (Usrptmap[]).  Buf-cache size isn't as
#  critical as the free memory available to the parallel program(s).
#  Little or no use of file record locking is made.  Such systems may not
#  be suitable for large parallel makes, since they are tuned for large
#  shared-memory programs (few large processes).
#
#  The 4K page size allows over-allocating Usrptmap[] (via AVGSZ_PROC)
#  without consuming much kernel memory.
#

default {
  BUFPCT	{10; 5..95}	# %free memory given to buffer cache
  PROC_MULT	{16; 1..}	# average # processes per user (rough)
  FIFO_DIV	{ 4; 1..}	# average # processes per pipe
  INODE_MULT	{ 1; 1..}	# INODE_MULT / INODE_DIV average # ...
  INODE_DIV	{ 1; 1..}	#	... inodes per process (rough)
  FILE_MULT	{ 8; 1..}	# FILE_MULT / FILE_DIV average # ...
  FILE_DIV	{ 5; 1..}	#	... files per process (rough)
  MFILE_MULT	{ 1; 1..}	# MFILE_MULT / MFILE_DIV average # ...
  MFILE_DIV	{ 1; 1..}	#	... mapped-file extents per process
  FLINO_MULT	{ 1; 1..}	# average # lockable files per user (rough)
  FILCK_MULT	{ 1; 1..}	# average # locks per lockable file (rough)
  #
  #  Tuneable paging parameters.
#  maxRS and vt_maxRS are adjusted when the system comes up to ensure they fit in memory.
  #
  AVGSZ_PROC	{64*K; 4..}	# average process size (K); sizes Usrptmap[]
  DEF_MINRS	{256;  4..}	# size(K) default min Rset
}
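
#
#  Note: 64*K evaluates to 65536, and since AVGSZ_PROC is in K units this
#  is 64 MB of average virtual size per process; the 30-process parallel
#  application described above would then span roughly 1.9 GB (30 * 64 MB)
#  of mapped user virtual space.
#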

} elsifdef (COMMERCIAL) {

#
#  Commercial systems assume a large number of users, each with a smaller
#  number of processes, working on many mostly-small files.  Processes tend
#  to be fairly small and share a small number of executable binaries.
#  There is greater use of file locking, and because the pages of those
#  binaries are widely shared, a higher minRS is reasonable.  Commercial
#  use is assumed to imply databases, which bypass the buffer cache, so a
#  smaller BUFPCT is used.
#

default {
  BUFPCT	{10; 5..95}	# %free memory given to buffer cache
  PROC_MULT	{ 5; 1..}	# average # processes per user (rough)
  FIFO_DIV	{ 4; 1..}	# average # processes per pipe
  INODE_MULT	{ 8; 1..}	# INODE_MULT / INODE_DIV average # ...
  INODE_DIV	{ 5; 1..}	#	... inodes per process (rough)
  FILE_MULT	{ 2; 1..}	# FILE_MULT / FILE_DIV average # ...
  FILE_DIV	{ 1; 1..}	#	... files per process (rough)
  MFILE_MULT	{ 1; 1..}	# MFILE_MULT / MFILE_DIV average # ...
  MFILE_DIV	{ 4; 1..}	#	... mapped-file extents per process
  FLINO_MULT	{ 8; 1..}	# average # lockable files per user (rough)
  FILCK_MULT	{ 8; 1..}	# average # locks per lockable file (rough)
  #
  #  Tuneable paging parameters.
#  maxRS and vt_maxRS are adjusted when the system comes up to ensure they fit in memory.
  #
  AVGSZ_PROC	{400; 4..}	# average process size (K); sizes Usrptmap[]
  DEF_MINRS	{128; 4..}	# size(K) default min Rset
}

} else {

  fatal ("ENVIRONMENT must be one of: TIMESHARE, PARALLEL, COMMERCIAL.")

}

#
#  Determine defaults for various table sizes and other parameters.
#

declare [1] {
  MAXUP		int {"max #of processes per non-root user"}
}

declare [3] {
  NPROC		int {"#of process slots"}
  NFIFO		int {"#of FIFOs"}
  FIFOSANITY	int {"Minimum number of FIFOs for the system"}
  NINODE	int {"#of concurrently active inodes"}
  NFILE		int {"#of concurrently open files"}
  NMFILE	int {"#of mapped file extents"}

  NMOUNT	int {"#of mount table entries"}
  NOFILES	int {"Max #of file descriptors per process"}
  NOFILETAB	int {"#of file descriptor tables"}
  NFILCK	int {"Number of record locks"}
}

default {
  MAXUP		{100; 2..}		# max #of processes per non-root user
  NPROC		{20 + PROC_MULT * MAXUSERS; 10..}	# #of process slots
  NFIFO		{NPROC / FIFO_DIV; FIFOSANITY..}	# #of FIFOs
  FIFOSANITY	{2; 2..}				# min #of FIFOs
  NINODE	{(INODE_MULT * NPROC) / INODE_DIV + MAXUSERS + 48; 48..}
					# #of concurrently active inodes
  NFILE		{FILE_MULT * (NPROC+16+MAXUSERS) / FILE_DIV + 32; 32..}
					# #of concurrently open files
  NMFILE	{MFILE_MULT * NPROC / MFILE_DIV; 0..}
					# default #of mapped file extents
  NMOUNT	{32; 4..}		# #of mount table entries
  NOFILES	{64; 20..}		# Max #of file descriptors per process
  NOFILETAB	{NPROC; NPROC..}	# #of file descriptor tables
  NFILCK	{((MAXUSERS*FLINO_MULT) + 50)*FILCK_MULT; 20..}
					# #of record locks
}
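
#
#  Worked example (hypothetical MAXUSERS of 32 with the TIMESHARE
#  multipliers above; truncating integer division assumed):
#
#	NPROC  = 20 + 8*32              = 276
#	NFIFO  = 276/4                  = 69
#	NINODE = (1*276)/1 + 32 + 48    = 356
#	NFILE  = 8*(276+16+32)/5 + 32   = 550
#	NMFILE = 1*276/2                = 138
#	NFILCK = ((32*1) + 50)*1        = 82
#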

declare [5] {
  NCMULT	int {"Scaling factor for name cache size (see NCSIZE)"}
  NCDIV		int {"Scaling divisor for name cache size (see NCSIZE)"}
  NCSIZE	int {"Number of directory name lookup cache entries"}
  NCHSIZE	int {"Number of directory name lookup cache hash entries"}
  NCALL		int {"Maximum number of active timeout events"}
  NUCRED	int {"#of user-credential structures"}
  MAXSYMLINKS	int {"Maximum #of symbolic links to follow in a pathname"}
  TICKADJ	int {"Amount of time the clock is adjusted on each tick for adjtime"}
}

default {
  NCMULT	{1; 1..}
  NCDIV		{2; 1..}
  NCSIZE	{NINODE * NCMULT / NCDIV; 10..}
					# #of name cache entries
  NCHSIZE	{NCSIZE / 4; 4..}	# #of name cache hash entries
  NCALL		{16 + NPROC; 16 + NPROC..}
					# maximum #of active timeout events
  NUCRED	{NPROC; NPROC..}	# #of user-credential structures
  MAXSYMLINKS	{8; 1..}		# Maximum number of symbolic links that
					# may be expanded in a path name.
					# Should be set high enough to allow
					# all legitimate uses, but halt
					# infinite loops reasonably quickly.
  TICKADJ	{40; 1..10000}		# can adjust 24ms in 60s
}
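
#
#  Continuing the hypothetical MAXUSERS = 32 example above (NPROC = 276,
#  NINODE = 356; truncating integer division assumed):
#
#	NCSIZE  = 356*1/2   = 178
#	NCHSIZE = 178/4     = 44
#	NCALL   = 16 + 276  = 292
#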

#
#  Variables defaulted here to zero are initialized at bootstrap time
#  to values dependent on memory size.
#

declare [5] {
  NBUF	  int {"If non-zero, overrides BUFPCT default for #of I/O buffers"}
  NPBUF	  int {"If non-zero, overrides default #of physical I/O buffers"}
  NHBUF	  int {"If non-zero, overrides default #of hash buffers"}
}

default {
  NBUF		{0; 0..}
  NPBUF		{0; 0..256}
  NHBUF		{0; 0..}
}
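
#
#  For example (hypothetical value), a site.<configuration> file could pin
#  the buffer cache to a fixed count instead of the BUFPCT heuristic, since
#  a non-zero NBUF overrides it:
#
#	set {
#	  NBUF		{2000}		# fixed #of I/O buffers
#	}
#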

#
# Asynchronous I/O parameters.  Setting NABUF to zero disables asynchronous
# I/O and removes all of the resources for it.  Both values must be
# non-zero for asynchronous I/O to work.
#

declare [7] {
  NABUF	  int {"Number of async I/O buffers, zero disables async I/O"}
  MAXAIO  int {"Maximum number of pending async I/O requests per process"}
}

default {
  NABUF		{50; 0..}
  MAXAIO	{10; 0..NABUF}
}
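
#
#  For example, a site.<configuration> file for a system that needs no
#  async I/O could reclaim those resources (per the NABUF declaration,
#  zero disables async I/O):
#
#	set {
#	  NABUF		{0}		# disable async I/O
#	  MAXAIO	{0}		# no pending async requests
#	}
#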

#
#  Streams stuff.
# 
#  These numbers basically assume that the base OS is the only
#  user of streams (i.e. primarily TTYs and PTYs).  The distribution
#  for buffers has been done with this in mind.  Each layered product
#  that uses streams resources should arrange to bump these numbers
#  as appropriate for its usage when it is installed.
#

declare [5] {
  TTYMULT  int {"Number of ttys/ptys per user"}
  NSTREAM  int {"Parameter for streams resources (e.g. NBLK4, NQUEUE, etc.)"}
}

default {
  TTYMULT 	{4; 1..}		# number of ttys/ptys per user
  NSTREAM	{10 + TTYMULT * MAXUSERS; 10+MAXUSERS..}
}					# number of streams
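
#
#  E.g., with the hypothetical MAXUSERS of 32 used in the worked examples
#  above: NSTREAM = 10 + 4*32 = 138.
#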

# 
#  The following should only be changed by a layered product if
#  it deals with streams with a larger number of modules pushed.
#  Note that too many modules pushed on a stream may lead to
#  an overflow of the kernel stack due to nested put procedure calls.
#

declare [7] {
  NSTRPUSH	int {"Maximum number of modules per stream"}
}

default {
  NSTRPUSH 	{6; 4..}		# max #of modules per stream
}

#
#  The following should be an even number since queues come in pairs:
#  2 queues for the stream head, 2 for the driver, and 4 for a couple of
#  pushed modules on each stream lead to a factor of 8 per stream.  If a
#  layered product expects a lot of streams with more than 2 modules
#  per stream, then this number should be appropriately bumped.
#

declare [7] {
  NQUEUE	int {"Number of stream queues"}
}

default {
  NQUEUE 	{8 * NSTREAM; NSTREAM..}	# number of stream queues
}
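
#
#  E.g., continuing the hypothetical NSTREAM = 138 example above:
#  NQUEUE = 8 * 138 = 1104, i.e. (2 + 2 + 4) queues for each of the
#  138 streams.
#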

#
#  The following decides the link table size.
#  Each I_LINK call adds an entry to the link table and each I_UNLINK
#  call deletes one.  The table size should therefore be greater than the
#  maximum number of streams used to link drivers together.  The value
#  below is much too conservative, but has the benefit of guaranteeing
#  that the table will never be overrun (each entry is pretty small).
#  Thus layered products should not bump this value.
#

declare [7] {
  NMUXLINK	int {"Number of multiplexor links"}
}

default {
  NMUXLINK 	{NSTREAM; NSTREAM..}	# number of multiplexor links
}

#
#  The following is probably conservative, since it can deal with
#  a bufcall() cell for each server + a poll()/sigset() cell per stream.
#

declare [7] {
  NSTREVENT	int {"Number of event cells"}
}

default {
  NSTREVENT 	{NQUEUE; NQUEUE..}	# number of event cells
}

# 
#  The following percentages decide the cutoff points for allocb()
#  calls at low and medium priorities.  If higher priority traffic
#  is getting choked then these numbers are probably too high.  If
#  there is not enough high priority traffic and buffers are denied
#  to lower priority requests, then these numbers are probably too
#  low.
#

declare [7] {
  STRLOFRAC 	int {"Low priority alloc percentage"}
  STRMEDFRAC 	int {"Medium priority alloc percentage"}
}

default {
  STRLOFRAC 	{80; 30..100}		# low priority alloc percentage
  STRMEDFRAC 	{90; 70..100}		# medium priority alloc percentage
}
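
#
#  E.g., with STRLOFRAC = 80 and STRMEDFRAC = 90, low-priority allocb()
#  requests fail once more than 80% of a buffer class is in use, while
#  medium-priority requests keep succeeding until 90% is in use and
#  high-priority requests may use the full class.  (This matches the
#  usual SVR3 streams allocation policy; verify against the local
#  allocb() implementation.)
#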

# 
#  The following is reasonable for TTY/PTYs, but layered products
#  should probably re-examine it carefully.  Too large a value may
#  waste too much memory for ordinary write()s.
#

declare [7] {
  STRMSGSZ 	int {"Maximum streams message size"}
  STRCTLSZ 	int {"Maximum streams control message size"}
}

default {
  STRMSGSZ 	{4096; 0..}		# maximum message size
  STRCTLSZ 	{1024; 0..}		# maximum control message size
}

# 
#  The following basically decides how much work gets done by a single
#  invocation of runqueues().  In the current software-interrupt-based
#  scheduler, this decides how long the interrupt is going to take.
#

declare [7] {
  STRNSCHED	int {"Maximum #of queues to service during runqueues() call"}
}

default {
  STRNSCHED 	{32; 1..}		# max queues serviced/runqueues() call
}

# 
#  The following buffer allocation numbers and distribution are reasonable
#  for TTY/PTYs, but layered products should probably re-examine them
#  carefully and bump the appropriate numbers accordingly.  Note that
#  for some products these numbers probably should not be based on
#  NSTREAM, but rather on, say, available memory (e.g. NFS).
#

declare [7] {
  NBLKPRIV 	int {"Number of private stream buffers"}
  NBLK4096 	int {"Number of 4K stream buffers"}
  NBLK2048 	int {"Number of 2K stream buffers"}
  NBLK1024 	int {"Number of 1K stream buffers"}
  NBLK512 	int {"Number of 512-byte stream buffers"}
  NBLK256 	int {"Number of 256-byte stream buffers"}
  NBLK128	int {"Number of 128-byte stream buffers"}
  NBLK64 	int {"Number of 64-byte stream buffers"}
  NBLK16 	int {"Number of 16-byte stream buffers"}
  NBLK4 	int {"Number of 4-byte stream buffers"}
}

default {
  NBLKPRIV 	{10; 0..}			# number of private buffers
  NBLK4096 	{10 + NSTREAM / 4;  0..}	# number of 4K buffers
  NBLK2048 	{10 + NSTREAM * 1;  0..}	# number of 2K buffers
  NBLK1024 	{10 + NSTREAM / 4;  0..}	# number of 1K buffers
  NBLK512 	{10 + NSTREAM / 4;  0..}	# number of 512-byte buffers
  NBLK256 	{10 + NSTREAM / 4;  0..}	# number of 256-byte buffers
  NBLK128	{10 + NSTREAM / 4;  0..}	# number of 128-byte buffers
  NBLK64 	{10 + NSTREAM * 1;  0..}	# number of 64-byte buffers
  NBLK16 	{10 + NSTREAM * 4;  0..}	# number of 16-byte buffers
  NBLK4 	{10 + NSTREAM * 16; 0..}	# number of 4-byte buffers
}
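
#
#  E.g., continuing the hypothetical NSTREAM = 138 example above
#  (truncating integer division assumed):
#
#	NBLK4096 = 10 + 138/4   = 44	(likewise the other NSTREAM/4 classes)
#	NBLK2048 = 10 + 138     = 148	(likewise NBLK64)
#	NBLK16   = 10 + 138*4   = 562
#	NBLK4    = 10 + 138*16  = 2218
#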

#
# Tunable Standards-related parameters
#
# CUSERID_VERSION is used only by cuserid.c in libc (obtained via sysconf()).
#
declare [3] {
  CUSERID_VERSION	int {"0 == Traditional usage, 1 == POSIX.1 usage"}
}

default {
  CUSERID_VERSION	{0; 0..1}	# default is Traditional usage
}
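
#
#  For example, a site.<configuration> file requiring POSIX.1 cuserid()
#  semantics could use:
#
#	set {
#	  CUSERID_VERSION	{1}	# POSIX.1 usage
#	}
#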
