Diffstat (limited to 'rubbos/app/httpd-2.0.64/modules/experimental')
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/.deps | 0
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/.indent.pro | 54
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/Makefile | 8
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/Makefile.in | 3
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/NWGNUauthldap | 262
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/NWGNUcharsetl | 257
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/NWGNUdsk_cach | 261
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/NWGNUexample | 256
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmakefile | 256
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmem_cach | 265
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmod_cach | 264
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmoddumpio | 248
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/NWGNUutilldap | 266
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/README | 41
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/README.ldap | 47
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.c | 171
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.h | 112
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.c | 290
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.h | 161
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.c | 290
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.h | 160
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/cache_storage.c | 311
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/cache_util.c | 575
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/charset.conv | 55
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/config.m4 | 39
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.c | 1117
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.def | 6
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.dsp | 128
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.c | 1006
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.dsp | 168
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.h | 319
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.imp | 10
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter.c | 137
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter_in.c | 160
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.c | 1082
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.dsp | 124
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.exp | 1
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.c | 963
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.dsp | 128
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.c | 215
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.dsp | 128
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_example.c | 1313
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.c | 1198
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.dsp | 128
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/modules.mk | 3
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.c | 1758
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.def | 7
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.dsp | 140
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.c | 450
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.h | 193
-rw-r--r-- | rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache_mgr.c | 762
51 files changed, 16296 insertions(+), 0 deletions(-)
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/.deps b/rubbos/app/httpd-2.0.64/modules/experimental/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/.indent.pro b/rubbos/app/httpd-2.0.64/modules/experimental/.indent.pro
new file mode 100644
index 00000000..a9fbe9f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/Makefile b/rubbos/app/httpd-2.0.64/modules/experimental/Makefile
new file mode 100644
index 00000000..9d5e211d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/experimental
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/experimental
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/experimental
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/Makefile.in b/rubbos/app/httpd-2.0.64/modules/experimental/Makefile.in
new file mode 100644
index 00000000..7c5c149d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/Makefile.in
@@ -0,0 +1,3 @@
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUauthldap b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUauthldap
new file mode 100644
index 00000000..4963a1dd
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUauthldap
@@ -0,0 +1,262 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(LDAPSDK)/inc \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = authldap
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) LDAP Authentication Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = AuthLDAP Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/authldap.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_auth_ldap.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ lldapsdk \
+ lldapssl \
+ lldapx \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ util_ldap_connection_find \
+ util_ldap_connection_close \
+ util_ldap_connection_unbind \
+ util_ldap_connection_cleanup \
+ util_ldap_cache_checkuserid \
+ util_ldap_cache_compare \
+ util_ldap_cache_comparedn \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ @$(LDAPSDK)/imports/lldapsdk.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ auth_ldap_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+ copy charset.conv $(INSTALL)\Apache2\conf\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUcharsetl b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUcharsetl
new file mode 100644
index 00000000..b4e4f595
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUcharsetl
@@ -0,0 +1,257 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ -DAP_WANT_DIR_TRANSLATION \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = charsetl
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Charset Lite Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = charsetl
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/charsetl.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_charset_lite.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ charset_lite_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUdsk_cach b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUdsk_cach
new file mode 100644
index 00000000..879dd86e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUdsk_cach
@@ -0,0 +1,261 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = dsk_cach
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Disk Cache Sub-Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = dsk_cach
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/dsk_cach.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_disk_cache.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Apache2 \
+ Libc \
+ mod_cach \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @$(APR)/aprlib.imp \
+ @httpd.imp \
+ @mod_cache.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ disk_cache_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUexample b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUexample
new file mode 100644
index 00000000..01b7b85e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUexample
@@ -0,0 +1,256 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = example
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Example Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Example Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/example.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_example.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ example_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmakefile b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmakefile
new file mode 100644
index 00000000..d6584514
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmakefile
@@ -0,0 +1,256 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME =
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION =
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME =
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE =
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/charsetl.nlm \
+ $(OBJDIR)/example.nlm \
+ $(OBJDIR)/moddumpio.nlm \
+ $(OBJDIR)/mod_cach.nlm \
+ $(OBJDIR)/mem_cach.nlm \
+ $(OBJDIR)/dsk_cach.nlm \
+ $(EOLIST)
+
+# If LDAPSDK has been defined then build the auth_ldap module
+ifneq "$(LDAPSDK)" ""
+TARGET_nlm += $(OBJDIR)/authldap.nlm \
+ $(OBJDIR)/utilldap.nlm \
+ $(EOLIST)
+endif
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmem_cach b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmem_cach
new file mode 100644
index 00000000..236d867a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmem_cach
@@ -0,0 +1,265 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ -DDEBUG \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = mem_cach
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Memory Cache Sub-Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = mem_cach
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/mem_cach.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_mem_cache.o \
+ $(OBJDIR)/cache_hash.o \
+ $(OBJDIR)/cache_pqueue.o \
+ $(OBJDIR)/cache_cache.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Apache2 \
+ Libc \
+ mod_cach \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @$(APR)/aprlib.imp \
+ @httpd.imp \
+ @mod_cache.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ mem_cache_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmod_cach b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmod_cach
new file mode 100644
index 00000000..3665b764
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmod_cach
@@ -0,0 +1,264 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ -DDEBUG \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = mod_cach
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Cache module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = mod_cach
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/mod_cach.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/cache_util.o \
+ $(OBJDIR)/cache_storage.o \
+ $(OBJDIR)/mod_cache.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Apache2 \
+ Libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @$(APR)/aprlib.imp \
+ @httpd.imp \
+ @netware.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ @mod_cache.imp \
+ cache_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmoddumpio b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmoddumpio
new file mode 100644
index 00000000..53c74bb5
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmoddumpio
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = moddumpio
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Debugging IO Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = DumpIO Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/moddumpio.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_dumpio.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ dumpio_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUutilldap b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUutilldap
new file mode 100644
index 00000000..376325a2
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUutilldap
@@ -0,0 +1,266 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(LDAPSDK)/inc \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = utilldap
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) LDAP Utility Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = UtilLDAP Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/utilldap.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/util_ldap.o \
+ $(OBJDIR)/util_ldap_cache.o \
+ $(OBJDIR)/util_ldap_cache_mgr.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ lldapsdk \
+ lldapssl \
+ lldapx \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ @$(LDAPSDK)/imports/lldapsdk.imp \
+ @$(LDAPSDK)/imports/lldapssl.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ ldap_module \
+ util_ldap_connection_find \
+ util_ldap_connection_close \
+ util_ldap_connection_unbind \
+ util_ldap_connection_cleanup \
+ util_ldap_cache_checkuserid \
+ util_ldap_cache_getuserdn \
+ util_ldap_cache_compare \
+ util_ldap_cache_comparedn \
+ util_ldap_ssl_supported \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/README b/rubbos/app/httpd-2.0.64/modules/experimental/README
new file mode 100644
index 00000000..447c16ee
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/README
@@ -0,0 +1,41 @@
+README for Apache 2.0 Example Module
+[April, 1997, updated May 2000]
+
+The files in the src/modules/example directory under the Apache
+distribution directory tree are provided as an example to those who
+wish to write modules that use the Apache API.
+
+The main file is mod_example.c, which illustrates all the different
+callback mechanisms and call syntaxes. By no means does an add-on
+module need to include routines for all of the callbacks - quite the
+contrary!
+
+The example module is an actual working module. If you link it into
+your server, enable the "example-handler" handler for a location, and then
+browse to that location, you will see a display of some of the tracing
+the example module did as the various callbacks were made.
+
+To include the example module in your server add --enable-example to the
+other ./configure arguments executed from the httpd-2.0 directory. After
+that run 'make'.
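+
+For example, a complete pass might look like the following (the --prefix
+value is only a placeholder; keep whatever arguments your build already
+uses):
+
+    cd httpd-2.0
+    ./configure --prefix=/usr/local/apache2 --enable-example
+    make
+    make install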
+
+To add another module of your own:
+
+ A. cp modules/experimental/mod_example.c modules/experimental/mod_myexample.c
+ B. Modify the file
+ C. Build the server with --enable-myexample
+
+To activate the example module, include a block similar to the
+following in your httpd.conf file:
+
+ <Location /example-info>
+ SetHandler example-handler
+ </Location>
+
+As an alternative, you can put the following into a .htaccess file and
+then request the file "test.example" from that location:
+
+ AddHandler example-handler .example
+
+After reloading/restarting your server, you should be able to browse
+to this location and see the brief display mentioned earlier.
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/README.ldap b/rubbos/app/httpd-2.0.64/modules/experimental/README.ldap
new file mode 100644
index 00000000..c9445b81
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/README.ldap
@@ -0,0 +1,47 @@
+Quick installation instructions (UNIX):
+
+- Building on generic Unix:
+
+ Add generic ldap support and the TWO ldap modules to the build, like this:
+
+ ./configure --with-ldap --enable-ldap --enable-auth-ldap
+
+  The --with-ldap option switches on LDAP library linking in apr-util. Make
+ sure that you have an LDAP client library available such as those
+ from Netscape/iPlanet/Sun One or the OpenLDAP project.
+
+ The --enable-ldap option switches on the LDAP caching module. This
+ module is a support module for other LDAP modules, and is not useful
+ on its own. This module is required, but caching can be disabled
+ via the configuration directive LDAPCacheEntries.
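+
+  For example, caching could be turned off entirely with a line like the
+  following in httpd.conf (shown for illustration; see the util_ldap
+  documentation for the exact semantics of this directive):
+
+      LDAPCacheEntries 0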
+
+ The --enable-auth-ldap option switches on the LDAP authentication
+ module.
+
+- Building on AIX:
+
+ The following ./configure line is reported to work for AIX:
+
+ CC=cc_r; export CC
+ CPPFLAGS=-qcpluscmt;export CPPFLAGS
+ ./configure --with-mpm=worker --prefix=/usr/local/apache \
+        --enable-dav=static --enable-dav_fs=static --enable-ssl=static \
+        --with-ldap=yes --with-ldap-include=/usr/local/include \
+        --with-ldap-lib=/usr/local/lib --enable-ldap=static \
+ --enable-auth_ldap=static
+
+
+Quick installation instructions (win32):
+
+1. copy the file srclib\apr-util\include\apr_ldap.hw to apr_ldap.h
+2. the netscape/iplanet ldap libraries are installed in srclib\ldap
+3. Compile the two modules util_ldap and mod_auth_ldap using the dsp files
+4. You get a mod_auth_ldap.so and a util_ldap.so module
+5. Put them in the modules directory, and don't forget to copy
+   nsldap32v50.dll somewhere where apache.exe will find it
+6. Load the two modules in your httpd.conf, like below:
+ LoadModule ldap_module modules/util_ldap.so
+ LoadModule auth_ldap_module modules/mod_auth_ldap.so
+7. Configure the directories as described in the docs; a minimal example follows.
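+
+   For illustration only, a protected directory might then look something
+   like this (host, port and base DN are placeholders; see the mod_auth_ldap
+   documentation for the full directive reference):
+
+       <Directory "/usr/local/apache2/htdocs/private">
+           AuthType Basic
+           AuthName "LDAP Protected Area"
+           AuthLDAPURL ldap://ldap.example.com:389/ou=People,dc=example,dc=com?uid
+           require valid-user
+       </Directory>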
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.c b/rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.c
new file mode 100644
index 00000000..6db98f71
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.c
@@ -0,0 +1,171 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_general.h"
+
+#include "mod_cache.h"
+#include "cache_hash.h"
+#include "cache_pqueue.h"
+#include "cache_cache.h"
+
+#if APR_HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#if APR_HAVE_STRING_H
+#include <string.h>
+#endif
+
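+/* The cache pairs a hash table (ht) for key lookups with a priority queue
+ * (pq) used to select eviction victims; the remaining members are the
+ * entry-count/size limits, bookkeeping counters, and caller-supplied
+ * callbacks for computing an entry's priority, frequency, size and key,
+ * and for freeing it. */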
+struct cache_cache_t {
+ int max_entries;
+ apr_size_t max_size;
+ apr_size_t current_size;
+ int total_purges;
+ long queue_clock;
+ cache_hash_t *ht;
+ cache_pqueue_t *pq;
+ cache_pqueue_set_priority set_pri;
+ cache_pqueue_get_priority get_pri;
+ cache_cache_inc_frequency *inc_entry;
+ cache_cache_get_size *size_entry;
+ cache_cache_get_key *key_entry;
+ cache_cache_free *free_entry;
+};
+
+CACHE_DECLARE(cache_cache_t *)cache_init(int max_entries,
+ apr_size_t max_size,
+ cache_pqueue_get_priority get_pri,
+ cache_pqueue_set_priority set_pri,
+ cache_pqueue_getpos get_pos,
+ cache_pqueue_setpos set_pos,
+ cache_cache_inc_frequency *inc_entry,
+ cache_cache_get_size *size_entry,
+ cache_cache_get_key* key_entry,
+ cache_cache_free *free_entry)
+{
+ cache_cache_t *tmp;
+ tmp = malloc(sizeof(cache_cache_t));
+ tmp->max_entries = max_entries;
+ tmp->max_size = max_size;
+ tmp->current_size = 0;
+ tmp->total_purges = 0;
+ tmp->queue_clock = 0;
+ tmp->get_pri = get_pri;
+ tmp->set_pri = set_pri;
+ tmp->inc_entry = inc_entry;
+ tmp->size_entry = size_entry;
+ tmp->key_entry = key_entry;
+ tmp->free_entry = free_entry;
+
+ tmp->ht = cache_hash_make(max_entries);
+ tmp->pq = cache_pq_init(max_entries, get_pri, get_pos, set_pos);
+
+ return tmp;
+}
+
+CACHE_DECLARE(void) cache_free(cache_cache_t *c)
+{
+ cache_pq_free(c->pq);
+ cache_hash_free(c->ht);
+ free(c);
+}
+
+
+CACHE_DECLARE(void*) cache_find(cache_cache_t* c, const char *key)
+{
+ void *e;
+
+ e = cache_hash_get(c->ht, key, CACHE_HASH_KEY_STRING);
+ if (!e)
+ return NULL;
+
+ return e;
+}
+
+CACHE_DECLARE(void) cache_update(cache_cache_t* c, void *entry)
+{
+ long old_priority;
+ long new_priority;
+
+ old_priority = c->set_pri(c->queue_clock, entry);
+ c->inc_entry(entry);
+ new_priority = c->set_pri(c->queue_clock, entry);
+ cache_pq_change_priority(c->pq, old_priority, new_priority, entry);
+}
+
+CACHE_DECLARE(void) cache_insert(cache_cache_t* c, void *entry)
+{
+ void *ejected = NULL;
+ long priority;
+
+ c->set_pri(c->queue_clock, entry);
+ /* FIX: check if priority of bottom item is greater than inserted one */
+ while ((cache_pq_size(c->pq) >= c->max_entries) ||
+ ((c->current_size + c->size_entry(entry)) > c->max_size)) {
+
+        ejected = cache_pq_pop(c->pq);
+        if (!ejected) {
+            /* the queue is already empty; nothing left to eject */
+            break;
+        }
+        priority = c->get_pri(ejected);
+
+ if (c->queue_clock > priority)
+ c->queue_clock = priority;
+
+ cache_hash_set(c->ht,
+ c->key_entry(ejected),
+ CACHE_HASH_KEY_STRING,
+ NULL);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, "Cache Purge of %s",c->key_entry(ejected));
+ c->current_size -= c->size_entry(ejected);
+ c->free_entry(ejected);
+ c->total_purges++;
+ }
+ c->current_size += c->size_entry(entry);
+
+ cache_pq_insert(c->pq, entry);
+ cache_hash_set(c->ht, c->key_entry(entry), CACHE_HASH_KEY_STRING, entry);
+}
+
+CACHE_DECLARE(void *) cache_pop(cache_cache_t *c)
+{
+ void *entry;
+
+ if (!c)
+ return NULL;
+
+ entry = cache_pq_pop(c->pq);
+
+ if (!entry)
+ return NULL;
+
+ c->current_size -= c->size_entry(entry);
+ cache_hash_set(c->ht, c->key_entry(entry), CACHE_HASH_KEY_STRING, NULL);
+
+ return entry;
+}
+
+CACHE_DECLARE(apr_status_t) cache_remove(cache_cache_t *c, void *entry)
+{
+ apr_size_t entry_size = c->size_entry(entry);
+ apr_status_t rc;
+ rc = cache_pq_remove(c->pq, entry);
+ if (rc != APR_SUCCESS)
+ return rc;
+
+ cache_hash_set(c->ht, c->key_entry(entry), CACHE_HASH_KEY_STRING, NULL);
+ c->current_size -= entry_size;
+
+ return APR_SUCCESS;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.h b/rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.h
new file mode 100644
index 00000000..67189c5f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.h
@@ -0,0 +1,112 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CACHE_CACHE_H
+#define CACHE_CACHE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mod_cache.h"
+
+/**
+ * @file cache_cache.h
+ * @brief Cache Cache Functions
+ */
+
+/**
+ * @defgroup Cache_cache Cache Functions
+ * @ingroup CACHE
+ * @{
+ */
+/** ADT for the cache */
+typedef struct cache_cache_t cache_cache_t;
+
+/** callback to increment the frequency of an item */
+typedef void cache_cache_inc_frequency(void*a);
+/** callback to get the size of an item */
+typedef apr_size_t cache_cache_get_size(void*a);
+/** callback to get the key of an item */
+typedef const char* cache_cache_get_key(void *a);
+/** callback to free an entry */
+typedef void cache_cache_free(void *a);
+
+/**
+ * initialize the cache ADT
+ * @param max_entries the maximum number of entries in the cache
+ * @param max_size the maximum size of the cache
+ * @param get_pri callback to get the priority of an entry
+ * @param set_pri callback to set the priority of an entry
+ * @param get_pos callback to get the position of an entry in the cache
+ * @param set_pos callback to set the position of an entry in the cache
+ * @param inc_entry callback to increment the frequency of an entry
+ * @param size_entry callback to get the size of an entry
+ * @param key_entry callback to get the key of an entry
+ * @param free_entry callback to free an entry
+ */
+CACHE_DECLARE(cache_cache_t *)cache_init(int max_entries,
+ apr_size_t max_size,
+ cache_pqueue_get_priority get_pri,
+ cache_pqueue_set_priority set_pri,
+ cache_pqueue_getpos get_pos,
+ cache_pqueue_setpos set_pos,
+ cache_cache_inc_frequency *inc_entry,
+ cache_cache_get_size *size_entry,
+ cache_cache_get_key *key_entry,
+ cache_cache_free *free_entry);
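+/*
+ * For illustration only, a sketch of how these callbacks might be wired up
+ * for a hypothetical entry type (my_entry_t and the my_* functions below are
+ * examples, not part of this API):
+ *
+ *     typedef struct {
+ *         const char *key;
+ *         apr_size_t  len;
+ *         long        freq;
+ *         long        pri;
+ *         apr_ssize_t pos;
+ *     } my_entry_t;
+ *
+ *     static long my_get_pri(void *a)        { return ((my_entry_t *)a)->pri; }
+ *     static long my_set_pri(long queue_clock, void *a)
+ *     { my_entry_t *e = a; e->pri = queue_clock + e->freq; return e->pri; }
+ *     static apr_ssize_t my_get_pos(void *a) { return ((my_entry_t *)a)->pos; }
+ *     static void my_set_pos(void *a, apr_ssize_t p) { ((my_entry_t *)a)->pos = p; }
+ *     static void my_inc(void *a)            { ((my_entry_t *)a)->freq++; }
+ *     static apr_size_t my_size(void *a)     { return ((my_entry_t *)a)->len; }
+ *     static const char *my_key(void *a)     { return ((my_entry_t *)a)->key; }
+ *     static void my_free(void *a)           { free(a); }
+ *
+ *     cache_cache_t *c = cache_init(512, 1024 * 1024,
+ *                                   my_get_pri, my_set_pri,
+ *                                   my_get_pos, my_set_pos,
+ *                                   my_inc, my_size, my_key, my_free);
+ */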
+
+/**
+ * free up the cache
+ * @param c the cache
+ */
+CACHE_DECLARE(void) cache_free(cache_cache_t *c);
+/**
+ * find an entry in the cache and return it, or NULL if it is not present
+ * @param c the cache
+ * @param key the key
+ */
+CACHE_DECLARE(void*) cache_find(cache_cache_t* c, const char *key);
+/**
+ * update an entry already in the cache, incrementing its frequency and
+ * re-prioritising it
+ * @param c the cache
+ * @param entry the entry
+ */
+CACHE_DECLARE(void) cache_update(cache_cache_t* c, void *entry);
+/**
+ * insert an entry into the cache
+ * @param c the cache
+ * @param entry the entry
+ */
+CACHE_DECLARE(void) cache_insert(cache_cache_t* c, void *entry);
+/**
+ * pop the lowest priority item off
+ * @param c the cache
+ * @returns the entry or NULL
+ */
+CACHE_DECLARE(void *)cache_pop(cache_cache_t* c);
+/**
+ * remove an item from the cache
+ * @param c the cache
+ * @param entry the actual entry (from a find)
+ */
+CACHE_DECLARE(apr_status_t) cache_remove(cache_cache_t* c, void *entry);
+/** @} */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !CACHE_CACHE_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.c b/rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.c
new file mode 100644
index 00000000..89552a18
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.c
@@ -0,0 +1,290 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_general.h"
+
+#include "mod_cache.h"
+#include "cache_hash.h"
+
+#if APR_HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#if APR_HAVE_STRING_H
+#include <string.h>
+#endif
+
+
+/*
+ * The internal form of a hash table.
+ *
+ * The table is an array indexed by the hash of the key; collisions
+ * are resolved by hanging a linked list of hash entries off each
+ * element of the array. Although this is a really simple design it
+ * isn't too bad given that pools have a low allocation overhead.
+ */
+
+typedef struct cache_hash_entry_t cache_hash_entry_t;
+
+struct cache_hash_entry_t {
+ cache_hash_entry_t *next;
+ unsigned int hash;
+ const void *key;
+ apr_ssize_t klen;
+ const void *val;
+};
+
+/*
+ * Data structure for iterating through a hash table.
+ *
+ * We keep a pointer to the next hash entry here to allow the current
+ * hash entry to be freed or otherwise mangled between calls to
+ * cache_hash_next().
+ */
+struct cache_hash_index_t {
+ cache_hash_t *ht;
+ cache_hash_entry_t *this, *next;
+ int index;
+};
+
+/*
+ * The size of the array is always a power of two. We use the maximum
+ * index rather than the size so that we can use bitwise-AND for
+ * modular arithmetic.
+ * The count of hash entries may be greater depending on the chosen
+ * collision rate.
+ */
+struct cache_hash_t {
+ cache_hash_entry_t **array;
+    cache_hash_index_t iterator;        /* For cache_hash_first(...) */
+ int count, max;
+};
+
+/*
+ * Hash creation functions.
+ */
+static cache_hash_entry_t **alloc_array(cache_hash_t *ht, int max)
+{
+ return calloc(1, sizeof(*ht->array) * (max + 1));
+}
+
+CACHE_DECLARE(cache_hash_t *) cache_hash_make(apr_size_t size)
+{
+ cache_hash_t *ht;
+ ht = malloc(sizeof(cache_hash_t));
+ if (!ht) {
+ return NULL;
+ }
+ ht->count = 0;
+ ht->max = size;
+ ht->array = alloc_array(ht, ht->max);
+ if (!ht->array) {
+ free(ht);
+ return NULL;
+ }
+ return ht;
+}
+
+CACHE_DECLARE(void) cache_hash_free(cache_hash_t *ht)
+{
+ if (ht) {
+ if (ht->array) {
+ free (ht->array);
+ }
+ free (ht);
+ }
+}
+/*
+ * Hash iteration functions.
+ */
+
+CACHE_DECLARE(cache_hash_index_t *) cache_hash_next(cache_hash_index_t *hi)
+{
+ hi->this = hi->next;
+ while (!hi->this) {
+ if (hi->index > hi->ht->max)
+ return NULL;
+ hi->this = hi->ht->array[hi->index++];
+ }
+ hi->next = hi->this->next;
+ return hi;
+}
+
+CACHE_DECLARE(cache_hash_index_t *) cache_hash_first(cache_hash_t *ht)
+{
+ cache_hash_index_t *hi;
+
+ hi = &ht->iterator;
+ hi->ht = ht;
+ hi->index = 0;
+ hi->this = NULL;
+ hi->next = NULL;
+ return cache_hash_next(hi);
+}
+
+CACHE_DECLARE(void) cache_hash_this(cache_hash_index_t *hi,
+ const void **key,
+ apr_ssize_t *klen,
+ void **val)
+{
+ if (key) *key = hi->this->key;
+ if (klen) *klen = hi->this->klen;
+ if (val) *val = (void *)hi->this->val;
+}
+
+
+/*
+ * This is where we keep the details of the hash function and control
+ * the maximum collision rate.
+ *
+ * If val is non-NULL it creates and initializes a new hash entry if
+ * there isn't already one there; it returns an updatable pointer so
+ * that hash entries can be removed.
+ */
+
+static cache_hash_entry_t **find_entry(cache_hash_t *ht,
+ const void *key,
+ apr_ssize_t klen,
+ const void *val)
+{
+ cache_hash_entry_t **hep, *he;
+ const unsigned char *p;
+ unsigned int hash;
+ apr_ssize_t i;
+
+ /*
+ * This is the popular `times 33' hash algorithm which is used by
+ * perl and also appears in Berkeley DB. This is one of the best
+ * known hash functions for strings because it is both computed
+ * very fast and distributes very well.
+ *
+ * The originator may be Dan Bernstein but the code in Berkeley DB
+ * cites Chris Torek as the source. The best citation I have found
+ * is "Chris Torek, Hash function for text in C, Usenet message
+ * <27038@mimsy.umd.edu> in comp.lang.c , October, 1990." in Rich
+ * Salz's USENIX 1992 paper about INN which can be found at
+ * <http://citeseer.nj.nec.com/salz92internetnews.html>.
+ *
+ * The magic of number 33, i.e. why it works better than many other
+ * constants, prime or not, has never been adequately explained by
+ * anyone. So I try an explanation: if one experimentally tests all
+ * multipliers between 1 and 256 (as I did while writing a low-level
+ * data structure library some time ago) one detects that even
+ * numbers are not usable at all. The remaining 128 odd numbers
+ * (except for the number 1) work more or less all equally well.
+ * They all distribute in an acceptable way and this way fill a hash
+ * table with an average percent of approx. 86%.
+ *
+ * If one compares the chi^2 values of the variants (see
+ * Bob Jenkins ``Hashing Frequently Asked Questions'' at
+ * http://burtleburtle.net/bob/hash/hashfaq.html for a description
+ * of chi^2), the number 33 does not even have the best value. But the
+ * number 33 and a few other equally good numbers like 17, 31, 63,
+ * 127 and 129 have nevertheless a great advantage to the remaining
+ * numbers in the large set of possible multipliers: their multiply
+ * operation can be replaced by a faster operation based on just one
+ * shift plus either a single addition or subtraction operation. And
+ * because a hash function has to both distribute well _and_ has to
+ * be very fast to compute, those few numbers should be preferred.
+ *
+ * -- Ralf S. Engelschall <rse@engelschall.com>
+ */
+ hash = 0;
+ if (klen == CACHE_HASH_KEY_STRING) {
+ for (p = key; *p; p++) {
+ hash = hash * 33 + *p;
+ }
+ klen = p - (const unsigned char *)key;
+ }
+ else {
+ for (p = key, i = klen; i; i--, p++) {
+ hash = hash * 33 + *p;
+ }
+ }
+
+ /* scan linked list */
+ for (hep = &ht->array[hash % ht->max], he = *hep;
+ he;
+ hep = &he->next, he = *hep) {
+ if (he->hash == hash &&
+ he->klen == klen &&
+ memcmp(he->key, key, klen) == 0)
+ break;
+ }
+ if (he || !val)
+ return hep;
+ /* add a new entry for non-NULL values */
+ he = malloc(sizeof(*he));
+ if (!he) {
+ return NULL;
+ }
+ he->next = NULL;
+ he->hash = hash;
+ he->key = key;
+ he->klen = klen;
+ he->val = val;
+ *hep = he;
+ ht->count++;
+ return hep;
+}
+
+CACHE_DECLARE(void *) cache_hash_get(cache_hash_t *ht,
+ const void *key,
+ apr_ssize_t klen)
+{
+ cache_hash_entry_t *he;
+ he = *find_entry(ht, key, klen, NULL);
+ if (he)
+ return (void *)he->val;
+ else
+ return NULL;
+}
+
+CACHE_DECLARE(void *) cache_hash_set(cache_hash_t *ht,
+ const void *key,
+ apr_ssize_t klen,
+ const void *val)
+{
+ cache_hash_entry_t **hep, *tmp;
+ const void *tval;
+ hep = find_entry(ht, key, klen, val);
+ /* If hep == NULL, then the malloc() in find_entry failed */
+ if (hep && *hep) {
+ if (!val) {
+ /* delete entry */
+ tval = (*hep)->val;
+ tmp = *hep;
+ *hep = (*hep)->next;
+ free(tmp);
+ --ht->count;
+ }
+ else {
+ /* replace entry */
+ tval = (*hep)->val;
+ (*hep)->val = val;
+ }
+ /* Return the object just removed from the cache to let the
+ * caller clean it up. Cast the constness away upon return.
+ */
+ return (void *) tval;
+ }
+ /* else key not present and val==NULL */
+ return NULL;
+}
+
+CACHE_DECLARE(int) cache_hash_count(cache_hash_t *ht)
+{
+ return ht->count;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.h b/rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.h
new file mode 100644
index 00000000..ee3d8d12
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.h
@@ -0,0 +1,161 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CACHE_HASH_H
+#define CACHE_HASH_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mod_cache.h"
+
+/**
+ * @file cache_hash.h
+ * @brief Cache Hash Tables
+ */
+
+/**
+ * @defgroup Cache_Hash Hash Tables
+ * @ingroup CACHE
+ * @{
+ */
+
+/**
+ * When passing a key to cache_hash_set or cache_hash_get, this value can be
+ * passed to indicate a string-valued key, and have cache_hash compute the
+ * length automatically.
+ *
+ * @remark cache_hash will use strlen(key) for the length. The null-terminator
+ * is not included in the hash value (why throw a constant in?).
+ * Since the hash table merely references the provided key (rather
+ * than copying it), cache_hash_this() will return the null-term'd key.
+ */
+#define CACHE_HASH_KEY_STRING (-1)
+
+/**
+ * Abstract type for hash tables.
+ */
+typedef struct cache_hash_t cache_hash_t;
+
+/**
+ * Abstract type for scanning hash tables.
+ */
+typedef struct cache_hash_index_t cache_hash_index_t;
+
+/**
+ * Create a hash table.
+ * @param size the number of hash slots to create
+ * @return The hash table just created
+ */
+CACHE_DECLARE(cache_hash_t *) cache_hash_make(apr_size_t size);
+
+/**
+ * Free a hash table.
+ * @param *ht Pointer to the hash table to be freed.
+ * @return void
+ * @remark The caller should ensure that all objects have been removed
+ * from the cache prior to calling cache_hash_free(). Objects
+ * not removed from the cache prior to calling cache_hash_free()
+ *        will be inaccessible.
+ */
+CACHE_DECLARE(void) cache_hash_free(cache_hash_t *ht);
+
+
+/**
+ * Associate a value with a key in a hash table.
+ * @param ht The hash table
+ * @param key Pointer to the key
+ * @param klen Length of the key. Can be CACHE_HASH_KEY_STRING to use the string length.
+ * @param val Value to associate with the key
+ * @remark If the value is NULL the hash entry is deleted.
+ * @return The value of the deleted cache entry (so the caller can clean it up).
+ */
+CACHE_DECLARE(void *) cache_hash_set(cache_hash_t *ht, const void *key,
+ apr_ssize_t klen, const void *val);
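+/*
+ * For example, deleting an entry and cleaning it up might look like this
+ * (a sketch only; my_entry_free is a hypothetical cleanup routine):
+ *
+ *     void *old = cache_hash_set(ht, key, CACHE_HASH_KEY_STRING, NULL);
+ *     if (old) {
+ *         my_entry_free(old);
+ *     }
+ */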
+
+/**
+ * Look up the value associated with a key in a hash table.
+ * @param ht The hash table
+ * @param key Pointer to the key
+ * @param klen Length of the key. Can be CACHE_HASH_KEY_STRING to use the string length.
+ * @return Returns NULL if the key is not present.
+ */
+CACHE_DECLARE(void *) cache_hash_get(cache_hash_t *ht, const void *key,
+ apr_ssize_t klen);
+
+/**
+ * Start iterating over the entries in a hash table.
+ * @param ht The hash table
+ * @example
+ */
+/**
+ * <PRE>
+ *
+ * int sum_values(cache_hash_t *ht)
+ * {
+ * cache_hash_index_t *hi;
+ * void *val;
+ * int sum = 0;
+ * for (hi = cache_hash_first(ht); hi; hi = cache_hash_next(hi)) {
+ * cache_hash_this(hi, NULL, NULL, &val);
+ * sum += *(int *)val;
+ * }
+ * return sum;
+ * }
+ *
+ * There is no restriction on adding or deleting hash entries during an
+ * iteration (although the results may be unpredictable unless all you do
+ * is delete the current entry) and multiple iterations can be in
+ * progress at the same time.
+ * </PRE>
+ */
+CACHE_DECLARE(cache_hash_index_t *) cache_hash_first(cache_hash_t *ht);
+
+/**
+ * Continue iterating over the entries in a hash table.
+ * @param hi The iteration state
+ * @return a pointer to the updated iteration state. NULL if there are no more
+ * entries.
+ */
+CACHE_DECLARE(cache_hash_index_t *) cache_hash_next(cache_hash_index_t *hi);
+
+/**
+ * Get the current entry's details from the iteration state.
+ * @param hi The iteration state
+ * @param key Return pointer for the pointer to the key.
+ * @param klen Return pointer for the key length.
+ * @param val Return pointer for the associated value.
+ * @remark The return pointers should point to a variable that will be set to the
+ * corresponding data, or they may be NULL if the data isn't interesting.
+ */
+CACHE_DECLARE(void) cache_hash_this(cache_hash_index_t *hi, const void **key,
+ apr_ssize_t *klen, void **val);
+
+/**
+ * Get the number of key/value pairs in the hash table.
+ * @param ht The hash table
+ * @return The number of key/value pairs in the hash table.
+ */
+CACHE_DECLARE(int) cache_hash_count(cache_hash_t *ht);
+
+
+/** @} */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !CACHE_HASH_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.c b/rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.c
new file mode 100644
index 00000000..580b47e7
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.c
@@ -0,0 +1,290 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_general.h"
+
+#if APR_HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#if APR_HAVE_STDIO_H
+#include <stdio.h>
+#endif
+
+#if APR_HAVE_STRING_H
+#include <string.h>
+#endif
+
+#include "cache_pqueue.h"
+#define left(i) (2*(i))
+#define right(i) ((2*(i))+1)
+#define parent(i) ((i)/2)
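+/*
+ * The queue is a binary heap kept in a 1-based array, so, for example, the
+ * node stored at index 3 has its children at indexes 6 and 7 and its parent
+ * at index 1:
+ *
+ *     left(3)   == 6
+ *     right(3)  == 7
+ *     parent(3) == 1
+ */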
+/*
+ * Priority queue structure
+ */
+struct cache_pqueue_t
+{
+ apr_ssize_t size;
+ apr_ssize_t avail;
+ apr_ssize_t step;
+ cache_pqueue_get_priority pri;
+ cache_pqueue_getpos get;
+ cache_pqueue_setpos set;
+ void **d;
+};
+
+cache_pqueue_t *cache_pq_init(apr_ssize_t n,
+ cache_pqueue_get_priority pri,
+ cache_pqueue_getpos get,
+ cache_pqueue_setpos set)
+{
+ cache_pqueue_t *q;
+
+ if (!(q = malloc(sizeof(cache_pqueue_t)))) {
+ return NULL;
+ }
+
+ /* Need to allocate n+1 elements since element 0 isn't used. */
+ if (!(q->d = malloc(sizeof(void*) * (n+1)))) {
+ free(q);
+ return NULL;
+ }
+ q->avail = q->step = (n+1); /* see comment above about n+1 */
+ q->pri = pri;
+ q->size = 1;
+ q->get = get;
+ q->set = set;
+ return q;
+}
+/*
+ * cleanup
+ */
+void cache_pq_free(cache_pqueue_t *q)
+{
+ free(q->d);
+ free(q);
+}
+/*
+ * pqsize: size of the queue.
+ */
+apr_ssize_t cache_pq_size(cache_pqueue_t *q)
+{
+ /* queue element 0 exists but doesn't count since it isn't used. */
+ return (q->size - 1);
+}
+
+static void cache_pq_bubble_up(cache_pqueue_t *q, apr_ssize_t i)
+{
+ apr_ssize_t parent_node;
+ void *moving_node = q->d[i];
+ long moving_pri = q->pri(moving_node);
+
+ for (parent_node = parent(i);
+ ((i > 1) && (q->pri(q->d[parent_node]) < moving_pri));
+ i = parent_node, parent_node = parent(i))
+ {
+ q->d[i] = q->d[parent_node];
+ q->set(q->d[i], i);
+ }
+
+ q->d[i] = moving_node;
+ q->set(moving_node, i);
+}
+
+static apr_ssize_t maxchild(cache_pqueue_t *q, apr_ssize_t i)
+{
+ apr_ssize_t child_node = left(i);
+
+ if (child_node >= q->size)
+ return 0;
+
+ if ((child_node+1 < q->size) &&
+ (q->pri(q->d[child_node+1]) > q->pri(q->d[child_node])))
+ {
+ child_node++; /* use right child instead of left */
+ }
+
+ return child_node;
+}
+
+static void cache_pq_percolate_down(cache_pqueue_t *q, apr_ssize_t i)
+{
+ apr_ssize_t child_node;
+ void *moving_node = q->d[i];
+ long moving_pri = q->pri(moving_node);
+
+ while ((child_node = maxchild(q, i)) &&
+ (moving_pri < q->pri(q->d[child_node])))
+ {
+ q->d[i] = q->d[child_node];
+ q->set(q->d[i], i);
+ i = child_node;
+ }
+
+ q->d[i] = moving_node;
+ q->set(moving_node, i);
+}
+
+apr_status_t cache_pq_insert(cache_pqueue_t *q, void *d)
+{
+ void *tmp;
+ apr_ssize_t i;
+ apr_ssize_t newsize;
+
+ if (!q) return APR_EGENERAL;
+
+ /* allocate more memory if necessary */
+ if (q->size >= q->avail) {
+ newsize = q->size + q->step;
+ if (!(tmp = realloc(q->d, sizeof(void*) * newsize))) {
+ return APR_EGENERAL;
+        }
+ q->d = tmp;
+ q->avail = newsize;
+ }
+
+ /* insert item */
+ i = q->size++;
+ q->d[i] = d;
+ cache_pq_bubble_up(q, i);
+ return APR_SUCCESS;
+}
+
+/*
+ * move an existing entry to a new priority
+ */
+void cache_pq_change_priority(cache_pqueue_t *q,
+ long old_priority,
+ long new_priority,
+ void *d)
+{
+ apr_ssize_t posn;
+
+ posn = q->get(d);
+ if (new_priority > old_priority)
+ cache_pq_bubble_up(q, posn);
+ else
+ cache_pq_percolate_down(q, posn);
+}
+
+apr_status_t cache_pq_remove(cache_pqueue_t *q, void *d)
+{
+ apr_ssize_t posn = q->get(d);
+ q->d[posn] = q->d[--q->size];
+ if (q->pri(q->d[posn]) > q->pri(d))
+ cache_pq_bubble_up(q, posn);
+ else
+ cache_pq_percolate_down(q, posn);
+
+ return APR_SUCCESS;
+}
+
+void *cache_pq_pop(cache_pqueue_t *q)
+{
+ void *head;
+
+ if (!q || q->size == 1)
+ return NULL;
+
+ head = q->d[1];
+ q->d[1] = q->d[--q->size];
+ cache_pq_percolate_down(q, 1);
+
+ return head;
+}
+
+void *cache_pq_peek(cache_pqueue_t *q)
+{
+ void *d;
+ if (!q || q->size == 1)
+ return NULL;
+ d = q->d[1];
+ return d;
+}
+
+static void cache_pq_set_null( void*d, apr_ssize_t val)
+{
+ /* do nothing */
+}
+
+/*
+ * this is a debug function.. so it's EASY not fast
+ */
+void cache_pq_dump(cache_pqueue_t *q,
+ FILE*out,
+ cache_pqueue_print_entry print)
+{
+ int i;
+
+    fprintf(out, "posn\tleft\tright\tparent\tmaxchild\t...\n");
+ for (i = 1; i < q->size ;i++) {
+        fprintf(out,
+ "%d\t%d\t%d\t%d\t%" APR_SSIZE_T_FMT "\t",
+ i,
+ left(i), right(i), parent(i),
+ maxchild(q, i));
+ print(out, q->d[i]);
+ }
+}
+
+/*
+ * this is a debug function.. so it's EASY not fast
+ */
+void cache_pq_print(cache_pqueue_t *q,
+ FILE*out,
+ cache_pqueue_print_entry print)
+{
+ cache_pqueue_t *dup;
+ dup = cache_pq_init(q->size, q->pri, q->get, cache_pq_set_null);
+ dup->size = q->size;
+ dup->avail = q->avail;
+ dup->step = q->step;
+
+ memcpy(dup->d, q->d, q->size*sizeof(void*));
+
+ while (cache_pq_size(dup) > 1) {
+ void *e = NULL;
+ e = cache_pq_pop(dup);
+ if (e)
+ print(out, e);
+ else
+ break;
+ }
+ cache_pq_free(dup);
+}
+
+static int cache_pq_subtree_is_valid(cache_pqueue_t *q, int pos)
+{
+ if (left(pos) < q->size) {
+ /* has a left child */
+ if (q->pri(q->d[pos]) < q->pri(q->d[left(pos)]))
+ return 0;
+ if (!cache_pq_subtree_is_valid(q, left(pos)))
+ return 0;
+ }
+ if (right(pos) < q->size) {
+ /* has a right child */
+ if (q->pri(q->d[pos]) < q->pri(q->d[right(pos)]))
+ return 0;
+ if (!cache_pq_subtree_is_valid(q, right(pos)))
+ return 0;
+ }
+ return 1;
+}
+
+int cache_pq_is_valid(cache_pqueue_t *q)
+{
+ return cache_pq_subtree_is_valid(q, 1);
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.h b/rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.h
new file mode 100644
index 00000000..19709764
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.h
@@ -0,0 +1,160 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CACHE_PQUEUE_H
+#define CACHE_PQUEUE_H
+
+#include <apr.h>
+#include <apr_errno.h>
+
+#if APR_HAVE_STDIO_H
+#include <stdio.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** the cache priority queue handle */
+typedef struct cache_pqueue_t cache_pqueue_t;
+
+/**
+ * callback function to assign a priority to an element
+ * @param a the element
+ * @return the score (the lower the score, the longer it is kept in the queue)
+ */
+typedef long (*cache_pqueue_set_priority)(long queue_clock, void *a);
+typedef long (*cache_pqueue_get_priority)(void *a);
+
+/** callback function to get the position of an element */
+typedef apr_ssize_t (*cache_pqueue_getpos)(void *a);
+
+/**
+ * callback function to set the position of an element
+ * @param a the element
+ * @param pos the position to set it to
+ */
+typedef void (*cache_pqueue_setpos)(void *a, apr_ssize_t pos);
+
+/** debug callback function to print an entry */
+typedef void (*cache_pqueue_print_entry)(FILE *out, void *a);
+
+/**
+ * initialize the queue
+ *
+ * @param n the initial estimate of the number of queue items for which memory
+ * should be preallocated
+ * @param pri the callback function to run to assign a score to an element
+ * @param get the callback function to get the current element's position
+ * @param set the callback function to set the current element's position
+ *
+ * @return the handle or NULL for insufficient memory
+ */
+cache_pqueue_t *cache_pq_init(apr_ssize_t n,
+ cache_pqueue_get_priority pri,
+ cache_pqueue_getpos get,
+ cache_pqueue_setpos set);
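+/*
+ * For illustration only (a sketch; my_get_pri, my_get_pos, my_set_pos,
+ * entry and handle() are hypothetical):
+ *
+ *     cache_pqueue_t *q = cache_pq_init(64, my_get_pri, my_get_pos, my_set_pos);
+ *     if (q) {
+ *         void *e;
+ *         cache_pq_insert(q, entry);
+ *         while ((e = cache_pq_pop(q)) != NULL) {
+ *             handle(e);          (entries come out highest priority first)
+ *         }
+ *         cache_pq_free(q);
+ *     }
+ */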
+/**
+ * free all memory used by the queue
+ * @param q the queue
+ */
+void cache_pq_free(cache_pqueue_t *q);
+/**
+ * return the size of the queue.
+ * @param q the queue
+ */
+apr_ssize_t cache_pq_size(cache_pqueue_t *q);
+
+/**
+ * insert an item into the queue.
+ * @param q the queue
+ * @param d the item
+ * @return APR_SUCCESS on success
+ */
+apr_status_t cache_pq_insert(cache_pqueue_t *q, void *d);
+
+/**
+ * move an existing entry to a different priority
+ * @param q the queue
+ * @param old_priority the old priority
+ * @param new_priority the new priority
+ * @param d the entry
+ */
+void cache_pq_change_priority(cache_pqueue_t *q,
+ long old_priority,
+ long new_priority,
+ void *d);
+
+/**
+ * pop the highest-ranking item from the queue.
+ * @param q the queue
+ * @return NULL if the queue is empty or q is NULL, otherwise the entry
+ */
+void *cache_pq_pop(cache_pqueue_t *q);
+
+/**
+ * remove an item from the queue.
+ * @param q the queue
+ * @param d the entry
+ * @return APR_SUCCESS on success
+ */
+apr_status_t cache_pq_remove(cache_pqueue_t *q, void *d);
+
+/**
+ * access highest-ranking item without removing it.
+ * @param q the queue
+ * @return NULL if the queue is empty or q is NULL, otherwise the entry
+ */
+void *cache_pq_peek(cache_pqueue_t *q);
+
+/**
+ * print the queue
+ * @internal
+ * DEBUG function only
+ * @param q the queue
+ * @param out the output handle
+ * @param print the callback function to print an entry
+ */
+void cache_pq_print(cache_pqueue_t *q,
+ FILE *out,
+ cache_pqueue_print_entry print);
+
+/**
+ * dump the queue and its internal structure
+ * @internal
+ * debug function only
+ * @param q the queue
+ * @param out the output handle
+ * @param print the callback function to print an entry
+ */
+void cache_pq_dump(cache_pqueue_t *q,
+ FILE *out,
+ cache_pqueue_print_entry print);
+
+/**
+ * checks that the pq is in the right order, etc
+ * @internal
+ * debug function only
+ * @param q the queue
+ */
+int cache_pq_is_valid(cache_pqueue_t *q);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !CACHE_PQUEUE_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_storage.c b/rubbos/app/httpd-2.0.64/modules/experimental/cache_storage.c
new file mode 100644
index 00000000..88f3d5dd
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_storage.c
@@ -0,0 +1,311 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define CORE_PRIVATE
+
+#include "mod_cache.h"
+
+extern APR_OPTIONAL_FN_TYPE(ap_cache_generate_key) *cache_generate_key;
+
+extern module AP_MODULE_DECLARE_DATA cache_module;
+
+/* -------------------------------------------------------------- */
+
+/*
+ * delete all URL entities from the cache
+ *
+ */
+int cache_remove_url(request_rec *r, char *url)
+{
+ cache_provider_list *list;
+ apr_status_t rv;
+ char *key;
+ cache_request_rec *cache = (cache_request_rec *)
+ ap_get_module_config(r->request_config, &cache_module);
+
+ rv = cache_generate_key(r,r->pool,&key);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ list = cache->providers;
+
+ /* for each specified cache type, delete the URL */
+ while(list) {
+ list->provider->remove_url(key);
+ list = list->next;
+ }
+ return OK;
+}
+
+
+/*
+ * create a new URL entity in the cache
+ *
+ * It is possible to store more than one entity per URL. This
+ * function will always create a new entity, regardless of whether
+ * other entities already exist for the same URL.
+ *
+ * The size of the entity is provided so that a cache module can
+ * decide whether or not it wants to cache this particular entity.
+ * If the size is unknown, a size of -1 should be set.
+ */
+int cache_create_entity(request_rec *r, char *url, apr_off_t size)
+{
+ cache_provider_list *list;
+ cache_handle_t *h = apr_pcalloc(r->pool, sizeof(cache_handle_t));
+ char *key;
+ apr_status_t rv;
+ cache_request_rec *cache = (cache_request_rec *)
+ ap_get_module_config(r->request_config, &cache_module);
+
+ rv = cache_generate_key(r,r->pool,&key);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ list = cache->providers;
+    /* for each specified cache type, try to create the entity */
+ while (list) {
+ switch (rv = list->provider->create_entity(h, r, key, size)) {
+ case OK: {
+ cache->handle = h;
+ cache->provider = list->provider;
+ cache->provider_name = list->provider_name;
+ return OK;
+ }
+ case DECLINED: {
+ list = list->next;
+ continue;
+ }
+ default: {
+ return rv;
+ }
+ }
+ }
+ return DECLINED;
+}
+
+static int set_cookie_doo_doo(void *v, const char *key, const char *val)
+{
+ apr_table_addn(v, key, val);
+ return 1;
+}
+
+static void accept_headers(cache_handle_t *h, request_rec *r)
+{
+ apr_table_t *cookie_table;
+ const char *v;
+
+ v = apr_table_get(h->resp_hdrs, "Content-Type");
+ if (v) {
+ ap_set_content_type(r, v);
+ apr_table_unset(h->resp_hdrs, "Content-Type");
+ }
+
+ /* If the cache gave us a Last-Modified header, we can't just
+ * pass it on blindly because of restrictions on future values.
+ */
+ v = apr_table_get(h->resp_hdrs, "Last-Modified");
+ if (v) {
+ ap_update_mtime(r, apr_date_parse_http(v));
+ ap_set_last_modified(r);
+ apr_table_unset(h->resp_hdrs, "Last-Modified");
+ }
+
+ /* The HTTP specification says that it is legal to merge duplicate
+ * headers into one. Some browsers that support Cookies don't like
+ * merged headers and prefer that each Set-Cookie header is sent
+     * separately. Let's humour those browsers by not merging.
+ * Oh what a pain it is.
+ */
+ cookie_table = apr_table_make(r->pool, 2);
+ apr_table_do(set_cookie_doo_doo, cookie_table, r->err_headers_out,
+ "Set-Cookie", NULL);
+ apr_table_do(set_cookie_doo_doo, cookie_table, h->resp_hdrs,
+ "Set-Cookie", NULL);
+ apr_table_unset(r->err_headers_out, "Set-Cookie");
+ apr_table_unset(h->resp_hdrs, "Set-Cookie");
+
+ apr_table_overlap(r->headers_out, h->resp_hdrs,
+ APR_OVERLAP_TABLES_SET);
+ apr_table_overlap(r->err_headers_out, h->resp_err_hdrs,
+ APR_OVERLAP_TABLES_SET);
+ if (!apr_is_empty_table(cookie_table)) {
+ r->err_headers_out = apr_table_overlay(r->pool, r->err_headers_out,
+ cookie_table);
+ }
+}
+
+/*
+ * select a specific URL entity in the cache
+ *
+ * It is possible to store more than one entity per URL. Content
+ * negotiation is used to select an entity. Once an entity is
+ * selected, details of it are stored in the per request
+ * config to save time when serving the request later.
+ *
+ * This function returns OK if successful, DECLINED if no
+ * cached entity fits the bill.
+ */
+int cache_select_url(request_rec *r, char *url)
+{
+ cache_provider_list *list;
+ apr_status_t rv;
+ cache_handle_t *h;
+ char *key;
+ cache_request_rec *cache = (cache_request_rec *)
+ ap_get_module_config(r->request_config, &cache_module);
+
+ rv = cache_generate_key(r, r->pool, &key);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ /* go through the cache types till we get a match */
+ h = apr_palloc(r->pool, sizeof(cache_handle_t));
+
+ list = cache->providers;
+
+ while (list) {
+ switch ((rv = list->provider->open_entity(h, r, key))) {
+ case OK: {
+ char *vary = NULL;
+ const char *varyhdr = NULL;
+ int fresh;
+
+ if (list->provider->recall_headers(h, r) != APR_SUCCESS) {
+ /* TODO: Handle this error */
+ return DECLINED;
+ }
+
+ /*
+ * Check Content-Negotiation - Vary
+ *
+ * At this point we need to make sure that the object we found in
+ * the cache is the same object that would be delivered to the
+ * client, when the effects of content negotiation are taken into
+             * account.
+ *
+             * In plain English, we want to make sure that a language-negotiated
+ * document in one language is not given to a client asking for a
+ * language negotiated document in a different language by mistake.
+ *
+ * This code makes the assumption that the storage manager will
+ * cache the req_hdrs if the response contains a Vary
+ * header.
+ *
+ * RFC2616 13.6 and 14.44 describe the Vary mechanism.
+ */
+ if ((varyhdr = apr_table_get(h->resp_err_hdrs, "Vary")) == NULL) {
+ varyhdr = apr_table_get(h->resp_hdrs, "Vary");
+ }
+ vary = apr_pstrdup(r->pool, varyhdr);
+ while (vary && *vary) {
+ char *name = vary;
+ const char *h1, *h2;
+
+ /* isolate header name */
+ while (*vary && !apr_isspace(*vary) && (*vary != ','))
+ ++vary;
+ while (*vary && (apr_isspace(*vary) || (*vary == ','))) {
+ *vary = '\0';
+ ++vary;
+ }
+
+ /*
+ * is this header in the request and the header in the cached
+ * request identical? If not, we give up and do a straight get
+ */
+ h1 = apr_table_get(r->headers_in, name);
+ h2 = apr_table_get(h->req_hdrs, name);
+ if (h1 == h2) {
+ /* both headers NULL, so a match - do nothing */
+ }
+ else if (h1 && h2 && !strcmp(h1, h2)) {
+ /* both headers exist and are equal - do nothing */
+ }
+ else {
+ /* headers do not match, so Vary failed */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
+ r->server,
+ "cache_select_url(): Vary header mismatch.");
+ return DECLINED;
+ }
+ }
+
+ cache->provider = list->provider;
+ cache->provider_name = list->provider_name;
+
+ /* Is our cached response fresh enough? */
+ fresh = ap_cache_check_freshness(h, r);
+ if (!fresh) {
+ cache_info *info = &(h->cache_obj->info);
+
+ /* Make response into a conditional */
+ /* FIXME: What if the request is already conditional? */
+ if (info && info->etag) {
+ /* if we have a cached etag */
+ cache->stale_headers = apr_table_copy(r->pool,
+ r->headers_in);
+ apr_table_set(r->headers_in, "If-None-Match", info->etag);
+ cache->stale_handle = h;
+ }
+ else if (info && info->lastmods) {
+ /* if we have a cached Last-Modified header */
+ cache->stale_headers = apr_table_copy(r->pool,
+ r->headers_in);
+ apr_table_set(r->headers_in, "If-Modified-Since",
+ info->lastmods);
+ cache->stale_handle = h;
+ }
+
+ return DECLINED;
+ }
+
+ /* Okay, this response looks okay. Merge in our stuff and go. */
+ apr_table_setn(r->headers_out, "Content-Type",
+ ap_make_content_type(r, h->content_type));
+ r->filename = apr_pstrdup(r->pool, h->cache_obj->info.filename);
+ accept_headers(h, r);
+
+ cache->handle = h;
+ return OK;
+ }
+ case DECLINED: {
+ /* try again with next cache type */
+ list = list->next;
+ continue;
+ }
+ default: {
+ /* oo-er! an error */
+ return rv;
+ }
+ }
+ }
+ return DECLINED;
+}
+
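+/*
+ * The default key generator concatenates the hostname (when present), the
+ * URI and the query string.  For example, a request for
+ * http://www.example.com/index.html?page=2 would be keyed as:
+ *
+ *     www.example.com/index.html?page=2
+ */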
+apr_status_t cache_generate_key_default( request_rec *r, apr_pool_t*p, char**key )
+{
+ if (r->hostname) {
+ *key = apr_pstrcat(p, r->hostname, r->uri, "?", r->args, NULL);
+ }
+ else {
+ *key = apr_pstrcat(p, r->uri, "?", r->args, NULL);
+ }
+ return APR_SUCCESS;
+}
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_util.c b/rubbos/app/httpd-2.0.64/modules/experimental/cache_util.c
new file mode 100644
index 00000000..9782cb7b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_util.c
@@ -0,0 +1,575 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define CORE_PRIVATE
+
+#include "mod_cache.h"
+
+#include <ap_provider.h>
+
+/* -------------------------------------------------------------- */
+
+extern module AP_MODULE_DECLARE_DATA cache_module;
+
+/* return true if the request is conditional */
+CACHE_DECLARE(int) ap_cache_request_is_conditional(apr_table_t *table)
+{
+ if (apr_table_get(table, "If-Match") ||
+ apr_table_get(table, "If-None-Match") ||
+ apr_table_get(table, "If-Modified-Since") ||
+ apr_table_get(table, "If-Unmodified-Since")) {
+ return 1;
+ }
+ return 0;
+}
+
+CACHE_DECLARE(cache_provider_list *)ap_cache_get_providers(request_rec *r,
+ cache_server_conf *conf,
+ const char *url)
+{
+ cache_provider_list *providers = NULL;
+ int i;
+
+ /* we can't cache if there's no URL */
+ /* Is this case even possible?? */
+ if (!url) return NULL;
+
+ /* loop through all the cacheenable entries */
+ for (i = 0; i < conf->cacheenable->nelts; i++) {
+ struct cache_enable *ent =
+ (struct cache_enable *)conf->cacheenable->elts;
+ if ((ent[i].url) && !strncasecmp(url, ent[i].url, ent[i].urllen)) {
+ /* Fetch from global config and add to the list. */
+ cache_provider *provider;
+ provider = ap_lookup_provider(CACHE_PROVIDER_GROUP, ent[i].type,
+ "0");
+ if (!provider) {
+ /* Log an error! */
+ }
+ else {
+ cache_provider_list *newp;
+ newp = apr_pcalloc(r->pool, sizeof(cache_provider_list));
+ newp->provider_name = ent[i].type;
+ newp->provider = provider;
+
+ if (!providers) {
+ providers = newp;
+ }
+ else {
+ cache_provider_list *last = providers;
+
+ while (last->next) {
+ last = last->next;
+ }
+ last->next = newp;
+ }
+ }
+ }
+ }
+
+    /* then loop through all the cachedisable entries,
+     * looking for urls that contain the full cachedisable url and possibly
+     * more.
+     * This means we are disabling the cachedisable url and everything below it.
+ */
+ for (i = 0; i < conf->cachedisable->nelts; i++) {
+ struct cache_disable *ent =
+ (struct cache_disable *)conf->cachedisable->elts;
+ if ((ent[i].url) && !strncasecmp(url, ent[i].url, ent[i].urllen)) {
+ /* Stop searching now. */
+ return NULL;
+ }
+ }
+
+ return providers;
+}
+
+
+/* do a HTTP/1.1 age calculation */
+CACHE_DECLARE(apr_int64_t) ap_cache_current_age(cache_info *info,
+ const apr_time_t age_value,
+ apr_time_t now)
+{
+ apr_time_t apparent_age, corrected_received_age, response_delay,
+ corrected_initial_age, resident_time, current_age,
+ age_value_usec;
+
+ age_value_usec = apr_time_from_sec(age_value);
+
+ /* Perform an HTTP/1.1 age calculation. (RFC2616 13.2.3) */
+
+ apparent_age = MAX(0, info->response_time - info->date);
+ corrected_received_age = MAX(apparent_age, age_value_usec);
+ response_delay = info->response_time - info->request_time;
+ corrected_initial_age = corrected_received_age + response_delay;
+ resident_time = now - info->response_time;
+ current_age = corrected_initial_age + resident_time;
+
+ return apr_time_sec(current_age);
+}
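+
+/*
+ * As a worked example (hypothetical numbers): suppose the cached response
+ * carried "Age: 10", its Date header said t=100s, the request that fetched
+ * it went out at t=100s, the response arrived at t=103s, and it is now
+ * t=163s.  Then:
+ *
+ *     apparent_age            = max(0, 103 - 100)  =  3
+ *     corrected_received_age  = max(3, 10)         = 10
+ *     response_delay          = 103 - 100          =  3
+ *     corrected_initial_age   = 10 + 3             = 13
+ *     resident_time           = 163 - 103          = 60
+ *     current_age             = 13 + 60            = 73 seconds
+ */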
+
+CACHE_DECLARE(int) ap_cache_check_freshness(cache_handle_t *h,
+ request_rec *r)
+{
+ apr_int64_t age, maxage_req, maxage_cresp, maxage, smaxage, maxstale;
+ apr_int64_t minfresh;
+ int age_in_errhdr = 0;
+ const char *cc_cresp, *cc_ceresp, *cc_req;
+ const char *agestr = NULL;
+ const char *expstr = NULL;
+ char *val;
+ apr_time_t age_c = 0;
+ cache_info *info = &(h->cache_obj->info);
+
+ /*
+ * We now want to check if our cached data is still fresh. This depends
+ * on a few things, in this order:
+ *
+ * - RFC2616 14.9.4 End to end reload, Cache-Control: no-cache. no-cache in
+ * either the request or the cached response means that we must
+ * revalidate the request unconditionally, overriding any expiration
+ * mechanism. It's equivalent to max-age=0,must-revalidate.
+ *
+ * - RFC2616 14.32 Pragma: no-cache This is treated the same as
+ * Cache-Control: no-cache.
+ *
+ * - RFC2616 14.9.3 Cache-Control: max-stale, must-revalidate,
+ * proxy-revalidate if the max-stale request header exists, modify the
+ * stale calculations below so that an object can be at most <max-stale>
+ * seconds stale before we request a revalidation, _UNLESS_ a
+ * must-revalidate or proxy-revalidate cached response header exists to
+ * stop us doing this.
+ *
+ * - RFC2616 14.9.3 Cache-Control: s-maxage the origin server specifies the
+ * maximum age an object can be before it is considered stale. This
+     *   directive has the effect of proxy-revalidate/must-revalidate, which
+     *   in turn means we simply ignore any max-stale setting.
+ *
+ * - RFC2616 14.9.4 Cache-Control: max-age this header can appear in both
+ * requests and responses. If both are specified, the smaller of the two
+ * takes priority.
+ *
+     * - RFC2616 14.21 Expires: if this header exists in the cached
+     *   entity, and its value is in the past, the entity has expired.
+ *
+ */
+ cc_cresp = apr_table_get(h->resp_hdrs, "Cache-Control");
+ cc_ceresp = apr_table_get(h->resp_err_hdrs, "Cache-Control");
+ cc_req = apr_table_get(h->req_hdrs, "Cache-Control");
+
+ if ((agestr = apr_table_get(h->resp_hdrs, "Age"))) {
+ age_c = apr_atoi64(agestr);
+ }
+ else if ((agestr = apr_table_get(h->resp_err_hdrs, "Age"))) {
+ age_c = apr_atoi64(agestr);
+ age_in_errhdr = 1;
+ }
+
+ if (!(expstr = apr_table_get(h->resp_err_hdrs, "Expires"))) {
+ expstr = apr_table_get(h->resp_hdrs, "Expires");
+ }
+
+ /* calculate age of object */
+ age = ap_cache_current_age(info, age_c, r->request_time);
+
+ /* extract s-maxage */
+ if (cc_cresp && ap_cache_liststr(r->pool, cc_cresp, "s-maxage", &val)
+ && val != NULL) {
+ smaxage = apr_atoi64(val);
+ }
+ else if (cc_ceresp && ap_cache_liststr(r->pool, cc_ceresp, "s-maxage", &val)) {
+ smaxage = apr_atoi64(val);
+ }
+ else {
+ smaxage = -1;
+ }
+
+ /* extract max-age from request */
+ if (cc_req && ap_cache_liststr(r->pool, cc_req, "max-age", &val)
+ && val != NULL) {
+ maxage_req = apr_atoi64(val);
+ }
+ else {
+ maxage_req = -1;
+ }
+
+ /* extract max-age from response */
+ if (cc_cresp && ap_cache_liststr(r->pool, cc_cresp, "max-age", &val)
+ && val != NULL) {
+ maxage_cresp = apr_atoi64(val);
+ }
+ else if (cc_ceresp && ap_cache_liststr(r->pool, cc_ceresp, "max-age", &val)) {
+ maxage_cresp = apr_atoi64(val);
+ }
+ else
+ {
+ maxage_cresp = -1;
+ }
+
+ /*
+ * if both maxage request and response, the smaller one takes priority
+ */
+ if (-1 == maxage_req) {
+ maxage = maxage_cresp;
+ }
+ else if (-1 == maxage_cresp) {
+ maxage = maxage_req;
+ }
+ else {
+ maxage = MIN(maxage_req, maxage_cresp);
+ }
+
+ /* extract max-stale */
+ if (cc_req && ap_cache_liststr(r->pool, cc_req, "max-stale", &val)) {
+ if(val != NULL) {
+ maxstale = apr_atoi64(val);
+ }
+ else {
+ /*
+ * If no value is assigned to max-stale, then the client is willing
+ * to accept a stale response of any age (RFC2616 14.9.3). We will
+ * set it to one year in this case as this situation is somewhat
+ * similar to a "never expires" Expires header (RFC2616 14.21)
+ * which is set to a date one year from the time the response is
+ * sent in this case.
+ */
+ maxstale = APR_INT64_C(86400*365);
+ }
+ }
+ else {
+ maxstale = 0;
+ }
+
+ /* extract min-fresh */
+ if (cc_req && ap_cache_liststr(r->pool, cc_req, "min-fresh", &val)
+ && val != NULL) {
+ minfresh = apr_atoi64(val);
+ }
+ else {
+ minfresh = 0;
+ }
+
+ /* override maxstale if must-revalidate or proxy-revalidate */
+ if (maxstale && ((cc_cresp &&
+ ap_cache_liststr(NULL, cc_cresp,
+ "must-revalidate", NULL)) ||
+ (cc_cresp &&
+ ap_cache_liststr(NULL, cc_cresp,
+ "proxy-revalidate", NULL)) ||
+ (cc_ceresp &&
+ ap_cache_liststr(NULL, cc_ceresp,
+ "must-revalidate", NULL)) ||
+ (cc_ceresp &&
+ ap_cache_liststr(NULL, cc_ceresp,
+ "proxy-revalidate", NULL)))) {
+ maxstale = 0;
+ }
+
+ /* handle expiration */
+ if (((smaxage != -1) && (age < (smaxage - minfresh))) ||
+ ((maxage != -1) && (age < (maxage + maxstale - minfresh))) ||
+ ((smaxage == -1) && (maxage == -1) &&
+ (info->expire != APR_DATE_BAD) &&
+ (age < (apr_time_sec(info->expire - info->date) + maxstale - minfresh)))) {
+ const char *warn_head;
+ apr_table_t *head_ptr;
+
+ warn_head = apr_table_get(h->resp_hdrs, "Warning");
+ if (warn_head != NULL) {
+ head_ptr = h->resp_hdrs;
+ }
+ else {
+ warn_head = apr_table_get(h->resp_err_hdrs, "Warning");
+ head_ptr = h->resp_err_hdrs;
+ }
+
+ /* it's fresh darlings... */
+ /* set age header on response */
+ if (age_in_errhdr) {
+ apr_table_set(h->resp_err_hdrs, "Age",
+ apr_psprintf(r->pool, "%lu", (unsigned long)age));
+ }
+ else {
+ apr_table_set(h->resp_hdrs, "Age",
+ apr_psprintf(r->pool, "%lu", (unsigned long)age));
+ }
+
+ /* add warning if maxstale overrode freshness calculation */
+ if (!(((smaxage != -1) && age < smaxage) ||
+ ((maxage != -1) && age < maxage) ||
+ (info->expire != APR_DATE_BAD &&
+ (info->expire - info->date) > age))) {
+ /* make sure we don't stomp on a previous warning */
+ if ((warn_head == NULL) ||
+ ((warn_head != NULL) && (ap_strstr_c(warn_head, "110") == NULL))) {
+ apr_table_merge(head_ptr, "Warning", "110 Response is stale");
+ }
+ }
+ /*
+ * If none of Expires, Cache-Control: max-age, or Cache-Control:
+         * s-maxage appears in the response, and the response's calculated
+         * age is more than 24 hours, add the warning 113
+ */
+ if ((maxage_cresp == -1) && (smaxage == -1) &&
+ (expstr == NULL) && (age > 86400)) {
+
+ /* Make sure we don't stomp on a previous warning, and don't dup
+             * a 113 warning that is already present. Also, make sure to add
+ * the new warning to the correct *headers_out location.
+ */
+ if ((warn_head == NULL) ||
+ ((warn_head != NULL) && (ap_strstr_c(warn_head, "113") == NULL))) {
+ apr_table_merge(head_ptr, "Warning", "113 Heuristic expiration");
+ }
+ }
+ return 1; /* Cache object is fresh (enough) */
+ }
+ return 0; /* Cache object is stale */
+}
+
+/*
+ * list is a comma-separated list of case-insensitive tokens, with
+ * optional whitespace around the tokens.
+ * Returns 1 if the token key is found in the list (and, if val is non-NULL,
+ * stores the token's value, if any, in *val), or 0 otherwise.
+ */
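+/*
+ * For example, given
+ *
+ *     list = "no-store, max-age=300, must-revalidate"
+ *
+ * ap_cache_liststr(p, list, "max-age", &val) returns 1 and sets val to "300",
+ * while ap_cache_liststr(NULL, list, "no-cache", NULL) returns 0.
+ */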
+CACHE_DECLARE(int) ap_cache_liststr(apr_pool_t *p, const char *list,
+ const char *key, char **val)
+{
+ apr_size_t key_len;
+ const char *next;
+
+ if (!list) {
+ return 0;
+ }
+
+ key_len = strlen(key);
+ next = list;
+
+ for (;;) {
+
+ /* skip whitespace and commas to find the start of the next key */
+ while (*next && (apr_isspace(*next) || (*next == ','))) {
+ next++;
+ }
+
+ if (!*next) {
+ return 0;
+ }
+
+ if (!strncasecmp(next, key, key_len)) {
+ /* this field matches the key (though it might just be
+ * a prefix match, so make sure the match is followed
+             * by the end of the string, whitespace, a comma or an equals sign)
+ */
+ next += key_len;
+ if (!*next || (*next == '=') || apr_isspace(*next) ||
+ (*next == ',')) {
+ /* valid match */
+ if (val) {
+ while (*next && (*next != '=') && (*next != ',')) {
+ next++;
+ }
+ if (*next == '=') {
+ next++;
+ while (*next && apr_isspace(*next )) {
+ next++;
+ }
+ if (!*next) {
+ *val = NULL;
+ }
+ else {
+ const char *val_start = next;
+ while (*next && !apr_isspace(*next) &&
+ (*next != ',')) {
+ next++;
+ }
+ *val = apr_pstrmemdup(p, val_start,
+ next - val_start);
+ }
+ }
+ else {
+ *val = NULL;
+ }
+ }
+ return 1;
+ }
+ }
+
+ /* skip to the next field */
+ do {
+ next++;
+ if (!*next) {
+ return 0;
+ }
+ } while (*next != ',');
+ }
+}
+
+/* return each comma separated token, one at a time */
+CACHE_DECLARE(const char *)ap_cache_tokstr(apr_pool_t *p, const char *list,
+ const char **str)
+{
+ apr_size_t i;
+ const char *s;
+
+ s = ap_strchr_c(list, ',');
+ if (s != NULL) {
+ i = s - list;
+        do {
+            s++;
+        } while (apr_isspace(*s));
+ }
+ else
+ i = strlen(list);
+
+ while (i > 0 && apr_isspace(list[i - 1]))
+ i--;
+
+ *str = s;
+ if (i)
+ return apr_pstrndup(p, list, i);
+ else
+ return NULL;
+}
+
+/*
+ * Converts apr_time_t expressed as hex digits to
+ * a true apr_time_t.
+ */
+CACHE_DECLARE(apr_time_t) ap_cache_hex2usec(const char *x)
+{
+ int i, ch;
+ apr_time_t j;
+ for (i = 0, j = 0; i < sizeof(j) * 2; i++) {
+ ch = x[i];
+ j <<= 4;
+ if (apr_isdigit(ch))
+ j |= ch - '0';
+ else if (apr_isupper(ch))
+ j |= ch - ('A' - 10);
+ else
+ j |= ch - ('a' - 10);
+ }
+ return j;
+}
+
+/*
+ * Converts apr_time_t to apr_time_t expressed as hex digits.
+ */
+CACHE_DECLARE(void) ap_cache_usec2hex(apr_time_t j, char *y)
+{
+ int i, ch;
+
+ for (i = (sizeof(j) * 2)-1; i >= 0; i--) {
+ ch = (int)(j & 0xF);
+ j >>= 4;
+ if (ch >= 10)
+ y[i] = ch + ('A' - 10);
+ else
+ y[i] = ch + '0';
+ }
+ y[sizeof(j) * 2] = '\0';
+}
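+
+/*
+ * For example, on a platform where apr_time_t is 64 bits wide, the value 26
+ * is written by ap_cache_usec2hex() as the 16-character string
+ * "000000000000001A", and ap_cache_hex2usec() converts it back to 26.
+ */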
+
+static void cache_hash(const char *it, char *val, int ndepth, int nlength)
+{
+ apr_md5_ctx_t context;
+ unsigned char digest[16];
+ char tmp[22];
+ int i, k, d;
+ unsigned int x;
+ static const char enc_table[64] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_@";
+
+ apr_md5_init(&context);
+ apr_md5_update(&context, (const unsigned char *) it, strlen(it));
+ apr_md5_final(digest, &context);
+
+ /* encode 128 bits as 22 characters, using a modified uuencoding
+     * the encoding is 3 bytes -> 4 characters, i.e. 128 bits is
+ * 5 x 3 bytes + 1 byte -> 5 * 4 characters + 2 characters
+ */
+ for (i = 0, k = 0; i < 15; i += 3) {
+ x = (digest[i] << 16) | (digest[i + 1] << 8) | digest[i + 2];
+ tmp[k++] = enc_table[x >> 18];
+ tmp[k++] = enc_table[(x >> 12) & 0x3f];
+ tmp[k++] = enc_table[(x >> 6) & 0x3f];
+ tmp[k++] = enc_table[x & 0x3f];
+ }
+
+ /* one byte left */
+ x = digest[15];
+ tmp[k++] = enc_table[x >> 2]; /* use up 6 bits */
+ tmp[k++] = enc_table[(x << 4) & 0x3f];
+
+ /* now split into directory levels */
+ for (i = k = d = 0; d < ndepth; ++d) {
+ memcpy(&val[i], &tmp[k], nlength);
+ k += nlength;
+ val[i + nlength] = '/';
+ i += nlength + 1;
+ }
+ memcpy(&val[i], &tmp[k], 22 - k);
+ val[i + 22 - k] = '\0';
+}
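+
+/*
+ * For example, with ndepth=2 and nlength=2 an encoded digest of
+ * "pqrstuvwxyzABCDEFGHIJK" (shown only to illustrate the layout, not a real
+ * hash) is split into "pq/rs/tuvwxyzABCDEFGHIJK".
+ */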
+
+CACHE_DECLARE(char *)generate_name(apr_pool_t *p, int dirlevels,
+ int dirlength, const char *name)
+{
+ char hashfile[66];
+ cache_hash(name, hashfile, dirlevels, dirlength);
+ return apr_pstrdup(p, hashfile);
+}
+
+/* Create a new table consisting of those elements from an input
+ * headers table that are allowed to be stored in a cache.
+ */
+CACHE_DECLARE(apr_table_t *)ap_cache_cacheable_hdrs_out(apr_pool_t *pool,
+ apr_table_t *t,
+ server_rec *s)
+{
+ cache_server_conf *conf;
+ char **header;
+ int i;
+
+ /* Make a copy of the headers, and remove from
+ * the copy any hop-by-hop headers, as defined in Section
+ * 13.5.1 of RFC 2616
+ */
+ apr_table_t *headers_out;
+ headers_out = apr_table_copy(pool, t);
+ apr_table_unset(headers_out, "Connection");
+ apr_table_unset(headers_out, "Keep-Alive");
+ apr_table_unset(headers_out, "Proxy-Authenticate");
+ apr_table_unset(headers_out, "Proxy-Authorization");
+ apr_table_unset(headers_out, "TE");
+ apr_table_unset(headers_out, "Trailers");
+ apr_table_unset(headers_out, "Transfer-Encoding");
+ apr_table_unset(headers_out, "Upgrade");
+
+ conf = (cache_server_conf *)ap_get_module_config(s->module_config,
+ &cache_module);
+ /* Remove the user defined headers set with CacheIgnoreHeaders.
+ * This may break RFC 2616 compliance on behalf of the administrator.
+ */
+ header = (char **)conf->ignore_headers->elts;
+ for (i = 0; i < conf->ignore_headers->nelts; i++) {
+ apr_table_unset(headers_out, header[i]);
+ }
+ return headers_out;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/charset.conv b/rubbos/app/httpd-2.0.64/modules/experimental/charset.conv
new file mode 100644
index 00000000..3cd6fa9d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/charset.conv
@@ -0,0 +1,55 @@
+
+# Lang-abbv Charset Language
+#---------------------------------
+en ISO-8859-1 English
+UTF-8 utf8 UTF-8
+Unicode ucs Unicode
+th Cp874 Thai
+ja SJIS Japanese
+ko Cp949 Korean
+zh Cp950 Chinese-Traditional
+zh-cn GB2312 Chinese-Simplified
+zh-tw Cp950 Chinese
+cs ISO-8859-2 Czech
+hu ISO-8859-2 Hungarian
+hr ISO-8859-2 Croatian
+pl ISO-8859-2 Polish
+ro ISO-8859-2 Romanian
+sr ISO-8859-2 Serbian
+sk ISO-8859-2 Slovak
+sl ISO-8859-2 Slovenian
+sq ISO-8859-2 Albanian
+bg ISO-8859-5 Bulgarian
+be ISO-8859-5 Byelorussian
+mk ISO-8859-5 Macedonian
+ru ISO-8859-5 Russian
+uk ISO-8859-5 Ukrainian
+ca ISO-8859-1 Catalan
+de ISO-8859-1 German
+da ISO-8859-1 Danish
+fi ISO-8859-1 Finnish
+fr ISO-8859-1 French
+es ISO-8859-1 Spanish
+is ISO-8859-1 Icelandic
+it ISO-8859-1 Italian
+nl ISO-8859-1 Dutch
+no ISO-8859-1 Norwegian
+pt ISO-8859-1 Portuguese
+sv ISO-8859-1 Swedish
+af ISO-8859-1 Afrikaans
+eu ISO-8859-1 Basque
+fo ISO-8859-1 Faroese
+gl ISO-8859-1 Galician
+ga ISO-8859-1 Irish
+gd ISO-8859-1 Scottish
+mt ISO-8859-3 Maltese
+eo ISO-8859-3 Esperanto
+el ISO-8859-7 Greek
+tr ISO-8859-9 Turkish
+he ISO-8859-8 Hebrew
+iw ISO-8859-8 Hebrew
+ar ISO-8859-6 Arabic
+et ISO-8859-1 Estonian
+lv ISO-8859-2 Latvian
+lt ISO-8859-2 Lithuanian
+ 
\ No newline at end of file
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/config.m4 b/rubbos/app/httpd-2.0.64/modules/experimental/config.m4
new file mode 100644
index 00000000..b9d2e7e3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/config.m4
@@ -0,0 +1,39 @@
+
+APACHE_MODPATH_INIT(experimental)
+
+if test "$ac_cv_ebcdic" = "yes"; then
+# mod_charset_lite can be very useful on an ebcdic system,
+# so include it by default
+ APACHE_MODULE(charset_lite, character set translation, , , yes)
+else
+ APACHE_MODULE(charset_lite, character set translation, , , no)
+fi
+
+dnl # list of object files for mod_cache
+cache_objs="dnl
+mod_cache.lo dnl
+cache_storage.lo dnl
+cache_util.lo dnl
+"
+dnl # list of object files for mod_mem_cache
+mem_cache_objs="dnl
+mod_mem_cache.lo dnl
+cache_cache.lo dnl
+cache_pqueue.lo dnl
+cache_hash.lo dnl
+"
+APACHE_MODULE(cache, dynamic file caching, $cache_objs, , no)
+APACHE_MODULE(disk_cache, disk caching module, , , no)
+APACHE_MODULE(mem_cache, memory caching module, $mem_cache_objs, , no)
+APACHE_MODULE(example, example and demo module, , , no)
+APACHE_MODULE(case_filter, example uppercase conversion filter, , , no)
+APACHE_MODULE(case_filter_in, example uppercase conversion input filter, , , no)
+APACHE_MODULE(dumpio, I/O dump filter, , , no)
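+
+dnl # Illustrative usage (assumption): the modules above are switched on
+dnl # individually at configure time, e.g.
+dnl #   ./configure --enable-cache --enable-mem-cache --enable-disk-cache
+dnl # which builds each module from the object lists given to APACHE_MODULE.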
+
+ldap_objects="util_ldap.lo util_ldap_cache.lo util_ldap_cache_mgr.lo"
+APACHE_MODULE(ldap, LDAP caching and connection pooling services, $ldap_objects, , no)
+
+auth_ldap_objects="mod_auth_ldap.lo"
+APACHE_MODULE(auth_ldap, LDAP based authentication, $auth_ldap_objects, , no)
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.c
new file mode 100644
index 00000000..10b3f17c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.c
@@ -0,0 +1,1117 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_auth_ldap.c: LDAP authentication module
+ *
+ * Original code from auth_ldap module for Apache v1.3:
+ * Copyright 1998, 1999 Enbridge Pipelines Inc.
+ * Copyright 1999-2001 Dave Carrigan
+ */
+
+#include <apr_ldap.h>
+#include <apr_strings.h>
+#include <apr_xlate.h>
+#define APR_WANT_STRFUNC
+#include <apr_want.h>
+
+#include "ap_config.h"
+#if APR_HAVE_UNISTD_H
+/* for getpid() */
+#include <unistd.h>
+#endif
+#include <ctype.h>
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "util_ldap.h"
+
+#ifndef APU_HAS_LDAP
+#error mod_auth_ldap requires APR-util to have LDAP support built in
+#endif
+
+/* per directory configuration */
+typedef struct {
+ apr_pool_t *pool; /* Pool that this config is allocated from */
+#if APR_HAS_THREADS
+ apr_thread_mutex_t *lock; /* Lock for this config */
+#endif
+ int auth_authoritative; /* Is this auth method the one and only? */
+ int enabled; /* Is auth_ldap enabled in this directory? */
+
+ /* These parameters are all derived from the AuthLDAPURL directive */
+ char *url; /* String representation of the URL */
+
+ char *host; /* Name of the LDAP server (or space separated list) */
+ int port; /* Port of the LDAP server */
+ char *basedn; /* Base DN to do all searches from */
+ char *attribute; /* Attribute to search for */
+ char **attributes; /* Array of all the attributes to return */
+ int scope; /* Scope of the search */
+ char *filter; /* Filter to further limit the search */
+ deref_options deref; /* how to handle alias dereferencing */
+ char *binddn; /* DN to bind to server (can be NULL) */
+ char *bindpw; /* Password to bind to server (can be NULL) */
+
+ int frontpage_hack; /* Hack for frontpage support */
+ int user_is_dn; /* If true, connection->user is DN instead of userid */
+ int compare_dn_on_server; /* If true, will use server to do DN compare */
+
+ int have_ldap_url; /* Set if we have found an LDAP url */
+
+ apr_array_header_t *groupattr; /* List of Group attributes */
+ int group_attrib_is_dn; /* If true, the group attribute is the DN, otherwise,
+ it's the exact string passed by the HTTP client */
+
+ int secure; /* True if SSL connections are requested */
+} mod_auth_ldap_config_t;
+
+typedef struct mod_auth_ldap_request_t {
+ char *dn; /* The saved dn from a successful search */
+ char *user; /* The username provided by the client */
+} mod_auth_ldap_request_t;
+
+/* maximum group elements supported */
+#define GROUPATTR_MAX_ELTS 10
+
+struct mod_auth_ldap_groupattr_entry_t {
+ char *name;
+};
+
+module AP_MODULE_DECLARE_DATA auth_ldap_module;
+
+/* function prototypes */
+void mod_auth_ldap_build_filter(char *filtbuf,
+ request_rec *r,
+ mod_auth_ldap_config_t *sec);
+int mod_auth_ldap_check_user_id(request_rec *r);
+int mod_auth_ldap_auth_checker(request_rec *r);
+void *mod_auth_ldap_create_dir_config(apr_pool_t *p, char *d);
+
+/* ---------------------------------------- */
+
+static apr_hash_t *charset_conversions = NULL;
+static char *to_charset = NULL; /* UTF-8 identifier derived from the charset.conv file */
+
+/* Derive a code page ID given a language name or ID */
+static char* derive_codepage_from_lang (apr_pool_t *p, char *language)
+{
+ int lang_len;
+ int check_short = 0;
+ char *charset;
+
+ if (!language) /* our default codepage */
+ return apr_pstrdup(p, "ISO-8859-1");
+ else
+ lang_len = strlen(language);
+
+ charset = (char*) apr_hash_get(charset_conversions, language, APR_HASH_KEY_STRING);
+
+ if (!charset) {
+ language[2] = '\0';
+ charset = (char*) apr_hash_get(charset_conversions, language, APR_HASH_KEY_STRING);
+ }
+
+ if (charset) {
+ charset = apr_pstrdup(p, charset);
+ }
+
+ return charset;
+}
+
+static apr_xlate_t* get_conv_set (request_rec *r)
+{
+ char *lang_line = (char*)apr_table_get(r->headers_in, "accept-language");
+ char *lang;
+ apr_xlate_t *convset;
+
+ if (lang_line) {
+ lang_line = apr_pstrdup(r->pool, lang_line);
+ for (lang = lang_line;*lang;lang++) {
+ if ((*lang == ',') || (*lang == ';')) {
+ *lang = '\0';
+ break;
+ }
+ }
+ lang = derive_codepage_from_lang(r->pool, lang_line);
+
+ if (lang && (apr_xlate_open(&convset, to_charset, lang, r->pool) == APR_SUCCESS)) {
+ return convset;
+ }
+ }
+
+ return NULL;
+}
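+
+/*
+ * Illustrative example (charset names taken from the sample charset.conv
+ * shipped with this module; the header value is hypothetical): a request
+ * carrying
+ *
+ * Accept-Language: ja, en-us;q=0.8
+ *
+ * is cut at the first ',' or ';' leaving "ja", which maps to "SJIS", so
+ * get_conv_set() returns an SJIS -> UTF-8 translation handle for use in
+ * mod_auth_ldap_build_filter() below.
+ */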
+
+
+/*
+ * Build the search filter, or at least as much of the search filter that
+ * will fit in the buffer. We don't worry about the buffer not being able
+ * to hold the entire filter. If the buffer wasn't big enough to hold the
+ * filter, ldap_search_s will complain, but the only situation where this
+ * is likely to happen is if the client sent a really, really long
+ * username, most likely as part of an attack.
+ *
+ * The search filter consists of the filter provided with the URL,
+ * combined with a filter made up of the attribute provided with the URL,
+ * and the actual username passed by the HTTP client. For example, assume
+ * that the LDAP URL is
+ *
+ * ldap://ldap.airius.com/ou=People, o=Airius?uid??(posixid=*)
+ *
+ * Further, assume that the userid passed by the client was `userj'. The
+ * search filter will be (&(posixid=*)(uid=userj)).
+ */
+#define FILTER_LENGTH MAX_STRING_LEN
+void mod_auth_ldap_build_filter(char *filtbuf,
+ request_rec *r,
+ mod_auth_ldap_config_t *sec)
+{
+ char *p, *q, *filtbuf_end;
+ char *user;
+ apr_xlate_t *convset = NULL;
+ apr_size_t inbytes;
+ apr_size_t outbytes;
+ char *outbuf;
+
+ if (r->user != NULL) {
+ user = apr_pstrdup (r->pool, r->user);
+ }
+ else
+ return;
+
+ if (charset_conversions) {
+ convset = get_conv_set(r);
+ }
+
+ if (convset) {
+ inbytes = strlen(user);
+ outbytes = (inbytes+1)*3;
+ outbuf = apr_pcalloc(r->pool, outbytes);
+
+ /* Convert the user name to UTF-8. This is only valid for LDAP v3 */
+ if (apr_xlate_conv_buffer(convset, user, &inbytes, outbuf, &outbytes) == APR_SUCCESS) {
+ user = apr_pstrdup(r->pool, outbuf);
+ }
+ }
+
+ /*
+ * Create the first part of the filter, which consists of the
+ * config-supplied portions.
+ */
+ apr_snprintf(filtbuf, FILTER_LENGTH, "(&(%s)(%s=", sec->filter, sec->attribute);
+
+ /*
+ * Now add the client-supplied username to the filter, ensuring that any
+ * LDAP filter metachars are escaped.
+ */
+ filtbuf_end = filtbuf + FILTER_LENGTH - 1;
+#if APR_HAS_MICROSOFT_LDAPSDK
+ for (p = user, q=filtbuf + strlen(filtbuf);
+ *p && q < filtbuf_end; ) {
+ if (strchr("*()\\", *p) != NULL) {
+ if ( q + 3 >= filtbuf_end)
+ break; /* Don't write part of escape sequence if we can't write all of it */
+ *q++ = '\\';
+ switch ( *p++ )
+ {
+ case '*':
+ *q++ = '2';
+ *q++ = 'a';
+ break;
+ case '(':
+ *q++ = '2';
+ *q++ = '8';
+ break;
+ case ')':
+ *q++ = '2';
+ *q++ = '9';
+ break;
+ case '\\':
+ *q++ = '5';
+ *q++ = 'c';
+ break;
+ }
+ }
+ else
+ *q++ = *p++;
+ }
+#else
+ for (p = user, q=filtbuf + strlen(filtbuf);
+ *p && q < filtbuf_end; *q++ = *p++) {
+ if (strchr("*()\\", *p) != NULL) {
+ *q++ = '\\';
+ if (q >= filtbuf_end) {
+ break;
+ }
+ }
+ }
+#endif
+ *q = '\0';
+
+ /*
+ * Append the closing parens of the filter, unless doing so would
+ * overrun the buffer.
+ */
+ if (q + 2 <= filtbuf_end)
+ strcat(filtbuf, "))");
+}
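+
+/*
+ * Illustrative example (hypothetical input): with the URL-supplied filter
+ * "posixid=*", attribute "uid" and a client-supplied user name of "a*b",
+ * the buffer ends up as
+ *
+ * (&(posixid=*)(uid=a\2ab)) when built with the Microsoft LDAP SDK
+ * (&(posixid=*)(uid=a\*b)) otherwise
+ *
+ * i.e. LDAP filter metacharacters in the user name are escaped rather
+ * than interpreted.
+ */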
+
+static apr_status_t mod_auth_ldap_cleanup_connection_close(void *param)
+{
+ util_ldap_connection_t *ldc = param;
+ util_ldap_connection_close(ldc);
+ return APR_SUCCESS;
+}
+
+
+/*
+ * Authentication Phase
+ * --------------------
+ *
+ * This phase authenticates the credentials the user has sent with
+ * the request (ie the username and password are checked). This is done
+ * by making an attempt to bind to the LDAP server using this user's
+ * DN and the supplied password.
+ *
+ */
+int mod_auth_ldap_check_user_id(request_rec *r)
+{
+ int failures = 0;
+ const char **vals = NULL;
+ char filtbuf[FILTER_LENGTH];
+ mod_auth_ldap_config_t *sec =
+ (mod_auth_ldap_config_t *)ap_get_module_config(r->per_dir_config, &auth_ldap_module);
+
+ util_ldap_connection_t *ldc = NULL;
+ const char *sent_pw;
+ int result = 0;
+ const char *dn = NULL;
+
+ mod_auth_ldap_request_t *req =
+ (mod_auth_ldap_request_t *)apr_pcalloc(r->pool, sizeof(mod_auth_ldap_request_t));
+ ap_set_module_config(r->request_config, &auth_ldap_module, req);
+
+ if (!sec->enabled) {
+ return DECLINED;
+ }
+
+ /*
+ * Basic sanity checks before any LDAP operations even happen.
+ */
+ if (!sec->have_ldap_url) {
+ return DECLINED;
+ }
+
+start_over:
+
+ /* There is a good AuthLDAPURL, right? */
+ if (sec->host) {
+ ldc = util_ldap_connection_find(r, sec->host, sec->port,
+ sec->binddn, sec->bindpw, sec->deref,
+ sec->secure);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authenticate: no sec->host - weird...?", getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authenticate: using URL %s", getpid(), sec->url);
+
+ /* Get the password that the client sent */
+ if ((result = ap_get_basic_auth_pw(r, &sent_pw))) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authenticate: "
+ "ap_get_basic_auth_pw() returns %d", getpid(), result);
+ util_ldap_connection_close(ldc);
+ return result;
+ }
+
+ if (r->user == NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authenticate: no user specified", getpid());
+ util_ldap_connection_close(ldc);
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+
+ /* build the username filter */
+ mod_auth_ldap_build_filter(filtbuf, r, sec);
+
+ /* do the user search */
+ result = util_ldap_cache_checkuserid(r, ldc, sec->url, sec->basedn, sec->scope,
+ sec->attributes, filtbuf, sent_pw, &dn, &vals);
+ util_ldap_connection_close(ldc);
+
+ /* sanity check - if server is down, retry it up to 5 times */
+ if (result == LDAP_SERVER_DOWN) {
+ if (failures++ <= 5) {
+ goto start_over;
+ }
+ }
+
+ /* handle bind failure */
+ if (result != LDAP_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authenticate: "
+ "user %s authentication failed; URI %s [%s][%s]",
+ getpid(), r->user, r->uri, ldc->reason, ldap_err2string(result));
+ if ((LDAP_INVALID_CREDENTIALS == result) || sec->auth_authoritative) {
+ ap_note_basic_auth_failure(r);
+ return HTTP_UNAUTHORIZED;
+ }
+ else {
+ return DECLINED;
+ }
+ }
+
+ /* mark the user and DN */
+ req->dn = apr_pstrdup(r->pool, dn);
+ req->user = r->user;
+ if (sec->user_is_dn) {
+ r->user = req->dn;
+ }
+
+ /* add environment variables */
+ if (sec->attributes && vals) {
+ apr_table_t *e = r->subprocess_env;
+ int i = 0;
+ while (sec->attributes[i]) {
+ char *str = apr_pstrcat(r->pool, "AUTHENTICATE_", sec->attributes[i], NULL);
+ int j = 13;
+ while (str[j]) {
+ if (str[j] >= 'a' && str[j] <= 'z') {
+ str[j] = str[j] - ('a' - 'A');
+ }
+ j++;
+ }
+ apr_table_setn(e, str, vals[i]);
+ i++;
+ }
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authenticate: accepting %s", getpid(), r->user);
+
+ return OK;
+}
+
+
+/*
+ * Authorisation Phase
+ * -------------------
+ *
+ * After checking whether the username and password are correct, we need
+ * to check whether that user is authorised to view this resource. The
+ * require directive is used to do this:
+ *
+ * require valid-user Any authenticated user is allowed in.
+ * require user <username> This particular user is allowed in.
+ * require group <groupname> The user must be a member of this group
+ * in order to be allowed in.
+ * require dn <dn> The user must have the following DN in the
+ * LDAP tree to be let in.
+ *
+ */
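+
+/*
+ * Illustrative configuration (assumption, values hypothetical; the DN uses
+ * the same example directory as the comments elsewhere in this file):
+ *
+ * <Directory /usr/local/apache2/htdocs/private>
+ * AuthType Basic
+ * AuthName "LDAP protected area"
+ * AuthLDAPURL ldap://ldap.airius.com/ou=People,o=Airius?uid?sub?(posixid=*)
+ * require group cn=Administrators,o=Airius
+ * </Directory>
+ */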
+int mod_auth_ldap_auth_checker(request_rec *r)
+{
+ int result = 0;
+ mod_auth_ldap_request_t *req =
+ (mod_auth_ldap_request_t *)ap_get_module_config(r->request_config,
+ &auth_ldap_module);
+ mod_auth_ldap_config_t *sec =
+ (mod_auth_ldap_config_t *)ap_get_module_config(r->per_dir_config,
+ &auth_ldap_module);
+
+ util_ldap_connection_t *ldc = NULL;
+ int m = r->method_number;
+
+ const apr_array_header_t *reqs_arr = ap_requires(r);
+ require_line *reqs = reqs_arr ? (require_line *)reqs_arr->elts : NULL;
+
+ register int x;
+ const char *t;
+ char *w, *value;
+ int method_restricted = 0;
+
+ if (!sec->enabled) {
+ return DECLINED;
+ }
+
+ if (!sec->have_ldap_url) {
+ return DECLINED;
+ }
+
+ /*
+ * It is possible that we've skipped mod_auth_ldap's
+ * check_user_id hook, but still get here. In that
+ * case, the req request_config struct hasn't been initialized
+ * causing problems when we try to use req->dn and/or req->user
+ * below. So we simply create one.
+ *
+ * Unlike 2.2, we don't try to search or populate it.
+ */
+ if (!req) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "no req struct - skipped mod_auth_ldap_check_user_id?",
+ getpid());
+
+ req = (mod_auth_ldap_request_t *)apr_pcalloc(r->pool,
+ sizeof(mod_auth_ldap_request_t));
+ ap_set_module_config(r->request_config, &auth_ldap_module, req);
+ }
+
+ if (sec->host) {
+ ldc = util_ldap_connection_find(r, sec->host, sec->port,
+ sec->binddn, sec->bindpw, sec->deref,
+ sec->secure);
+ apr_pool_cleanup_register(r->pool, ldc,
+ mod_auth_ldap_cleanup_connection_close,
+ apr_pool_cleanup_null);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: no sec->host - weird...?", getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+
+ /*
+ * If there are no elements in the group attribute array, the default should be
+ * member and uniquemember; populate the array now.
+ */
+ if (sec->groupattr->nelts == 0) {
+ struct mod_auth_ldap_groupattr_entry_t *grp;
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(sec->lock);
+#endif
+ grp = apr_array_push(sec->groupattr);
+ grp->name = "member";
+ grp = apr_array_push(sec->groupattr);
+ grp->name = "uniquemember";
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(sec->lock);
+#endif
+ }
+
+ if (!reqs_arr) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: no requirements array", getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+
+ /* Loop through the requirements array until there's no elements
+ * left, or something causes a return from inside the loop */
+ for(x=0; x < reqs_arr->nelts; x++) {
+ if (! (reqs[x].method_mask & (1 << m))) {
+ continue;
+ }
+ method_restricted = 1;
+
+ t = reqs[x].requirement;
+ w = ap_getword_white(r->pool, &t);
+
+ if (strcmp(w, "valid-user") == 0) {
+ /*
+ * Valid user will always be true if we authenticated with ldap,
+ * but when using front page, valid user should only be true if
+ * he exists in the frontpage password file. This hack will get
+ * auth_ldap to look up the user in the pw file to really be
+ * sure that he's valid. Naturally, it requires mod_auth to be
+ * compiled in, but if mod_auth wasn't in there, then the need
+ * for this hack wouldn't exist anyway.
+ */
+ if (sec->frontpage_hack) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "deferring authorisation to mod_auth (FP Hack)",
+ getpid());
+ return OK;
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "successful authorisation because user "
+ "is valid-user", getpid());
+ return OK;
+ }
+ }
+ else if (strcmp(w, "user") == 0) {
+ if (req->dn == NULL || strlen(req->dn) == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require user: user's DN has not been defined; failing authorisation",
+ getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+ /*
+ * First do a whole-line compare, in case it's something like
+ * require user Babs Jensen
+ */
+ result = util_ldap_cache_compare(r, ldc, sec->url, req->dn, sec->attribute, t);
+ switch(result) {
+ case LDAP_COMPARE_TRUE: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require user: authorisation successful", getpid());
+ return OK;
+ }
+ default: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: require user: "
+ "authorisation failed [%s][%s]", getpid(),
+ ldc->reason, ldap_err2string(result));
+ }
+ }
+ /*
+ * Now break apart the line and compare each word on it
+ */
+ while (t[0]) {
+ w = ap_getword_conf(r->pool, &t);
+ result = util_ldap_cache_compare(r, ldc, sec->url, req->dn, sec->attribute, w);
+ switch(result) {
+ case LDAP_COMPARE_TRUE: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require user: authorisation successful", getpid());
+ return OK;
+ }
+ default: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require user: authorisation failed [%s][%s]",
+ getpid(), ldc->reason, ldap_err2string(result));
+ }
+ }
+ }
+ }
+ else if (strcmp(w, "dn") == 0) {
+ if (req->dn == NULL || strlen(req->dn) == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require dn: user's DN has not been defined; failing authorisation",
+ getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+
+ result = util_ldap_cache_comparedn(r, ldc, sec->url, req->dn, t, sec->compare_dn_on_server);
+ switch(result) {
+ case LDAP_COMPARE_TRUE: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require dn: authorisation successful", getpid());
+ return OK;
+ }
+ default: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require dn \"%s\": LDAP error [%s][%s]",
+ getpid(), t, ldc->reason, ldap_err2string(result));
+ }
+ }
+ }
+ else if (strcmp(w, "group") == 0) {
+ struct mod_auth_ldap_groupattr_entry_t *ent = (struct mod_auth_ldap_groupattr_entry_t *) sec->groupattr->elts;
+ int i;
+
+ if (sec->group_attrib_is_dn) {
+ if (req->dn == NULL || strlen(req->dn) == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: require group: user's DN has not been defined; failing authorisation",
+ getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+ }
+ else {
+ if (req->user == NULL || strlen(req->user) == 0) {
+ /* We weren't called in the authentication phase, so we didn't have a
+ * chance to set the user field. Do so now. */
+ req->user = r->user;
+ }
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: require group: testing for group membership in \"%s\"",
+ getpid(), t);
+
+ for (i = 0; i < sec->groupattr->nelts; i++) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: require group: testing for %s: %s (%s)", getpid(),
+ ent[i].name, sec->group_attrib_is_dn ? req->dn : req->user, t);
+
+ result = util_ldap_cache_compare(r, ldc, sec->url, t, ent[i].name,
+ sec->group_attrib_is_dn ? req->dn : req->user);
+ switch(result) {
+ case LDAP_COMPARE_TRUE: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: require group: "
+ "authorisation successful (attribute %s) [%s][%s]",
+ getpid(), ent[i].name, ldc->reason, ldap_err2string(result));
+ return OK;
+ }
+ default: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: require group \"%s\": "
+ "authorisation failed [%s][%s]",
+ getpid(), t, ldc->reason, ldap_err2string(result));
+ }
+ }
+ }
+ }
+ else if (strcmp(w, "ldap-attribute") == 0) {
+ if (req->dn == NULL || strlen(req->dn) == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require ldap-attribute: user's DN has not been defined; failing authorisation",
+ getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+ while (t[0]) {
+ w = ap_getword(r->pool, &t, '=');
+ value = ap_getword_conf(r->pool, &t);
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: checking attribute"
+ " %s has value %s", getpid(), w, value);
+ result = util_ldap_cache_compare(r, ldc, sec->url, req->dn,
+ w, value);
+ switch(result) {
+ case LDAP_COMPARE_TRUE: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO,
+ 0, r, "[%d] auth_ldap authorise: "
+ "require attribute: authorisation "
+ "successful", getpid());
+ return OK;
+ }
+ default: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO,
+ 0, r, "[%d] auth_ldap authorise: "
+ "require attribute: authorisation "
+ "failed [%s][%s]", getpid(),
+ ldc->reason, ldap_err2string(result));
+ }
+ }
+ }
+ }
+ }
+
+ if (!method_restricted) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: agreeing because non-restricted",
+ getpid());
+ return OK;
+ }
+
+ if (!sec->auth_authoritative) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: declining to authorise", getpid());
+ return DECLINED;
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: authorisation denied", getpid());
+ ap_note_basic_auth_failure (r);
+
+ return HTTP_UNAUTHORIZED;
+}
+
+
+/* ---------------------------------------- */
+/* config directives */
+
+
+void *mod_auth_ldap_create_dir_config(apr_pool_t *p, char *d)
+{
+ mod_auth_ldap_config_t *sec =
+ (mod_auth_ldap_config_t *)apr_pcalloc(p, sizeof(mod_auth_ldap_config_t));
+
+ sec->pool = p;
+#if APR_HAS_THREADS
+ apr_thread_mutex_create(&sec->lock, APR_THREAD_MUTEX_DEFAULT, p);
+#endif
+ sec->auth_authoritative = 1;
+ sec->enabled = 1;
+ sec->groupattr = apr_array_make(p, GROUPATTR_MAX_ELTS,
+ sizeof(struct mod_auth_ldap_groupattr_entry_t));
+
+ sec->have_ldap_url = 0;
+ sec->url = "";
+ sec->host = NULL;
+ sec->binddn = NULL;
+ sec->bindpw = NULL;
+ sec->deref = always;
+ sec->group_attrib_is_dn = 1;
+
+ sec->frontpage_hack = 0;
+ sec->secure = 0;
+
+ sec->user_is_dn = 0;
+ sec->compare_dn_on_server = 0;
+
+ return sec;
+}
+
+/*
+ * Use the ldap url parsing routines to break up the ldap url into
+ * host and port.
+ */
+static const char *mod_auth_ldap_parse_url(cmd_parms *cmd,
+ void *config,
+ const char *url)
+{
+ int result;
+ apr_ldap_url_desc_t *urld;
+
+ mod_auth_ldap_config_t *sec = config;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: `%s'",
+ getpid(), url);
+
+ result = apr_ldap_url_parse(url, &(urld));
+ if (result != LDAP_SUCCESS) {
+ switch (result) {
+ case LDAP_URL_ERR_NOTLDAP:
+ return "LDAP URL does not begin with ldap://";
+ case LDAP_URL_ERR_NODN:
+ return "LDAP URL does not have a DN";
+ case LDAP_URL_ERR_BADSCOPE:
+ return "LDAP URL has an invalid scope";
+ case LDAP_URL_ERR_MEM:
+ return "Out of memory parsing LDAP URL";
+ default:
+ return "Could not parse LDAP URL";
+ }
+ }
+ sec->url = apr_pstrdup(cmd->pool, url);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: Host: %s", getpid(), urld->lud_host);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: Port: %d", getpid(), urld->lud_port);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: DN: %s", getpid(), urld->lud_dn);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: attrib: %s", getpid(), urld->lud_attrs? urld->lud_attrs[0] : "(null)");
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: scope: %s", getpid(),
+ (urld->lud_scope == LDAP_SCOPE_SUBTREE? "subtree" :
+ urld->lud_scope == LDAP_SCOPE_BASE? "base" :
+ urld->lud_scope == LDAP_SCOPE_ONELEVEL? "onelevel" : "unknown"));
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: filter: %s", getpid(), urld->lud_filter);
+
+ /* Set all the values, or at least some sane defaults */
+ if (sec->host) {
+ char *p = apr_palloc(cmd->pool, strlen(sec->host) + strlen(urld->lud_host) + 2);
+ strcpy(p, urld->lud_host);
+ strcat(p, " ");
+ strcat(p, sec->host);
+ sec->host = p;
+ }
+ else {
+ sec->host = urld->lud_host? apr_pstrdup(cmd->pool, urld->lud_host) : "localhost";
+ }
+ sec->basedn = urld->lud_dn? apr_pstrdup(cmd->pool, urld->lud_dn) : "";
+ if (urld->lud_attrs && urld->lud_attrs[0]) {
+ int i = 1;
+ while (urld->lud_attrs[i]) {
+ i++;
+ }
+ sec->attributes = apr_pcalloc(cmd->pool, sizeof(char *) * (i+1));
+ i = 0;
+ while (urld->lud_attrs[i]) {
+ sec->attributes[i] = apr_pstrdup(cmd->pool, urld->lud_attrs[i]);
+ i++;
+ }
+ sec->attribute = sec->attributes[0];
+ }
+ else {
+ sec->attribute = "uid";
+ }
+
+ sec->scope = urld->lud_scope == LDAP_SCOPE_ONELEVEL ?
+ LDAP_SCOPE_ONELEVEL : LDAP_SCOPE_SUBTREE;
+
+ if (urld->lud_filter) {
+ if (urld->lud_filter[0] == '(') {
+ /*
+ * Get rid of the surrounding parens; later on when generating the
+ * filter, they'll be put back.
+ */
+ sec->filter = apr_pstrdup(cmd->pool, urld->lud_filter+1);
+ sec->filter[strlen(sec->filter)-1] = '\0';
+ }
+ else {
+ sec->filter = apr_pstrdup(cmd->pool, urld->lud_filter);
+ }
+ }
+ else {
+ sec->filter = "objectclass=*";
+ }
+
+ /* "ldaps" indicates secure ldap connections desired
+ */
+ if (strncasecmp(url, "ldaps", 5) == 0)
+ {
+ sec->secure = 1;
+ sec->port = urld->lud_port? urld->lud_port : LDAPS_PORT;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "LDAP: auth_ldap using SSL connections");
+ }
+ else
+ {
+ sec->secure = 0;
+ sec->port = urld->lud_port? urld->lud_port : LDAP_PORT;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server,
+ "LDAP: auth_ldap not using SSL connections");
+ }
+
+ sec->have_ldap_url = 1;
+ apr_ldap_free_urldesc(urld);
+ return NULL;
+}
+
+static const char *mod_auth_ldap_set_deref(cmd_parms *cmd, void *config, const char *arg)
+{
+ mod_auth_ldap_config_t *sec = config;
+
+ if (strcmp(arg, "never") == 0 || strcasecmp(arg, "off") == 0) {
+ sec->deref = never;
+ }
+ else if (strcmp(arg, "searching") == 0) {
+ sec->deref = searching;
+ }
+ else if (strcmp(arg, "finding") == 0) {
+ sec->deref = finding;
+ }
+ else if (strcmp(arg, "always") == 0 || strcasecmp(arg, "on") == 0) {
+ sec->deref = always;
+ }
+ else {
+ return "Unrecognized value for AuthLDAPAliasDereference directive";
+ }
+ return NULL;
+}
+
+static const char *mod_auth_ldap_add_group_attribute(cmd_parms *cmd, void *config, const char *arg)
+{
+ struct mod_auth_ldap_groupattr_entry_t *new;
+
+ mod_auth_ldap_config_t *sec = config;
+
+ if (sec->groupattr->nelts > GROUPATTR_MAX_ELTS)
+ return "Too many AuthLDAPGroupAttribute directives";
+
+ new = apr_array_push(sec->groupattr);
+ new->name = apr_pstrdup(cmd->pool, arg);
+
+ return NULL;
+}
+
+static const char *set_charset_config(cmd_parms *cmd, void *config, const char *arg)
+{
+ ap_set_module_config(cmd->server->module_config, &auth_ldap_module,
+ (void *)arg);
+ return NULL;
+}
+
+
+command_rec mod_auth_ldap_cmds[] = {
+ AP_INIT_TAKE1("AuthLDAPURL", mod_auth_ldap_parse_url, NULL, OR_AUTHCFG,
+ "URL to define LDAP connection. This should be an RFC 2255 compliant\n"
+ "URL of the form ldap://host[:port]/basedn[?attrib[?scope[?filter]]].\n"
+ "<ul>\n"
+ "<li>Host is the name of the LDAP server. Use a space separated list of hosts \n"
+ "to specify redundant servers.\n"
+ "<li>Port is optional, and specifies the port to connect to.\n"
+ "<li>basedn specifies the base DN to start searches from\n"
+ "<li>Attrib specifies what attribute to search for in the directory. If not "
+ "provided, it defaults to <b>uid</b>.\n"
+ "<li>Scope is the scope of the search, and can be either <b>sub</b> or "
+ "<b>one</b>. If not provided, the default is <b>sub</b>.\n"
+ "<li>Filter is a filter to use in the search. If not provided, "
+ "defaults to <b>(objectClass=*)</b>.\n"
+ "</ul>\n"
+ "Searches are performed using the attribute and the filter combined. "
+ "For example, assume that the\n"
+ "LDAP URL is <b>ldap://ldap.airius.com/ou=People, o=Airius?uid?sub?(posixid=*)</b>. "
+ "Searches will\n"
+ "be done using the filter <b>(&((posixid=*))(uid=<i>username</i>))</b>, "
+ "where <i>username</i>\n"
+ "is the user name passed by the HTTP client. The search will be a subtree "
+ "search on the branch <b>ou=People, o=Airius</b>."),
+
+ AP_INIT_TAKE1("AuthLDAPBindDN", ap_set_string_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, binddn), OR_AUTHCFG,
+ "DN to use to bind to LDAP server. If not provided, will do an anonymous bind."),
+
+ AP_INIT_TAKE1("AuthLDAPBindPassword", ap_set_string_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, bindpw), OR_AUTHCFG,
+ "Password to use to bind to LDAP server. If not provided, will do an anonymous bind."),
+
+ AP_INIT_FLAG("AuthLDAPRemoteUserIsDN", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, user_is_dn), OR_AUTHCFG,
+ "Set to 'on' to set the REMOTE_USER environment variable to be the full "
+ "DN of the remote user. By default, this is set to off, meaning that "
+ "the REMOTE_USER variable will contain whatever value the remote user sent."),
+
+ AP_INIT_FLAG("AuthLDAPAuthoritative", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, auth_authoritative), OR_AUTHCFG,
+ "Set to 'off' to allow access control to be passed along to lower modules if "
+ "the UserID and/or group is not known to this module"),
+
+ AP_INIT_FLAG("AuthLDAPCompareDNOnServer", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, compare_dn_on_server), OR_AUTHCFG,
+ "Set to 'on' to force auth_ldap to do DN compares (for the \"require dn\" "
+ "directive) using the server, and set it 'off' to do the compares locally "
+ "(at the expense of possible false matches). See the documentation for "
+ "a complete description of this option."),
+
+ AP_INIT_ITERATE("AuthLDAPGroupAttribute", mod_auth_ldap_add_group_attribute, NULL, OR_AUTHCFG,
+ "A list of attributes used to define group membership - defaults to "
+ "member and uniquemember"),
+
+ AP_INIT_FLAG("AuthLDAPGroupAttributeIsDN", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, group_attrib_is_dn), OR_AUTHCFG,
+ "If set to 'on', auth_ldap uses the DN that is retrieved from the server for "
+ "subsequent group comparisons. If set to 'off', auth_ldap uses the string "
+ "provided by the client directly. Defaults to 'on'."),
+
+ AP_INIT_TAKE1("AuthLDAPDereferenceAliases", mod_auth_ldap_set_deref, NULL, OR_AUTHCFG,
+ "Determines how aliases are handled during a search. Can be one of the "
+ "values \"never\", \"searching\", \"finding\", or \"always\". "
+ "Defaults to always."),
+
+ AP_INIT_FLAG("AuthLDAPEnabled", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, enabled), OR_AUTHCFG,
+ "Set to off to disable auth_ldap, even if it's been enabled in a higher tree"),
+
+ AP_INIT_FLAG("AuthLDAPFrontPageHack", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, frontpage_hack), OR_AUTHCFG,
+ "Set to 'on' to support Microsoft FrontPage"),
+
+ AP_INIT_TAKE1("AuthLDAPCharsetConfig", set_charset_config, NULL, RSRC_CONF,
+ "Character set conversion configuration file. If omitted, character set "
+ "conversion is disabled."),
+
+ {NULL}
+};
+
+static int auth_ldap_post_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
+{
+ ap_configfile_t *f;
+ char l[MAX_STRING_LEN];
+ const char *charset_confname = ap_get_module_config(s->module_config,
+ &auth_ldap_module);
+ apr_status_t status;
+
+ /*
+ mod_auth_ldap_config_t *sec = (mod_auth_ldap_config_t *)
+ ap_get_module_config(s->module_config,
+ &auth_ldap_module);
+
+ if (sec->secure)
+ {
+ if (!util_ldap_ssl_supported(s))
+ {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
+ "LDAP: SSL connections (ldaps://) not supported by utilLDAP");
+ return(!OK);
+ }
+ }
+ */
+
+ /* make sure that mod_ldap (util_ldap) is loaded */
+ if (ap_find_linked_module("util_ldap.c") == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR|APLOG_NOERRNO, 0, s,
+ "Module mod_ldap missing. Mod_ldap (aka. util_ldap) "
+ "must be loaded in order for mod_auth_ldap to function properly");
+ return HTTP_INTERNAL_SERVER_ERROR;
+
+ }
+
+ if (!charset_confname) {
+ return OK;
+ }
+
+ charset_confname = ap_server_root_relative(p, charset_confname);
+ if (!charset_confname) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, APR_EBADPATH, s,
+ "Invalid charset conversion config path %s",
+ (const char *)ap_get_module_config(s->module_config,
+ &auth_ldap_module));
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ if ((status = ap_pcfg_openfile(&f, ptemp, charset_confname))
+ != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, s,
+ "could not open charset conversion config file %s.",
+ charset_confname);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ charset_conversions = apr_hash_make(p);
+
+ while (!(ap_cfg_getline(l, MAX_STRING_LEN, f))) {
+ const char *ll = l;
+ char *lang;
+
+ if (l[0] == '#') {
+ continue;
+ }
+ lang = ap_getword_conf(p, &ll);
+ ap_str_tolower(lang);
+
+ if (ll[0]) {
+ char *charset = ap_getword_conf(p, &ll);
+ apr_hash_set(charset_conversions, lang, APR_HASH_KEY_STRING, charset);
+ }
+ }
+ ap_cfg_closefile(f);
+
+ to_charset = derive_codepage_from_lang (p, "utf-8");
+ if (to_charset == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, s,
+ "could not find the UTF-8 charset in the file %s.",
+ charset_confname);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ return OK;
+}
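+
+/*
+ * Illustrative configuration (assumption; the path is hypothetical): the
+ * charset table above is only built when the server config names a file,
+ * e.g.
+ *
+ * AuthLDAPCharsetConfig conf/charset.conv
+ *
+ * Each non-comment line is read as "lang-abbrev charset [description]";
+ * the first two words end up in the charset_conversions hash.
+ */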
+
+static void mod_auth_ldap_register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(auth_ldap_post_config,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_check_user_id(mod_auth_ldap_check_user_id, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_auth_checker(mod_auth_ldap_auth_checker, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module auth_ldap_module = {
+ STANDARD20_MODULE_STUFF,
+ mod_auth_ldap_create_dir_config, /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ mod_auth_ldap_cmds, /* command table */
+ mod_auth_ldap_register_hooks, /* set up request processing hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.def b/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.def
new file mode 100644
index 00000000..599636fb
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.def
@@ -0,0 +1,6 @@
+IMPORT util_ldap_connection_find
+IMPORT util_ldap_connection_close
+IMPORT util_ldap_cache_checkuserid
+IMPORT util_ldap_cache_compare
+IMPORT util_ldap_cache_comparedn
+EXPORT auth_ldap_module
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.dsp
new file mode 100644
index 00000000..f26a31a0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_auth_ldap" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_auth_ldap - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_auth_ldap.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_auth_ldap.mak" CFG="mod_auth_ldap - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_auth_ldap - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_auth_ldap - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_auth_ldap - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_auth_ldap_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_auth_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_ldap.so
+# ADD LINK32 kernel32.lib wldap32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_auth_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_ldap.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_auth_ldap - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_auth_ldap_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_auth_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_ldap.so
+# ADD LINK32 kernel32.lib wldap32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_auth_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_ldap.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_auth_ldap - Win32 Release"
+# Name "mod_auth_ldap - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_auth_ldap.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_auth_ldap.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_auth_ldap - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_auth_ldap.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_auth_ldap.so "auth_ldap_module for Apache" ../../include/ap_release.h > .\mod_auth_ldap.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_auth_ldap - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_auth_ldap.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_auth_ldap.so "auth_ldap_module for Apache" ../../include/ap_release.h > .\mod_auth_ldap.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.c
new file mode 100644
index 00000000..a208a510
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.c
@@ -0,0 +1,1006 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define CORE_PRIVATE
+
+#include "mod_cache.h"
+
+module AP_MODULE_DECLARE_DATA cache_module;
+APR_OPTIONAL_FN_TYPE(ap_cache_generate_key) *cache_generate_key;
+
+/* -------------------------------------------------------------- */
+
+
+/* Handles for cache filters, resolved at startup to eliminate
+ * a name-to-function mapping on each request
+ */
+static ap_filter_rec_t *cache_save_filter_handle;
+static ap_filter_rec_t *cache_out_filter_handle;
+
+/*
+ * CACHE handler
+ * -------------
+ *
+ * Can we deliver this request from the cache?
+ * If yes:
+ * deliver the content by installing the CACHE_OUT filter.
+ * If no:
+ * check whether we're allowed to try to cache it
+ * If yes:
+ * add CACHE_SAVE filter
+ * If No:
+ * oh well.
+ */
+
+static int cache_url_handler(request_rec *r, int lookup)
+{
+ apr_status_t rv;
+ const char *pragma, *auth;
+ apr_uri_t uri;
+ char *url;
+ char *path;
+ cache_provider_list *providers;
+ cache_info *info;
+ cache_request_rec *cache;
+ cache_server_conf *conf;
+ apr_bucket_brigade *out;
+
+ /* Delay initialization until we know we are handling a GET */
+ if (r->method_number != M_GET) {
+ return DECLINED;
+ }
+
+ uri = r->parsed_uri;
+ url = r->unparsed_uri;
+ path = uri.path;
+ info = NULL;
+
+ conf = (cache_server_conf *) ap_get_module_config(r->server->module_config,
+ &cache_module);
+
+ /*
+ * Which cache module (if any) should handle this request?
+ */
+ if (!(providers = ap_cache_get_providers(r, conf, path))) {
+ return DECLINED;
+ }
+
+ /* make space for the per request config */
+ cache = (cache_request_rec *) ap_get_module_config(r->request_config,
+ &cache_module);
+ if (!cache) {
+ cache = apr_pcalloc(r->pool, sizeof(cache_request_rec));
+ ap_set_module_config(r->request_config, &cache_module, cache);
+ }
+
+ /* save away the possible providers */
+ cache->providers = providers;
+
+ /*
+ * Are we allowed to serve cached info at all?
+ */
+
+ /* find certain cache controlling headers */
+ pragma = apr_table_get(r->headers_in, "Pragma");
+ auth = apr_table_get(r->headers_in, "Authorization");
+
+ /* first things first - does the request allow us to return
+ * cached information at all? If not, just decline the request.
+ *
+ * Note that there is a big difference between not being allowed
+ * to cache a request (no-store) and not being allowed to return
+ * a cached request without revalidation (max-age=0).
+ *
+ * Caching is forbidden under the following circumstances:
+ *
+ * - RFC2616 14.9.2 Cache-Control: no-store
+ * - Pragma: no-cache
+ * - Any requests requiring authorization.
+ */
+ if (conf->ignorecachecontrol == 1 && auth == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "incoming request is asking for an uncached version of "
+ "%s, but we know better and are ignoring it", url);
+ }
+ else {
+ if (ap_cache_liststr(NULL, pragma, "no-cache", NULL) ||
+ auth != NULL) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache: no-cache or authorization forbids caching "
+ "of %s", url);
+ return DECLINED;
+ }
+ }
+
+ /*
+ * Try to serve this request from the cache.
+ *
+ * If no existing cache file (DECLINED)
+ * add cache_save filter
+ * If cached file (OK)
+ * clear filter stack
+ * add cache_out filter
+ * return OK
+ */
+ rv = cache_select_url(r, url);
+ if (rv != OK) {
+ if (rv == DECLINED) {
+ if (!lookup) {
+ /* add cache_save filter to cache this request */
+ ap_add_output_filter_handle(cache_save_filter_handle, NULL, r,
+ r->connection);
+ }
+ }
+ else {
+ /* error */
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+ "cache: error returned while checking for cached "
+ "file by %s cache", cache->provider_name);
+ }
+ return DECLINED;
+ }
+
+ /* We have located a suitable cache file now. */
+ info = &(cache->handle->cache_obj->info);
+
+ if (info && info->lastmod) {
+ ap_update_mtime(r, info->lastmod);
+ }
+
+ rv = ap_meets_conditions(r);
+ if (rv != OK) {
+ /* Return cached status. */
+ return rv;
+ }
+
+ /* If we're a lookup, we can exit now instead of serving the content. */
+ if (lookup) {
+ return OK;
+ }
+
+ /* Serve up the content */
+
+ /* We are in the quick handler hook, which means that no output
+ * filters have been set. So let's run the insert_filter hook.
+ */
+ ap_run_insert_filter(r);
+ ap_add_output_filter_handle(cache_out_filter_handle, NULL,
+ r, r->connection);
+
+ /* kick off the filter stack */
+ out = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ rv = ap_pass_brigade(r->output_filters, out);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+ "cache: error returned while trying to return %s "
+ "cached data",
+ cache->provider_name);
+ return rv;
+ }
+
+ return OK;
+}
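+
+/*
+ * Illustrative configuration (assumption): the handler above only reaches
+ * cache_select_url() for URLs that some provider claims, e.g.
+ *
+ * CacheEnable mem /
+ * CacheEnable disk /products
+ *
+ * For any other path ap_cache_get_providers() returns NULL and the
+ * request is DECLINED without touching the cache.
+ */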
+
+/*
+ * CACHE_OUT filter
+ * ----------------
+ *
+ * Deliver cached content (headers and body) up the stack.
+ */
+static int cache_out_filter(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ request_rec *r = f->r;
+ cache_request_rec *cache;
+
+ cache = (cache_request_rec *) ap_get_module_config(r->request_config,
+ &cache_module);
+
+ if (!cache) {
+ /* user likely configured CACHE_OUT manually; they should use mod_cache
+ * configuration to do that */
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "CACHE_OUT enabled unexpectedly");
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r->server,
+ "cache: running CACHE_OUT filter");
+
+ /* restore status of cached response */
+ r->status = cache->handle->status;
+
+ /* recall_headers() was called in cache_select_url() */
+ cache->provider->recall_body(cache->handle, r->pool, bb);
+
+ /* This filter is done once it has served up its content */
+ ap_remove_output_filter(f);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r->server,
+ "cache: serving %s", r->uri);
+ return ap_pass_brigade(f->next, bb);
+}
+
+
+/*
+ * CACHE_SAVE filter
+ * ---------------
+ *
+ * Decide whether or not this content should be cached.
+ * If we decide no it should not:
+ * remove the filter from the chain
+ * If we decide yes it should:
+ * Have we already started saving the response?
+ * If we have started, pass the data to the storage manager via store_body
+ * Otherwise:
+ * Check to see if we *can* save this particular response.
+ * If we can, call cache_create_entity() and save the headers and body
+ * Finally, pass the data to the next filter (the network or whatever)
+ */
+
+static int cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in)
+{
+ int rv;
+ int date_in_errhdr = 0;
+ request_rec *r = f->r;
+ cache_request_rec *cache;
+ cache_server_conf *conf;
+ char *url = r->unparsed_uri;
+ const char *cc_in, *cc_out, *cl, *vary_out;
+ const char *exps, *lastmods, *dates, *etag;
+ apr_time_t exp, date, lastmod, now;
+ apr_off_t size;
+ cache_info *info;
+ char *reason;
+ apr_pool_t *p;
+
+ /* check first whether running this filter has any point or not */
+ /* If the user has Cache-Control: no-store from RFC 2616, don't store! */
+ cc_in = apr_table_get(r->headers_in, "Cache-Control");
+ vary_out = apr_table_get(r->headers_out, "Vary");
+ if (r->no_cache || ap_cache_liststr(NULL, cc_in, "no-store", NULL) ||
+ ap_cache_liststr(NULL, vary_out, "*", NULL)) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, in);
+ }
+
+ /* Setup cache_request_rec */
+ cache = (cache_request_rec *) ap_get_module_config(r->request_config,
+ &cache_module);
+ if (!cache) {
+ /* user likely configured CACHE_SAVE manually; they should really use
+ * mod_cache configuration to do that
+ */
+ cache = apr_pcalloc(r->pool, sizeof(cache_request_rec));
+ ap_set_module_config(r->request_config, &cache_module, cache);
+ }
+
+ reason = NULL;
+ p = r->pool;
+ /*
+ * Pass Data to Cache
+ * ------------------
+ * This section passes the brigades into the cache modules, but only
+ * if the setup section (see below) is complete.
+ */
+ if (cache->block_response) {
+ /* We've already sent down the response and EOS. So, ignore
+ * whatever comes now.
+ */
+ return APR_SUCCESS;
+ }
+
+ /* have we already run the cacheability check and set up the
+ * cached file handle?
+ */
+ if (cache->in_checked) {
+ /* pass the brigades into the cache, then pass them
+ * up the filter stack
+ */
+ rv = cache->provider->store_body(cache->handle, r, in);
+ if (rv != APR_SUCCESS) {
+ ap_remove_output_filter(f);
+ }
+ return ap_pass_brigade(f->next, in);
+ }
+
+ /*
+ * Setup Data in Cache
+ * -------------------
+ * This section opens the cache entity and sets various caching
+ * parameters, and decides whether this URL should be cached at
+ * all. This section is run before the above section.
+ */
+
+ /* read expiry date; if a bad date, then leave it so the client can
+ * read it
+ */
+ exps = apr_table_get(r->err_headers_out, "Expires");
+ if (exps == NULL) {
+ exps = apr_table_get(r->headers_out, "Expires");
+ }
+ if (exps != NULL) {
+ if (APR_DATE_BAD == (exp = apr_date_parse_http(exps))) {
+ exps = NULL;
+ }
+ }
+ else {
+ exp = APR_DATE_BAD;
+ }
+
+ /* read the last-modified date; if the date is bad, then delete it */
+ lastmods = apr_table_get(r->err_headers_out, "Last-Modified");
+ if (lastmods == NULL) {
+ lastmods = apr_table_get(r->headers_out, "Last-Modified");
+ }
+ if (lastmods != NULL) {
+ if (APR_DATE_BAD == (lastmod = apr_date_parse_http(lastmods))) {
+ lastmods = NULL;
+ }
+ }
+ else {
+ lastmod = APR_DATE_BAD;
+ }
+
+ conf = (cache_server_conf *) ap_get_module_config(r->server->module_config, &cache_module);
+ /* read the etag and cache-control from the entity */
+ etag = apr_table_get(r->err_headers_out, "Etag");
+ if (etag == NULL) {
+ etag = apr_table_get(r->headers_out, "Etag");
+ }
+ cc_out = apr_table_get(r->err_headers_out, "Cache-Control");
+ if (cc_out == NULL) {
+ cc_out = apr_table_get(r->headers_out, "Cache-Control");
+ }
+
+ /*
+ * what responses should we not cache?
+ *
+ * At this point we decide based on the response headers whether it
+ * is appropriate _NOT_ to cache the data from the server. There are
+ * a whole lot of conditions that prevent us from caching this data.
+ * They are tested here one by one to be clear and unambiguous.
+ */
+ if (r->status != HTTP_OK && r->status != HTTP_NON_AUTHORITATIVE
+ && r->status != HTTP_MULTIPLE_CHOICES
+ && r->status != HTTP_MOVED_PERMANENTLY
+ && r->status != HTTP_NOT_MODIFIED) {
+ /* RFC2616 13.4 we are allowed to cache 200, 203, 206, 300, 301 or 410
+ * We don't cache 206, because we don't (yet) cache partial responses.
+ * We include 304 Not Modified here too as this is the origin server
+ * telling us to serve the cached copy.
+ */
+ reason = apr_psprintf(p, "Response status %d", r->status);
+ }
+ else if (exps != NULL && exp == APR_DATE_BAD) {
+ /* if a broken Expires header is present, don't cache it */
+ reason = apr_pstrcat(p, "Broken expires header: ", exps, NULL);
+ }
+ else if (r->args && exps == NULL) {
+ /* if query string present but no expiration time, don't cache it
+ * (RFC 2616/13.9)
+ */
+ reason = "Query string present but no expires header";
+ }
+ else if (r->status == HTTP_NOT_MODIFIED &&
+ !cache->handle && !cache->stale_handle) {
+ /* if the server said 304 Not Modified but we have no cache
+ * file - pass this untouched to the user agent, it's not for us.
+ */
+ reason = "HTTP Status 304 Not Modified";
+ }
+ else if (r->status == HTTP_OK && lastmods == NULL && etag == NULL
+ && (exps == NULL) && (conf->no_last_mod_ignore ==0)) {
+ /* 200 OK response from HTTP/1.0 and up without Last-Modified,
+ * Etag, or Expires headers.
+ */
+ /* Note: mod-include clears last_modified/expires/etags - this
+ * is why we have an optional function for a key-gen ;-)
+ */
+ reason = "No Last-Modified, Etag, or Expires headers";
+ }
+ else if (r->header_only) {
+ /* HEAD requests */
+ reason = "HTTP HEAD request";
+ }
+ else if (ap_cache_liststr(NULL, cc_out, "no-store", NULL)) {
+ /* RFC2616 14.9.2 Cache-Control: no-store response
+ * indicating do not cache, or stop now if you are
+ * trying to cache it */
+ reason = "Cache-Control: no-store present";
+ }
+ else if (ap_cache_liststr(NULL, cc_out, "private", NULL)) {
+ /* RFC2616 14.9.1 Cache-Control: private
+ * this object is marked for this user's eyes only. Behave
+ * as a tunnel.
+ */
+ reason = "Cache-Control: private present";
+ }
+ else if (apr_table_get(r->headers_in, "Authorization") != NULL
+ && !(ap_cache_liststr(NULL, cc_out, "s-maxage", NULL)
+ || ap_cache_liststr(NULL, cc_out, "must-revalidate", NULL)
+ || ap_cache_liststr(NULL, cc_out, "public", NULL))) {
+ /* RFC2616 14.8 Authorisation:
+ * if authorisation is included in the request, we don't cache,
+ * but we can cache if the following exceptions are true:
+ * 1) If Cache-Control: s-maxage is included
+ * 2) If Cache-Control: must-revalidate is included
+ * 3) If Cache-Control: public is included
+ */
+ reason = "Authorization required";
+ }
+ else if (r->no_cache) {
+ /* or we've been asked not to cache it above */
+ reason = "no_cache present";
+ }
+
+ if (reason) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache: %s not cached. Reason: %s", url, reason);
+ /* remove this object from the cache
+ * BillS Asks.. Why do we need to make this call to remove_url?
+ * leave it in for now..
+ */
+ cache_remove_url(r, url);
+
+ /* remove this filter from the chain */
+ ap_remove_output_filter(f);
+
+ /* ship the data up the stack */
+ return ap_pass_brigade(f->next, in);
+ }
+
+ /* Make it so that we don't execute this path again. */
+ cache->in_checked = 1;
+
+ /* Set the content length if known.
+ */
+ cl = apr_table_get(r->err_headers_out, "Content-Length");
+ if (cl == NULL) {
+ cl = apr_table_get(r->headers_out, "Content-Length");
+ }
+ if (cl) {
+#if 0
+ char *errp;
+ if (apr_strtoff(&size, cl, &errp, 10) || *errp || size < 0) {
+ cl = NULL; /* parse error, see next 'if' block */
+ }
+#else
+ size = apr_atoi64(cl);
+ if (size < 0) {
+ cl = NULL;
+ }
+#endif
+ }
+
+ if (!cl) {
+ /* if we don't get the content-length, see if we have all the
+ * buckets and use their length to calculate the size
+ */
+ apr_bucket *e;
+ int all_buckets_here=0;
+ int unresolved_length = 0;
+ size=0;
+ for (e = APR_BRIGADE_FIRST(in);
+ e != APR_BRIGADE_SENTINEL(in);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (APR_BUCKET_IS_EOS(e)) {
+ all_buckets_here=1;
+ break;
+ }
+ if (APR_BUCKET_IS_FLUSH(e)) {
+ unresolved_length = 1;
+ continue;
+ }
+ if (e->length == (apr_size_t)-1) {
+ break;
+ }
+ size += e->length;
+ }
+ if (!all_buckets_here) {
+ size = -1;
+ }
+ }
+
+ /* It's safe to cache the response.
+ *
+ * There are two possibilities at this point:
+ * - cache->handle == NULL. In this case there is no previously
+ * cached entity anywhere on the system. We must create a brand
+ * new entity and store the response in it.
+ * - cache->stale_handle != NULL. In this case there is a stale
+ * entity in the system which needs to be replaced by new
+ * content (unless the result was 304 Not Modified, which means
+ * the cached entity is actually fresh, and we should update
+ * the headers).
+ */
+
+ /* Did we have a stale cache entry that really is stale? */
+ if (cache->stale_handle) {
+ if (r->status == HTTP_NOT_MODIFIED) {
+ /* Oh, hey. It isn't that stale! Yay! */
+ cache->handle = cache->stale_handle;
+ info = &cache->handle->cache_obj->info;
+ }
+ else {
+ /* Oh, well. Toss it. */
+ cache->provider->remove_entity(cache->stale_handle);
+ /* Treat the request as if it wasn't conditional. */
+ cache->stale_handle = NULL;
+ }
+ }
+
+ /* no cache handle, create a new entity */
+ if (!cache->handle) {
+ rv = cache_create_entity(r, url, size);
+ info = apr_pcalloc(r->pool, sizeof(cache_info));
+ /* We only set info->status upon the initial creation. */
+ info->status = r->status;
+ }
+
+ if (rv != OK) {
+ /* Caching layer declined the opportunity to cache the response */
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, in);
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache: Caching url: %s", url);
+
+ /*
+ * We now want to update the cache file header information with
+ * the new date, last modified, expire and content length and write
+ * it away to our cache file. First, we determine these values from
+ * the response, using heuristics if appropriate.
+ *
+ * In addition, we make HTTP/1.1 age calculations and write them away
+ * too.
+ */
+
+ /* Read the date. Generate one if one is not supplied */
+ dates = apr_table_get(r->err_headers_out, "Date");
+ if (dates != NULL) {
+ date_in_errhdr = 1;
+ }
+ else {
+ dates = apr_table_get(r->headers_out, "Date");
+ }
+ if (dates != NULL) {
+ info->date = apr_date_parse_http(dates);
+ }
+ else {
+ info->date = APR_DATE_BAD;
+ }
+
+ now = apr_time_now();
+ if (info->date == APR_DATE_BAD) { /* No, or bad date */
+ char *dates;
+ /* no date header (or bad header)! */
+ /* add one; N.B. use the time _now_ rather than when we were checking
+ * the cache
+ */
+ if (date_in_errhdr == 1) {
+ apr_table_unset(r->err_headers_out, "Date");
+ }
+ date = now;
+ dates = apr_pcalloc(r->pool, MAX_STRING_LEN);
+ apr_rfc822_date(dates, now);
+ apr_table_set(r->headers_out, "Date", dates);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache: Added date header");
+ info->date = date;
+ }
+ else {
+ date = info->date;
+ }
+
+ /* set response_time for HTTP/1.1 age calculations */
+ info->response_time = now;
+
+ /* get the request time */
+ info->request_time = r->request_time;
+
+ /* check last-modified date */
+ if (lastmod != APR_DATE_BAD && lastmod > date) {
+ /* if it's in the future, then replace by date */
+ lastmod = date;
+ lastmods = dates;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
+ r->server,
+ "cache: Last modified is in the future, "
+ "replacing with now");
+ }
+ info->lastmod = lastmod;
+
+ /* if no expiry date then
+ * if lastmod
+ * expiry date = date + min((date - lastmod) * factor, maxexpire)
+ * else
+ * expire date = date + defaultexpire
+ */
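+ /* Worked example (illustrative numbers only, assuming the default
+ * CacheLastModifiedFactor of 0.1): a response dated now whose
+ * Last-Modified is 10 hours old gets an expiry of now + 1 hour,
+ * never more than now + CacheMaxExpire; with no usable Last-Modified
+ * it is simply now + CacheDefaultExpire.
+ */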
+ if (exp == APR_DATE_BAD) {
+ /* if lastmod == date then you get 0*conf->factor which results in
+ * an expiration time of now. This causes some problems with
+ * freshness calculations, so we choose the else path...
+ */
+ if ((lastmod != APR_DATE_BAD) && (lastmod < date)) {
+ apr_time_t x = (apr_time_t) ((date - lastmod) * conf->factor);
+
+ if (x > conf->maxex) {
+ x = conf->maxex;
+ }
+ exp = date + x;
+ }
+ else {
+ exp = date + conf->defex;
+ }
+ }
+ info->expire = exp;
+
+ info->content_type = apr_pstrdup(r->pool, r->content_type);
+ info->etag = apr_pstrdup(r->pool, etag);
+ info->lastmods = apr_pstrdup(r->pool, lastmods);
+ info->filename = apr_pstrdup(r->pool, r->filename);
+
+ /*
+ * Write away header information to cache.
+ */
+ rv = cache->provider->store_headers(cache->handle, r, info);
+
+ /* Did we actually find an entity before, but it wasn't really stale? */
+ if (rv == APR_SUCCESS && cache->stale_handle) {
+ apr_bucket_brigade *bb;
+ apr_bucket *bkt;
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+
+ /* Were we initially a conditional request? */
+ if (ap_cache_request_is_conditional(cache->stale_headers)) {
+ /* FIXME: Should we now go and make sure it's really not
+ * modified since what the user thought?
+ */
+ bkt = apr_bucket_eos_create(bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, bkt);
+ }
+ else {
+ r->status = info->status;
+ cache->provider->recall_body(cache->handle, r->pool, bb);
+ }
+
+ cache->block_response = 1;
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ if (rv == APR_SUCCESS) {
+ rv = cache->provider->store_body(cache->handle, r, in);
+ }
+ if (rv != APR_SUCCESS) {
+ ap_remove_output_filter(f);
+ }
+
+ return ap_pass_brigade(f->next, in);
+}
+
+/* -------------------------------------------------------------- */
+/* Setup configurable data */
+
+static void * create_cache_config(apr_pool_t *p, server_rec *s)
+{
+ cache_server_conf *ps = apr_pcalloc(p, sizeof(cache_server_conf));
+
+ /* array of URL prefixes for which caching is enabled */
+ ps->cacheenable = apr_array_make(p, 10, sizeof(struct cache_enable));
+ /* array of URL prefixes for which caching is disabled */
+ ps->cachedisable = apr_array_make(p, 10, sizeof(struct cache_disable));
+ /* maximum time to cache a document */
+ ps->maxex = DEFAULT_CACHE_MAXEXPIRE;
+ ps->maxex_set = 0;
+ /* default time to cache a document */
+ ps->defex = DEFAULT_CACHE_EXPIRE;
+ ps->defex_set = 0;
+ /* factor used to estimate Expires date from LastModified date */
+ ps->factor = DEFAULT_CACHE_LMFACTOR;
+ ps->factor_set = 0;
+ /* default percentage to force cache completion */
+ ps->complete = DEFAULT_CACHE_COMPLETION;
+ ps->complete_set = 0;
+ ps->no_last_mod_ignore_set = 0;
+ ps->no_last_mod_ignore = 0;
+ ps->ignorecachecontrol = 0;
+ ps->ignorecachecontrol_set = 0;
+ /* array of headers that should not be stored in cache */
+ ps->ignore_headers = apr_array_make(p, 10, sizeof(char *));
+ ps->ignore_headers_set = CACHE_IGNORE_HEADERS_UNSET;
+ return ps;
+}
+
+static void * merge_cache_config(apr_pool_t *p, void *basev, void *overridesv)
+{
+ cache_server_conf *ps = apr_pcalloc(p, sizeof(cache_server_conf));
+ cache_server_conf *base = (cache_server_conf *) basev;
+ cache_server_conf *overrides = (cache_server_conf *) overridesv;
+
+ /* array of URL prefixes for which caching is disabled */
+ ps->cachedisable = apr_array_append(p,
+ base->cachedisable,
+ overrides->cachedisable);
+ /* array of URL prefixes for which caching is enabled */
+ ps->cacheenable = apr_array_append(p,
+ base->cacheenable,
+ overrides->cacheenable);
+ /* maximum time to cache a document */
+ ps->maxex = (overrides->maxex_set == 0) ? base->maxex : overrides->maxex;
+ /* default time to cache a document */
+ ps->defex = (overrides->defex_set == 0) ? base->defex : overrides->defex;
+ /* factor used to estimate Expires date from LastModified date */
+ ps->factor =
+ (overrides->factor_set == 0) ? base->factor : overrides->factor;
+ /* default percentage to force cache completion */
+ ps->complete =
+ (overrides->complete_set == 0) ? base->complete : overrides->complete;
+
+ ps->no_last_mod_ignore =
+ (overrides->no_last_mod_ignore_set == 0)
+ ? base->no_last_mod_ignore
+ : overrides->no_last_mod_ignore;
+ ps->ignorecachecontrol =
+ (overrides->ignorecachecontrol_set == 0)
+ ? base->ignorecachecontrol
+ : overrides->ignorecachecontrol;
+ ps->ignore_headers =
+ (overrides->ignore_headers_set == CACHE_IGNORE_HEADERS_UNSET)
+ ? base->ignore_headers
+ : overrides->ignore_headers;
+ return ps;
+}
+
+static const char *set_cache_ignore_no_last_mod(cmd_parms *parms, void *dummy,
+ int flag)
+{
+ cache_server_conf *conf;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ conf->no_last_mod_ignore = flag;
+ conf->no_last_mod_ignore_set = 1;
+ return NULL;
+}
+
+static const char *set_cache_ignore_cachecontrol(cmd_parms *parms,
+ void *dummy, int flag)
+{
+ cache_server_conf *conf;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ conf->ignorecachecontrol = flag;
+ conf->ignorecachecontrol_set = 1;
+ return NULL;
+}
+
+static const char *add_ignore_header(cmd_parms *parms, void *dummy,
+ const char *header)
+{
+ cache_server_conf *conf;
+ char **new;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ if (!strncasecmp(header, "None", 4)) {
+ /* if header None is listed clear array */
+ conf->ignore_headers->nelts = 0;
+ }
+ else {
+ if ((conf->ignore_headers_set == CACHE_IGNORE_HEADERS_UNSET) ||
+ (conf->ignore_headers->nelts)) {
+ /* Only add header if no "None" has been found in header list
+ * so far.
+ * (When 'None' is passed, IGNORE_HEADERS_SET && nelts == 0.)
+ */
+ new = (char **)apr_array_push(conf->ignore_headers);
+ (*new) = (char*)header;
+ }
+ }
+ conf->ignore_headers_set = CACHE_IGNORE_HEADERS_SET;
+ return NULL;
+}
+
+static const char *add_cache_enable(cmd_parms *parms, void *dummy,
+ const char *type,
+ const char *url)
+{
+ cache_server_conf *conf;
+ struct cache_enable *new;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ new = apr_array_push(conf->cacheenable);
+ new->type = type;
+ new->url = url;
+ new->urllen = strlen(url);
+ return NULL;
+}
+
+static const char *add_cache_disable(cmd_parms *parms, void *dummy,
+ const char *url)
+{
+ cache_server_conf *conf;
+ struct cache_disable *new;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ new = apr_array_push(conf->cachedisable);
+ new->url = url;
+ new->urllen = strlen(url);
+ return NULL;
+}
+
+static const char *set_cache_maxex(cmd_parms *parms, void *dummy,
+ const char *arg)
+{
+ cache_server_conf *conf;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ conf->maxex = (apr_time_t) (atol(arg) * MSEC_ONE_SEC);
+ conf->maxex_set = 1;
+ return NULL;
+}
+
+static const char *set_cache_defex(cmd_parms *parms, void *dummy,
+ const char *arg)
+{
+ cache_server_conf *conf;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ conf->defex = (apr_time_t) (atol(arg) * MSEC_ONE_SEC);
+ conf->defex_set = 1;
+ return NULL;
+}
+
+static const char *set_cache_factor(cmd_parms *parms, void *dummy,
+ const char *arg)
+{
+ cache_server_conf *conf;
+ double val;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ if (sscanf(arg, "%lg", &val) != 1) {
+ return "CacheLastModifiedFactor value must be a float";
+ }
+ conf->factor = val;
+ conf->factor_set = 1;
+ return NULL;
+}
+
+static const char *set_cache_complete(cmd_parms *parms, void *dummy,
+ const char *arg)
+{
+ cache_server_conf *conf;
+ int val;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ if (sscanf(arg, "%d", &val) != 1) {
+ return "CacheForceCompletion value must be a percentage";
+ }
+ conf->complete = val;
+ conf->complete_set = 1;
+ return NULL;
+}
+
+static int cache_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ /* Retrieve the optional cache key generation function. If another
+ * module has registered ap_cache_generate_key, use it; otherwise fall
+ * back to the built-in default key generator.
+ */
+ cache_generate_key = APR_RETRIEVE_OPTIONAL_FN(ap_cache_generate_key);
+ if (!cache_generate_key) {
+ cache_generate_key = cache_generate_key_default;
+ }
+ return OK;
+}
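+
+/* Illustration only (not part of this module): another module can take over
+ * key generation by defining and registering the optional function before
+ * this post_config hook runs, roughly like (function body is hypothetical):
+ *
+ *     static apr_status_t ap_cache_generate_key(request_rec *r, apr_pool_t *p,
+ *                                               char **key)
+ *     {
+ *         *key = apr_pstrdup(p, r->uri);    (hypothetical: key on the URI only)
+ *         return APR_SUCCESS;
+ *     }
+ *
+ *     APR_REGISTER_OPTIONAL_FN(ap_cache_generate_key);    (in register_hooks)
+ */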
+
+static const command_rec cache_cmds[] =
+{
+ /* XXX
+ * Consider a new config directive that enables loading specific cache
+ * implementations (like mod_cache_mem, mod_cache_file, etc.).
+ * Rather than using a LoadModule directive, an admin would use something
+ * like CacheModule mem_cache_module | file_cache_module, etc.,
+ * which would cause the appropriate cache module to be loaded.
+ * This is more intuitive than requiring a LoadModule directive.
+ */
+
+ AP_INIT_TAKE2("CacheEnable", add_cache_enable, NULL, RSRC_CONF,
+ "A cache type and partial URL prefix below which "
+ "caching is enabled"),
+ AP_INIT_TAKE1("CacheDisable", add_cache_disable, NULL, RSRC_CONF,
+ "A partial URL prefix below which caching is disabled"),
+ AP_INIT_TAKE1("CacheMaxExpire", set_cache_maxex, NULL, RSRC_CONF,
+ "The maximum time in seconds to cache a document"),
+ AP_INIT_TAKE1("CacheDefaultExpire", set_cache_defex, NULL, RSRC_CONF,
+ "The default time in seconds to cache a document"),
+ AP_INIT_FLAG("CacheIgnoreNoLastMod", set_cache_ignore_no_last_mod, NULL,
+ RSRC_CONF,
+ "Ignore Responses where there is no Last Modified Header"),
+ AP_INIT_FLAG("CacheIgnoreCacheControl", set_cache_ignore_cachecontrol,
+ NULL,
+ RSRC_CONF,
+ "Ignore requests from the client for uncached content"),
+ AP_INIT_ITERATE("CacheIgnoreHeaders", add_ignore_header, NULL, RSRC_CONF,
+ "A space separated list of headers that should not be "
+ "stored by the cache"),
+ AP_INIT_TAKE1("CacheLastModifiedFactor", set_cache_factor, NULL, RSRC_CONF,
+ "The factor used to estimate Expires date from "
+ "LastModified date"),
+ AP_INIT_TAKE1("CacheForceCompletion", set_cache_complete, NULL, RSRC_CONF,
+ "Percentage of download to arrive for the cache to force "
+ "complete transfer"),
+ {NULL}
+};
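+
+/* Illustration only: a minimal (hypothetical) server configuration using the
+ * directives above, assuming a cache provider such as mod_disk_cache or
+ * mod_mem_cache has been loaded:
+ *
+ *     CacheEnable disk /
+ *     CacheDisable /private
+ *     CacheMaxExpire 86400
+ *     CacheDefaultExpire 3600
+ *     CacheLastModifiedFactor 0.1
+ *     CacheIgnoreNoLastMod On
+ *     CacheIgnoreHeaders Set-Cookie
+ */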
+
+static void register_hooks(apr_pool_t *p)
+{
+ /* cache initializer */
+ /* cache handler */
+ ap_hook_quick_handler(cache_url_handler, NULL, NULL, APR_HOOK_FIRST);
+ /* cache filters
+ * XXX The cache filters need to run right after the handlers and before
+ * any other filters. Consider creating AP_FTYPE_CACHE for this purpose.
+ * Make them AP_FTYPE_CONTENT for now.
+ * XXX ianhH: they should run AFTER all the other content filters.
+ */
+ cache_save_filter_handle =
+ ap_register_output_filter("CACHE_SAVE",
+ cache_save_filter,
+ NULL,
+ AP_FTYPE_CONTENT_SET-1);
+ /* CACHE_OUT must go into the filter chain before SUBREQ_CORE to
+ * handle subrequests. Decrementing filter type by 1 ensures this
+ * happens.
+ */
+ cache_out_filter_handle =
+ ap_register_output_filter("CACHE_OUT",
+ cache_out_filter,
+ NULL,
+ AP_FTYPE_CONTENT_SET-1);
+ ap_hook_post_config(cache_post_config, NULL, NULL, APR_HOOK_REALLY_FIRST);
+}
+
+module AP_MODULE_DECLARE_DATA cache_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ create_cache_config, /* create per-server config structure */
+ merge_cache_config, /* merge per-server config structures */
+ cache_cmds, /* command apr_table_t */
+ register_hooks
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.dsp
new file mode 100644
index 00000000..8fea9982
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.dsp
@@ -0,0 +1,168 @@
+# Microsoft Developer Studio Project File - Name="mod_cache" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_cache - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_cache.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_cache.mak" CFG="mod_cache - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_cache - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_cache - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_cache - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "MOD_CACHE_EXPORTS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "CACHE_DECLARE_EXPORT" /D "MOD_CACHE_EXPORTS" /Fd"Release\mod_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cache.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_cache - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "CACHE_DECLARE_EXPORT" /Fd"Debug\mod_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cache.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_cache - Win32 Release"
+# Name "mod_cache - Win32 Debug"
+# Begin Group "Source Files"
+
+# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;hpj;bat;for;f90"
+# Begin Source File
+
+SOURCE=.\cache_cache.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_hash.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_pqueue.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_storage.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_cache.c
+# End Source File
+# End Group
+# Begin Group "Header Files"
+
+# PROP Default_Filter "h;hpp;hxx;hm;inl"
+# Begin Source File
+
+SOURCE=.\cache_cache.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_hash.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_pqueue.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_cache.h
+# End Source File
+# End Group
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_cache - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_cache.so "cache_module for Apache" ../../include/ap_release.h > .\mod_cache.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_cache - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_cache.so "cache_module for Apache" ../../include/ap_release.h > .\mod_cache.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.h b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.h
new file mode 100644
index 00000000..62298a50
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.h
@@ -0,0 +1,319 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MOD_CACHE_H
+#define MOD_CACHE_H
+
+/*
+ * Main include file for the Apache Transparent Cache
+ */
+
+#define CORE_PRIVATE
+
+#include "apr_hooks.h"
+#include "apr.h"
+#include "apr_lib.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_md5.h"
+#include "apr_pools.h"
+#include "apr_strings.h"
+#include "apr_optional.h"
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "ap_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_main.h"
+#include "http_log.h"
+#include "http_connection.h"
+#include "util_filter.h"
+#include "apr_date.h"
+#include "apr_uri.h"
+
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+#ifdef HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+
+#include "apr_atomic.h"
+
+#ifndef MAX
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#endif
+#ifndef MIN
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#endif
+
+/* default completion is 60% */
+#define DEFAULT_CACHE_COMPLETION (60)
+#define MSEC_ONE_DAY ((apr_time_t)(86400*APR_USEC_PER_SEC)) /* one day, in microseconds */
+#define MSEC_ONE_HR ((apr_time_t)(3600*APR_USEC_PER_SEC)) /* one hour, in microseconds */
+#define MSEC_ONE_MIN ((apr_time_t)(60*APR_USEC_PER_SEC)) /* one minute, in microseconds */
+#define MSEC_ONE_SEC ((apr_time_t)(APR_USEC_PER_SEC)) /* one second, in microseconds */
+#define DEFAULT_CACHE_MAXEXPIRE MSEC_ONE_DAY
+#define DEFAULT_CACHE_EXPIRE MSEC_ONE_HR
+#define DEFAULT_CACHE_LMFACTOR (0.1)
+
+/* Create a set of CACHE_DECLARE(type), CACHE_DECLARE_NONSTD(type) and
+ * CACHE_DECLARE_DATA with appropriate export and import tags for the platform
+ */
+#if !defined(WIN32)
+#define CACHE_DECLARE(type) type
+#define CACHE_DECLARE_NONSTD(type) type
+#define CACHE_DECLARE_DATA
+#elif defined(CACHE_DECLARE_STATIC)
+#define CACHE_DECLARE(type) type __stdcall
+#define CACHE_DECLARE_NONSTD(type) type
+#define CACHE_DECLARE_DATA
+#elif defined(CACHE_DECLARE_EXPORT)
+#define CACHE_DECLARE(type) __declspec(dllexport) type __stdcall
+#define CACHE_DECLARE_NONSTD(type) __declspec(dllexport) type
+#define CACHE_DECLARE_DATA __declspec(dllexport)
+#else
+#define CACHE_DECLARE(type) __declspec(dllimport) type __stdcall
+#define CACHE_DECLARE_NONSTD(type) __declspec(dllimport) type
+#define CACHE_DECLARE_DATA __declspec(dllimport)
+#endif
+
+struct cache_enable {
+ const char *url;
+ const char *type;
+ apr_size_t urllen;
+};
+
+struct cache_disable {
+ const char *url;
+ apr_size_t urllen;
+};
+
+/* static information about the local cache */
+typedef struct {
+ apr_array_header_t *cacheenable; /* URLs to cache */
+ apr_array_header_t *cachedisable; /* URLs not to cache */
+ apr_time_t maxex; /* Maximum time to keep cached files in microseconds */
+ int maxex_set;
+ apr_time_t defex; /* default time to keep cached file in microseconds */
+ int defex_set;
+ double factor; /* factor for estimating expires date */
+ int factor_set;
+ int complete; /* Force cache completion after this point */
+ int complete_set;
+ /** ignore the absence of a Last-Modified header when deciding to cache this response */
+ int no_last_mod_ignore_set;
+ int no_last_mod_ignore;
+ /** ignore client's requests for uncached responses */
+ int ignorecachecontrol;
+ int ignorecachecontrol_set;
+ /** store the headers that should not be stored in the cache */
+ apr_array_header_t *ignore_headers;
+ /* flag if CacheIgnoreHeader has been set */
+ #define CACHE_IGNORE_HEADERS_SET 1
+ #define CACHE_IGNORE_HEADERS_UNSET 0
+ int ignore_headers_set;
+} cache_server_conf;
+
+/* cache info information */
+typedef struct cache_info cache_info;
+struct cache_info {
+ int status;
+ char *content_type;
+ char *etag;
+ char *lastmods; /* last modified of cache entity */
+ char *filename;
+ apr_time_t date;
+ apr_time_t lastmod;
+ char lastmod_str[APR_RFC822_DATE_LEN];
+ apr_time_t expire;
+ apr_time_t request_time;
+ apr_time_t response_time;
+ apr_size_t len;
+ apr_time_t ims; /* If-Modified-Since header value */
+ apr_time_t ius; /* If-Unmodified-Since header value */
+ const char *im; /* If-Match header value */
+ const char *inm; /* If-None-Match header value */
+};
+
+/* cache handle information */
+
+/* XXX TODO On the next structure change/MMN bump,
+ * count must become an apr_off_t, representing
+ * the potential size of disk cached objects.
+ * Then dig for
+ * "XXX Bad Temporary Cast - see cache_object_t notes"
+ */
+typedef struct cache_object cache_object_t;
+struct cache_object {
+ char *key;
+ cache_object_t *next;
+ cache_info info;
+ void *vobj; /* Opaque portion (specific to the cache implementation) of the cache object */
+ apr_size_t count; /* Number of body bytes written to the cache so far */
+ int complete;
+ apr_atomic_t refcount;
+ apr_size_t cleanup;
+};
+
+typedef struct cache_handle cache_handle_t;
+
+#define CACHE_PROVIDER_GROUP "cache"
+
+typedef struct {
+ int (*remove_entity) (cache_handle_t *h);
+ apr_status_t (*store_headers)(cache_handle_t *h, request_rec *r, cache_info *i);
+ apr_status_t (*store_body)(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b);
+ apr_status_t (*recall_headers) (cache_handle_t *h, request_rec *r);
+ apr_status_t (*recall_body) (cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
+ int (*create_entity) (cache_handle_t *h, request_rec *r,
+ const char *urlkey, apr_off_t len);
+ int (*open_entity) (cache_handle_t *h, request_rec *r,
+ const char *urlkey);
+ int (*remove_url) (const char *urlkey);
+} cache_provider;
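+
+/* Illustration only: a cache backend (mod_disk_cache, mod_mem_cache, ...)
+ * fills in one of these tables and registers it under CACHE_PROVIDER_GROUP,
+ * roughly as follows (a sketch assuming the ap_provider.h registration API
+ * is available in this tree; names are hypothetical):
+ *
+ *     static const cache_provider cache_disk_provider = {
+ *         &remove_entity,
+ *         &store_headers,
+ *         &store_body,
+ *         &recall_headers,
+ *         &recall_body,
+ *         &create_entity,
+ *         &open_entity,
+ *         &remove_url,
+ *     };
+ *
+ *     ap_register_provider(p, CACHE_PROVIDER_GROUP, "disk", "0",
+ *                          &cache_disk_provider);
+ */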
+
+/* A linked-list of cache providers. */
+typedef struct cache_provider_list cache_provider_list;
+
+struct cache_provider_list {
+ const char *provider_name;
+ const cache_provider *provider;
+ cache_provider_list *next;
+};
+
+struct cache_handle {
+ cache_object_t *cache_obj;
+ apr_table_t *req_hdrs; /* cached request headers */
+ apr_table_t *resp_hdrs; /* cached response headers */
+ apr_table_t *resp_err_hdrs; /* cached response err headers */
+ const char *content_type; /* cached content type */
+ int status; /* cached status */
+};
+
+/* per request cache information */
+typedef struct {
+ cache_provider_list *providers; /* possible cache providers */
+ const cache_provider *provider; /* current cache provider */
+ const char *provider_name; /* current cache provider name */
+ int fresh; /* is the entity fresh? */
+ cache_handle_t *handle; /* current cache handle */
+ cache_handle_t *stale_handle; /* stale cache handle */
+ apr_table_t *stale_headers; /* original request headers. */
+ int in_checked; /* CACHE_SAVE must cache the entity */
+ int block_response; /* CACHE_SAVE must block response. */
+ apr_bucket_brigade *saved_brigade; /* copy of partial response */
+ apr_off_t saved_size; /* length of saved_brigade */
+ apr_time_t exp; /* expiration */
+ apr_time_t lastmod; /* last-modified time */
+ cache_info *info; /* current cache info */
+} cache_request_rec;
+
+
+/* cache_util.c */
+/* do a HTTP/1.1 age calculation */
+CACHE_DECLARE(apr_time_t) ap_cache_current_age(cache_info *info, const apr_time_t age_value,
+ apr_time_t now);
+
+/**
+ * Check the freshness of the cache object per RFC2616 section 13.2 (Expiration Model)
+ * @param h cache_handle_t
+ * @param r request_rec
+ * @return 0 ==> cache object is stale, 1 ==> cache object is fresh
+ */
+CACHE_DECLARE(int) ap_cache_check_freshness(cache_handle_t *h, request_rec *r);
+CACHE_DECLARE(apr_time_t) ap_cache_hex2usec(const char *x);
+CACHE_DECLARE(void) ap_cache_usec2hex(apr_time_t j, char *y);
+CACHE_DECLARE(char *) generate_name(apr_pool_t *p, int dirlevels,
+ int dirlength,
+ const char *name);
+CACHE_DECLARE(int) ap_cache_request_is_conditional(apr_table_t *table);
+CACHE_DECLARE(cache_provider_list *)ap_cache_get_providers(request_rec *r, cache_server_conf *conf, const char *url);
+CACHE_DECLARE(int) ap_cache_liststr(apr_pool_t *p, const char *list,
+ const char *key, char **val);
+CACHE_DECLARE(const char *)ap_cache_tokstr(apr_pool_t *p, const char *list, const char **str);
+
+/* Create a new table consisting of those elements from a request_rec's
+ * headers_out that are allowed to be stored in a cache
+ */
+CACHE_DECLARE(apr_table_t *)ap_cache_cacheable_hdrs_out(apr_pool_t *pool,
+ apr_table_t *t,
+ server_rec *s);
+
+/**
+ * cache_storage.c
+ */
+int cache_remove_url(request_rec *r, char *url);
+int cache_create_entity(request_rec *r, char *url, apr_off_t size);
+int cache_select_url(request_rec *r, char *url);
+apr_status_t cache_generate_key_default( request_rec *r, apr_pool_t*p, char**key );
+/**
+ * Create a key for the cache based on the request record.
+ * This is the 'default' version, which can be overridden by an optional function
+ * (see ap_cache_generate_key).
+ */
+const char* cache_create_key( request_rec*r );
+
+/*
+apr_status_t cache_store_entity_headers(cache_handle_t *h, request_rec *r, cache_info *info);
+apr_status_t cache_store_entity_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *bb);
+
+apr_status_t cache_recall_entity_headers(cache_handle_t *h, request_rec *r);
+apr_status_t cache_recall_entity_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
+*/
+
+/* hooks */
+
+/* Create a set of CACHE_DECLARE(type), CACHE_DECLARE_NONSTD(type) and
+ * CACHE_DECLARE_DATA with appropriate export and import tags for the platform
+ */
+#if !defined(WIN32)
+#define CACHE_DECLARE(type) type
+#define CACHE_DECLARE_NONSTD(type) type
+#define CACHE_DECLARE_DATA
+#elif defined(CACHE_DECLARE_STATIC)
+#define CACHE_DECLARE(type) type __stdcall
+#define CACHE_DECLARE_NONSTD(type) type
+#define CACHE_DECLARE_DATA
+#elif defined(CACHE_DECLARE_EXPORT)
+#define CACHE_DECLARE(type) __declspec(dllexport) type __stdcall
+#define CACHE_DECLARE_NONSTD(type) __declspec(dllexport) type
+#define CACHE_DECLARE_DATA __declspec(dllexport)
+#else
+#define CACHE_DECLARE(type) __declspec(dllimport) type __stdcall
+#define CACHE_DECLARE_NONSTD(type) __declspec(dllimport) type
+#define CACHE_DECLARE_DATA __declspec(dllimport)
+#endif
+
+APR_DECLARE_OPTIONAL_FN(apr_status_t,
+ ap_cache_generate_key,
+ (request_rec *r, apr_pool_t*p, char**key ));
+
+
+#endif /*MOD_CACHE_H*/
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.imp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.imp
new file mode 100644
index 00000000..6bf4db08
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.imp
@@ -0,0 +1,10 @@
+ (MODCACHE)
+ ap_cache_request_is_conditional,
+ ap_cache_get_providers,
+ ap_cache_liststr,
+ ap_cache_tokstr,
+ ap_cache_hex2usec,
+ ap_cache_usec2hex,
+ ap_cache_cacheable_hdrs_out,
+ generate_name
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter.c
new file mode 100644
index 00000000..657c174b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter.c
@@ -0,0 +1,137 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "apr_buckets.h"
+#include "apr_general.h"
+#include "apr_lib.h"
+#include "util_filter.h"
+#include "http_request.h"
+
+#include <ctype.h>
+
+static const char s_szCaseFilterName[]="CaseFilter";
+module AP_MODULE_DECLARE_DATA case_filter_module;
+
+typedef struct
+ {
+ int bEnabled;
+ } CaseFilterConfig;
+
+static void *CaseFilterCreateServerConfig(apr_pool_t *p,server_rec *s)
+ {
+ CaseFilterConfig *pConfig=apr_pcalloc(p,sizeof *pConfig);
+
+ pConfig->bEnabled=0;
+
+ return pConfig;
+ }
+
+static void CaseFilterInsertFilter(request_rec *r)
+ {
+ CaseFilterConfig *pConfig=ap_get_module_config(r->server->module_config,
+ &case_filter_module);
+
+ if(!pConfig->bEnabled)
+ return;
+
+ ap_add_output_filter(s_szCaseFilterName,NULL,r,r->connection);
+ }
+
+static apr_status_t CaseFilterOutFilter(ap_filter_t *f,
+ apr_bucket_brigade *pbbIn)
+ {
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ apr_bucket *pbktIn;
+ apr_bucket_brigade *pbbOut;
+
+ pbbOut=apr_brigade_create(r->pool, c->bucket_alloc);
+ APR_BRIGADE_FOREACH(pbktIn,pbbIn)
+ {
+ const char *data;
+ apr_size_t len;
+ char *buf;
+ apr_size_t n;
+ apr_bucket *pbktOut;
+
+ if(APR_BUCKET_IS_EOS(pbktIn))
+ {
+ apr_bucket *pbktEOS=apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(pbbOut,pbktEOS);
+ continue;
+ }
+
+ /* read */
+ apr_bucket_read(pbktIn,&data,&len,APR_BLOCK_READ);
+
+ /* write */
+ buf = apr_bucket_alloc(len, c->bucket_alloc);
+ for(n=0 ; n < len ; ++n)
+ buf[n] = apr_toupper(data[n]);
+
+ pbktOut = apr_bucket_heap_create(buf, len, apr_bucket_free,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(pbbOut,pbktOut);
+ }
+
+ /* Q: is there any advantage to passing a brigade for each bucket?
+ * A: obviously, it can cut down server resource consumption; if this
+ * experimental module were fed a 4MB file, it would otherwise be using 8MB
+ * for the 'read' buckets and the 'write' buckets.
+ *
+ * Note it is more efficient to consume (destroy) each bucket as it's
+ * processed above than to do a single cleanup down here. In any case,
+ * don't let our caller pass the same buckets to us twice.
+ */
+ apr_brigade_cleanup(pbbIn);
+ return ap_pass_brigade(f->next,pbbOut);
+ }
+
+static const char *CaseFilterEnable(cmd_parms *cmd, void *dummy, int arg)
+ {
+ CaseFilterConfig *pConfig=ap_get_module_config(cmd->server->module_config,
+ &case_filter_module);
+ pConfig->bEnabled=arg;
+
+ return NULL;
+ }
+
+static const command_rec CaseFilterCmds[] =
+ {
+ AP_INIT_FLAG("CaseFilter", CaseFilterEnable, NULL, RSRC_CONF,
+ "Run a case filter on this host"),
+ { NULL }
+ };
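+
+/* Illustration only: to try this example filter, the module would typically
+ * be loaded and enabled in the server configuration, e.g. (the module path
+ * is hypothetical):
+ *
+ *     LoadModule case_filter_module modules/mod_case_filter.so
+ *     CaseFilter On
+ */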
+
+static void CaseFilterRegisterHooks(apr_pool_t *p)
+ {
+ ap_hook_insert_filter(CaseFilterInsertFilter,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_register_output_filter(s_szCaseFilterName,CaseFilterOutFilter,NULL,
+ AP_FTYPE_RESOURCE);
+ }
+
+module AP_MODULE_DECLARE_DATA case_filter_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL,
+ NULL,
+ CaseFilterCreateServerConfig,
+ NULL,
+ CaseFilterCmds,
+ CaseFilterRegisterHooks
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter_in.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter_in.c
new file mode 100644
index 00000000..9cac660d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter_in.c
@@ -0,0 +1,160 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * An example input filter - this converts input to upper case. Note that
+ * because of where it gets inserted in request processing, it does NOT
+ * convert request headers.
+ */
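+
+/* Illustration only: enabling this example input filter would typically look
+ * like the following (the module path is hypothetical):
+ *
+ *     LoadModule case_filter_in_module modules/mod_case_filter_in.so
+ *     CaseFilterIn On
+ */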
+
+#include "httpd.h"
+#include "http_config.h"
+#include "apr_buckets.h"
+#include "apr_general.h"
+#include "apr_lib.h"
+#include "util_filter.h"
+#include "http_request.h"
+
+#include <ctype.h>
+
+static const char s_szCaseFilterName[] = "CaseFilterIn";
+module AP_MODULE_DECLARE_DATA case_filter_in_module;
+
+typedef struct
+{
+ int bEnabled;
+} CaseFilterInConfig;
+
+typedef struct
+{
+ apr_bucket_brigade *pbbTmp;
+} CaseFilterInContext;
+
+static void *CaseFilterInCreateServerConfig(apr_pool_t *p, server_rec *s)
+{
+ CaseFilterInConfig *pConfig = apr_pcalloc(p, sizeof *pConfig);
+
+ pConfig->bEnabled = 0;
+
+ return pConfig;
+}
+
+static void CaseFilterInInsertFilter(request_rec *r)
+{
+ CaseFilterInConfig *pConfig=ap_get_module_config(r->server->module_config,
+ &case_filter_in_module);
+ if(!pConfig->bEnabled)
+ return;
+
+ ap_add_input_filter(s_szCaseFilterName,NULL,r,r->connection);
+}
+
+static apr_status_t CaseFilterInFilter(ap_filter_t *f,
+ apr_bucket_brigade *pbbOut,
+ ap_input_mode_t eMode,
+ apr_read_type_e eBlock,
+ apr_off_t nBytes)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ CaseFilterInContext *pCtx;
+ apr_status_t ret;
+
+ if (!(pCtx = f->ctx)) {
+ f->ctx = pCtx = apr_palloc(r->pool, sizeof *pCtx);
+ pCtx->pbbTmp = apr_brigade_create(r->pool, c->bucket_alloc);
+ }
+
+ if (APR_BRIGADE_EMPTY(pCtx->pbbTmp)) {
+ ret = ap_get_brigade(f->next, pCtx->pbbTmp, eMode, eBlock, nBytes);
+
+ if (eMode == AP_MODE_EATCRLF || ret != APR_SUCCESS)
+ return ret;
+ }
+
+ while(!APR_BRIGADE_EMPTY(pCtx->pbbTmp)) {
+ apr_bucket *pbktIn = APR_BRIGADE_FIRST(pCtx->pbbTmp);
+ apr_bucket *pbktOut;
+ const char *data;
+ apr_size_t len;
+ char *buf;
+ int n;
+
+ /* It is tempting to do this...
+ * APR_BUCKET_REMOVE(pB);
+ * APR_BRIGADE_INSERT_TAIL(pbbOut,pB);
+ * and change the case of the bucket data, but that would be wrong
+ * for a file or socket buffer, for example...
+ */
+
+ if(APR_BUCKET_IS_EOS(pbktIn)) {
+ APR_BUCKET_REMOVE(pbktIn);
+ APR_BRIGADE_INSERT_TAIL(pbbOut, pbktIn);
+ break;
+ }
+
+ ret=apr_bucket_read(pbktIn, &data, &len, eBlock);
+ if(ret != APR_SUCCESS)
+ return ret;
+
+ buf = malloc(len);
+ for(n=0 ; n < len ; ++n)
+ buf[n] = apr_toupper(data[n]);
+
+ pbktOut = apr_bucket_heap_create(buf, len, 0, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut);
+ apr_bucket_delete(pbktIn);
+ }
+
+ return APR_SUCCESS;
+}
+
+
+static const char *CaseFilterInEnable(cmd_parms *cmd, void *dummy, int arg)
+{
+ CaseFilterInConfig *pConfig
+ = ap_get_module_config(cmd->server->module_config,
+ &case_filter_in_module);
+ pConfig->bEnabled=arg;
+
+ return NULL;
+}
+
+static const command_rec CaseFilterInCmds[] =
+{
+ AP_INIT_FLAG("CaseFilterIn", CaseFilterInEnable, NULL, RSRC_CONF,
+ "Run an input case filter on this host"),
+ { NULL }
+};
+
+
+static void CaseFilterInRegisterHooks(apr_pool_t *p)
+{
+ ap_hook_insert_filter(CaseFilterInInsertFilter, NULL, NULL,
+ APR_HOOK_MIDDLE);
+ ap_register_input_filter(s_szCaseFilterName, CaseFilterInFilter, NULL,
+ AP_FTYPE_RESOURCE);
+}
+
+module AP_MODULE_DECLARE_DATA case_filter_in_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL,
+ NULL,
+ CaseFilterInCreateServerConfig,
+ NULL,
+ CaseFilterInCmds,
+ CaseFilterInRegisterHooks
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.c
new file mode 100644
index 00000000..a39261da
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.c
@@ -0,0 +1,1082 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * simple hokey charset recoding configuration module
+ *
+ * See mod_ebcdic and mod_charset for more thought-out examples. This
+ * one is just so Jeff can learn how a module works and experiment with
+ * basic character set recoding configuration.
+ *
+ * !!!This is an extremely cheap ripoff of mod_charset.c from Russian Apache!!!
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#define CORE_PRIVATE
+#include "http_core.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "util_charset.h"
+#include "apr_buckets.h"
+#include "util_filter.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+#include "apr_xlate.h"
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#define OUTPUT_XLATE_BUF_SIZE (16*1024) /* size of translation buffer used on output */
+#define INPUT_XLATE_BUF_SIZE (8*1024) /* size of translation buffer used on input */
+
+#define XLATE_MIN_BUFF_LEFT 128 /* flush once there is no more than this much
+ * space left in the translation buffer
+ */
+
+#define FATTEST_CHAR 8 /* we don't handle chars wider than this that straddle
+ * two buckets
+ */
+
+/* extended error status codes; this is used in addition to an apr_status_t to
+ * track errors in the translation filter
+ */
+typedef enum {
+ EES_INIT = 0, /* no error info yet; value must be 0 for easy init */
+ EES_LIMIT, /* built-in restriction encountered */
+ EES_INCOMPLETE_CHAR, /* incomplete multi-byte char at end of content */
+ EES_BUCKET_READ,
+ EES_DOWNSTREAM, /* something bad happened in a filter below xlate */
+ EES_BAD_INPUT /* input data invalid */
+} ees_t;
+
+/* registered name of the output translation filter */
+#define XLATEOUT_FILTER_NAME "XLATEOUT"
+/* registered name of input translation filter */
+#define XLATEIN_FILTER_NAME "XLATEIN"
+
+typedef struct charset_dir_t {
+ /** debug level; -1 means uninitialized, 0 means no debug */
+ int debug;
+ const char *charset_source; /* source encoding */
+ const char *charset_default; /* how to ship on wire */
+ /** module does ap_add_*_filter()? */
+ enum {IA_INIT, IA_IMPADD, IA_NOIMPADD} implicit_add;
+} charset_dir_t;
+
+/* charset_filter_ctx_t is created for each filter instance; because the same
+ * filter code is used for translating in both directions, we need this context
+ * data to tell the filter which translation handle to use; it also can hold a
+ * character which was split between buckets
+ */
+typedef struct charset_filter_ctx_t {
+ apr_xlate_t *xlate;
+ charset_dir_t *dc;
+ ees_t ees; /* extended error status */
+ apr_size_t saved;
+ char buf[FATTEST_CHAR]; /* we want to be able to build a complete char here */
+ int ran; /* has filter instance run before? */
+ int noop; /* should we pass brigades through unchanged? */
+ char *tmp; /* buffer for input filtering */
+ apr_bucket_brigade *bb; /* input buckets we couldn't finish translating */
+} charset_filter_ctx_t;
+
+/* charset_req_t is available via r->request_config if any translation is
+ * being performed
+ */
+typedef struct charset_req_t {
+ charset_dir_t *dc;
+ charset_filter_ctx_t *output_ctx, *input_ctx;
+} charset_req_t;
+
+/* debug level definitions */
+#define DBGLVL_GORY 9 /* gory details */
+#define DBGLVL_FLOW 4 /* enough messages to see what happens on
+ * each request */
+#define DBGLVL_PMC 2 /* messages about possible misconfiguration */
+
+module AP_MODULE_DECLARE_DATA charset_lite_module;
+
+static void *create_charset_dir_conf(apr_pool_t *p,char *dummy)
+{
+ charset_dir_t *dc = (charset_dir_t *)apr_pcalloc(p,sizeof(charset_dir_t));
+
+ dc->debug = -1;
+ return dc;
+}
+
+static void *merge_charset_dir_conf(apr_pool_t *p, void *basev, void *overridesv)
+{
+ charset_dir_t *a = (charset_dir_t *)apr_pcalloc (p, sizeof(charset_dir_t));
+ charset_dir_t *base = (charset_dir_t *)basev,
+ *over = (charset_dir_t *)overridesv;
+
+ /* If it is defined in the current container, use it. Otherwise, use the one
+ * from the enclosing container.
+ */
+
+ a->debug =
+ over->debug != -1 ? over->debug : base->debug;
+ a->charset_default =
+ over->charset_default ? over->charset_default : base->charset_default;
+ a->charset_source =
+ over->charset_source ? over->charset_source : base->charset_source;
+ a->implicit_add =
+ over->implicit_add != IA_INIT ? over->implicit_add : base->implicit_add;
+ return a;
+}
+
+/* CharsetSourceEnc charset
+ */
+static const char *add_charset_source(cmd_parms *cmd, void *in_dc,
+ const char *name)
+{
+ charset_dir_t *dc = in_dc;
+
+ dc->charset_source = name;
+ return NULL;
+}
+
+/* CharsetDefault charset
+ */
+static const char *add_charset_default(cmd_parms *cmd, void *in_dc,
+ const char *name)
+{
+ charset_dir_t *dc = in_dc;
+
+ dc->charset_default = name;
+ return NULL;
+}
+
+/* CharsetOptions optionflag...
+ */
+static const char *add_charset_options(cmd_parms *cmd, void *in_dc,
+ const char *flag)
+{
+ charset_dir_t *dc = in_dc;
+
+ if (!strcasecmp(flag, "ImplicitAdd")) {
+ dc->implicit_add = IA_IMPADD;
+ }
+ else if (!strcasecmp(flag, "NoImplicitAdd")) {
+ dc->implicit_add = IA_NOIMPADD;
+ }
+ else if (!strncasecmp(flag, "DebugLevel=", 11)) {
+ dc->debug = atoi(flag + 11);
+ }
+ else {
+ return apr_pstrcat(cmd->temp_pool,
+ "Invalid CharsetOptions option: ",
+ flag,
+ NULL);
+ }
+
+ return NULL;
+}
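+
+/* Illustration only: a minimal (hypothetical) per-directory configuration
+ * using the directives handled above might look like:
+ *
+ *     <Directory "/usr/local/apache2/htdocs/latin1">
+ *         CharsetSourceEnc ISO-8859-1
+ *         CharsetDefault UTF-8
+ *         CharsetOptions ImplicitAdd DebugLevel=4
+ *     </Directory>
+ */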
+
+/* find_code_page() is a fixup hook that decides if translation should be
+ * enabled; if so, it sets up request data for use by the filter registration
+ * hook so that it knows what to do
+ */
+static int find_code_page(request_rec *r)
+{
+ charset_dir_t *dc = ap_get_module_config(r->per_dir_config,
+ &charset_lite_module);
+ charset_req_t *reqinfo;
+ charset_filter_ctx_t *input_ctx, *output_ctx;
+ apr_status_t rv;
+ const char *mime_type;
+
+ if (dc->debug >= DBGLVL_FLOW) {
+ ap_log_rerror(APLOG_MARK,APLOG_DEBUG, 0, r,
+ "uri: %s file: %s method: %d "
+ "imt: %s flags: %s%s%s %s->%s",
+ r->uri, r->filename, r->method_number,
+ r->content_type ? r->content_type : "(unknown)",
+ r->main ? "S" : "", /* S if subrequest */
+ r->prev ? "R" : "", /* R if redirect */
+ r->proxyreq ? "P" : "", /* P if proxy */
+ dc->charset_source, dc->charset_default);
+ }
+
+ /* If we don't have a full directory configuration, bail out.
+ */
+ if (!dc->charset_source || !dc->charset_default) {
+ if (dc->debug >= DBGLVL_PMC) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "incomplete configuration: src %s, dst %s",
+ dc->charset_source ? dc->charset_source : "unspecified",
+ dc->charset_default ? dc->charset_default : "unspecified");
+ }
+ return DECLINED;
+ }
+
+ /* catch proxy requests */
+ if (r->proxyreq) return DECLINED;
+ /* mod_rewrite indicators */
+ if (!strncmp(r->filename, "redirect:", 9)) return DECLINED;
+ if (!strncmp(r->filename, "gone:", 5)) return DECLINED;
+ if (!strncmp(r->filename, "passthrough:", 12)) return DECLINED;
+ if (!strncmp(r->filename, "forbidden:", 10)) return DECLINED;
+
+ mime_type = r->content_type ? r->content_type : ap_default_type(r);
+
+ /* If mime type isn't text or message, bail out.
+ */
+
+/* XXX When we handle translation of the request body, watch out here as
+ * 1.3 allowed additional mime types: multipart and
+ * application/x-www-form-urlencoded
+ */
+
+ if (strncasecmp(mime_type, "text/", 5) &&
+#if APR_CHARSET_EBCDIC || AP_WANT_DIR_TRANSLATION
+ /* On an EBCDIC machine, be willing to translate mod_autoindex-
+ * generated output. Otherwise, it doesn't look too cool.
+ *
+ * XXX This isn't a perfect fix because this doesn't trigger us
+ * to convert from the charset of the source code to ASCII. The
+ * general solution seems to be to allow a generator to set an
+ * indicator in the r specifying that the body is coded in the
+ * implementation character set (i.e., the charset of the source
+ * code). This would get several different types of documents
+ * translated properly: mod_autoindex output, mod_status output,
+ * mod_info output, hard-coded error documents, etc.
+ */
+ strcmp(mime_type, DIR_MAGIC_TYPE) &&
+#endif
+ strncasecmp(mime_type, "message/", 8)) {
+ if (dc->debug >= DBGLVL_GORY) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "mime type is %s; no translation selected",
+ mime_type);
+ }
+ return DECLINED;
+ }
+
+ if (dc->debug >= DBGLVL_GORY) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "charset_source: %s charset_default: %s",
+ dc && dc->charset_source ? dc->charset_source : "(none)",
+ dc && dc->charset_default ? dc->charset_default : "(none)");
+ }
+
+ /* Get storage for the request data and the output filter context.
+ * We rarely need the input filter context, so allocate that separately.
+ */
+ reqinfo = (charset_req_t *)apr_pcalloc(r->pool,
+ sizeof(charset_req_t) +
+ sizeof(charset_filter_ctx_t));
+ output_ctx = (charset_filter_ctx_t *)(reqinfo + 1);
+
+ reqinfo->dc = dc;
+ output_ctx->dc = dc;
+ ap_set_module_config(r->request_config, &charset_lite_module, reqinfo);
+
+ reqinfo->output_ctx = output_ctx;
+ rv = apr_xlate_open(&output_ctx->xlate,
+ dc->charset_default, dc->charset_source, r->pool);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "can't open translation %s->%s",
+ dc->charset_source, dc->charset_default);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ switch (r->method_number) {
+ case M_PUT:
+ case M_POST:
+ /* Set up input translation. Note: A request body can be included
+ * with the OPTIONS method, but for now we don't set up translation
+ * of it.
+ */
+ input_ctx = apr_pcalloc(r->pool, sizeof(charset_filter_ctx_t));
+ input_ctx->bb = apr_brigade_create(r->pool,
+ r->connection->bucket_alloc);
+ input_ctx->tmp = apr_palloc(r->pool, INPUT_XLATE_BUF_SIZE);
+ input_ctx->dc = dc;
+ reqinfo->input_ctx = input_ctx;
+ rv = apr_xlate_open(&input_ctx->xlate, dc->charset_source,
+ dc->charset_default, r->pool);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "can't open translation %s->%s",
+ dc->charset_default, dc->charset_source);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+
+ return DECLINED;
+}
+
+static int configured_in_list(request_rec *r, const char *filter_name,
+ struct ap_filter_t *filter_list)
+{
+ struct ap_filter_t *filter = filter_list;
+
+ while (filter) {
+ if (!strcasecmp(filter_name, filter->frec->name)) {
+ return 1;
+ }
+ filter = filter->next;
+ }
+ return 0;
+}
+
+static int configured_on_input(request_rec *r, const char *filter_name)
+{
+ return configured_in_list(r, filter_name, r->input_filters);
+}
+
+static int configured_on_output(request_rec *r, const char *filter_name)
+{
+ return configured_in_list(r, filter_name, r->output_filters);
+}
+
+/* xlate_insert_filter() is a filter hook which decides whether or not
+ * to insert a translation filter for the current request.
+ */
+static void xlate_insert_filter(request_rec *r)
+{
+ /* Hey... don't be so quick to use reqinfo->dc here; reqinfo may be NULL */
+ charset_req_t *reqinfo = ap_get_module_config(r->request_config,
+ &charset_lite_module);
+ charset_dir_t *dc = ap_get_module_config(r->per_dir_config,
+ &charset_lite_module);
+
+ if (reqinfo) {
+ if (reqinfo->output_ctx && !configured_on_output(r, XLATEOUT_FILTER_NAME)) {
+ ap_add_output_filter(XLATEOUT_FILTER_NAME, reqinfo->output_ctx, r,
+ r->connection);
+ }
+ else if (dc->debug >= DBGLVL_FLOW) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "xlate output filter not added implicitly because %s",
+ !reqinfo->output_ctx ?
+ "no output configuration available" :
+ "another module added the filter");
+ }
+
+ if (reqinfo->input_ctx && !configured_on_input(r, XLATEIN_FILTER_NAME)) {
+ ap_add_input_filter(XLATEIN_FILTER_NAME, reqinfo->input_ctx, r,
+ r->connection);
+ }
+ else if (dc->debug >= DBGLVL_FLOW) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "xlate input filter not added implicitly because %s",
+ !reqinfo->input_ctx ?
+ "no input configuration available" :
+ "another module added the filter");
+ }
+ }
+}
+
+/* stuff that sucks that I know of:
+ *
+ * bucket handling:
+ * why create an eos bucket when we see it come down the stream? just send the one
+ * passed as input... news flash: this will be fixed when xlate_out_filter() starts
+ * using the more generic xlate_brigade()
+ *
+ * translation mechanics:
+ * we don't handle characters that straddle more than two buckets; an error
+ * will be generated
+ */
+
+/* send_downstream() is passed the translated data; it puts it in a single-
+ * bucket brigade and passes the brigade to the next filter
+ */
+static apr_status_t send_downstream(ap_filter_t *f, const char *tmp, apr_size_t len)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ charset_filter_ctx_t *ctx = f->ctx;
+ apr_status_t rv;
+
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ b = apr_bucket_transient_create(tmp, len, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ rv = ap_pass_brigade(f->next, bb);
+ if (rv != APR_SUCCESS) {
+ ctx->ees = EES_DOWNSTREAM;
+ }
+ return rv;
+}
+
+static apr_status_t send_eos(ap_filter_t *f)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ charset_filter_ctx_t *ctx = f->ctx;
+ apr_status_t rv;
+
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ rv = ap_pass_brigade(f->next, bb);
+ if (rv != APR_SUCCESS) {
+ ctx->ees = EES_DOWNSTREAM;
+ }
+ return rv;
+}
+
+static apr_status_t set_aside_partial_char(charset_filter_ctx_t *ctx,
+ const char *partial,
+ apr_size_t partial_len)
+{
+ apr_status_t rv;
+
+ if (sizeof(ctx->buf) > partial_len) {
+ ctx->saved = partial_len;
+ memcpy(ctx->buf, partial, partial_len);
+ rv = APR_SUCCESS;
+ }
+ else {
+ rv = APR_INCOMPLETE;
+ ctx->ees = EES_LIMIT; /* we don't handle chars this wide which straddle
+ * buckets
+ */
+ }
+ return rv;
+}
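+
+/* Illustrative example (not from the original source): with a UTF-8 source
+ * charset, a three-byte sequence such as the euro sign (0xE2 0x82 0xAC) may
+ * arrive with only its first byte at the tail of one bucket.
+ * set_aside_partial_char() stashes that byte in ctx->buf; finish_partial_char()
+ * below then feeds the saved byte(s) plus bytes from the next bucket back
+ * through apr_xlate_conv_buffer() until the character converts, an error
+ * occurs, or the input runs out.
+ */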
+
+static apr_status_t finish_partial_char(charset_filter_ctx_t *ctx,
+ /* input buffer: */
+ const char **cur_str,
+ apr_size_t *cur_len,
+ /* output buffer: */
+ char **out_str,
+ apr_size_t *out_len)
+{
+ apr_status_t rv;
+ apr_size_t tmp_input_len;
+
+ /* Keep adding bytes from the input string to the saved string until we
+ * 1) finish the input char
+ * 2) get an error
+ * or 3) run out of bytes to add
+ */
+
+ do {
+ ctx->buf[ctx->saved] = **cur_str;
+ ++ctx->saved;
+ ++*cur_str;
+ --*cur_len;
+ tmp_input_len = ctx->saved;
+ rv = apr_xlate_conv_buffer(ctx->xlate,
+ ctx->buf,
+ &tmp_input_len,
+ *out_str,
+ out_len);
+ } while (rv == APR_INCOMPLETE && *cur_len);
+
+ if (rv == APR_SUCCESS) {
+ ctx->saved = 0;
+ }
+ else {
+ ctx->ees = EES_LIMIT; /* code isn't smart enough to handle chars
+ * straddling more than two buckets
+ */
+ }
+
+ return rv;
+}
+
+static void log_xlate_error(ap_filter_t *f, apr_status_t rv)
+{
+ charset_filter_ctx_t *ctx = f->ctx;
+ const char *msg;
+ char msgbuf[100];
+ int cur;
+
+ switch(ctx->ees) {
+ case EES_LIMIT:
+ rv = 0;
+ msg = "xlate filter - a built-in restriction was encountered";
+ break;
+ case EES_BAD_INPUT:
+ rv = 0;
+ msg = "xlate filter - an input character was invalid";
+ break;
+ case EES_BUCKET_READ:
+ rv = 0;
+ msg = "xlate filter - bucket read routine failed";
+ break;
+ case EES_INCOMPLETE_CHAR:
+ rv = 0;
+ strcpy(msgbuf, "xlate filter - incomplete char at end of input - ");
+ cur = 0;
+ while ((apr_size_t)cur < ctx->saved) {
+ apr_snprintf(msgbuf + strlen(msgbuf), sizeof(msgbuf) - strlen(msgbuf),
+ "%02X", (unsigned char)ctx->buf[cur]);
+ ++cur;
+ }
+ msg = msgbuf;
+ break;
+ case EES_DOWNSTREAM:
+ msg = "xlate filter - an error occurred in a lower filter";
+ break;
+ default:
+ msg = "xlate filter - returning error";
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r,
+ "%s", msg);
+}
+
+/* chk_filter_chain() is called once per filter instance; it tries to
+ * determine if the current filter instance should be disabled because
+ * its translation is incompatible with the translation of an existing
+ * instance of the translate filter
+ *
+ * Example bad scenario:
+ *
+ * configured filter chain for the request:
+ * INCLUDES XLATEOUT(8859-1->UTF-16)
+ * configured filter chain for the subrequest:
+ * XLATEOUT(8859-1->UTF-16)
+ *
+ * When the subrequest is processed, the filter chain will be
+ * XLATEOUT(8859-1->UTF-16) XLATEOUT(8859-1->UTF-16)
+ * This makes no sense, so the instance of XLATEOUT added for the
+ * subrequest will be noop-ed.
+ *
+ * Example good scenario:
+ *
+ * configured filter chain for the request:
+ * INCLUDES XLATEOUT(8859-1->UTF-16)
+ * configured filter chain for the subrequest:
+ * XLATEOUT(IBM-1047->8859-1)
+ *
+ * When the subrequest is processed, the filter chain will be
+ * XLATEOUT(IBM-1047->8859-1) XLATEOUT(8859-1->UTF-16)
+ * This makes sense, so the instance of XLATEOUT added for the
+ * subrequest will be left alone and it will translate from
+ * IBM-1047->8859-1.
+ */
+static void chk_filter_chain(ap_filter_t *f)
+{
+ ap_filter_t *curf;
+ charset_filter_ctx_t *curctx, *last_xlate_ctx = NULL,
+ *ctx = f->ctx;
+ int debug = ctx->dc->debug;
+ int output = !strcasecmp(f->frec->name, XLATEOUT_FILTER_NAME);
+
+ if (ctx->noop) {
+ return;
+ }
+
+ /* walk the filter chain; see if it makes sense for our filter to
+ * do any translation
+ */
+ curf = output ? f->r->output_filters : f->r->input_filters;
+ while (curf) {
+ if (!strcasecmp(curf->frec->name, f->frec->name) &&
+ curf->ctx) {
+ curctx = (charset_filter_ctx_t *)curf->ctx;
+ if (!last_xlate_ctx) {
+ last_xlate_ctx = curctx;
+ }
+ else {
+ if (strcmp(last_xlate_ctx->dc->charset_default,
+ curctx->dc->charset_source)) {
+ /* incompatible translation
+ * if our filter instance is incompatible with an instance
+ * already in place, noop our instance
+ * Notes:
+ * . We are only willing to noop our own instance.
+ * . It is possible to noop another instance which has not
+ * yet run, but this is not currently implemented.
+ * Hopefully it will not be needed.
+ * . It is not possible to noop an instance which has
+ * already run.
+ */
+ if (last_xlate_ctx == f->ctx) {
+ last_xlate_ctx->noop = 1;
+ if (debug >= DBGLVL_PMC) {
+ const char *symbol = output ? "->" : "<-";
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG,
+ 0, f->r,
+ "%s %s - disabling "
+ "translation %s%s%s; existing "
+ "translation %s%s%s",
+ f->r->uri ? "uri" : "file",
+ f->r->uri ? f->r->uri : f->r->filename,
+ last_xlate_ctx->dc->charset_source,
+ symbol,
+ last_xlate_ctx->dc->charset_default,
+ curctx->dc->charset_source,
+ symbol,
+ curctx->dc->charset_default);
+ }
+ }
+ else {
+ const char *symbol = output ? "->" : "<-";
+
+ ap_log_rerror(APLOG_MARK, APLOG_ERR,
+ 0, f->r,
+ "chk_filter_chain() - can't disable "
+ "translation %s%s%s; existing "
+ "translation %s%s%s",
+ last_xlate_ctx->dc->charset_source,
+ symbol,
+ last_xlate_ctx->dc->charset_default,
+ curctx->dc->charset_source,
+ symbol,
+ curctx->dc->charset_default);
+ }
+ break;
+ }
+ }
+ }
+ curf = curf->next;
+ }
+}
+
+/* xlate_brigade() is used to filter request and response bodies
+ *
+ * we'll stop when one of the following occurs:
+ * . we run out of buckets
+ * . we run out of space in the output buffer
+ * . we hit an error
+ *
+ * inputs:
+ * bb: brigade to process
+ * buffer: storage to hold the translated characters
+ * buffer_size: size of buffer
+ * (and a few more uninteresting parms)
+ *
+ * outputs:
+ * return value: APR_SUCCESS or some error code
+ * bb: we've removed any buckets representing the
+ * translated characters; the eos bucket, if
+ * present, will be left in the brigade
+ * buffer: filled in with translated characters
+ * buffer_size: updated with the bytes remaining
+ * hit_eos: did we hit an EOS bucket?
+ */
+static apr_status_t xlate_brigade(charset_filter_ctx_t *ctx,
+ apr_bucket_brigade *bb,
+ char *buffer,
+ apr_size_t *buffer_avail,
+ int *hit_eos)
+{
+ apr_bucket *b = NULL; /* set to NULL only to quiet some gcc */
+ apr_bucket *consumed_bucket;
+ const char *bucket;
+ apr_size_t bytes_in_bucket; /* total bytes read from current bucket */
+ apr_size_t bucket_avail; /* bytes left in current bucket */
+ apr_status_t rv = APR_SUCCESS;
+
+ *hit_eos = 0;
+ bucket_avail = 0;
+ consumed_bucket = NULL;
+ while (1) {
+ if (!bucket_avail) { /* no bytes left to process in the current bucket... */
+ if (consumed_bucket) {
+ apr_bucket_delete(consumed_bucket);
+ consumed_bucket = NULL;
+ }
+ b = APR_BRIGADE_FIRST(bb);
+ if (b == APR_BRIGADE_SENTINEL(bb) ||
+ APR_BUCKET_IS_EOS(b)) {
+ break;
+ }
+ rv = apr_bucket_read(b, &bucket, &bytes_in_bucket, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ ctx->ees = EES_BUCKET_READ;
+ break;
+ }
+ bucket_avail = bytes_in_bucket;
+ consumed_bucket = b; /* for axing when we're done reading it */
+ }
+ if (bucket_avail) {
+ /* We've got data, so translate it. */
+ if (ctx->saved) {
+ /* Rats... we need to finish a partial character from the previous
+ * bucket.
+ *
+ * Strangely, finish_partial_char() increments the input buffer
+ * pointer but does not increment the output buffer pointer.
+ */
+ apr_size_t old_buffer_avail = *buffer_avail;
+ rv = finish_partial_char(ctx,
+ &bucket, &bucket_avail,
+ &buffer, buffer_avail);
+ buffer += old_buffer_avail - *buffer_avail;
+ }
+ else {
+ apr_size_t old_buffer_avail = *buffer_avail;
+ apr_size_t old_bucket_avail = bucket_avail;
+ rv = apr_xlate_conv_buffer(ctx->xlate,
+ bucket, &bucket_avail,
+ buffer,
+ buffer_avail);
+ buffer += old_buffer_avail - *buffer_avail;
+ bucket += old_bucket_avail - bucket_avail;
+
+ if (rv == APR_INCOMPLETE) { /* partial character at end of input */
+ /* We need to save the final byte(s) for next time; we can't
+ * convert it until we look at the next bucket.
+ */
+ rv = set_aside_partial_char(ctx, bucket, bucket_avail);
+ bucket_avail = 0;
+ }
+ }
+ if (rv != APR_SUCCESS) {
+ /* bad input byte or partial char too big to store */
+ break;
+ }
+ if (*buffer_avail < XLATE_MIN_BUFF_LEFT) {
+ /* if any data remains in the current bucket, split there */
+ if (bucket_avail) {
+ apr_bucket_split(b, bytes_in_bucket - bucket_avail);
+ }
+ apr_bucket_delete(b);
+ break;
+ }
+ }
+ }
+
+ if (!APR_BRIGADE_EMPTY(bb)) {
+ b = APR_BRIGADE_FIRST(bb);
+ if (APR_BUCKET_IS_EOS(b)) {
+ /* Leave the eos bucket in the brigade for reporting to
+ * subsequent filters.
+ */
+ *hit_eos = 1;
+ if (ctx->saved) {
+ /* Oops... we have a partial char from the previous bucket
+ * that won't be completed because there's no more data.
+ */
+ rv = APR_INCOMPLETE;
+ ctx->ees = EES_INCOMPLETE_CHAR;
+ }
+ }
+ }
+
+ return rv;
+}
+
+/* xlate_out_filter() handles (almost) arbitrary conversions from one charset
+ * to another...
+ * translation is determined in the fixup hook (find_code_page), which is
+ * where the filter's context data is set up... the context data gives us
+ * the translation handle
+ */
+static apr_status_t xlate_out_filter(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ charset_req_t *reqinfo = ap_get_module_config(f->r->request_config,
+ &charset_lite_module);
+ charset_dir_t *dc = ap_get_module_config(f->r->per_dir_config,
+ &charset_lite_module);
+ charset_filter_ctx_t *ctx = f->ctx;
+ apr_bucket *dptr, *consumed_bucket;
+ const char *cur_str;
+ apr_size_t cur_len, cur_avail;
+ char tmp[OUTPUT_XLATE_BUF_SIZE];
+ apr_size_t space_avail;
+ int done;
+ apr_status_t rv = APR_SUCCESS;
+
+ if (!ctx) {
+ /* this is SetOutputFilter path; grab the preallocated context,
+ * if any; note that if we decided not to do anything in an earlier
+ * handler, we won't even have a reqinfo
+ */
+ if (reqinfo) {
+ ctx = f->ctx = reqinfo->output_ctx;
+ reqinfo->output_ctx = NULL; /* prevent SNAFU if user coded us twice
+ * in the filter chain; we can't have two
+ * instances using the same context
+ */
+ }
+ if (!ctx) { /* no idea how to translate; don't do anything */
+ ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(charset_filter_ctx_t));
+ ctx->dc = dc;
+ ctx->noop = 1;
+ }
+ }
+
+ if (dc->debug >= DBGLVL_GORY) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
+ "xlate_out_filter() - "
+ "charset_source: %s charset_default: %s",
+ dc && dc->charset_source ? dc->charset_source : "(none)",
+ dc && dc->charset_default ? dc->charset_default : "(none)");
+ }
+
+ if (!ctx->ran) { /* filter never ran before */
+ chk_filter_chain(f);
+ ctx->ran = 1;
+ }
+
+ if (ctx->noop) {
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ dptr = APR_BRIGADE_FIRST(bb);
+ done = 0;
+ cur_len = 0;
+ space_avail = sizeof(tmp);
+ consumed_bucket = NULL;
+ while (!done) {
+ if (!cur_len) { /* no bytes left to process in the current bucket... */
+ if (consumed_bucket) {
+ apr_bucket_delete(consumed_bucket);
+ consumed_bucket = NULL;
+ }
+ if (dptr == APR_BRIGADE_SENTINEL(bb)) {
+ done = 1;
+ break;
+ }
+ if (APR_BUCKET_IS_EOS(dptr)) {
+ done = 1;
+ cur_len = -1; /* XXX yuck, but that tells us to send
+ * eos down; when we minimize our bb construction
+ * we'll fix this crap */
+ if (ctx->saved) {
+ /* Oops... we have a partial char from the previous bucket
+ * that won't be completed because there's no more data.
+ */
+ rv = APR_INCOMPLETE;
+ ctx->ees = EES_INCOMPLETE_CHAR;
+ }
+ break;
+ }
+ rv = apr_bucket_read(dptr, &cur_str, &cur_len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ done = 1;
+ ctx->ees = EES_BUCKET_READ;
+ break;
+ }
+ consumed_bucket = dptr; /* for axing when we're done reading it */
+ dptr = APR_BUCKET_NEXT(dptr); /* get ready for when we access the
+ * next bucket */
+ }
+ /* Try to fill up our tmp buffer with translated data. */
+ cur_avail = cur_len;
+
+ if (cur_len) { /* maybe we just hit the end of a pipe (len = 0) ? */
+ if (ctx->saved) {
+ /* Rats... we need to finish a partial character from the previous
+ * bucket.
+ */
+ char *tmp_tmp;
+
+ tmp_tmp = tmp + sizeof(tmp) - space_avail;
+ rv = finish_partial_char(ctx,
+ &cur_str, &cur_len,
+ &tmp_tmp, &space_avail);
+ }
+ else {
+ rv = apr_xlate_conv_buffer(ctx->xlate,
+ cur_str, &cur_avail,
+ tmp + sizeof(tmp) - space_avail, &space_avail);
+
+ /* Update input ptr and len after consuming some bytes */
+ cur_str += cur_len - cur_avail;
+ cur_len = cur_avail;
+
+ if (rv == APR_INCOMPLETE) { /* partial character at end of input */
+ /* We need to save the final byte(s) for next time; we can't
+ * convert it until we look at the next bucket.
+ */
+ rv = set_aside_partial_char(ctx, cur_str, cur_len);
+ cur_len = 0;
+ }
+ }
+ }
+
+ if (rv != APR_SUCCESS) {
+ /* bad input byte or partial char too big to store */
+ done = 1;
+ }
+
+ if (space_avail < XLATE_MIN_BUFF_LEFT) {
+ /* It is time to flush, as there is not enough space left in the
+ * current output buffer to bother with converting more data.
+ */
+ rv = send_downstream(f, tmp, sizeof(tmp) - space_avail);
+ if (rv != APR_SUCCESS) {
+ done = 1;
+ }
+
+ /* tmp is now empty */
+ space_avail = sizeof(tmp);
+ }
+ }
+
+ if (rv == APR_SUCCESS) {
+ if (space_avail < sizeof(tmp)) { /* gotta write out what we converted */
+ rv = send_downstream(f, tmp, sizeof(tmp) - space_avail);
+ }
+ }
+ if (rv == APR_SUCCESS) {
+ if (cur_len == -1) {
+ rv = send_eos(f);
+ }
+ }
+ else {
+ log_xlate_error(f, rv);
+ }
+
+ return rv;
+}
+
+static int xlate_in_filter(ap_filter_t *f, apr_bucket_brigade *bb,
+ ap_input_mode_t mode, apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ apr_status_t rv;
+ charset_req_t *reqinfo = ap_get_module_config(f->r->request_config,
+ &charset_lite_module);
+ charset_dir_t *dc = ap_get_module_config(f->r->per_dir_config,
+ &charset_lite_module);
+ charset_filter_ctx_t *ctx = f->ctx;
+ apr_size_t buffer_size;
+ int hit_eos;
+
+ if (!ctx) {
+ /* this is SetInputFilter path; grab the preallocated context,
+ * if any; note that if we decided not to do anything in an earlier
+ * handler, we won't even have a reqinfo
+ */
+ if (reqinfo) {
+ ctx = f->ctx = reqinfo->input_ctx;
+ reqinfo->input_ctx = NULL; /* prevent SNAFU if user coded us twice
+ * in the filter chain; we can't have two
+ * instances using the same context
+ */
+ }
+ if (!ctx) { /* no idea how to translate; don't do anything */
+ ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(charset_filter_ctx_t));
+ ctx->dc = dc;
+ ctx->noop = 1;
+ }
+ }
+
+ if (dc->debug >= DBGLVL_GORY) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
+ "xlate_in_filter() - "
+ "charset_source: %s charset_default: %s",
+ dc && dc->charset_source ? dc->charset_source : "(none)",
+ dc && dc->charset_default ? dc->charset_default : "(none)");
+ }
+
+ if (!ctx->ran) { /* filter never ran before */
+ chk_filter_chain(f);
+ ctx->ran = 1;
+ }
+
+ if (ctx->noop) {
+ return ap_get_brigade(f->next, bb, mode, block, readbytes);
+ }
+
+ if (APR_BRIGADE_EMPTY(ctx->bb)) {
+ if ((rv = ap_get_brigade(f->next, bb, mode, block,
+ readbytes)) != APR_SUCCESS) {
+ return rv;
+ }
+ }
+ else {
+ APR_BRIGADE_PREPEND(bb, ctx->bb); /* first use the leftovers */
+ }
+
+ buffer_size = INPUT_XLATE_BUF_SIZE;
+ rv = xlate_brigade(ctx, bb, ctx->tmp, &buffer_size, &hit_eos);
+ if (rv == APR_SUCCESS) {
+ if (!hit_eos) {
+ /* move anything leftover into our context for next time;
+ * we don't currently "set aside" since the data came from
+ * down below, but I suspect that for long-term we need to
+ * do that
+ */
+ APR_BRIGADE_CONCAT(ctx->bb, bb);
+ }
+ if (buffer_size < INPUT_XLATE_BUF_SIZE) { /* do we have output? */
+ apr_bucket *e;
+
+ e = apr_bucket_heap_create(ctx->tmp,
+ INPUT_XLATE_BUF_SIZE - buffer_size,
+ NULL, f->r->connection->bucket_alloc);
+ /* make sure we insert at the head, because there may be
+ * an eos bucket already there, and the eos bucket should
+ * come after the data
+ */
+ APR_BRIGADE_INSERT_HEAD(bb, e);
+ }
+ else {
+ /* XXX need to get some more data... what if the last brigade
+ * we got had only the first byte of a multibyte char? we need
+ * to grab more data from the network instead of returning an
+ * empty brigade
+ */
+ }
+ }
+ else {
+ log_xlate_error(f, rv);
+ }
+
+ return rv;
+}
+
+static const command_rec cmds[] =
+{
+ AP_INIT_TAKE1("CharsetSourceEnc",
+ add_charset_source,
+ NULL,
+ OR_FILEINFO,
+ "source (html,cgi,ssi) file charset"),
+ AP_INIT_TAKE1("CharsetDefault",
+ add_charset_default,
+ NULL,
+ OR_FILEINFO,
+ "name of default charset"),
+ AP_INIT_ITERATE("CharsetOptions",
+ add_charset_options,
+ NULL,
+ OR_FILEINFO,
+ "valid options: ImplicitAdd, NoImplicitAdd, DebugLevel=n"),
+ {NULL}
+};
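+
+/* Illustrative configuration sketch (not part of the original source; the
+ * charset names are example values only, borrowed from the comments above):
+ *
+ *   <Directory "/usr/local/apache2/htdocs/legacy">
+ *       CharsetSourceEnc  IBM-1047
+ *       CharsetDefault    ISO-8859-1
+ *       CharsetOptions    ImplicitAdd DebugLevel=1
+ *   </Directory>
+ */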
+
+static void charset_register_hooks(apr_pool_t *p)
+{
+ ap_hook_fixups(find_code_page, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_insert_filter(xlate_insert_filter, NULL, NULL, APR_HOOK_REALLY_LAST);
+ ap_register_output_filter(XLATEOUT_FILTER_NAME, xlate_out_filter, NULL,
+ AP_FTYPE_RESOURCE);
+ ap_register_input_filter(XLATEIN_FILTER_NAME, xlate_in_filter, NULL,
+ AP_FTYPE_RESOURCE);
+}
+
+module AP_MODULE_DECLARE_DATA charset_lite_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_charset_dir_conf,
+ merge_charset_dir_conf,
+ NULL,
+ NULL,
+ cmds,
+ charset_register_hooks
+};
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.dsp
new file mode 100644
index 00000000..8a133ff7
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.dsp
@@ -0,0 +1,124 @@
+# Microsoft Developer Studio Project File - Name="mod_charset_lite" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_charset_lite - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_charset_lite.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_charset_lite.mak" CFG="mod_charset_lite - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_charset_lite - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_charset_lite - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_charset_lite - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_charset_lite_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_charset_lite.so" /base:@..\..\os\win32\BaseAddr.ref,mod_charset_lite.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_charset_lite - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_charset_lite_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_charset_lite.so" /base:@..\..\os\win32\BaseAddr.ref,mod_charset_lite.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_charset_lite - Win32 Release"
+# Name "mod_charset_lite - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_charset_lite.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_charset_lite - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_charset_lite.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_charset_lite.so "charset_lite_module for Apache" ../../include/ap_release.h > .\mod_charset_lite.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_charset_lite - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_charset_lite.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_charset_lite.so "charset_lite_module for Apache" ../../include/ap_release.h > .\mod_charset_lite.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.exp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.exp
new file mode 100644
index 00000000..3f0bf14b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.exp
@@ -0,0 +1 @@
+charset_lite_module
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.c
new file mode 100644
index 00000000..f8c1642f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.c
@@ -0,0 +1,963 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_file_io.h"
+#include "apr_strings.h"
+#include "mod_cache.h"
+#include "ap_provider.h"
+#include "util_filter.h"
+#include "util_script.h"
+
+#if APR_HAVE_UNISTD_H
+#include <unistd.h> /* needed for unlink/link */
+#endif
+
+/* Our on-disk header format is:
+ *
+ * disk_cache_info_t
+ * entity name (dobj->name) [length is in disk_cache_info_t->name_len]
+ * r->headers_out (delimited by CRLF)
+ * CRLF
+ * r->headers_in (delimited by CRLF)
+ * CRLF
+ */
+#define DISK_FORMAT_VERSION 0
+typedef struct {
+ /* Indicates the format of the header struct stored on-disk. */
+ int format;
+ /* The HTTP status code returned for this response. */
+ int status;
+ /* The size of the entity name that follows. */
+ apr_size_t name_len;
+ /* The number of times we've cached this entity. */
+ apr_size_t entity_version;
+ /* Miscellaneous time values. */
+ apr_time_t date;
+ apr_time_t expire;
+ apr_time_t request_time;
+ apr_time_t response_time;
+} disk_cache_info_t;
+
+/*
+ * disk_cache_object_t
+ * Pointed to by cache_object_t::vobj
+ */
+typedef struct disk_cache_object {
+ const char *root; /* the location of the cache directory */
+ char *tempfile; /* temp file to hold the content */
+#if 0
+ int dirlevels; /* Number of levels of subdirectories */
+ int dirlength; /* Length of subdirectory names */
+#endif
+ char *datafile; /* name of file where the data will go */
+ char *hdrsfile; /* name of file where the hdrs will go */
+ char *hashfile; /* Computed hash key for this URI */
+ char *name;
+ apr_file_t *fd; /* data file */
+ apr_file_t *hfd; /* headers file */
+ apr_file_t *tfd; /* temporary file for data */
+ apr_off_t file_size; /* File size of the cached data file */
+ disk_cache_info_t disk_info; /* Header information. */
+} disk_cache_object_t;
+
+
+/*
+ * mod_disk_cache configuration
+ */
+/* TODO: Make defaults OS specific */
+#define CACHEFILE_LEN 20 /* must be less than HASH_LEN/2 */
+#define DEFAULT_DIRLEVELS 3
+#define DEFAULT_DIRLENGTH 2
+#define DEFAULT_MIN_FILE_SIZE 1
+#define DEFAULT_MAX_FILE_SIZE 1000000
+#define DEFAULT_CACHE_SIZE 1000000
+
+typedef struct {
+ const char* cache_root;
+ apr_size_t cache_root_len;
+ off_t space; /* Maximum cache size, in units of 1024 bytes (KB) */
+ apr_time_t maxexpire; /* Maximum time to keep cached files in msecs */
+ apr_time_t defaultexpire; /* default time to keep cached file in msecs */
+ double lmfactor; /* factor for estimating expires date */
+ apr_time_t gcinterval; /* garbage collection interval, in msec */
+ int dirlevels; /* Number of levels of subdirectories */
+ int dirlength; /* Length of subdirectory names */
+ int expirychk; /* true if expiry time is observed for cached files */
+ apr_size_t minfs; /* minimum file size for cached files */
+ apr_size_t maxfs; /* maximum file size for cached files */
+ apr_time_t mintm; /* minimum time margin for caching files */
+ /* dgc_time_t gcdt; time of day for daily garbage collection */
+ apr_array_header_t *gcclnun; /* gc_retain_t entries for unused files */
+ apr_array_header_t *gcclean; /* gc_retain_t entries for all files */
+ int maxgcmem; /* maximum memory used by garbage collection */
+} disk_cache_conf;
+
+module AP_MODULE_DECLARE_DATA disk_cache_module;
+
+/* Forward declarations */
+static int remove_entity(cache_handle_t *h);
+static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info *i);
+static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b);
+static apr_status_t recall_headers(cache_handle_t *h, request_rec *r);
+static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
+
+/*
+ * Local static functions
+ */
+#define CACHE_HEADER_SUFFIX ".header"
+#define CACHE_DATA_SUFFIX ".data"
+static char *header_file(apr_pool_t *p, disk_cache_conf *conf,
+ disk_cache_object_t *dobj, const char *name)
+{
+ if (!dobj->hashfile) {
+ dobj->hashfile = generate_name(p, conf->dirlevels, conf->dirlength,
+ name);
+ }
+ return apr_pstrcat(p, conf->cache_root, "/", dobj->hashfile,
+ CACHE_HEADER_SUFFIX, NULL);
+}
+
+static char *data_file(apr_pool_t *p, disk_cache_conf *conf,
+ disk_cache_object_t *dobj, const char *name)
+{
+ if (!dobj->hashfile) {
+ dobj->hashfile = generate_name(p, conf->dirlevels, conf->dirlength,
+ name);
+ }
+ return apr_pstrcat(p, conf->cache_root, "/", dobj->hashfile,
+ CACHE_DATA_SUFFIX, NULL);
+}
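+
+/* Illustrative path layout (not from the original source), assuming
+ * generate_name() spreads the hashed key across conf->dirlevels subdirectories
+ * of conf->dirlength characters each (defaults 3 and 2). A cached entry then
+ * ends up as something like
+ *   <CacheRoot>/aB/cD/eF/ghijklmnopqrst.header
+ *   <CacheRoot>/aB/cD/eF/ghijklmnopqrst.data
+ * i.e. cache_root + "/" + hashfile + CACHE_HEADER_SUFFIX / CACHE_DATA_SUFFIX.
+ */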
+
+static void mkdir_structure(disk_cache_conf *conf, char *file, apr_pool_t *pool)
+{
+ apr_status_t rv;
+ char *p;
+
+ for (p = file + conf->cache_root_len + 1;;) {
+ p = strchr(p, '/');
+ if (!p)
+ break;
+ *p = '\0';
+
+ rv = apr_dir_make(file,
+ APR_UREAD|APR_UWRITE|APR_UEXECUTE, pool);
+ if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) {
+ /* XXX */
+ }
+ *p = '/';
+ ++p;
+ }
+}
+
+static apr_status_t file_cache_el_final(disk_cache_object_t *dobj,
+ request_rec *r)
+{
+ /* move the data over */
+ if (dobj->tfd) {
+ apr_status_t rv;
+
+ apr_file_close(dobj->tfd);
+
+ /* This assumes that the tempfile is on the same file system
+ * as the cache_root. If not, then we need a file copy/move
+ * rather than a rename.
+ */
+ rv = apr_file_rename(dobj->tempfile, dobj->datafile, r->pool);
+ if (rv != APR_SUCCESS) {
+ /* XXX log */
+ }
+
+ dobj->tfd = NULL;
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t file_cache_errorcleanup(disk_cache_object_t *dobj, request_rec *r)
+{
+ /* Remove the header file and the body file. */
+ apr_file_remove(dobj->hdrsfile, r->pool);
+ apr_file_remove(dobj->datafile, r->pool);
+
+ /* If we opened the temporary data file, close and remove it. */
+ if (dobj->tfd) {
+ apr_file_close(dobj->tfd);
+ apr_file_remove(dobj->tempfile, r->pool);
+ dobj->tfd = NULL;
+ }
+
+ return APR_SUCCESS;
+}
+
+
+/* These two functions get and put state information into the data
+ * file for an ap_cache_el, this state information will be read
+ * and written transparent to clients of this module
+ */
+static int file_cache_recall_mydata(apr_file_t *fd, cache_info *info,
+ disk_cache_object_t *dobj, request_rec *r)
+{
+ apr_status_t rv;
+ char *urlbuff;
+ disk_cache_info_t disk_info;
+ apr_size_t len;
+
+ /* read the data from the cache file */
+ len = sizeof(disk_cache_info_t);
+ rv = apr_file_read_full(fd, &disk_info, len, &len);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ if (disk_info.format != DISK_FORMAT_VERSION) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "cache_disk: URL %s had an on-disk version mismatch",
+ r->uri);
+ return APR_EGENERAL;
+ }
+
+ /* Store it away so we can get it later. */
+ dobj->disk_info = disk_info;
+
+ info->date = disk_info.date;
+ info->expire = disk_info.expire;
+ info->request_time = disk_info.request_time;
+ info->response_time = disk_info.response_time;
+
+ /* Note that we could optimize this by conditionally doing the palloc
+ * depending upon the size. */
+ urlbuff = apr_palloc(r->pool, disk_info.name_len + 1);
+ len = disk_info.name_len;
+ rv = apr_file_read_full(fd, urlbuff, len, &len);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ urlbuff[disk_info.name_len] = '\0';
+
+ /* check that we have the same URL */
+ /* Would strncmp be correct? */
+ if (strcmp(urlbuff, dobj->name) != 0) {
+ return APR_EGENERAL;
+ }
+
+ return APR_SUCCESS;
+}
+
+/*
+ * Hook and mod_cache callback functions
+ */
+#define AP_TEMPFILE "/aptmpXXXXXX"
+static int create_entity(cache_handle_t *h, request_rec *r,
+ const char *key,
+ apr_off_t len)
+{
+ disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
+ &disk_cache_module);
+ cache_object_t *obj;
+ disk_cache_object_t *dobj;
+
+ if (conf->cache_root == NULL) {
+ return DECLINED;
+ }
+
+ /* If the Content-Length is still unknown, cache anyway */
+ if (len != -1 && (len < conf->minfs || len > conf->maxfs)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache_disk: URL %s failed the size check, "
+ "or is incomplete",
+ key);
+ return DECLINED;
+ }
+
+ /* Allocate and initialize cache_object_t and disk_cache_object_t */
+ h->cache_obj = obj = apr_pcalloc(r->pool, sizeof(*obj));
+ obj->vobj = dobj = apr_pcalloc(r->pool, sizeof(*dobj));
+
+ obj->key = apr_pstrdup(r->pool, key);
+ /* XXX Bad Temporary Cast - see cache_object_t notes */
+ obj->info.len = (apr_size_t) len;
+ obj->complete = 0; /* Cache object is not complete */
+
+ dobj->name = obj->key;
+ dobj->datafile = data_file(r->pool, conf, dobj, key);
+ dobj->hdrsfile = header_file(r->pool, conf, dobj, key);
+ dobj->tempfile = apr_pstrcat(r->pool, conf->cache_root, AP_TEMPFILE, NULL);
+
+ return OK;
+}
+
+static int open_entity(cache_handle_t *h, request_rec *r, const char *key)
+{
+ apr_status_t rc;
+ static int error_logged = 0;
+ disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
+ &disk_cache_module);
+ apr_finfo_t finfo;
+ cache_object_t *obj;
+ cache_info *info;
+ disk_cache_object_t *dobj;
+ int flags;
+
+ h->cache_obj = NULL;
+
+ /* Look up entity keyed to 'url' */
+ if (conf->cache_root == NULL) {
+ if (!error_logged) {
+ error_logged = 1;
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "disk_cache: Cannot cache files to disk without a CacheRoot specified.");
+ }
+ return DECLINED;
+ }
+
+ /* Create and init the cache object */
+ h->cache_obj = obj = apr_pcalloc(r->pool, sizeof(cache_object_t));
+ obj->vobj = dobj = apr_pcalloc(r->pool, sizeof(disk_cache_object_t));
+
+ info = &(obj->info);
+ obj->key = (char *) key;
+ dobj->name = (char *) key;
+ dobj->datafile = data_file(r->pool, conf, dobj, key);
+ dobj->hdrsfile = header_file(r->pool, conf, dobj, key);
+ dobj->tempfile = apr_pstrcat(r->pool, conf->cache_root, AP_TEMPFILE, NULL);
+
+ /* Open the data file */
+ flags = APR_READ|APR_BINARY;
+#ifdef APR_SENDFILE_ENABLED
+ flags |= APR_SENDFILE_ENABLED;
+#endif
+ rc = apr_file_open(&dobj->fd, dobj->datafile, flags, 0, r->pool);
+ if (rc != APR_SUCCESS) {
+ /* XXX: Log message */
+ return DECLINED;
+ }
+
+ /* Open the headers file */
+ flags = APR_READ|APR_BINARY|APR_BUFFERED;
+ rc = apr_file_open(&dobj->hfd, dobj->hdrsfile, flags, 0, r->pool);
+ if (rc != APR_SUCCESS) {
+ /* XXX: Log message */
+ return DECLINED;
+ }
+
+ rc = apr_file_info_get(&finfo, APR_FINFO_SIZE, dobj->fd);
+ if (rc == APR_SUCCESS) {
+ dobj->file_size = finfo.size;
+ }
+
+ /* Read the bytes to setup the cache_info fields */
+ rc = file_cache_recall_mydata(dobj->hfd, info, dobj, r);
+ if (rc != APR_SUCCESS) {
+ /* XXX log message */
+ return DECLINED;
+ }
+
+ /* Initialize the cache_handle callback functions */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "disk_cache: Recalled cached URL info header %s", dobj->name);
+ return OK;
+}
+
+static int remove_entity(cache_handle_t *h)
+{
+ /* Null out the cache object pointer so next time we start from scratch */
+ h->cache_obj = NULL;
+ return OK;
+}
+
+static int remove_url(const char *key)
+{
+ /* XXX: Delete file from cache! */
+ return OK;
+}
+
+static apr_status_t read_table(cache_handle_t *handle, request_rec *r,
+ apr_table_t *table, apr_file_t *file)
+{
+ char w[MAX_STRING_LEN];
+ char *l;
+ int p;
+ apr_status_t rv;
+
+ while (1) {
+
+ /* ### What about APR_EOF? */
+ rv = apr_file_gets(w, MAX_STRING_LEN - 1, file);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Premature end of cache headers.");
+ return rv;
+ }
+
+ /* Delete terminal (CR?)LF */
+
+ p = strlen(w);
+ /* Indeed, the host's '\n':
+ '\012' for UNIX; '\015' for MacOS; '\025' for OS/390
+ -- whatever the script generates.
+ */
+ if (p > 0 && w[p - 1] == '\n') {
+ if (p > 1 && w[p - 2] == CR) {
+ w[p - 2] = '\0';
+ }
+ else {
+ w[p - 1] = '\0';
+ }
+ }
+
+ /* If we've finished reading the headers, break out of the loop. */
+ if (w[0] == '\0') {
+ break;
+ }
+
+#if APR_CHARSET_EBCDIC
+ /* Chances are that we received an ASCII header text instead of
+ * the expected EBCDIC header lines. Try to auto-detect:
+ */
+ if (!(l = strchr(w, ':'))) {
+ int maybeASCII = 0, maybeEBCDIC = 0;
+ unsigned char *cp, native;
+ apr_size_t inbytes_left, outbytes_left;
+
+ for (cp = w; *cp != '\0'; ++cp) {
+ native = apr_xlate_conv_byte(ap_hdrs_from_ascii, *cp);
+ if (apr_isprint(*cp) && !apr_isprint(native))
+ ++maybeEBCDIC;
+ if (!apr_isprint(*cp) && apr_isprint(native))
+ ++maybeASCII;
+ }
+ if (maybeASCII > maybeEBCDIC) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "CGI Interface Error: Script headers apparently ASCII: (CGI = %s)",
+ r->filename);
+ inbytes_left = outbytes_left = cp - w;
+ apr_xlate_conv_buffer(ap_hdrs_from_ascii,
+ w, &inbytes_left, w, &outbytes_left);
+ }
+ }
+#endif /*APR_CHARSET_EBCDIC*/
+
+ /* if we see a bogus header don't ignore it. Shout and scream */
+ if (!(l = strchr(w, ':'))) {
+ return APR_EGENERAL;
+ }
+
+ *l++ = '\0';
+ while (*l && apr_isspace(*l)) {
+ ++l;
+ }
+
+ apr_table_add(table, w, l);
+ }
+
+ return APR_SUCCESS;
+}
+
+/*
+ * Reads the stored header lines from the cached header file into the
+ * handle's header tables. Returns an APR status code on file error.
+ * This routine tries to deal with overly long lines and continuation lines.
+ * @@@: XXX: FIXME: currently the headers are passed through un-merged.
+ * Is that okay, or should they be collapsed where possible?
+ */
+static apr_status_t recall_headers(cache_handle_t *h, request_rec *r)
+{
+ disk_cache_object_t *dobj = (disk_cache_object_t *) h->cache_obj->vobj;
+
+ /* This case should not happen... */
+ if (!dobj->hfd) {
+ /* XXX log message */
+ return APR_NOTFOUND;
+ }
+
+ h->req_hdrs = apr_table_make(r->pool, 20);
+ h->resp_hdrs = apr_table_make(r->pool, 20);
+ h->resp_err_hdrs = apr_table_make(r->pool, 20);
+
+ /* Call routine to read the header lines/status line */
+ read_table(h, r, h->resp_hdrs, dobj->hfd);
+ read_table(h, r, h->req_hdrs, dobj->hfd);
+
+ apr_file_close(dobj->hfd);
+
+ h->status = dobj->disk_info.status;
+ h->content_type = apr_table_get(h->resp_hdrs, "Content-Type");
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "disk_cache: Recalled headers for URL %s", dobj->name);
+ return APR_SUCCESS;
+}
+
+static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb)
+{
+ apr_bucket *e;
+ disk_cache_object_t *dobj = (disk_cache_object_t*) h->cache_obj->vobj;
+
+ e = apr_bucket_file_create(dobj->fd, 0, (apr_size_t) dobj->file_size, p,
+ bb->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(bb, e);
+ e = apr_bucket_eos_create(bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t store_table(apr_file_t *fd, apr_table_t *table)
+{
+ int i;
+ apr_status_t rv;
+ struct iovec iov[4];
+ apr_size_t amt;
+ apr_table_entry_t *elts;
+
+ elts = (apr_table_entry_t *) apr_table_elts(table)->elts;
+ for (i = 0; i < apr_table_elts(table)->nelts; ++i) {
+ if (elts[i].key != NULL) {
+ iov[0].iov_base = elts[i].key;
+ iov[0].iov_len = strlen(elts[i].key);
+ iov[1].iov_base = ": ";
+ iov[1].iov_len = sizeof(": ") - 1;
+ iov[2].iov_base = elts[i].val;
+ iov[2].iov_len = strlen(elts[i].val);
+ iov[3].iov_base = CRLF;
+ iov[3].iov_len = sizeof(CRLF) - 1;
+
+ rv = apr_file_writev(fd, (const struct iovec *) &iov, 4,
+ &amt);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+ }
+ iov[0].iov_base = CRLF;
+ iov[0].iov_len = sizeof(CRLF) - 1;
+ rv = apr_file_writev(fd, (const struct iovec *) &iov, 1,
+ &amt);
+ return rv;
+}
+
+static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info *info)
+{
+ disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
+ &disk_cache_module);
+ apr_status_t rv;
+ apr_size_t amt;
+ disk_cache_object_t *dobj = (disk_cache_object_t*) h->cache_obj->vobj;
+
+ if (!dobj->hfd) {
+ disk_cache_info_t disk_info;
+ struct iovec iov[2];
+
+ /* This is flaky... we need to manage the cache_info differently */
+ h->cache_obj->info = *info;
+
+ /* Remove old file with the same name. If remove fails, then
+ * perhaps we need to create the directory tree where we are
+ * about to write the new headers file.
+ */
+ rv = apr_file_remove(dobj->hdrsfile, r->pool);
+ if (rv != APR_SUCCESS) {
+ mkdir_structure(conf, dobj->hdrsfile, r->pool);
+ }
+
+ rv = apr_file_open(&dobj->hfd, dobj->hdrsfile,
+ APR_WRITE | APR_CREATE | APR_EXCL,
+ APR_OS_DEFAULT, r->pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ dobj->name = h->cache_obj->key;
+
+ disk_info.format = DISK_FORMAT_VERSION;
+ disk_info.date = info->date;
+ disk_info.expire = info->expire;
+ disk_info.entity_version = dobj->disk_info.entity_version++;
+ disk_info.request_time = info->request_time;
+ disk_info.response_time = info->response_time;
+ disk_info.status = info->status;
+
+ disk_info.name_len = strlen(dobj->name);
+
+ iov[0].iov_base = (void*)&disk_info;
+ iov[0].iov_len = sizeof(disk_cache_info_t);
+ iov[1].iov_base = dobj->name;
+ iov[1].iov_len = disk_info.name_len;
+
+ rv = apr_file_writev(dobj->hfd, (const struct iovec *) &iov, 2, &amt);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ if (r->headers_out) {
+ apr_table_t *headers_out;
+
+ headers_out = ap_cache_cacheable_hdrs_out(r->pool, r->headers_out,
+ r->server);
+
+ if (!apr_table_get(headers_out, "Content-Type") &&
+ r->content_type) {
+ apr_table_setn(headers_out, "Content-Type",
+ ap_make_content_type(r, r->content_type));
+ }
+
+ rv = store_table(dobj->hfd, headers_out);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ }
+
+ /* Parse the vary header and dump those fields from the headers_in. */
+ /* Make call to the same thing cache_select_url calls to crack Vary. */
+ /* @@@ Some day, not today. */
+ if (r->headers_in) {
+ apr_table_t *headers_in;
+
+ headers_in = ap_cache_cacheable_hdrs_out(r->pool, r->headers_in,
+ r->server);
+ rv = store_table(dobj->hfd, headers_in);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+ apr_file_close(dobj->hfd); /* flush and close */
+ }
+ else {
+ /* XXX log message */
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "disk_cache: Stored headers for URL %s", dobj->name);
+ return APR_SUCCESS;
+}
+
+static apr_status_t store_body(cache_handle_t *h, request_rec *r,
+ apr_bucket_brigade *bb)
+{
+ apr_bucket *e;
+ apr_status_t rv;
+ disk_cache_object_t *dobj = (disk_cache_object_t *) h->cache_obj->vobj;
+ disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
+ &disk_cache_module);
+
+ /* We write to a temp file and then atomically rename the file over
+ * in file_cache_el_final().
+ */
+ if (!dobj->tfd) {
+ rv = apr_file_mktemp(&dobj->tfd, dobj->tempfile,
+ APR_CREATE | APR_WRITE | APR_BINARY |
+ APR_BUFFERED | APR_EXCL, r->pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ dobj->file_size = 0;
+ }
+
+ for (e = APR_BRIGADE_FIRST(bb);
+ e != APR_BRIGADE_SENTINEL(bb);
+ e = APR_BUCKET_NEXT(e))
+ {
+ const char *str;
+ apr_size_t length, written;
+ apr_bucket_read(e, &str, &length, APR_BLOCK_READ);
+ rv = apr_file_write_full(dobj->tfd, str, length, &written);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "cache_disk: Error when writing cache file for URL %s",
+ h->cache_obj->key);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ file_cache_errorcleanup(dobj, r);
+ return APR_EGENERAL;
+ }
+ dobj->file_size += written;
+ if (dobj->file_size > conf->maxfs) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache_disk: URL %s failed the size check (%lu>%lu)",
+ h->cache_obj->key, (unsigned long)dobj->file_size,
+ (unsigned long)conf->maxfs);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ file_cache_errorcleanup(dobj, r);
+ return APR_EGENERAL;
+ }
+ }
+
+ /* Was this the final bucket? If yes, close the temp file and perform
+ * sanity checks.
+ */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
+ if (h->cache_obj->info.len <= 0) {
+ /* If the target value of the content length is unknown
+ * (h->cache_obj->info.len <= 0), check whether the connection has been
+ * aborted by the client to avoid caching an incomplete response body.
+ *
+ * This can happen with large responses from slow backends like
+ * Tomcat via mod_jk.
+ */
+ if (r->connection->aborted) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ "disk_cache: Discarding body for URL %s "
+ "because connection has been aborted.",
+ h->cache_obj->key);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ file_cache_errorcleanup(dobj, r);
+ return APR_EGENERAL;
+ }
+ /* XXX Fixme: file_size isn't constrained by size_t. */
+ h->cache_obj->info.len = dobj->file_size;
+ }
+ else if (h->cache_obj->info.len != dobj->file_size) {
+ /* "Content-Length" and actual content disagree in size. Log that. */
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "disk_cache: URL %s failed the size check (%lu != %lu)",
+ h->cache_obj->key,
+ (unsigned long)h->cache_obj->info.len,
+ (unsigned long)dobj->file_size);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ file_cache_errorcleanup(dobj, r);
+ return APR_EGENERAL;
+ }
+ if (dobj->file_size < conf->minfs) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache_disk: URL %s failed the size check (%lu<%lu)",
+ h->cache_obj->key, (unsigned long)dobj->file_size, (unsigned long)conf->minfs);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ file_cache_errorcleanup(dobj, r);
+ return APR_EGENERAL;
+ }
+
+ /* All checks were fine. Move tempfile to final destination */
+ /* Link to the perm file, and close the descriptor */
+ file_cache_el_final(dobj, r);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "disk_cache: Body for URL %s cached.", dobj->name);
+ }
+
+ return APR_SUCCESS;
+}
+
+static void *create_config(apr_pool_t *p, server_rec *s)
+{
+ disk_cache_conf *conf = apr_pcalloc(p, sizeof(disk_cache_conf));
+
+ /* XXX: Set default values */
+ conf->dirlevels = DEFAULT_DIRLEVELS;
+ conf->dirlength = DEFAULT_DIRLENGTH;
+ conf->space = DEFAULT_CACHE_SIZE;
+ conf->maxfs = DEFAULT_MAX_FILE_SIZE;
+ conf->minfs = DEFAULT_MIN_FILE_SIZE;
+ conf->expirychk = 1;
+
+ conf->cache_root = NULL;
+ conf->cache_root_len = 0;
+
+ return conf;
+}
+
+/*
+ * mod_disk_cache configuration directives handlers.
+ */
+static const char
+*set_cache_root(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ conf->cache_root = arg;
+ conf->cache_root_len = strlen(arg);
+ /* TODO: canonicalize cache_root and strip off any trailing slashes */
+
+ return NULL;
+}
+static const char
+*set_cache_size(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ conf->space = atoi(arg);
+ return NULL;
+}
+static const char
+*set_cache_gcint(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+/*
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+*/
+ /* XXX */
+ return NULL;
+}
+/*
+ * Consider eliminating the next two directives in favor of
+ * Ian's prime number hash...
+ * key = hash_fn( r->uri)
+ * filename = "/key % prime1 /key %prime2/key %prime3"
+ */
+static const char
+*set_cache_dirlevels(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ int val = atoi(arg);
+ if (val < 1)
+ return "CacheDirLevels value must be an integer greater than 0";
+ if (val * conf->dirlength > CACHEFILE_LEN)
+ return "CacheDirLevels*CacheDirLength value must not be higher than 20";
+ conf->dirlevels = val;
+ return NULL;
+}
+static const char
+*set_cache_dirlength(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ int val = atoi(arg);
+ if (val < 1)
+ return "CacheDirLength value must be an integer greater than 0";
+ if (val * conf->dirlevels > CACHEFILE_LEN)
+ return "CacheDirLevels*CacheDirLength value must not be higher than 20";
+
+ conf->dirlength = val;
+ return NULL;
+}
+static const char
+*set_cache_exchk(cmd_parms *parms, void *in_struct_ptr, int flag)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ conf->expirychk = flag;
+
+ return NULL;
+}
+static const char
+*set_cache_minfs(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ conf->minfs = atoi(arg);
+ return NULL;
+}
+static const char
+*set_cache_maxfs(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ conf->maxfs = atoi(arg);
+ return NULL;
+}
+static const char
+*set_cache_minetm(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ /* XXX
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ */
+ return NULL;
+}
+static const char
+*set_cache_gctime(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ /* XXX
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ */
+ return NULL;
+}
+static const char
+*add_cache_gcclean(cmd_parms *parms, void *in_struct_ptr, const char *arg, const char *arg1)
+{
+ /* XXX
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ */
+ return NULL;
+}
+static const char
+*add_cache_gcclnun(cmd_parms *parms, void *in_struct_ptr, const char *arg, const char *arg1)
+{
+ /* XXX
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ */
+ return NULL;
+}
+static const char
+*set_cache_maxgcmem(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ /* XXX
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ */
+ return NULL;
+}
+
+static const command_rec disk_cache_cmds[] =
+{
+ AP_INIT_TAKE1("CacheRoot", set_cache_root, NULL, RSRC_CONF,
+ "The directory to store cache files"),
+ AP_INIT_TAKE1("CacheSize", set_cache_size, NULL, RSRC_CONF,
+ "The maximum disk space used by the cache in KB"),
+ AP_INIT_TAKE1("CacheGcInterval", set_cache_gcint, NULL, RSRC_CONF,
+ "The interval between garbage collections, in hours"),
+ AP_INIT_TAKE1("CacheDirLevels", set_cache_dirlevels, NULL, RSRC_CONF,
+ "The number of levels of subdirectories in the cache"),
+ AP_INIT_TAKE1("CacheDirLength", set_cache_dirlength, NULL, RSRC_CONF,
+ "The number of characters in subdirectory names"),
+ AP_INIT_FLAG("CacheExpiryCheck", set_cache_exchk, NULL, RSRC_CONF,
+ "on if cache observes Expires date when seeking files"),
+ AP_INIT_TAKE1("CacheMinFileSize", set_cache_minfs, NULL, RSRC_CONF,
+ "The minimum file size to cache a document"),
+ AP_INIT_TAKE1("CacheMaxFileSize", set_cache_maxfs, NULL, RSRC_CONF,
+ "The maximum file size to cache a document"),
+ AP_INIT_TAKE1("CacheTimeMargin", set_cache_minetm, NULL, RSRC_CONF,
+ "The minimum time margin to cache a document"),
+ AP_INIT_TAKE1("CacheGcDaily", set_cache_gctime, NULL, RSRC_CONF,
+ "The time of day for garbage collection (24 hour clock)"),
+ AP_INIT_TAKE2("CacheGcUnused", add_cache_gcclnun, NULL, RSRC_CONF,
+ "The time in hours to retain unused files that match a URL"),
+ AP_INIT_TAKE2("CacheGcClean", add_cache_gcclean, NULL, RSRC_CONF,
+ "The time in hours to retain unchanged files that match a URL"),
+ AP_INIT_TAKE1("CacheGcMemUsage", set_cache_maxgcmem, NULL, RSRC_CONF,
+ "The maximum kilobytes of memory used for garbage collection"),
+ {NULL}
+};
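+
+/* Illustrative server configuration sketch (not part of the original source;
+ * the path and sizes are example values only):
+ *
+ *   CacheRoot        /usr/local/apache2/cache
+ *   CacheSize        512000
+ *   CacheDirLevels   3
+ *   CacheDirLength   2
+ *   CacheMinFileSize 1
+ *   CacheMaxFileSize 1000000
+ *
+ * Note that CacheDirLevels*CacheDirLength must not exceed CACHEFILE_LEN (20),
+ * as enforced by set_cache_dirlevels()/set_cache_dirlength() above.
+ */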
+
+static const cache_provider cache_disk_provider =
+{
+ &remove_entity,
+ &store_headers,
+ &store_body,
+ &recall_headers,
+ &recall_body,
+ &create_entity,
+ &open_entity,
+ &remove_url,
+};
+
+static void disk_cache_register_hook(apr_pool_t *p)
+{
+ /* cache initializer */
+ ap_register_provider(p, CACHE_PROVIDER_GROUP, "disk", "0",
+ &cache_disk_provider);
+}
+
+module AP_MODULE_DECLARE_DATA disk_cache_module = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ create_config, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ disk_cache_cmds, /* command apr_table_t */
+ disk_cache_register_hook /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.dsp
new file mode 100644
index 00000000..9e7bf622
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_disk_cache" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_disk_cache - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_disk_cache.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_disk_cache.mak" CFG="mod_disk_cache - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_disk_cache - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_disk_cache - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_disk_cache - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /Fd"Release\mod_disk_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_disk_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_disk_cache.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_disk_cache - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /Fd"Debug\mod_disk_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_disk_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_disk_cache.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_disk_cache - Win32 Release"
+# Name "mod_disk_cache - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_cache.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_disk_cache.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_disk_cache - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_disk_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_disk_cache.so "disk_cache_module for Apache" ../../include/ap_release.h > .\mod_disk_cache.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_disk_cache - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_disk_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_disk_cache.so "disk_cache_module for Apache" ../../include/ap_release.h > .\mod_disk_cache.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.c
new file mode 100644
index 00000000..06ac65f5
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.c
@@ -0,0 +1,215 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Originally written @ Covalent by Jim Jagielski
+ */
+
+/*
+ * mod_dumpio.c:
+ * Think of this as a filter sniffer for Apache 2.x. It logs
+ * all filter data right before and after it goes out on the
+ * wire (BUT right before SSL encoded or after SSL decoded).
+ * It can produce a *huge* amount of data.
+ */
+
+
+#include "httpd.h"
+#include "http_connection.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+
+module AP_MODULE_DECLARE_DATA dumpio_module ;
+
+typedef struct dumpio_conf_t {
+ int enable_input;
+ int enable_output;
+} dumpio_conf_t;
+
+/*
+ * Workhorse function: simply log to the current error_log
+ * info about the data in the bucket as well as the data itself
+ */
+static void dumpit(ap_filter_t *f, apr_bucket *b)
+{
+ conn_rec *c = f->c;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
+ "mod_dumpio: %s (%s-%s): %" APR_SIZE_T_FMT " bytes",
+ f->frec->name,
+ (APR_BUCKET_IS_METADATA(b)) ? "metadata" : "data",
+ b->type->name,
+ b->length) ;
+
+ if (!(APR_BUCKET_IS_METADATA(b))) {
+ const char *buf;
+ apr_size_t nbytes;
+ char *obuf;
+ if (apr_bucket_read(b, &buf, &nbytes, APR_BLOCK_READ) == APR_SUCCESS) {
+ if (nbytes) {
+ obuf = malloc(nbytes+1); /* use pool? */
+ memcpy(obuf, buf, nbytes);
+ obuf[nbytes] = '\0';
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
+ "mod_dumpio: %s (%s-%s): %s",
+ f->frec->name,
+ (APR_BUCKET_IS_METADATA(b)) ? "metadata" : "data",
+ b->type->name,
+ obuf);
+ free(obuf);
+ }
+ } else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
+ "mod_dumpio: %s (%s-%s): %s",
+ f->frec->name,
+ (APR_BUCKET_IS_METADATA(b)) ? "metadata" : "data",
+ b->type->name,
+ "error reading data");
+ }
+ }
+}
+
+#define whichmode( mode ) \
+ ( (( mode ) == AP_MODE_READBYTES) ? "readbytes" : \
+ (( mode ) == AP_MODE_GETLINE) ? "getline" : \
+ (( mode ) == AP_MODE_EATCRLF) ? "eatcrlf" : \
+ (( mode ) == AP_MODE_SPECULATIVE) ? "speculative" : \
+ (( mode ) == AP_MODE_EXHAUSTIVE) ? "exhaustive" : \
+ (( mode ) == AP_MODE_INIT) ? "init" : "unknown" \
+ )
+
+static int dumpio_input_filter (ap_filter_t *f, apr_bucket_brigade *bb,
+ ap_input_mode_t mode, apr_read_type_e block, apr_off_t readbytes)
+{
+
+ apr_bucket *b;
+ apr_status_t ret;
+ conn_rec *c = f->c;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
+ "mod_dumpio: %s [%s-%s] %" APR_OFF_T_FMT " readbytes",
+ f->frec->name,
+ whichmode(mode),
+ ((block) == APR_BLOCK_READ) ? "blocking" : "nonblocking",
+ readbytes) ;
+
+ ret = ap_get_brigade(f->next, bb, mode, block, readbytes);
+
+ if (ret == APR_SUCCESS) {
+ for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
+ dumpit(f, b);
+ }
+ } else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
+ "mod_dumpio: %s - %d", f->frec->name, ret) ;
+ }
+
+    return ret ;
+}
+
+static int dumpio_output_filter (ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+ conn_rec *c = f->c;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server, "mod_dumpio: %s", f->frec->name) ;
+
+ for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
+ /*
+ * If we ever see an EOS, make sure to FLUSH.
+ */
+ if (APR_BUCKET_IS_EOS(b)) {
+ apr_bucket *flush = apr_bucket_flush_create(f->c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(b, flush);
+ }
+ dumpit(f, b);
+ }
+
+ return ap_pass_brigade(f->next, bb) ;
+}
+
+static int dumpio_pre_conn(conn_rec *c, void *csd)
+{
+ dumpio_conf_t *ptr =
+ (dumpio_conf_t *) ap_get_module_config(c->base_server->module_config,
+ &dumpio_module);
+
+ if (ptr->enable_input)
+ ap_add_input_filter("DUMPIO_IN", NULL, NULL, c);
+ if (ptr->enable_output)
+ ap_add_output_filter("DUMPIO_OUT", NULL, NULL, c);
+ return OK;
+}
+
+static void dumpio_register_hooks(apr_pool_t *p)
+{
+/*
+ * Register at AP_FTYPE_CONNECTION + 3, just below the SSL filters (which
+ * run at AP_FTYPE_CONNECTION + 5), so output is logged before it is
+ * encrypted and input after it has been decrypted.
+ */
+ ap_register_output_filter("DUMPIO_OUT", dumpio_output_filter,
+ NULL, AP_FTYPE_CONNECTION + 3) ;
+
+ ap_register_input_filter("DUMPIO_IN", dumpio_input_filter,
+ NULL, AP_FTYPE_CONNECTION + 3) ;
+
+ ap_hook_pre_connection(dumpio_pre_conn, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+static void *dumpio_create_sconfig(apr_pool_t *p, server_rec *s)
+{
+ dumpio_conf_t *ptr = apr_pcalloc(p, sizeof *ptr);
+ ptr->enable_input = ptr->enable_output = 0;
+ return ptr;
+}
+
+static const char *dumpio_enable_input(cmd_parms *cmd, void *dummy, int arg)
+{
+ dumpio_conf_t *ptr =
+ (dumpio_conf_t *) ap_get_module_config(cmd->server->module_config,
+ &dumpio_module);
+
+ ptr->enable_input = arg;
+ return NULL;
+}
+
+static const char *dumpio_enable_output(cmd_parms *cmd, void *dummy, int arg)
+{
+ dumpio_conf_t *ptr =
+ (dumpio_conf_t *) ap_get_module_config(cmd->server->module_config,
+ &dumpio_module);
+
+ ptr->enable_output = arg;
+ return NULL;
+}
+
+static const command_rec dumpio_cmds[] = {
+ AP_INIT_FLAG("DumpIOInput", dumpio_enable_input, NULL,
+ RSRC_CONF, "Enable I/O Dump on Input Data"),
+ AP_INIT_FLAG("DumpIOOutput", dumpio_enable_output, NULL,
+ RSRC_CONF, "Enable I/O Dump on Output Data"),
+ { NULL }
+};
+
+module AP_MODULE_DECLARE_DATA dumpio_module = {
+ STANDARD20_MODULE_STUFF,
+ NULL,
+ NULL,
+ dumpio_create_sconfig,
+ NULL,
+ dumpio_cmds,
+ dumpio_register_hooks
+};
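The "/* use pool? */" note in dumpit() above points at an alternative to malloc()/free(): copying the bucket data out of a pool so the allocation is reclaimed automatically. A minimal sketch of that variant, assuming apr_strings.h is included for apr_pstrmemdup() and that the connection pool is an acceptable lifetime for the copy (on a long keep-alive connection a short-lived subpool would bound memory better):

static void dumpit_pooled(ap_filter_t *f, apr_bucket *b)
{
    conn_rec *c = f->c;
    const char *buf;
    apr_size_t nbytes;

    if (APR_BUCKET_IS_METADATA(b)) {
        return;
    }
    if (apr_bucket_read(b, &buf, &nbytes, APR_BLOCK_READ) == APR_SUCCESS
        && nbytes) {
        /* apr_pstrmemdup() copies nbytes bytes and NUL-terminates the copy,
         * so it is safe to pass to the "%s" conversion below. */
        char *obuf = apr_pstrmemdup(c->pool, buf, nbytes);
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                     "mod_dumpio: %s (data-%s): %s",
                     f->frec->name, b->type->name, obuf);
    }
}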
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.dsp
new file mode 100644
index 00000000..1126ddc9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_dumpio" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_dumpio - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_dumpio.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_dumpio.mak" CFG="mod_dumpio - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_dumpio - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_dumpio - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_dumpio - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_dumpio_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_dumpio.so" /base:@..\..\os\win32\BaseAddr.ref,mod_dumpio.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_dumpio.so" /base:@..\..\os\win32\BaseAddr.ref,mod_dumpio.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_dumpio - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_dumpio_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_dumpio.so" /base:@..\..\os\win32\BaseAddr.ref,mod_dumpio.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_dumpio.so" /base:@..\..\os\win32\BaseAddr.ref,mod_dumpio.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_dumpio - Win32 Release"
+# Name "mod_dumpio - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_dumpio.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_dumpio.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_dumpio - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_dumpio.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_dumpio.so "dumpio_module for Apache" ../../include/ap_release.h > .\mod_dumpio.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_dumpio - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_dumpio.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_dumpio.so "dumpio_module for Apache" ../../include/ap_release.h > .\mod_dumpio.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_example.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_example.c
new file mode 100644
index 00000000..5fae6a20
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_example.c
@@ -0,0 +1,1313 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Apache example module. Provide demonstrations of how modules do things.
+ * It is not meant to be used in a production server. Since it participates
+ * in all of the processing phases, it could conceivably interfere with
+ * the proper operation of other modules -- particularly the ones related
+ * to security.
+ *
+ * In the interest of brevity, all functions and structures internal to
+ * this module, but which may have counterparts in *real* modules, are
+ * prefixed with 'x_' instead of 'example_'.
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "util_script.h"
+#include "http_connection.h"
+
+#include "apr_strings.h"
+
+#include <stdio.h>
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* Data declarations. */
+/* */
+/* Here are the static cells and structure declarations private to our */
+/* module. */
+/* */
+/*--------------------------------------------------------------------------*/
+
+/*
+ * Sample configuration record. Used for both per-directory and per-server
+ * configuration data.
+ *
+ * It's perfectly reasonable to have two different structures for the two
+ * different environments. The same command handlers will be called for
+ * both, though, so the handlers need to be able to tell them apart. One
+ * possibility is for both structures to start with an int which is 0 for
+ * one and 1 for the other.
+ *
+ * Note that while the per-directory and per-server configuration records are
+ * available to most of the module handlers, they should be treated as
+ * READ-ONLY by all except the command and merge handlers. Sometimes handlers
+ * are handed a record that applies to the current location by implication or
+ * inheritance, and modifying it will change the rules for other locations.
+ */
+typedef struct x_cfg {
+ int cmode; /* Environment to which record applies
+ * (directory, server, or combination).
+ */
+#define CONFIG_MODE_SERVER 1
+#define CONFIG_MODE_DIRECTORY 2
+#define CONFIG_MODE_COMBO 3 /* Shouldn't ever happen. */
+ int local; /* Boolean: "Example" directive declared
+ * here?
+ */
+ int congenital; /* Boolean: did we inherit an "Example"? */
+ char *trace; /* Pointer to trace string. */
+ char *loc; /* Location to which this record applies. */
+} x_cfg;
+
+/*
+ * Let's set up a module-local static cell to point to the accreting callback
+ * trace. As each API callback is made to us, we'll tack on the particulars
+ * to whatever we've already recorded. To avoid massive memory bloat as
+ * directories are walked again and again, we record the routine/environment
+ * the first time (non-request context only), and ignore subsequent calls for
+ * the same routine/environment.
+ */
+static const char *trace = NULL;
+static apr_table_t *static_calls_made = NULL;
+
+/*
+ * To avoid leaking memory from pools other than the per-request one, we
+ * allocate a module-private pool, and then use a sub-pool of that which gets
+ * freed each time we modify the trace. That way previous layers of trace
+ * data don't get lost.
+ */
+static apr_pool_t *x_pool = NULL;
+static apr_pool_t *x_subpool = NULL;
+
+/*
+ * Declare ourselves so the configuration routines can find and know us.
+ * We'll fill it in at the end of the module.
+ */
+module AP_MODULE_DECLARE_DATA example_module;
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* The following pseudo-prototype declarations illustrate the parameters */
+/* passed to command handlers for the different types of directive */
+/* syntax. If an argument was specified in the directive definition */
+/* (look for "command_rec" below), it's available to the command handler */
+/* via the (void *) info field in the cmd_parms argument passed to the */
+/* handler (cmd->info for the examples below). */
+/* */
+/*--------------------------------------------------------------------------*/
+
+/*
+ * Command handler for a NO_ARGS directive. Declared in the command_rec
+ * list with
+ * AP_INIT_NO_ARGS("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_NO_ARGS(cmd_parms *cmd, void *mconfig);
+ */
+
+/*
+ * Command handler for a RAW_ARGS directive. The "args" argument is the text
+ * of the commandline following the directive itself. Declared in the
+ * command_rec list with
+ * AP_INIT_RAW_ARGS("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_RAW_ARGS(cmd_parms *cmd, void *mconfig,
+ * const char *args);
+ */
+
+/*
+ * Command handler for a FLAG directive. The single parameter is passed in
+ * "bool", which is either zero or not for Off or On respectively.
+ * Declared in the command_rec list with
+ * AP_INIT_FLAG("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_FLAG(cmd_parms *cmd, void *mconfig, int bool);
+ */
+
+/*
+ * Command handler for a TAKE1 directive. The single parameter is passed in
+ * "word1". Declared in the command_rec list with
+ * AP_INIT_TAKE1("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE1(cmd_parms *cmd, void *mconfig,
+ * char *word1);
+ */
+
+/*
+ * Command handler for a TAKE2 directive. TAKE2 commands must always have
+ * exactly two arguments. Declared in the command_rec list with
+ * AP_INIT_TAKE2("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE2(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2);
+ */
+
+/*
+ * Command handler for a TAKE3 directive. Like TAKE2, these must have exactly
+ * three arguments, or the parser complains and doesn't bother calling us.
+ * Declared in the command_rec list with
+ * AP_INIT_TAKE3("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE3(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2, char *word3);
+ */
+
+/*
+ * Command handler for a TAKE12 directive. These can take either one or two
+ * arguments.
+ * - word2 is a NULL pointer if no second argument was specified.
+ * Declared in the command_rec list with
+ * AP_INIT_TAKE12("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE12(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2);
+ */
+
+/*
+ * Command handler for a TAKE123 directive. A TAKE123 directive can be given,
+ * as might be expected, one, two, or three arguments.
+ * - word2 is a NULL pointer if no second argument was specified.
+ * - word3 is a NULL pointer if no third argument was specified.
+ * Declared in the command_rec list with
+ * AP_INIT_TAKE123("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE123(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2, char *word3);
+ */
+
+/*
+ * Command handler for a TAKE13 directive. Either one or three arguments are
+ * permitted - no two-parameters-only syntax is allowed.
+ * - word2 and word3 are NULL pointers if only one argument was specified.
+ * Declared in the command_rec list with
+ * AP_INIT_TAKE13("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE13(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2, char *word3);
+ */
+
+/*
+ * Command handler for a TAKE23 directive. At least two and as many as three
+ * arguments must be specified.
+ * - word3 is a NULL pointer if no third argument was specified.
+ * Declared in the command_rec list with
+ * AP_INIT_TAKE23("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE23(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2, char *word3);
+ */
+
+/*
+ * Command handler for a ITERATE directive.
+ * - Handler is called once for each of n arguments given to the directive.
+ * - word1 points to each argument in turn.
+ * Declared in the command_rec list with
+ * AP_INIT_ITERATE("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_ITERATE(cmd_parms *cmd, void *mconfig,
+ * char *word1);
+ */
+
+/*
+ * Command handler for a ITERATE2 directive.
+ * - Handler is called once for each of the second and subsequent arguments
+ * given to the directive.
+ * - word1 is the same for each call for a particular directive instance (the
+ * first argument).
+ * - word2 points to each of the second and subsequent arguments in turn.
+ * Declared in the command_rec list with
+ * AP_INIT_ITERATE2("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_ITERATE2(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2);
+ */
+
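The pseudo-prototypes above only describe signatures; the module itself implements just the NO_ARGS form (cmd_example(), further down). As a sketch of one of the other shapes, a hypothetical TAKE1 handler for an invented "ExampleTrace <string>" directive could reuse the otherwise-unused trace field of x_cfg, and the trace_add() helper defined below (the directive name and this handler are illustrative only, not part of the module):

static const char *cmd_example_take1(cmd_parms *cmd, void *mconfig,
                                     const char *word1)
{
    x_cfg *cfg = (x_cfg *) mconfig;

    /* Copy the argument into the per-directory record; cmd->pool lives as
     * long as the configuration itself does. */
    cfg->trace = apr_pstrdup(cmd->pool, word1);
    trace_add(cmd->server, NULL, cfg, "cmd_example_take1()");
    return NULL;    /* NULL means "no error" */
}

It would be wired into the command table (see x_cmds[] near the end of the file) with an entry such as:

    AP_INIT_TAKE1("ExampleTrace", cmd_example_take1, NULL, OR_OPTIONS,
                  "hypothetical one-argument directive"),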
+/*--------------------------------------------------------------------------*/
+/* */
+/* These routines are strictly internal to this module, and support its */
+/* operation. They are not referenced by any external portion of the */
+/* server. */
+/* */
+/*--------------------------------------------------------------------------*/
+
+/*
+ * Locate our directory configuration record for the current request.
+ */
+static x_cfg *our_dconfig(const request_rec *r)
+{
+ return (x_cfg *) ap_get_module_config(r->per_dir_config, &example_module);
+}
+
+#if 0
+/*
+ * Locate our server configuration record for the specified server.
+ */
+static x_cfg *our_sconfig(const server_rec *s)
+{
+ return (x_cfg *) ap_get_module_config(s->module_config, &example_module);
+}
+
+/*
+ * Likewise for our configuration record for the specified request.
+ */
+static x_cfg *our_rconfig(const request_rec *r)
+{
+ return (x_cfg *) ap_get_module_config(r->request_config, &example_module);
+}
+#endif
+
+/*
+ * Likewise for our configuration record for a connection.
+ */
+static x_cfg *our_cconfig(const conn_rec *c)
+{
+ return (x_cfg *) ap_get_module_config(c->conn_config, &example_module);
+}
+
+/*
+ * This routine sets up some module-wide cells if they haven't been already.
+ */
+static void setup_module_cells(void)
+{
+ /*
+ * If we haven't already allocated our module-private pool, do so now.
+ */
+ if (x_pool == NULL) {
+ apr_pool_create(&x_pool, NULL);
+ };
+ /*
+ * Likewise for the table of routine/environment pairs we visit outside of
+ * request context.
+ */
+ if (static_calls_made == NULL) {
+ static_calls_made = apr_table_make(x_pool, 16);
+ };
+}
+
+/*
+ * This routine is used to add a trace of a callback to the list. We're
+ * passed the server record (if available), the request record (if available),
+ * a pointer to our private configuration record (if available) for the
+ * environment to which the callback is supposed to apply, and some text. We
+ * turn this into a textual representation and add it to the tail of the list.
+ * The list can be displayed by the x_handler() routine.
+ *
+ * If the call occurs within a request context (i.e., we're passed a request
+ * record), we put the trace into the request apr_pool_t and attach it to the
+ * request via the notes mechanism. Otherwise, the trace gets added
+ * to the static (non-request-specific) list.
+ *
+ * Note that the r->notes table is only for storing strings; if you need to
+ * maintain per-request data of any other type, you need to use another
+ * mechanism.
+ */
+
+#define TRACE_NOTE "example-trace"
+
+static void trace_add(server_rec *s, request_rec *r, x_cfg *mconfig,
+ const char *note)
+{
+ const char *sofar;
+ char *addon;
+ char *where;
+ apr_pool_t *p;
+ const char *trace_copy;
+
+ /*
+ * Make sure our pools and tables are set up - we need 'em.
+ */
+ setup_module_cells();
+ /*
+ * Now, if we're in request-context, we use the request pool.
+ */
+ if (r != NULL) {
+ p = r->pool;
+ if ((trace_copy = apr_table_get(r->notes, TRACE_NOTE)) == NULL) {
+ trace_copy = "";
+ }
+ }
+ else {
+ /*
+ * We're not in request context, so the trace gets attached to our
+ * module-wide pool. We do the create/destroy every time we're called
+ * in non-request context; this avoids leaking memory in some of
+ * the subsequent calls that allocate memory only once (such as the
+ * key formation below).
+ *
+ * Make a new sub-pool and copy any existing trace to it. Point the
+ * trace cell at the copied value.
+ */
+ apr_pool_create(&p, x_pool);
+ if (trace != NULL) {
+ trace = apr_pstrdup(p, trace);
+ }
+ /*
+ * Now, if we have a sub-pool from before, nuke it and replace with
+ * the one we just allocated.
+ */
+ if (x_subpool != NULL) {
+ apr_pool_destroy(x_subpool);
+ }
+ x_subpool = p;
+ trace_copy = trace;
+ }
+ /*
+ * If we weren't passed a configuration record, we can't figure out to
+ * what location this call applies. This only happens for co-routines
+ * that don't operate in a particular directory or server context. If we
+ * got a valid record, extract the location (directory or server) to which
+ * it applies.
+ */
+ where = (mconfig != NULL) ? mconfig->loc : "nowhere";
+ where = (where != NULL) ? where : "";
+ /*
+ * Now, if we're not in request context, see if we've been called with
+ * this particular combination before. The apr_table_t is allocated in the
+ * module's private pool, which doesn't get destroyed.
+ */
+ if (r == NULL) {
+ char *key;
+
+ key = apr_pstrcat(p, note, ":", where, NULL);
+ if (apr_table_get(static_calls_made, key) != NULL) {
+ /*
+ * Been here, done this.
+ */
+ return;
+ }
+ else {
+ /*
+ * First time for this combination of routine and environment -
+ * log it so we don't do it again.
+ */
+ apr_table_set(static_calls_made, key, "been here");
+ }
+ }
+ addon = apr_pstrcat(p,
+ " <li>\n"
+ " <dl>\n"
+ " <dt><samp>", note, "</samp></dt>\n"
+ " <dd><samp>[", where, "]</samp></dd>\n"
+ " </dl>\n"
+ " </li>\n",
+ NULL);
+ sofar = (trace_copy == NULL) ? "" : trace_copy;
+ trace_copy = apr_pstrcat(p, sofar, addon, NULL);
+ if (r != NULL) {
+ apr_table_set(r->notes, TRACE_NOTE, trace_copy);
+ }
+ else {
+ trace = trace_copy;
+ }
+ /*
+ * You *could* change the following if you wanted to see the calling
+ * sequence reported in the server's error_log, but beware - almost all of
+ * these co-routines are called for every single request, and the impact
+ * on the size (and readability) of the error_log is considerable.
+ */
+#define EXAMPLE_LOG_EACH 0
+ if (EXAMPLE_LOG_EACH && (s != NULL)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "mod_example: %s", note);
+ }
+}
+
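The comment above trace_add() notes that r->notes can only carry strings. The usual way to hang an arbitrary structure off a single request, hinted at there (and read back by the disabled our_rconfig() helper earlier in the file), is the per-request module config vector. A minimal sketch, with an invented x_note type purely for illustration:

typedef struct x_note {
    int counter;               /* anything that does not fit in a string */
} x_note;

static x_note *x_get_note(request_rec *r)
{
    x_note *note = ap_get_module_config(r->request_config, &example_module);

    if (note == NULL) {
        /* Allocate from r->pool so the data vanishes with the request. */
        note = apr_pcalloc(r->pool, sizeof(*note));
        ap_set_module_config(r->request_config, &example_module, note);
    }
    return note;
}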
+/*--------------------------------------------------------------------------*/
+/* We prototyped the various syntax for command handlers (routines that */
+/* are called when the configuration parser detects a directive declared */
+/* by our module) earlier. Now we actually declare a "real" routine that */
+/* will be invoked by the parser when our "real" directive is */
+/* encountered. */
+/* */
+/* If a command handler encounters a problem processing the directive, it */
+/* signals this fact by returning a non-NULL pointer to a string */
+/* describing the problem. */
+/* */
+/* The magic return value DECLINE_CMD is used to deal with directives */
+/* that might be declared by multiple modules. If the command handler */
+/* returns NULL, the directive was processed; if it returns DECLINE_CMD, */
+/* the next module (if any) that declares the directive is given a chance */
+/* at it. If it returns any other value, it's treated as the text of an */
+/* error message. */
+/*--------------------------------------------------------------------------*/
+/*
+ * Command handler for the NO_ARGS "Example" directive. All we do is mark the
+ * call in the trace log, and flag the applicability of the directive to the
+ * current location in that location's configuration record.
+ */
+static const char *cmd_example(cmd_parms *cmd, void *mconfig)
+{
+ x_cfg *cfg = (x_cfg *) mconfig;
+
+ /*
+ * "Example Wuz Here"
+ */
+ cfg->local = 1;
+ trace_add(cmd->server, NULL, cfg, "cmd_example()");
+ return NULL;
+}
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* Now we declare our content handlers, which are invoked when the server */
+/* encounters a document which our module is supposed to have a chance to */
+/* see. (See mod_mime's SetHandler and AddHandler directives, and the */
+/* mod_info and mod_status examples, for more details.) */
+/* */
+/* Since content handlers are dumping data directly into the connection */
+/* (using the r*() routines, such as rputs() and rprintf()) without */
+/* intervention by other parts of the server, they need to make */
+/* sure any accumulated HTTP headers are sent first. This is done by */
+/* calling send_http_header(). Otherwise, no header will be sent at all, */
+/* and the output sent to the client will actually be HTTP-uncompliant. */
+/*--------------------------------------------------------------------------*/
+/*
+ * Sample content handler. All this does is display the call list that has
+ * been built up so far.
+ *
+ * The return value instructs the caller concerning what happened and what to
+ * do next:
+ * OK ("we did our thing")
+ * DECLINED ("this isn't something with which we want to get involved")
+ * HTTP_mumble ("an error status should be reported")
+ */
+static int x_handler(request_rec *r)
+{
+ x_cfg *dcfg;
+
+ if (strcmp(r->handler, "example-handler")) {
+ return DECLINED;
+ }
+
+ dcfg = our_dconfig(r);
+ trace_add(r->server, r, dcfg, "x_handler()");
+ /*
+ * We're about to start sending content, so we need to force the HTTP
+ * headers to be sent at this point. Otherwise, no headers will be sent
+ * at all. We can set any we like first, of course. **NOTE** Here's
+ * where you set the "Content-type" header, and you do so by putting it in
+ * r->content_type, *not* r->headers_out("Content-type"). If you don't
+ * set it, it will be filled in with the server's default type (typically
+ * "text/plain"). You *must* also ensure that r->content_type is lower
+ * case.
+ *
+     * (In Apache 1.3 a hard timeout also had to be started here; the 2.x
+     * core handles connection timeouts itself, so no timer is needed.)
+ */
+ ap_set_content_type(r, "text/html");
+ /*
+ * If we're only supposed to send header information (HEAD request), we're
+ * already there.
+ */
+ if (r->header_only) {
+ return OK;
+ }
+
+ /*
+ * Now send our actual output. Since we tagged this as being
+ * "text/html", we need to embed any HTML.
+ */
+ ap_rputs(DOCTYPE_HTML_3_2, r);
+ ap_rputs("<HTML>\n", r);
+ ap_rputs(" <HEAD>\n", r);
+ ap_rputs(" <TITLE>mod_example Module Content-Handler Output\n", r);
+ ap_rputs(" </TITLE>\n", r);
+ ap_rputs(" </HEAD>\n", r);
+ ap_rputs(" <BODY>\n", r);
+ ap_rputs(" <H1><SAMP>mod_example</SAMP> Module Content-Handler Output\n", r);
+ ap_rputs(" </H1>\n", r);
+ ap_rputs(" <P>\n", r);
+ ap_rprintf(r, " Apache HTTP Server version: \"%s\"\n",
+ ap_get_server_version());
+ ap_rputs(" <BR>\n", r);
+ ap_rprintf(r, " Server built: \"%s\"\n", ap_get_server_built());
+ ap_rputs(" </P>\n", r);;
+ ap_rputs(" <P>\n", r);
+ ap_rputs(" The format for the callback trace is:\n", r);
+ ap_rputs(" </P>\n", r);
+ ap_rputs(" <DL>\n", r);
+ ap_rputs(" <DT><EM>n</EM>.<SAMP>&lt;routine-name&gt;", r);
+ ap_rputs("(&lt;routine-data&gt;)</SAMP>\n", r);
+ ap_rputs(" </DT>\n", r);
+ ap_rputs(" <DD><SAMP>[&lt;applies-to&gt;]</SAMP>\n", r);
+ ap_rputs(" </DD>\n", r);
+ ap_rputs(" </DL>\n", r);
+ ap_rputs(" <P>\n", r);
+ ap_rputs(" The <SAMP>&lt;routine-data&gt;</SAMP> is supplied by\n", r);
+ ap_rputs(" the routine when it requests the trace,\n", r);
+ ap_rputs(" and the <SAMP>&lt;applies-to&gt;</SAMP> is extracted\n", r);
+ ap_rputs(" from the configuration record at the time of the trace.\n", r);
+ ap_rputs(" <STRONG>SVR()</STRONG> indicates a server environment\n", r);
+ ap_rputs(" (blank means the main or default server, otherwise it's\n", r);
+ ap_rputs(" the name of the VirtualHost); <STRONG>DIR()</STRONG>\n", r);
+ ap_rputs(" indicates a location in the URL or filesystem\n", r);
+ ap_rputs(" namespace.\n", r);
+ ap_rputs(" </P>\n", r);
+ ap_rprintf(r, " <H2>Static callbacks so far:</H2>\n <OL>\n%s </OL>\n",
+ trace);
+ ap_rputs(" <H2>Request-specific callbacks so far:</H2>\n", r);
+ ap_rprintf(r, " <OL>\n%s </OL>\n", apr_table_get(r->notes, TRACE_NOTE));
+ ap_rputs(" <H2>Environment for <EM>this</EM> call:</H2>\n", r);
+ ap_rputs(" <UL>\n", r);
+ ap_rprintf(r, " <LI>Applies-to: <SAMP>%s</SAMP>\n </LI>\n", dcfg->loc);
+ ap_rprintf(r, " <LI>\"Example\" directive declared here: %s\n </LI>\n",
+ (dcfg->local ? "YES" : "NO"));
+ ap_rprintf(r, " <LI>\"Example\" inherited: %s\n </LI>\n",
+ (dcfg->congenital ? "YES" : "NO"));
+ ap_rputs(" </UL>\n", r);
+ ap_rputs(" </BODY>\n", r);
+ ap_rputs("</HTML>\n", r);
+ /*
+     * (Likewise, an Apache 1.3 module would cancel its hard timeout at this
+     * point; in 2.x there is no per-request timer to clean up.)
+ */
+ /*
+ * We did what we wanted to do, so tell the rest of the server we
+ * succeeded.
+ */
+ return OK;
+}
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* Now let's declare routines for each of the callback phase in order. */
+/* (That's the order in which they're listed in the callback list, *not */
+/* the order in which the server calls them! See the command_rec */
+/* declaration near the bottom of this file.) Note that these may be */
+/* called for situations that don't relate primarily to our function - in */
+/* other words, the fixup handler shouldn't assume that the request has */
+/* to do with "example" stuff. */
+/* */
+/* With the exception of the content handler, all of our routines will be */
+/* called for each request, unless an earlier handler from another module */
+/* aborted the sequence. */
+/* */
+/* Handlers that are declared as "int" can return the following: */
+/* */
+/* OK Handler accepted the request and did its thing with it. */
+/* DECLINED Handler took no action. */
+/* HTTP_mumble Handler looked at request and found it wanting. */
+/* */
+/* What the server does after calling a module handler depends upon the */
+/* handler's return value. In all cases, if the handler returns */
+/* DECLINED, the server will continue to the next module with an handler */
+/* for the current phase.  However, if the handler returns a non-OK,        */
+/* non-DECLINED status, the server aborts the request right there. If */
+/* the handler returns OK, the server's next action is phase-specific; */
+/* see the individual handler comments below for details. */
+/* */
+/*--------------------------------------------------------------------------*/
+/*
+ * This function is called during server initialisation. Any information
+ * that needs to be recorded must be in static cells, since there's no
+ * configuration record.
+ *
+ * There is no return value.
+ */
+
+/*
+ * This function is called when an heavy-weight process (such as a child) is
+ * being run down or destroyed. As with the child initialisation function,
+ * any information that needs to be recorded must be in static cells, since
+ * there's no configuration record.
+ *
+ * There is no return value.
+ */
+
+/*
+ * This function is called during server initialisation when an heavy-weight
+ * process (such as a child) is being initialised. As with the
+ * module initialisation function, any information that needs to be recorded
+ * must be in static cells, since there's no configuration record.
+ *
+ * There is no return value.
+ */
+
+/*
+ * This function gets called to create a per-directory configuration
+ * record. This will be called for the "default" server environment, and for
+ * each directory for which the parser finds any of our directives applicable.
+ * If a directory doesn't have any of our directives involved (i.e., they
+ * aren't in the .htaccess file, or a <Location>, <Directory>, or related
+ * block), this routine will *not* be called - the configuration for the
+ * closest ancestor is used.
+ *
+ * The return value is a pointer to the created module-specific
+ * structure.
+ */
+static void *x_create_dir_config(apr_pool_t *p, char *dirspec)
+{
+ x_cfg *cfg;
+ char *dname = dirspec;
+
+ /*
+ * Allocate the space for our record from the pool supplied.
+ */
+ cfg = (x_cfg *) apr_pcalloc(p, sizeof(x_cfg));
+ /*
+ * Now fill in the defaults. If there are any `parent' configuration
+ * records, they'll get merged as part of a separate callback.
+ */
+ cfg->local = 0;
+ cfg->congenital = 0;
+ cfg->cmode = CONFIG_MODE_DIRECTORY;
+ /*
+ * Finally, add our trace to the callback list.
+ */
+ dname = (dname != NULL) ? dname : "";
+ cfg->loc = apr_pstrcat(p, "DIR(", dname, ")", NULL);
+ trace_add(NULL, NULL, cfg, "x_create_dir_config()");
+ return (void *) cfg;
+}
+
+/*
+ * This function gets called to merge two per-directory configuration
+ * records. This is typically done to cope with things like .htaccess files
+ * or <Location> directives for directories that are beneath one for which a
+ * configuration record was already created. The routine has the
+ * responsibility of creating a new record and merging the contents of the
+ * other two into it appropriately. If the module doesn't declare a merge
+ * routine, the record for the closest ancestor location (that has one) is
+ * used exclusively.
+ *
+ * The routine MUST NOT modify any of its arguments!
+ *
+ * The return value is a pointer to the created module-specific structure
+ * containing the merged values.
+ */
+static void *x_merge_dir_config(apr_pool_t *p, void *parent_conf,
+ void *newloc_conf)
+{
+
+ x_cfg *merged_config = (x_cfg *) apr_pcalloc(p, sizeof(x_cfg));
+ x_cfg *pconf = (x_cfg *) parent_conf;
+ x_cfg *nconf = (x_cfg *) newloc_conf;
+ char *note;
+
+ /*
+ * Some things get copied directly from the more-specific record, rather
+ * than getting merged.
+ */
+ merged_config->local = nconf->local;
+ merged_config->loc = apr_pstrdup(p, nconf->loc);
+ /*
+ * Others, like the setting of the `congenital' flag, get ORed in. The
+ * setting of that particular flag, for instance, is TRUE if it was ever
+ * true anywhere in the upstream configuration.
+ */
+ merged_config->congenital = (pconf->congenital | pconf->local);
+ /*
+ * If we're merging records for two different types of environment (server
+ * and directory), mark the new record appropriately. Otherwise, inherit
+ * the current value.
+ */
+ merged_config->cmode =
+ (pconf->cmode == nconf->cmode) ? pconf->cmode : CONFIG_MODE_COMBO;
+ /*
+ * Now just record our being called in the trace list. Include the
+ * locations we were asked to merge.
+ */
+ note = apr_pstrcat(p, "x_merge_dir_config(\"", pconf->loc, "\",\"",
+ nconf->loc, "\")", NULL);
+ trace_add(NULL, NULL, merged_config, note);
+ return (void *) merged_config;
+}
+
+/*
+ * This function gets called to create a per-server configuration
+ * record. It will always be called for the "default" server.
+ *
+ * The return value is a pointer to the created module-specific
+ * structure.
+ */
+static void *x_create_server_config(apr_pool_t *p, server_rec *s)
+{
+
+ x_cfg *cfg;
+ char *sname = s->server_hostname;
+
+ /*
+     * As with the x_create_dir_config() routine, we allocate and fill
+ * in an empty record.
+ */
+ cfg = (x_cfg *) apr_pcalloc(p, sizeof(x_cfg));
+ cfg->local = 0;
+ cfg->congenital = 0;
+ cfg->cmode = CONFIG_MODE_SERVER;
+ /*
+ * Note that we were called in the trace list.
+ */
+ sname = (sname != NULL) ? sname : "";
+ cfg->loc = apr_pstrcat(p, "SVR(", sname, ")", NULL);
+ trace_add(s, NULL, cfg, "x_create_server_config()");
+ return (void *) cfg;
+}
+
+/*
+ * This function gets called to merge two per-server configuration
+ * records. This is typically done to cope with things like virtual hosts and
+ * the default server configuration.  The routine has the responsibility of
+ * creating a new record and merging the contents of the other two into it
+ * appropriately. If the module doesn't declare a merge routine, the more
+ * specific existing record is used exclusively.
+ *
+ * The routine MUST NOT modify any of its arguments!
+ *
+ * The return value is a pointer to the created module-specific structure
+ * containing the merged values.
+ */
+static void *x_merge_server_config(apr_pool_t *p, void *server1_conf,
+ void *server2_conf)
+{
+
+ x_cfg *merged_config = (x_cfg *) apr_pcalloc(p, sizeof(x_cfg));
+ x_cfg *s1conf = (x_cfg *) server1_conf;
+ x_cfg *s2conf = (x_cfg *) server2_conf;
+ char *note;
+
+ /*
+ * Our inheritance rules are our own, and part of our module's semantics.
+ * Basically, just note whence we came.
+ */
+ merged_config->cmode =
+ (s1conf->cmode == s2conf->cmode) ? s1conf->cmode : CONFIG_MODE_COMBO;
+ merged_config->local = s2conf->local;
+ merged_config->congenital = (s1conf->congenital | s1conf->local);
+ merged_config->loc = apr_pstrdup(p, s2conf->loc);
+ /*
+ * Trace our call, including what we were asked to merge.
+ */
+ note = apr_pstrcat(p, "x_merge_server_config(\"", s1conf->loc, "\",\"",
+ s2conf->loc, "\")", NULL);
+ trace_add(NULL, NULL, merged_config, note);
+ return (void *) merged_config;
+}
+
+/*
+ * This routine is called before the server processes the configuration
+ * files.  The return value is OK, DECLINED, or HTTP_mumble.
+ */
+static int x_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
+ apr_pool_t *ptemp)
+{
+ /*
+ * Log the call and exit.
+ */
+ trace_add(NULL, NULL, NULL, "x_pre_config()");
+
+ return OK;
+}
+
+/*
+ * This routine is called after the server's configuration files have been
+ * read and parsed, once per startup or restart, so the module can finish
+ * any setup that depends on the complete configuration.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, the
+ * server will still call any remaining modules with an handler for this
+ * phase.
+ */
+static int x_post_config(apr_pool_t *pconf, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ /*
+ * Log the call and exit.
+ */
+ trace_add(NULL, NULL, NULL, "x_post_config()");
+ return OK;
+}
+
+/*
+ * This routine is called to perform any module-specific log file
+ * openings. It is invoked just before the post_config phase
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, the
+ * server will still call any remaining modules with an handler for this
+ * phase.
+ */
+static int x_open_logs(apr_pool_t *pconf, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ /*
+ * Log the call and exit.
+ */
+ trace_add(s, NULL, NULL, "x_open_logs()");
+ return OK;
+}
+
+/*
+ * All our process-death routine does is add its trace to the log.
+ */
+static apr_status_t x_child_exit(void *data)
+{
+ char *note;
+ server_rec *s = data;
+ char *sname = s->server_hostname;
+
+ /*
+ * The arbitrary text we add to our trace entry indicates for which server
+ * we're being called.
+ */
+ sname = (sname != NULL) ? sname : "";
+ note = apr_pstrcat(s->process->pool, "x_child_exit(", sname, ")", NULL);
+ trace_add(s, NULL, NULL, note);
+ return APR_SUCCESS;
+}
+
+/*
+ * All our process initialiser does is add its trace to the log.
+ */
+static void x_child_init(apr_pool_t *p, server_rec *s)
+{
+ char *note;
+ char *sname = s->server_hostname;
+
+ /*
+ * Set up any module cells that ought to be initialised.
+ */
+ setup_module_cells();
+ /*
+ * The arbitrary text we add to our trace entry indicates for which server
+ * we're being called.
+ */
+ sname = (sname != NULL) ? sname : "";
+ note = apr_pstrcat(p, "x_child_init(", sname, ")", NULL);
+ trace_add(s, NULL, NULL, note);
+
+ apr_pool_cleanup_register(p, s, x_child_exit, x_child_exit);
+}
+
+/*
+ * This routine is called when the server needs the scheme ("http" by
+ * default) used to build self-referential URLs for a request.
+ *
+ * The return value is the scheme string; returning NULL leaves the choice
+ * to other modules and, ultimately, the core.
+ */
+#if 0
+static const char *x_http_method(const request_rec *r)
+{
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call and exit.
+ */
+ trace_add(r->server, NULL, cfg, "x_http_method()");
+ return "foo";
+}
+
+/*
+ * This routine is called when the server needs the default port for the
+ * scheme in use (80 for http), for example while building self-referential
+ * URLs that omit an explicit port.
+ *
+ * The return value is the port number; returning 0 leaves the choice to
+ * other modules and, ultimately, the core.
+ */
+static apr_port_t x_default_port(const request_rec *r)
+{
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call and exit.
+ */
+ trace_add(r->server, NULL, cfg, "x_default_port()");
+ return 80;
+}
+#endif /*0*/
+
+/*
+ * This routine is called during the insert_filter phase, just before the
+ * content handler runs, so the module can add its own request-level input
+ * or output filters.
+ *
+ * There is no return value.
+ */
+static void x_insert_filter(request_rec *r)
+{
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call and exit.
+ */
+ trace_add(r->server, NULL, cfg, "x_insert_filter()");
+}
+
+/*
+ * This routine is called right after the request has been read, before the
+ * normal request-processing phases, so a module (a cache, for instance) can
+ * serve the response immediately and bypass the usual handlers.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble.  If we return OK, the
+ * request is considered handled and the remaining phases are skipped.
+ */
+static int x_quick_handler(request_rec *r, int lookup_uri)
+{
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call and exit.
+ */
+    trace_add(r->server, NULL, cfg, "x_quick_handler()");
+ return DECLINED;
+}
+
+/*
+ * This routine is called just after the server accepts the connection,
+ * but before it is handed off to a protocol module to be served. The point
+ * of this hook is to allow modules an opportunity to modify the connection
+ * as soon as possible. The core server uses this phase to setup the
+ * connection record based on the type of connection that is being used.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, the
+ * server will still call any remaining modules with an handler for this
+ * phase.
+ */
+static int x_pre_connection(conn_rec *c, void *csd)
+{
+ x_cfg *cfg;
+
+ cfg = our_cconfig(c);
+#if 0
+ /*
+ * Log the call and exit.
+ */
+    trace_add(c->base_server, NULL, cfg, "x_pre_connection()");
+#endif
+ return OK;
+}
+
+/* This routine is used to actually process the connection that was received.
+ * Only protocol modules should implement this hook, as it gives them an
+ * opportunity to replace the standard HTTP processing with processing for
+ * some other protocol. Both echo and POP3 modules are available as
+ * examples.
+ *
+ * The return VALUE is OK, DECLINED, or HTTP_mumble. If we return OK, no
+ * further modules are called for this phase.
+ */
+static int x_process_connection(conn_rec *c)
+{
+ return DECLINED;
+}
+
+/*
+ * This routine is called after the request has been read but before any other
+ * phases have been processed. This allows us to make decisions based upon
+ * the input header fields.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, no
+ * further modules are called for this phase.
+ */
+static int x_post_read_request(request_rec *r)
+{
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * We don't actually *do* anything here, except note the fact that we were
+ * called.
+ */
+ trace_add(r->server, r, cfg, "x_post_read_request()");
+ return DECLINED;
+}
+
+/*
+ * This routine gives our module an opportunity to translate the URI into an
+ * actual filename. If we don't do anything special, the server's default
+ * rules (Alias directives and the like) will continue to be followed.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, no
+ * further modules are called for this phase.
+ */
+static int x_translate_handler(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * We don't actually *do* anything here, except note the fact that we were
+ * called.
+ */
+ trace_add(r->server, r, cfg, "x_translate_handler()");
+ return DECLINED;
+}
+
+/*
+ * this routine gives our module another chance to examine the request
+ * headers and to take special action. This is the first phase whose
+ * hooks' configuration directives can appear inside the <Directory>
+ * and similar sections, because at this stage the URI has been mapped
+ * to the filename.  For example, this phase can be used to block abusive
+ * clients early, before much work has been wasted on them.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK,
+ * the server will still call any remaining modules with an handler
+ * for this phase.
+ */
+static int x_header_parser_handler(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * We don't actually *do* anything here, except note the fact that we were
+ * called.
+ */
+ trace_add(r->server, r, cfg, "header_parser_handler()");
+ return DECLINED;
+}
+
+
+/*
+ * This routine is called to check the authentication information sent with
+ * the request (such as looking up the user in a database and verifying that
+ * the [encrypted] password sent matches the one in the database).
+ *
+ * The return value is OK, DECLINED, or some HTTP_mumble error (typically
+ * HTTP_UNAUTHORIZED). If we return OK, no other modules are given a chance
+ * at the request during this phase.
+ */
+static int x_check_user_id(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Don't do anything except log the call.
+ */
+ trace_add(r->server, r, cfg, "x_check_user_id()");
+ return DECLINED;
+}
+
+/*
+ * This routine is called to check to see if the resource being requested
+ * requires authorisation.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, no
+ * other modules are called during this phase.
+ *
+ * If *all* modules return DECLINED, the request is aborted with a server
+ * error.
+ */
+static int x_auth_checker(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call and return OK, or access will be denied (even though we
+ * didn't actually do anything).
+ */
+ trace_add(r->server, r, cfg, "x_auth_checker()");
+ return DECLINED;
+}
+
+/*
+ * This routine is called to check for any module-specific restrictions placed
+ * upon the requested resource. (See the mod_access module for an example.)
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. All modules with an
+ * handler for this phase are called regardless of whether their predecessors
+ * return OK or DECLINED. The first one to return any other status, however,
+ * will abort the sequence (and the request) as usual.
+ */
+static int x_access_checker(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ trace_add(r->server, r, cfg, "x_access_checker()");
+ return DECLINED;
+}
+
+/*
+ * This routine is called to determine and/or set the various document type
+ * information bits, like Content-type (via r->content_type), language, et
+ * cetera.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, no
+ * further modules are given a chance at the request for this phase.
+ */
+static int x_type_checker(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call, but don't do anything else - and report truthfully that
+ * we didn't do anything.
+ */
+ trace_add(r->server, r, cfg, "x_type_checker()");
+ return DECLINED;
+}
+
+/*
+ * This routine is called to perform any module-specific fixing of header
+ * fields, et cetera. It is invoked just before any content-handler.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, the
+ * server will still call any remaining modules with an handler for this
+ * phase.
+ */
+static int x_fixer_upper(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call and exit.
+ */
+ trace_add(r->server, r, cfg, "x_fixer_upper()");
+ return OK;
+}
+
+/*
+ * This routine is called to perform any module-specific logging activities
+ * over and above the normal server things.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, any
+ * remaining modules with an handler for this phase will still be called.
+ */
+static int x_logger(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ trace_add(r->server, r, cfg, "x_logger()");
+ return DECLINED;
+}
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* Which functions are responsible for which hooks in the server. */
+/* */
+/*--------------------------------------------------------------------------*/
+/*
+ * Each function our module provides to handle a particular hook is
+ * specified here. The functions are registered using
+ * ap_hook_foo(name, predecessors, successors, position)
+ * where foo is the name of the hook.
+ *
+ * The args are as follows:
+ * name -> the name of the function to call.
+ * predecessors -> a list of modules whose calls to this hook must be
+ * invoked before this module.
+ * successors -> a list of modules whose calls to this hook must be
+ * invoked after this module.
+ * position -> The relative position of this module. One of
+ * APR_HOOK_FIRST, APR_HOOK_MIDDLE, or APR_HOOK_LAST.
+ * Most modules will use APR_HOOK_MIDDLE. If multiple
+ * modules use the same relative position, Apache will
+ * determine which to call first.
+ * If your module relies on another module to run first,
+ * or another module running after yours, use the
+ * predecessors and/or successors.
+ *
+ * The number in brackets indicates the order in which the routine is called
+ * during request processing. Note that not all routines are necessarily
+ * called (such as if a resource doesn't have access restrictions).
+ * The actual delivery of content to the browser [9] is not handled by
+ * a hook; see the handler declarations below.
+ */
+static void x_register_hooks(apr_pool_t *p)
+{
+ ap_hook_pre_config(x_pre_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_config(x_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_open_logs(x_open_logs, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_child_init(x_child_init, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_handler(x_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_quick_handler(x_quick_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_pre_connection(x_pre_connection, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_process_connection(x_process_connection, NULL, NULL, APR_HOOK_MIDDLE);
+ /* [1] post read_request handling */
+ ap_hook_post_read_request(x_post_read_request, NULL, NULL,
+ APR_HOOK_MIDDLE);
+ ap_hook_log_transaction(x_logger, NULL, NULL, APR_HOOK_MIDDLE);
+#if 0
+ ap_hook_http_method(x_http_method, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_default_port(x_default_port, NULL, NULL, APR_HOOK_MIDDLE);
+#endif
+ ap_hook_translate_name(x_translate_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_header_parser(x_header_parser_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_check_user_id(x_check_user_id, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_fixups(x_fixer_upper, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_type_checker(x_type_checker, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_access_checker(x_access_checker, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_auth_checker(x_auth_checker, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_insert_filter(x_insert_filter, NULL, NULL, APR_HOOK_MIDDLE);
+}
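Every registration in x_register_hooks() passes NULL for the predecessor and successor lists described above. When ordering relative to a particular module matters, those lists are NULL-terminated arrays of module source-file names. A sketch of the idiom (the choice of mod_mime.c is only an example, and this function is not part of the module):

static void x_register_hooks_ordered(apr_pool_t *p)
{
    /* Ask for our type checker to run only after mod_mime has run. */
    static const char * const run_after[] = { "mod_mime.c", NULL };

    ap_hook_type_checker(x_type_checker, run_after, NULL, APR_HOOK_MIDDLE);
}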
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* All of the routines have been declared now. Here's the list of */
+/* directives specific to our module, and information about where they */
+/* may appear and how the command parser should pass them to us for */
+/* processing. Note that care must be taken to ensure that there are NO */
+/* collisions of directive names between modules. */
+/* */
+/*--------------------------------------------------------------------------*/
+/*
+ * List of directives specific to our module.
+ */
+static const command_rec x_cmds[] =
+{
+ AP_INIT_NO_ARGS(
+ "Example", /* directive name */
+ cmd_example, /* config action routine */
+ NULL, /* argument to include in call */
+ OR_OPTIONS, /* where available */
+ "Example directive - no arguments" /* directive description */
+ ),
+ {NULL}
+};
+/*--------------------------------------------------------------------------*/
+/* */
+/* Finally, the list of callback routines and data structures that provide */
+/* the static hooks into our module from the other parts of the server. */
+/* */
+/*--------------------------------------------------------------------------*/
+/*
+ * Module definition for configuration. If a particular callback is not
+ * needed, replace its routine name below with the word NULL.
+ */
+module AP_MODULE_DECLARE_DATA example_module =
+{
+ STANDARD20_MODULE_STUFF,
+ x_create_dir_config, /* per-directory config creator */
+ x_merge_dir_config, /* dir config merger */
+ x_create_server_config, /* server config creator */
+ x_merge_server_config, /* server config merger */
+ x_cmds, /* command table */
+ x_register_hooks, /* set up other request processing hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.c
new file mode 100644
index 00000000..0812b976
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.c
@@ -0,0 +1,1198 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Rules for managing obj->refcount:
+ * refcount should be incremented when an object is placed in the cache. Insertion
+ * of an object into the cache and the refcount increment should happen under
+ * protection of the sconf->lock.
+ *
+ * refcount should be decremented when the object is removed from the cache.
+ * Object should be removed from the cache and the refcount decremented while
+ * under protection of the sconf->lock.
+ *
+ * refcount should be incremented when an object is retrieved from the cache
+ * by a worker thread. The retrieval/find operation and refcount increment
+ * should occur under protection of the sconf->lock
+ *
+ * refcount can be atomically decremented w/o protection of the sconf->lock
+ * by worker threads.
+ *
+ * Any object whose refcount drops to 0 should be freed/cleaned up. A refcount
+ * of 0 means the object is not in the cache and no worker threads are accessing
+ * it.
+ */
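
As a minimal sketch of the refcount rules above (the two helpers here are hypothetical, but cache_find(), cache_insert(), cache_remove() and cleanup_cache_object() are the routines defined later in this file): a lookup takes a reference while holding sconf->lock, and the release may drop it without the lock, freeing the object on the last reference.

/* Illustrative only -- not part of the module. */
static cache_object_t *example_checkout(const char *key)
{
    cache_object_t *obj;

    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }
    obj = cache_find(sconf->cache_cache, key);
    if (obj) {
        apr_atomic_inc(&obj->refcount);  /* reference held by this thread */
    }
    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }
    return obj;
}

static void example_checkin(cache_object_t *obj)
{
    /* The decrement needs no lock; the last reference cleans up. */
    if (!apr_atomic_dec(&obj->refcount)) {
        cleanup_cache_object(obj);
    }
}
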
+#define CORE_PRIVATE
+#include "mod_cache.h"
+#include "cache_pqueue.h"
+#include "cache_cache.h"
+#include "ap_provider.h"
+#include "ap_mpm.h"
+#include "apr_thread_mutex.h"
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#if !APR_HAS_THREADS
+#error This module does not currently compile unless you have a thread-capable APR. Sorry!
+#endif
+
+module AP_MODULE_DECLARE_DATA mem_cache_module;
+
+typedef enum {
+ CACHE_TYPE_FILE = 1,
+ CACHE_TYPE_HEAP,
+ CACHE_TYPE_MMAP
+} cache_type_e;
+
+typedef struct {
+ char* hdr;
+ char* val;
+} cache_header_tbl_t;
+
+typedef struct mem_cache_object {
+ cache_type_e type;
+ apr_ssize_t num_header_out;
+ apr_ssize_t num_err_header_out;
+ apr_ssize_t num_subprocess_env;
+ apr_ssize_t num_notes;
+ apr_ssize_t num_req_hdrs;
+ cache_header_tbl_t *header_out;
+ cache_header_tbl_t *err_header_out;
+ cache_header_tbl_t *subprocess_env;
+ cache_header_tbl_t *notes;
+ cache_header_tbl_t *req_hdrs; /* for Vary negotiation */
+ apr_size_t m_len;
+ void *m;
+ apr_os_file_t fd;
+ apr_int32_t flags; /* File open flags */
+ long priority; /**< the priority of this entry */
+ long total_refs; /**< total number of references this entry has had */
+
+ apr_uint32_t pos; /**< the position of this entry in the cache */
+
+} mem_cache_object_t;
+
+typedef struct {
+ apr_thread_mutex_t *lock;
+ cache_cache_t *cache_cache;
+
+ /* Fields set by config directives */
+ apr_size_t min_cache_object_size; /* in bytes */
+ apr_size_t max_cache_object_size; /* in bytes */
+ apr_size_t max_cache_size; /* in bytes */
+ apr_size_t max_object_cnt;
+ cache_pqueue_set_priority cache_remove_algorithm;
+
+ /* maximum amount of data to buffer on a streamed response where
+ * we haven't yet seen EOS */
+ apr_off_t max_streaming_buffer_size;
+} mem_cache_conf;
+static mem_cache_conf *sconf;
+
+#define DEFAULT_MAX_CACHE_SIZE 100*1024
+#define DEFAULT_MIN_CACHE_OBJECT_SIZE 0
+#define DEFAULT_MAX_CACHE_OBJECT_SIZE 10000
+#define DEFAULT_MAX_OBJECT_CNT 1009
+#define DEFAULT_MAX_STREAMING_BUFFER_SIZE 100000
+#define CACHEFILE_LEN 20
+
+/* Forward declarations */
+static int remove_entity(cache_handle_t *h);
+static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info *i);
+static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b);
+static apr_status_t recall_headers(cache_handle_t *h, request_rec *r);
+static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
+
+static void cleanup_cache_object(cache_object_t *obj);
+
+static long memcache_get_priority(void*a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+
+ return mobj->priority;
+}
+
+static void memcache_inc_frequency(void*a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+
+ mobj->total_refs++;
+ mobj->priority = 0;
+}
+
+static void memcache_set_pos(void *a, apr_ssize_t pos)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+
+ apr_atomic_set(&mobj->pos, pos);
+}
+static apr_ssize_t memcache_get_pos(void *a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+
+ return apr_atomic_read(&mobj->pos);
+}
+
+static apr_size_t memcache_cache_get_size(void*a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+ return mobj->m_len;
+}
+/** callback to get the key of an item */
+static const char* memcache_cache_get_key(void*a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ return obj->key;
+}
+/**
+ * memcache_cache_free()
+ * memcache_cache_free is a callback that is only invoked by a thread
+ * running in cache_insert(). cache_insert() runs under protection
+ * of sconf->lock. By the time this function has been entered, the cache_object
+ * has been ejected from the cache. Decrement the refcount and, if the refcount
+ * drops to 0, clean up the cache object.
+ */
+static void memcache_cache_free(void*a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+
+ /* Decrement the refcount to account for the object being ejected
+ * from the cache. If the refcount is 0, free the object.
+ */
+ if (!apr_atomic_dec(&obj->refcount)) {
+ cleanup_cache_object(obj);
+ }
+}
+/*
+ * functions return a 'negative' score since priority queues
+ * dequeue the object with the highest value first
+ */
+static long memcache_lru_algorithm(long queue_clock, void *a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+ if (mobj->priority == 0)
+ mobj->priority = queue_clock - mobj->total_refs;
+
+ /*
+ * a 'proper' LRU function would just be
+ * mobj->priority = mobj->total_refs;
+ */
+ return mobj->priority;
+}
+
+static long memcache_gdsf_algorithm(long queue_clock, void *a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+
+ if (mobj->priority == 0)
+ mobj->priority = queue_clock -
+ (long)(mobj->total_refs*1000 / mobj->m_len);
+
+ return mobj->priority;
+}
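
A quick worked example of the scoring above, with illustrative numbers only: at queue_clock = 100, an object referenced 5 times with m_len = 1000 bytes gets a GDSF priority of 100 - (5*1000/1000) = 95, while an equally popular 10000-byte object gets 100 - (5*1000/10000) = 100 after integer truncation. Since the priority queue dequeues the highest value first, the larger object is evicted first, which is the small-and-popular bias GDSF is meant to provide; the LRU variant computes queue_clock - total_refs and ignores object size entirely.
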
+
+static void cleanup_cache_object(cache_object_t *obj)
+{
+ mem_cache_object_t *mobj = obj->vobj;
+
+ /* TODO:
+ * We desperately need a more efficient way of allocating objects. We're
+ * making way too many malloc calls to create a fully populated
+ * cache object...
+ */
+
+ /* Cleanup the cache_object_t */
+ if (obj->key) {
+ free(obj->key);
+ }
+ if (obj->info.content_type) {
+ free(obj->info.content_type);
+ }
+ if (obj->info.etag) {
+ free(obj->info.etag);
+ }
+ if (obj->info.lastmods) {
+ free(obj->info.lastmods);
+ }
+ if (obj->info.filename) {
+ free(obj->info.filename);
+ }
+
+ free(obj);
+
+ /* Cleanup the mem_cache_object_t */
+ if (mobj) {
+ if (mobj->type == CACHE_TYPE_HEAP && mobj->m) {
+ free(mobj->m);
+ }
+ if (mobj->type == CACHE_TYPE_FILE && mobj->fd) {
+#ifdef WIN32
+ CloseHandle(mobj->fd);
+#else
+ close(mobj->fd);
+#endif
+ }
+ if (mobj->header_out) {
+ if (mobj->header_out[0].hdr)
+ free(mobj->header_out[0].hdr);
+ free(mobj->header_out);
+ }
+ if (mobj->err_header_out) {
+ if (mobj->err_header_out[0].hdr)
+ free(mobj->err_header_out[0].hdr);
+ free(mobj->err_header_out);
+ }
+ if (mobj->subprocess_env) {
+ if (mobj->subprocess_env[0].hdr)
+ free(mobj->subprocess_env[0].hdr);
+ free(mobj->subprocess_env);
+ }
+ if (mobj->notes) {
+ if (mobj->notes[0].hdr)
+ free(mobj->notes[0].hdr);
+ free(mobj->notes);
+ }
+ if (mobj->req_hdrs) {
+ if (mobj->req_hdrs[0].hdr)
+ free(mobj->req_hdrs[0].hdr);
+ free(mobj->req_hdrs);
+ }
+ free(mobj);
+ }
+}
+static apr_status_t decrement_refcount(void *arg)
+{
+ cache_object_t *obj = (cache_object_t *) arg;
+
+ /* If obj->complete is not set, the cache update failed and the
+ * object needs to be removed from the cache then cleaned up.
+ * The garbage collector may have ejected the object from the
+ * cache already, so make sure it is really still in the cache
+ * before attempting to remove it.
+ */
+ if (!obj->complete) {
+ cache_object_t *tobj = NULL;
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+ tobj = cache_find(sconf->cache_cache, obj->key);
+ if (tobj == obj) {
+ cache_remove(sconf->cache_cache, obj);
+ apr_atomic_dec(&obj->refcount);
+ }
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+ }
+
+ /* If the refcount drops to 0, cleanup the cache object */
+ if (!apr_atomic_dec(&obj->refcount)) {
+ cleanup_cache_object(obj);
+ }
+ return APR_SUCCESS;
+}
+static apr_status_t cleanup_cache_mem(void *sconfv)
+{
+ cache_object_t *obj;
+ mem_cache_conf *co = (mem_cache_conf*) sconfv;
+
+ if (!co) {
+ return APR_SUCCESS;
+ }
+ if (!co->cache_cache) {
+ return APR_SUCCESS;
+ }
+
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+ obj = cache_pop(co->cache_cache);
+ while (obj) {
+ /* Iterate over the cache and clean up each unreferenced entry */
+ if (!apr_atomic_dec(&obj->refcount)) {
+ cleanup_cache_object(obj);
+ }
+ obj = cache_pop(co->cache_cache);
+ }
+
+ /* Cache is empty, free the cache table */
+ cache_free(co->cache_cache);
+
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+ return APR_SUCCESS;
+}
+/*
+ * TODO: enable directives to be overridden in various containers
+ */
+static void *create_cache_config(apr_pool_t *p, server_rec *s)
+{
+ sconf = apr_pcalloc(p, sizeof(mem_cache_conf));
+
+ sconf->min_cache_object_size = DEFAULT_MIN_CACHE_OBJECT_SIZE;
+ sconf->max_cache_object_size = DEFAULT_MAX_CACHE_OBJECT_SIZE;
+ /* Number of objects in the cache */
+ sconf->max_object_cnt = DEFAULT_MAX_OBJECT_CNT;
+ /* Size of the cache in bytes */
+ sconf->max_cache_size = DEFAULT_MAX_CACHE_SIZE;
+ sconf->cache_cache = NULL;
+ sconf->cache_remove_algorithm = memcache_gdsf_algorithm;
+ sconf->max_streaming_buffer_size = DEFAULT_MAX_STREAMING_BUFFER_SIZE;
+
+ return sconf;
+}
+
+static int create_entity(cache_handle_t *h, cache_type_e type_e,
+ request_rec *r, const char *key, apr_off_t len)
+{
+ cache_object_t *obj, *tmp_obj;
+ mem_cache_object_t *mobj;
+ apr_size_t key_len;
+
+ if (len == -1) {
+ /* Caching a streaming response. Assume the response is
+ * less than or equal to max_streaming_buffer_size. We will
+ * correct all the cache size counters in store_body once
+ * we know exactly how much we are caching.
+ */
+ len = sconf->max_streaming_buffer_size;
+ }
+
+ /* Note: cache_insert() will automatically garbage collect
+ * objects from the cache if the max_cache_size threshold is
+ * exceeded. This means mod_mem_cache does not need to implement
+ * max_cache_size checks.
+ */
+ if (len < sconf->min_cache_object_size ||
+ len > sconf->max_cache_object_size) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "mem_cache: URL %s failed the size check and will not be cached.",
+ key);
+ return DECLINED;
+ }
+
+ if (type_e == CACHE_TYPE_FILE) {
+ /* CACHE_TYPE_FILE is only valid for local content handled by the
+ * default handler. Need a better way to check if the file is
+ * local or not.
+ */
+ if (!r->filename) {
+ return DECLINED;
+ }
+ }
+
+ /* Allocate and initialize cache_object_t */
+ obj = calloc(1, sizeof(*obj));
+ if (!obj) {
+ return DECLINED;
+ }
+ key_len = strlen(key) + 1;
+ obj->key = malloc(key_len);
+ if (!obj->key) {
+ cleanup_cache_object(obj);
+ return DECLINED;
+ }
+ memcpy(obj->key, key, key_len);
+ /* Safe cast: We tested < sconf->max_cache_object_size above */
+ obj->info.len = (apr_size_t)len;
+
+ /* Allocate and init mem_cache_object_t */
+ mobj = calloc(1, sizeof(*mobj));
+ if (!mobj) {
+ cleanup_cache_object(obj);
+ return DECLINED;
+ }
+
+ /* Finish initing the cache object */
+ apr_atomic_set(&obj->refcount, 1);
+ mobj->total_refs = 1;
+ obj->complete = 0;
+ obj->vobj = mobj;
+ /* Safe cast: We tested < sconf->max_cache_object_size above */
+ mobj->m_len = (apr_size_t)len;
+ mobj->type = type_e;
+
+ /* Place the cache_object_t into the hash table.
+ * Note: Perhaps we should wait to put the object in the
+ * hash table when the object is complete? I add the object here to
+ * avoid multiple threads attempting to cache the same content only
+ * to discover at the very end that only one of them will succeed.
+ * Furthermore, adding the cache object to the table at the end could
+ * open up a subtle but easy to exploit DoS hole: someone could request
+ * a very large file with multiple requests. Better to detect this here
+ * rather than after the cache object has been completely built and
+ * initialized...
+ * XXX Need a way to insert into the cache w/o such coarse grained locking
+ */
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+ tmp_obj = (cache_object_t *) cache_find(sconf->cache_cache, key);
+
+ if (!tmp_obj) {
+ cache_insert(sconf->cache_cache, obj);
+ /* Add a refcount to account for the reference by the
+ * hashtable in the cache. Refcount should be 2 now, one
+ * for this thread, and one for the cache.
+ */
+ apr_atomic_inc(&obj->refcount);
+ }
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+
+ if (tmp_obj) {
+ /* This thread collided with another thread loading the same object
+ * into the cache at the same time. Defer to the other thread which
+ * is further along.
+ */
+ cleanup_cache_object(obj);
+ return DECLINED;
+ }
+
+ apr_pool_cleanup_register(r->pool, obj, decrement_refcount,
+ apr_pool_cleanup_null);
+
+ /* Populate the cache handle */
+ h->cache_obj = obj;
+
+ return OK;
+}
+
+static int create_mem_entity(cache_handle_t *h, request_rec *r,
+ const char *key, apr_off_t len)
+{
+ return create_entity(h, CACHE_TYPE_HEAP, r, key, len);
+}
+
+static int create_fd_entity(cache_handle_t *h, request_rec *r,
+ const char *key, apr_off_t len)
+{
+ return create_entity(h, CACHE_TYPE_FILE, r, key, len);
+}
+
+static int open_entity(cache_handle_t *h, request_rec *r, const char *key)
+{
+ cache_object_t *obj;
+
+ /* Look up entity keyed to 'url' */
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+ obj = (cache_object_t *) cache_find(sconf->cache_cache, key);
+ if (obj) {
+ if (obj->complete) {
+ request_rec *rmain=r, *rtmp;
+ apr_atomic_inc(&obj->refcount);
+ /* cache is worried about overall counts, not 'open' ones */
+ cache_update(sconf->cache_cache, obj);
+
+ /* If this is a subrequest, register the cleanup against
+ * the main request. This will prevent the cache object
+ * from being cleaned up from under the request after the
+ * subrequest is destroyed.
+ */
+ rtmp = r;
+ while (rtmp) {
+ rmain = rtmp;
+ rtmp = rmain->main;
+ }
+ apr_pool_cleanup_register(rmain->pool, obj, decrement_refcount,
+ apr_pool_cleanup_null);
+ }
+ else {
+ obj = NULL;
+ }
+ }
+
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+
+ if (!obj) {
+ return DECLINED;
+ }
+
+ /* Initialize the cache_handle */
+ h->cache_obj = obj;
+ h->req_hdrs = NULL; /* Pick these up in recall_headers() */
+ return OK;
+}
+
+/* remove_entity()
+ * Notes:
+ * refcount should be at least 1 upon entry to this function to account
+ * for this thread's reference to the object. If the refcount is 1, then
+ * object has been removed from the cache by another thread and this thread
+ * is the last thread accessing the object.
+ */
+static int remove_entity(cache_handle_t *h)
+{
+ cache_object_t *obj = h->cache_obj;
+ cache_object_t *tobj = NULL;
+
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+
+ /* If the entity is still in the cache, remove it and decrement the
+ * refcount. If the entity is not in the cache, do nothing. In both cases
+ * decrement_refcount called by the last thread referencing the object will
+ * trigger the cleanup.
+ */
+ tobj = cache_find(sconf->cache_cache, obj->key);
+ if (tobj == obj) {
+ cache_remove(sconf->cache_cache, obj);
+ apr_atomic_dec(&obj->refcount);
+ }
+
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+
+ return OK;
+}
+static apr_status_t serialize_table(cache_header_tbl_t **obj,
+ apr_ssize_t *nelts,
+ apr_table_t *table)
+{
+ const apr_array_header_t *elts_arr = apr_table_elts(table);
+ apr_table_entry_t *elts = (apr_table_entry_t *) elts_arr->elts;
+ apr_ssize_t i;
+ apr_size_t len = 0;
+ apr_size_t idx = 0;
+ char *buf;
+
+ *nelts = elts_arr->nelts;
+ if (*nelts == 0 ) {
+ *obj=NULL;
+ return APR_SUCCESS;
+ }
+ *obj = malloc(sizeof(cache_header_tbl_t) * elts_arr->nelts);
+ if (NULL == *obj) {
+ return APR_ENOMEM;
+ }
+ for (i = 0; i < elts_arr->nelts; ++i) {
+ len += strlen(elts[i].key);
+ len += strlen(elts[i].val);
+ len += 2; /* Extra space for NULL string terminator for key and val */
+ }
+
+ /* Transfer the headers into a contiguous memory block */
+ buf = malloc(len);
+ if (!buf) {
+ *obj = NULL;
+ return APR_ENOMEM;
+ }
+
+ for (i = 0; i < *nelts; ++i) {
+ (*obj)[i].hdr = &buf[idx];
+ len = strlen(elts[i].key) + 1; /* Include NULL terminator */
+ memcpy(&buf[idx], elts[i].key, len);
+ idx+=len;
+
+ (*obj)[i].val = &buf[idx];
+ len = strlen(elts[i].val) + 1;
+ memcpy(&buf[idx], elts[i].val, len);
+ idx+=len;
+ }
+ return APR_SUCCESS;
+}
+static int unserialize_table( cache_header_tbl_t *ctbl,
+ int num_headers,
+ apr_table_t *t )
+{
+ int i;
+
+ for (i = 0; i < num_headers; ++i) {
+ apr_table_addn(t, ctbl[i].hdr, ctbl[i].val);
+ }
+
+ return APR_SUCCESS;
+}
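
For clarity, a minimal round-trip sketch of the two helpers above (the variable names are hypothetical): a request's headers are flattened into one malloc'd block and later re-expanded into a pool-allocated apr_table_t.

/* Sketch only: serialize r->headers_in, then rebuild a table from it. */
cache_header_tbl_t *hdrs = NULL;
apr_ssize_t nhdrs = 0;

if (serialize_table(&hdrs, &nhdrs, r->headers_in) == APR_SUCCESS && hdrs) {
    apr_table_t *copy = apr_table_make(r->pool, (int)nhdrs);

    unserialize_table(hdrs, (int)nhdrs, copy);
    /* 'copy' now holds the same key/value pairs; the caller must later
     * free(hdrs[0].hdr) and free(hdrs), as cleanup_cache_object() does. */
}
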
+/* Define request processing hook handlers */
+/* remove_url()
+ * Notes:
+ */
+static int remove_url(const char *key)
+{
+ cache_object_t *obj;
+ int cleanup = 0;
+
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+
+ obj = cache_find(sconf->cache_cache, key);
+ if (obj) {
+ cache_remove(sconf->cache_cache, obj);
+ /* For performance, cleanup cache object after releasing the lock */
+ cleanup = !apr_atomic_dec(&obj->refcount);
+ }
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+
+ if (cleanup) {
+ cleanup_cache_object(obj);
+ }
+
+ return OK;
+}
+
+static apr_status_t recall_headers(cache_handle_t *h, request_rec *r)
+{
+ int rc;
+ mem_cache_object_t *mobj = (mem_cache_object_t*) h->cache_obj->vobj;
+
+ h->req_hdrs = apr_table_make(r->pool, mobj->num_req_hdrs);
+ h->resp_hdrs = apr_table_make(r->pool, mobj->num_header_out);
+ h->resp_err_hdrs = apr_table_make(r->pool, mobj->num_err_header_out);
+ /* ### FIXME: These two items should not be saved. */
+ r->subprocess_env = apr_table_make(r->pool, mobj->num_subprocess_env);
+ r->notes = apr_table_make(r->pool, mobj->num_notes);
+
+ rc = unserialize_table(mobj->req_hdrs,
+ mobj->num_req_hdrs,
+ h->req_hdrs);
+ rc = unserialize_table( mobj->header_out,
+ mobj->num_header_out,
+ h->resp_hdrs);
+ rc = unserialize_table( mobj->err_header_out,
+ mobj->num_err_header_out,
+ h->resp_err_hdrs);
+ rc = unserialize_table( mobj->subprocess_env,
+ mobj->num_subprocess_env,
+ r->subprocess_env);
+ rc = unserialize_table( mobj->notes,
+ mobj->num_notes,
+ r->notes);
+
+ /* Content-Type: header may not be set if content is local since
+ * CACHE_IN runs before header filters....
+ */
+ h->content_type = h->cache_obj->info.content_type;
+ h->status = h->cache_obj->info.status;
+
+ return rc;
+}
+
+static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+ mem_cache_object_t *mobj = (mem_cache_object_t*) h->cache_obj->vobj;
+
+ if (mobj->type == CACHE_TYPE_FILE) {
+ /* CACHE_TYPE_FILE */
+ apr_file_t *file;
+ apr_os_file_put(&file, &mobj->fd, mobj->flags, p);
+ b = apr_bucket_file_create(file, 0, mobj->m_len, p, bb->bucket_alloc);
+ }
+ else {
+ /* CACHE_TYPE_HEAP */
+ b = apr_bucket_immortal_create(mobj->m, mobj->m_len, bb->bucket_alloc);
+ }
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_eos_create(bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+
+ return APR_SUCCESS;
+}
+
+
+static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info *info)
+{
+ cache_object_t *obj = h->cache_obj;
+ mem_cache_object_t *mobj = (mem_cache_object_t*) obj->vobj;
+ int rc;
+
+ /*
+ * The cache needs to keep track of the following information:
+ * - Date, LastMod, Version, ReqTime, RespTime, ContentLength
+ * - The original request headers (for Vary)
+ * - The original response headers (for returning with a cached response)
+ * - The body of the message
+ */
+ rc = serialize_table(&mobj->req_hdrs,
+ &mobj->num_req_hdrs,
+ r->headers_in);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
+
+ /* Precompute how much storage we need to hold the headers */
+ rc = serialize_table(&mobj->header_out,
+ &mobj->num_header_out,
+ ap_cache_cacheable_hdrs_out(r->pool, r->headers_out,
+ r->server));
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
+ rc = serialize_table(&mobj->err_header_out,
+ &mobj->num_err_header_out,
+ ap_cache_cacheable_hdrs_out(r->pool,
+ r->err_headers_out,
+ r->server));
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
+ rc = serialize_table(&mobj->subprocess_env,
+ &mobj->num_subprocess_env,
+ r->subprocess_env );
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
+
+ rc = serialize_table(&mobj->notes, &mobj->num_notes, r->notes);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
+
+ /* Init the info struct */
+ obj->info.status = info->status;
+ if (info->date) {
+ obj->info.date = info->date;
+ }
+ if (info->lastmod) {
+ obj->info.lastmod = info->lastmod;
+ }
+ if (info->response_time) {
+ obj->info.response_time = info->response_time;
+ }
+ if (info->request_time) {
+ obj->info.request_time = info->request_time;
+ }
+ if (info->expire) {
+ obj->info.expire = info->expire;
+ }
+ if (info->content_type) {
+ apr_size_t len = strlen(info->content_type) + 1;
+ obj->info.content_type = (char*) malloc(len);
+ if (!obj->info.content_type) {
+ return APR_ENOMEM;
+ }
+ memcpy(obj->info.content_type, info->content_type, len);
+ }
+ if (info->etag) {
+ apr_size_t len = strlen(info->etag) + 1;
+ obj->info.etag = (char*) malloc(len);
+ if (!obj->info.etag) {
+ return APR_ENOMEM;
+ }
+ memcpy(obj->info.etag, info->etag, len);
+ }
+ if (info->lastmods) {
+ apr_size_t len = strlen(info->lastmods) + 1;
+ obj->info.lastmods = (char*) malloc(len);
+ if (!obj->info.lastmods) {
+ return APR_ENOMEM;
+ }
+ memcpy(obj->info.lastmods, info->lastmods, len);
+ }
+ if ( info->filename) {
+ apr_size_t len = strlen(info->filename) + 1;
+ obj->info.filename = (char*) malloc(len);
+ if (!obj->info.filename ) {
+ return APR_ENOMEM;
+ }
+ memcpy(obj->info.filename, info->filename, len);
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b)
+{
+ apr_status_t rv;
+ cache_object_t *obj = h->cache_obj;
+ cache_object_t *tobj = NULL;
+ mem_cache_object_t *mobj = (mem_cache_object_t*) obj->vobj;
+ apr_read_type_e eblock = APR_BLOCK_READ;
+ apr_bucket *e;
+ char *cur;
+ int eos = 0;
+
+ if (mobj->type == CACHE_TYPE_FILE) {
+ apr_file_t *file = NULL;
+ int fd = 0;
+ int other = 0;
+
+ /* We can cache an open file descriptor if:
+ * - the brigade contains one and only one file_bucket &&
+ * - the brigade is complete &&
+ * - the file_bucket is the last data bucket in the brigade
+ */
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (APR_BUCKET_IS_EOS(e)) {
+ eos = 1;
+ }
+ else if (APR_BUCKET_IS_FILE(e)) {
+ apr_bucket_file *a = e->data;
+ fd++;
+ file = a->fd;
+ }
+ else {
+ other++;
+ }
+ }
+ if (fd == 1 && !other && eos) {
+ apr_file_t *tmpfile;
+ const char *name;
+ /* Open a new XTHREAD handle to the file */
+ apr_file_name_get(&name, file);
+ mobj->flags = ((APR_SENDFILE_ENABLED & apr_file_flags_get(file))
+ | APR_READ | APR_BINARY | APR_XTHREAD | APR_FILE_NOCLEANUP);
+ rv = apr_file_open(&tmpfile, name, mobj->flags,
+ APR_OS_DEFAULT, r->pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ apr_file_inherit_unset(tmpfile);
+ apr_os_file_get(&(mobj->fd), tmpfile);
+
+ /* Open for business */
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ "mem_cache: Cached file: %s with key: %s", name, obj->key);
+ obj->complete = 1;
+ return APR_SUCCESS;
+ }
+
+ /* Content not suitable for fd caching. Cache in-memory instead. */
+ mobj->type = CACHE_TYPE_HEAP;
+ }
+
+ /*
+ * FD caching is not enabled or the content was not
+ * suitable for fd caching.
+ */
+ if (mobj->m == NULL) {
+ mobj->m = malloc(mobj->m_len);
+ if (mobj->m == NULL) {
+ return APR_ENOMEM;
+ }
+ obj->count = 0;
+ }
+ cur = (char*) mobj->m + obj->count;
+
+ /* Iterate across the brigade and populate the cache storage */
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ const char *s;
+ apr_size_t len;
+
+ if (APR_BUCKET_IS_EOS(e)) {
+ if (mobj->m_len > obj->count) {
+ /* Caching a streamed response. Reallocate a buffer of the
+ * correct size and copy the streamed response into that
+ * buffer */
+ char *buf = malloc(obj->count);
+ if (!buf) {
+ return APR_ENOMEM;
+ }
+ memcpy(buf, mobj->m, obj->count);
+ free(mobj->m);
+ mobj->m = buf;
+
+ /* Now comes the crufty part... there is no way to tell the
+ * cache that the size of the object has changed. We need
+ * to remove the object, update the size and re-add the
+ * object, all under protection of the lock.
+ */
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+ /* Has the object been ejected from the cache?
+ */
+ tobj = (cache_object_t *) cache_find(sconf->cache_cache, obj->key);
+ if (tobj == obj) {
+ /* Object is still in the cache, remove it, update the len field then
+ * replace it under protection of sconf->lock.
+ */
+ cache_remove(sconf->cache_cache, obj);
+ /* For illustration, cache no longer has reference to the object
+ * so decrement the refcount
+ * apr_atomic_dec(&obj->refcount);
+ */
+ mobj->m_len = obj->count;
+
+ cache_insert(sconf->cache_cache, obj);
+ /* For illustration, cache now has reference to the object, so
+ * increment the refcount
+ * apr_atomic_inc(&obj->refcount);
+ */
+ }
+ else if (tobj) {
+ /* Different object with the same key found in the cache. Doing nothing
+ * here will cause the object refcount to drop to 0 in decrement_refcount
+ * and the object will be cleaned up.
+ */
+
+ } else {
+ /* Object has been ejected from the cache, add it back to the cache */
+ mobj->m_len = obj->count;
+ cache_insert(sconf->cache_cache, obj);
+ apr_atomic_inc(&obj->refcount);
+ }
+
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+ }
+ /* Open for business */
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ "mem_cache: Cached url: %s", obj->key);
+ obj->complete = 1;
+ break;
+ }
+ rv = apr_bucket_read(e, &s, &len, eblock);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ if (len) {
+ /* Check for buffer overflow */
+ if ((obj->count + len) > mobj->m_len) {
+ return APR_ENOMEM;
+ }
+ else {
+ memcpy(cur, s, len);
+ cur+=len;
+ obj->count+=len;
+ }
+ }
+ /* This should not fail, but if it does, we are in BIG trouble
+ * because we just stomped all over the heap.
+ */
+ AP_DEBUG_ASSERT(obj->count <= mobj->m_len);
+ }
+ return APR_SUCCESS;
+}
+/**
+ * Configuration and start-up
+ */
+static int mem_cache_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ int threaded_mpm;
+
+ /* Sanity check the cache configuration */
+ if (sconf->min_cache_object_size >= sconf->max_cache_object_size) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
+ "MCacheMaxObjectSize must be greater than MCacheMinObjectSize");
+ return DONE;
+ }
+ if (sconf->max_cache_object_size >= sconf->max_cache_size) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
+ "MCacheSize must be greater than MCacheMaxObjectSize");
+ return DONE;
+ }
+ if (sconf->max_streaming_buffer_size > sconf->max_cache_object_size) {
+ /* Issue a notice only if something other than the default config
+ * is being used */
+ if (sconf->max_streaming_buffer_size != DEFAULT_MAX_STREAMING_BUFFER_SIZE &&
+ sconf->max_cache_object_size != DEFAULT_MAX_CACHE_OBJECT_SIZE) {
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "MCacheMaxStreamingBuffer must be less than or equal to MCacheMaxObjectSize. "
+ "Resetting MCacheMaxStreamingBuffer to MCacheMaxObjectSize.");
+ }
+ sconf->max_streaming_buffer_size = sconf->max_cache_object_size;
+ }
+ if (sconf->max_streaming_buffer_size < sconf->min_cache_object_size) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "MCacheMaxStreamingBuffer must be greater than or equal to MCacheMinObjectSize. "
+ "Resetting MCacheMaxStreamingBuffer to MCacheMinObjectSize.");
+ sconf->max_streaming_buffer_size = sconf->min_cache_object_size;
+ }
+ ap_mpm_query(AP_MPMQ_IS_THREADED, &threaded_mpm);
+ if (threaded_mpm) {
+ apr_thread_mutex_create(&sconf->lock, APR_THREAD_MUTEX_DEFAULT, p);
+ }
+
+ sconf->cache_cache = cache_init(sconf->max_object_cnt,
+ sconf->max_cache_size,
+ memcache_get_priority,
+ sconf->cache_remove_algorithm,
+ memcache_get_pos,
+ memcache_set_pos,
+ memcache_inc_frequency,
+ memcache_cache_get_size,
+ memcache_cache_get_key,
+ memcache_cache_free);
+ apr_pool_cleanup_register(p, sconf, cleanup_cache_mem, apr_pool_cleanup_null);
+
+ if (sconf->cache_cache)
+ return OK;
+
+ return -1;
+
+}
+
+static const char
+*set_max_cache_size(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ apr_size_t val;
+
+ if (sscanf(arg, "%" APR_SIZE_T_FMT, &val) != 1) {
+ return "MCacheSize argument must be an integer representing the max cache size in KBytes.";
+ }
+ sconf->max_cache_size = val*1024;
+ return NULL;
+}
+static const char
+*set_min_cache_object_size(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ apr_size_t val;
+
+ if (sscanf(arg, "%" APR_SIZE_T_FMT, &val) != 1) {
+ return "MCacheMinObjectSize value must be an integer (bytes)";
+ }
+ sconf->min_cache_object_size = val;
+ return NULL;
+}
+static const char
+*set_max_cache_object_size(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ apr_size_t val;
+
+ if (sscanf(arg, "%" APR_SIZE_T_FMT, &val) != 1) {
+ return "MCacheMaxObjectSize value must be an integer (bytes)";
+ }
+ sconf->max_cache_object_size = val;
+ return NULL;
+}
+static const char
+*set_max_object_count(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ apr_size_t val;
+
+ if (sscanf(arg, "%" APR_SIZE_T_FMT, &val) != 1) {
+ return "MCacheMaxObjectCount value must be an integer";
+ }
+ sconf->max_object_cnt = val;
+ return NULL;
+}
+
+static const char
+*set_cache_removal_algorithm(cmd_parms *parms, void *name, const char *arg)
+{
+ if (strcasecmp("LRU", arg)) {
+ sconf->cache_remove_algorithm = memcache_lru_algorithm;
+ }
+ else {
+ if (strcasecmp("GDSF", arg)) {
+ sconf->cache_remove_algorithm = memcache_gdsf_algorithm;
+ }
+ else {
+ return "currently implemented algorithms are LRU and GDSF";
+ }
+ }
+ return NULL;
+}
+
+static const char *set_max_streaming_buffer(cmd_parms *parms, void *dummy,
+ const char *arg)
+{
+#if 0
+ char *err;
+ if (apr_strtoff(&sconf->max_streaming_buffer_size, arg, &err, 10) || *err) {
+ return "MCacheMaxStreamingBuffer value must be a number";
+ }
+#else
+ sconf->max_streaming_buffer_size = apr_atoi64(arg);
+#endif
+ return NULL;
+}
+
+static const command_rec cache_cmds[] =
+{
+ AP_INIT_TAKE1("MCacheSize", set_max_cache_size, NULL, RSRC_CONF,
+ "The maximum amount of memory used by the cache in KBytes"),
+ AP_INIT_TAKE1("MCacheMaxObjectCount", set_max_object_count, NULL, RSRC_CONF,
+ "The maximum number of objects allowed to be placed in the cache"),
+ AP_INIT_TAKE1("MCacheMinObjectSize", set_min_cache_object_size, NULL, RSRC_CONF,
+ "The minimum size (in bytes) of an object to be placed in the cache"),
+ AP_INIT_TAKE1("MCacheMaxObjectSize", set_max_cache_object_size, NULL, RSRC_CONF,
+ "The maximum size (in bytes) of an object to be placed in the cache"),
+ AP_INIT_TAKE1("MCacheRemovalAlgorithm", set_cache_removal_algorithm, NULL, RSRC_CONF,
+ "The algorithm used to remove entries from the cache (default: GDSF)"),
+ AP_INIT_TAKE1("MCacheMaxStreamingBuffer", set_max_streaming_buffer, NULL, RSRC_CONF,
+ "Maximum number of bytes of content to buffer for a streamed response"),
+ {NULL}
+};
+
+static const cache_provider cache_mem_provider =
+{
+ &remove_entity,
+ &store_headers,
+ &store_body,
+ &recall_headers,
+ &recall_body,
+ &create_mem_entity,
+ &open_entity,
+ &remove_url,
+};
+
+static const cache_provider cache_fd_provider =
+{
+ &remove_entity,
+ &store_headers,
+ &store_body,
+ &recall_headers,
+ &recall_body,
+ &create_fd_entity,
+ &open_entity,
+ &remove_url,
+};
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(mem_cache_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ /* cache initializer */
+ /* cache_hook_init(cache_mem_init, NULL, NULL, APR_HOOK_MIDDLE); */
+ /*
+ cache_hook_create_entity(create_entity, NULL, NULL, APR_HOOK_MIDDLE);
+ cache_hook_open_entity(open_entity, NULL, NULL, APR_HOOK_MIDDLE);
+ cache_hook_remove_url(remove_url, NULL, NULL, APR_HOOK_MIDDLE);
+ */
+ ap_register_provider(p, CACHE_PROVIDER_GROUP, "mem", "0",
+ &cache_mem_provider);
+ ap_register_provider(p, CACHE_PROVIDER_GROUP, "fd", "0",
+ &cache_fd_provider);
+}
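
For reference, the directives in the cache_cmds table above map onto the "mem" and "fd" providers registered here. A hypothetical httpd.conf fragment follows; the values are illustrative only, chosen to satisfy the sanity checks in mem_cache_post_config (MCacheMinObjectSize less than MCacheMaxObjectSize, MCacheMaxObjectSize smaller than MCacheSize converted to bytes, and MCacheMaxStreamingBuffer no larger than MCacheMaxObjectSize).

# Assumes mod_cache and mod_mem_cache are loaded.
CacheEnable mem /
MCacheSize 4096
MCacheMaxObjectCount 1000
MCacheMinObjectSize 1
MCacheMaxObjectSize 65536
MCacheRemovalAlgorithm GDSF
MCacheMaxStreamingBuffer 65536
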
+
+module AP_MODULE_DECLARE_DATA mem_cache_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ create_cache_config, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ cache_cmds, /* command apr_table_t */
+ register_hooks
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.dsp
new file mode 100644
index 00000000..98a27b76
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_mem_cache" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_mem_cache - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mem_cache.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mem_cache.mak" CFG="mod_mem_cache - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_mem_cache - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_mem_cache - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_mem_cache - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /D "_USRDLL" /D "mod_mem_cache_EXPORTS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_mem_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_mem_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mem_cache.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_mem_cache - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_mem_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_mem_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mem_cache.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_mem_cache - Win32 Release"
+# Name "mod_mem_cache - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_cache.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_mem_cache.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_mem_cache - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_mem_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_mem_cache.so "mem_cache_module for Apache" ../../include/ap_release.h > .\mod_mem_cache.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_mem_cache - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_mem_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_mem_cache.so "mem_cache_module for Apache" ../../include/ap_release.h > .\mod_mem_cache.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/modules.mk b/rubbos/app/httpd-2.0.64/modules/experimental/modules.mk
new file mode 100644
index 00000000..ceb52a1b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/modules.mk
@@ -0,0 +1,3 @@
+DISTCLEAN_TARGETS = modules.mk
+static =
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.c b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.c
new file mode 100644
index 00000000..adaccb32
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.c
@@ -0,0 +1,1758 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * util_ldap.c: LDAP things
+ *
+ * Original code from auth_ldap module for Apache v1.3:
+ * Copyright 1998, 1999 Enbridge Pipelines Inc.
+ * Copyright 1999-2001 Dave Carrigan
+ */
+
+#include <apr_ldap.h>
+#include <apr_strings.h>
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "util_ldap.h"
+#include "util_ldap_cache.h"
+
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifndef APU_HAS_LDAP
+#error mod_ldap requires APR-util to have LDAP support built in
+#endif
+
+#if !defined(OS2) && !defined(WIN32) && !defined(BEOS) && !defined(NETWARE)
+#include "unixd.h"
+#define UTIL_LDAP_SET_MUTEX_PERMS
+#endif
+
+ /* defines for certificate file types
+ */
+#define LDAP_CA_TYPE_UNKNOWN 0
+#define LDAP_CA_TYPE_DER 1
+#define LDAP_CA_TYPE_BASE64 2
+#define LDAP_CA_TYPE_CERT7_DB 3
+
+
+module AP_MODULE_DECLARE_DATA ldap_module;
+
+int util_ldap_handler(request_rec *r);
+void *util_ldap_create_config(apr_pool_t *p, server_rec *s);
+
+
+/*
+ * Some definitions to help between various versions of apache.
+ */
+
+#ifndef DOCTYPE_HTML_2_0
+#define DOCTYPE_HTML_2_0 "<!DOCTYPE HTML PUBLIC \"-//IETF//" \
+ "DTD HTML 2.0//EN\">\n"
+#endif
+
+#ifndef DOCTYPE_HTML_3_2
+#define DOCTYPE_HTML_3_2 "<!DOCTYPE HTML PUBLIC \"-//W3C//" \
+ "DTD HTML 3.2 Final//EN\">\n"
+#endif
+
+#ifndef DOCTYPE_HTML_4_0S
+#define DOCTYPE_HTML_4_0S "<!DOCTYPE HTML PUBLIC \"-//W3C//" \
+ "DTD HTML 4.0//EN\"\n" \
+ "\"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
+#endif
+
+#ifndef DOCTYPE_HTML_4_0T
+#define DOCTYPE_HTML_4_0T "<!DOCTYPE HTML PUBLIC \"-//W3C//" \
+ "DTD HTML 4.0 Transitional//EN\"\n" \
+ "\"http://www.w3.org/TR/REC-html40/loose.dtd\">\n"
+#endif
+
+#ifndef DOCTYPE_HTML_4_0F
+#define DOCTYPE_HTML_4_0F "<!DOCTYPE HTML PUBLIC \"-//W3C//" \
+ "DTD HTML 4.0 Frameset//EN\"\n" \
+ "\"http://www.w3.org/TR/REC-html40/frameset.dtd\">\n"
+#endif
+
+#define LDAP_CACHE_LOCK() \
+ if (st->util_ldap_cache_lock) \
+ apr_global_mutex_lock(st->util_ldap_cache_lock)
+#define LDAP_CACHE_UNLOCK() \
+ if (st->util_ldap_cache_lock) \
+ apr_global_mutex_unlock(st->util_ldap_cache_lock)
+
+
+static void util_ldap_strdup (char **str, const char *newstr)
+{
+ if (*str) {
+ free(*str);
+ *str = NULL;
+ }
+
+ if (newstr) {
+ *str = calloc(1, strlen(newstr)+1);
+ strcpy (*str, newstr);
+ }
+}
+
+/*
+ * Status Handler
+ * --------------
+ *
+ * This handler generates a status page about the current performance of
+ * the LDAP cache. It is enabled as follows:
+ *
+ * <Location /ldap-status>
+ * SetHandler ldap-status
+ * </Location>
+ *
+ */
+int util_ldap_handler(request_rec *r)
+{
+ util_ldap_state_t *st = (util_ldap_state_t *)ap_get_module_config(r->server->module_config, &ldap_module);
+
+ r->allowed |= (1 << M_GET);
+ if (r->method_number != M_GET)
+ return DECLINED;
+
+ if (strcmp(r->handler, "ldap-status")) {
+ return DECLINED;
+ }
+
+ r->content_type = "text/html; charset=ISO-8859-1";
+ if (r->header_only)
+ return OK;
+
+ ap_rputs(DOCTYPE_HTML_3_2
+ "<html><head><title>LDAP Cache Information</title></head>\n", r);
+ ap_rputs("<body bgcolor='#ffffff'><h1 align=center>LDAP Cache Information</h1>\n", r);
+
+ util_ald_cache_display(r, st);
+
+ return OK;
+}
+
+/* ------------------------------------------------------------------ */
+
+
+/*
+ * Closes an LDAP connection by unlocking it. The next time
+ * util_ldap_connection_find() is called this connection will be
+ * available for reuse.
+ */
+LDAP_DECLARE(void) util_ldap_connection_close(util_ldap_connection_t *ldc)
+{
+
+ /*
+ * QUESTION:
+ *
+ * Is it safe leaving bound connections floating around between the
+ * different modules? Keeping the user bound is a performance boost,
+ * but it is also a potential security problem - maybe.
+ *
+ * For now we unbind the user when we finish with a connection, but
+ * we don't have to...
+ */
+
+ /* mark our connection as available for reuse */
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(ldc->lock);
+#endif
+}
+
+
+/*
+ * Destroys an LDAP connection by unbinding and closing the connection to
+ * the LDAP server. It is used to bring the connection back to a known
+ * state after an error, and during pool cleanup.
+ */
+LDAP_DECLARE_NONSTD(apr_status_t) util_ldap_connection_unbind(void *param)
+{
+ util_ldap_connection_t *ldc = param;
+
+ if (ldc) {
+ if (ldc->ldap) {
+ ldap_unbind_s(ldc->ldap);
+ ldc->ldap = NULL;
+ }
+ ldc->bound = 0;
+ }
+
+ return APR_SUCCESS;
+}
+
+
+/*
+ * Clean up an LDAP connection by unbinding and unlocking the connection.
+ * This function is registered with the pool cleanup function - causing
+ * the LDAP connections to be shut down cleanly on graceful restart.
+ */
+LDAP_DECLARE_NONSTD(apr_status_t) util_ldap_connection_cleanup(void *param)
+{
+ util_ldap_connection_t *ldc = param;
+
+ if (ldc) {
+
+ /* unbind and disconnect from the LDAP server */
+ util_ldap_connection_unbind(ldc);
+
+ /* free the username and password */
+ if (ldc->bindpw) {
+ free((void*)ldc->bindpw);
+ }
+ if (ldc->binddn) {
+ free((void*)ldc->binddn);
+ }
+
+ /* unlock this entry */
+ util_ldap_connection_close(ldc);
+
+ }
+
+ return APR_SUCCESS;
+}
+
+
+/*
+ * Connect to the LDAP server and binds. Does not connect if already
+ * connected (i.e. ldc->ldap is non-NULL.) Does not bind if already bound.
+ *
+ * Returns LDAP_SUCCESS on success; and an error code on failure
+ */
+LDAP_DECLARE(int) util_ldap_connection_open(request_rec *r,
+ util_ldap_connection_t *ldc)
+{
+ int result = 0;
+ int failures = 0;
+ int version = LDAP_VERSION3;
+ int rc = LDAP_SUCCESS;
+ struct timeval timeOut = {10,0}; /* 10 second connection timeout */
+
+ util_ldap_state_t *st = (util_ldap_state_t *)ap_get_module_config(
+ r->server->module_config, &ldap_module);
+
+ /* If the connection is already bound, return
+ */
+ if (ldc->bound)
+ {
+ ldc->reason = "LDAP: connection open successful (already bound)";
+ return LDAP_SUCCESS;
+ }
+
+ /* create the ldap session handle
+ */
+ if (NULL == ldc->ldap)
+ {
+ /* cleartext (non-SSL) connection requested */
+ if (!ldc->secure)
+ {
+ ldc->ldap = ldap_init(const_cast(ldc->host), ldc->port);
+ }
+ else /* ssl connection requested */
+ {
+ /* check configuration to make sure it supports SSL
+ */
+ if (st->ssl_support)
+ {
+ #if APR_HAS_LDAP_SSL
+
+ #if APR_HAS_NOVELL_LDAPSDK
+ ldc->ldap = ldapssl_init(ldc->host, ldc->port, 1);
+
+ #elif APR_HAS_NETSCAPE_LDAPSDK
+ ldc->ldap = ldapssl_init(ldc->host, ldc->port, 1);
+
+ #elif APR_HAS_OPENLDAP_LDAPSDK
+ ldc->ldap = ldap_init(ldc->host, ldc->port);
+ if (NULL != ldc->ldap)
+ {
+ int SSLmode = LDAP_OPT_X_TLS_HARD;
+ result = ldap_set_option(ldc->ldap, LDAP_OPT_X_TLS, &SSLmode);
+ if (LDAP_SUCCESS != result)
+ {
+ ldap_unbind_s(ldc->ldap);
+ ldc->reason = "LDAP: ldap_set_option - LDAP_OPT_X_TLS_HARD failed";
+ ldc->ldap = NULL;
+ }
+ }
+
+ #elif APR_HAS_MICROSOFT_LDAPSDK
+ ldc->ldap = ldap_sslinit(const_cast(ldc->host), ldc->port, 1);
+
+ #else
+ ldc->reason = "LDAP: ssl connections not supported";
+ #endif /* APR_HAS_NOVELL_LDAPSDK */
+
+ #endif /* APR_HAS_LDAP_SSL */
+ }
+ else
+ ldc->reason = "LDAP: ssl connections not supported";
+ }
+
+ if (NULL == ldc->ldap)
+ {
+ ldc->bound = 0;
+ if (NULL == ldc->reason)
+ ldc->reason = "LDAP: ldap initialization failed";
+ return(-1);
+ }
+
+ /* Set the alias dereferencing option */
+ ldap_set_option(ldc->ldap, LDAP_OPT_DEREF, &(ldc->deref));
+
+ /* always default to LDAP V3 */
+ ldap_set_option(ldc->ldap, LDAP_OPT_PROTOCOL_VERSION, &version);
+
+#ifdef LDAP_OPT_NETWORK_TIMEOUT
+ if (st->connectionTimeout > 0) {
+ timeOut.tv_sec = st->connectionTimeout;
+ }
+
+ if (st->connectionTimeout >= 0) {
+ rc = ldap_set_option(ldc->ldap, LDAP_OPT_NETWORK_TIMEOUT, (void *)&timeOut);
+ if (APR_SUCCESS != rc) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "LDAP: Could not set the connection timeout" );
+ }
+ }
+#endif
+ }
+
+
+ /* loop trying to bind up to 10 times if LDAP_SERVER_DOWN error is
+ * returned. Break out of the loop on Success or any other error.
+ *
+ * NOTE: Looping is probably not a great idea. If the server isn't
+ * responding the chances it will respond after a few tries are poor.
+ * However, the original code looped and it only happens on
+ * the error condition.
+ */
+ for (failures=0; failures<10; failures++)
+ {
+ result = ldap_simple_bind_s(ldc->ldap, const_cast(ldc->binddn), const_cast(ldc->bindpw));
+ if (LDAP_SERVER_DOWN != result)
+ break;
+ }
+
+ /* free the handle if there was an error
+ */
+ if (LDAP_SUCCESS != result)
+ {
+ ldap_unbind_s(ldc->ldap);
+ ldc->ldap = NULL;
+ ldc->bound = 0;
+ ldc->reason = "LDAP: ldap_simple_bind_s() failed";
+ }
+ else {
+ ldc->bound = 1;
+ ldc->reason = "LDAP: connection open successful";
+ }
+
+ return(result);
+}
+
+
+/*
+ * Find an existing ldap connection struct that matches the
+ * provided ldap connection parameters.
+ *
+ * If not found in the cache, a new ldc structure will be allocated from st->pool
+ * and returned to the caller. If found in the cache, a pointer to the existing
+ * ldc structure will be returned.
+ */
+LDAP_DECLARE(util_ldap_connection_t *)util_ldap_connection_find(request_rec *r, const char *host, int port,
+ const char *binddn, const char *bindpw, deref_options deref,
+ int secure )
+{
+ struct util_ldap_connection_t *l, *p; /* To traverse the linked list */
+
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(r->server->module_config,
+ &ldap_module);
+
+
+#if APR_HAS_THREADS
+ /* mutex lock this function */
+ apr_thread_mutex_lock(st->mutex);
+#endif
+
+ /* Search for an exact connection match in the list that is not
+ * being used.
+ */
+ for (l=st->connections,p=NULL; l; l=l->next) {
+#if APR_HAS_THREADS
+ if (APR_SUCCESS == apr_thread_mutex_trylock(l->lock)) {
+#endif
+ if ((l->port == port) && (strcmp(l->host, host) == 0) &&
+ ((!l->binddn && !binddn) || (l->binddn && binddn && !strcmp(l->binddn, binddn))) &&
+ ((!l->bindpw && !bindpw) || (l->bindpw && bindpw && !strcmp(l->bindpw, bindpw))) &&
+ (l->deref == deref) && (l->secure == secure)) {
+
+ break;
+ }
+#if APR_HAS_THREADS
+ /* If this connection didn't match the criteria, then we
+ * need to unlock the mutex so it is available to be reused.
+ */
+ apr_thread_mutex_unlock(l->lock);
+ }
+#endif
+ p = l;
+ }
+
+ /* If nothing found, search again, but we don't care about the
+ * binddn and bindpw this time.
+ */
+ if (!l) {
+ for (l=st->connections,p=NULL; l; l=l->next) {
+#if APR_HAS_THREADS
+ if (APR_SUCCESS == apr_thread_mutex_trylock(l->lock)) {
+
+#endif
+ if ((l->port == port) && (strcmp(l->host, host) == 0) &&
+ (l->deref == deref) && (l->secure == secure)) {
+
+ /* the bind credentials have changed */
+ l->bound = 0;
+ util_ldap_strdup((char**)&(l->binddn), binddn);
+ util_ldap_strdup((char**)&(l->bindpw), bindpw);
+ break;
+ }
+#if APR_HAS_THREADS
+ /* If this connection didn't match the criteria, then we
+ * need to unlock the mutex so it is available to be reused.
+ */
+ apr_thread_mutex_unlock(l->lock);
+ }
+#endif
+ p = l;
+ }
+ }
+
+/* artificially disable cache */
+/* l = NULL; */
+
+ /* If no connection was found after the second search, we
+ * must create one.
+ */
+ if (!l) {
+
+ /*
+ * Add the new connection entry to the linked list. Note that we
+ * don't actually establish an LDAP connection yet; that happens
+ * the first time authentication is requested.
+ */
+ /* create the details to the pool in st */
+ l = apr_pcalloc(st->pool, sizeof(util_ldap_connection_t));
+#if APR_HAS_THREADS
+ apr_thread_mutex_create(&l->lock, APR_THREAD_MUTEX_DEFAULT, st->pool);
+ apr_thread_mutex_lock(l->lock);
+#endif
+ l->pool = st->pool;
+ l->bound = 0;
+ l->host = apr_pstrdup(st->pool, host);
+ l->port = port;
+ l->deref = deref;
+ util_ldap_strdup((char**)&(l->binddn), binddn);
+ util_ldap_strdup((char**)&(l->bindpw), bindpw);
+ l->secure = secure;
+
+ /* add the cleanup to the pool */
+ apr_pool_cleanup_register(l->pool, l,
+ util_ldap_connection_cleanup,
+ apr_pool_cleanup_null);
+
+ if (p) {
+ p->next = l;
+ }
+ else {
+ st->connections = l;
+ }
+ }
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(st->mutex);
+#endif
+ return l;
+}
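
A caller-side sketch of the connection cache above (the host, port, and credentials are placeholders, and the deref value is assumed to come from the deref_options enum in util_ldap.h): find or create a cached connection, bind it on demand, and release it when done.

/* Sketch only; every literal here is hypothetical. */
util_ldap_connection_t *ldc =
    util_ldap_connection_find(r, "ldap.example.com", 389,
                              "cn=proxy,dc=example,dc=com", "secret",
                              never, 0);

if (util_ldap_connection_open(r, ldc) == LDAP_SUCCESS) {
    /* ... perform searches or compares through ldc->ldap ... */
}
util_ldap_connection_close(ldc);  /* just unlocks the entry for reuse */
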
+
+/* ------------------------------------------------------------------ */
+
+/*
+ * Compares two DNs to see if they're equal. The only way to do this correctly is to
+ * search for the dn and then do ldap_get_dn() on the result. This should match the
+ * initial dn, since it would also have been retrieved with ldap_get_dn(). This is
+ * expensive, so if the configuration value compare_dn_on_server is
+ * false, we just do an ordinary strcmp.
+ *
+ * The lock for the ldap cache should already be acquired.
+ */
+LDAP_DECLARE(int) util_ldap_cache_comparedn(request_rec *r, util_ldap_connection_t *ldc,
+ const char *url, const char *dn, const char *reqdn,
+ int compare_dn_on_server)
+{
+ int result = 0;
+ util_url_node_t *curl;
+ util_url_node_t curnode;
+ util_dn_compare_node_t *node;
+ util_dn_compare_node_t newnode;
+ int failures = 0;
+ LDAPMessage *res, *entry;
+ char *searchdn;
+
+ util_ldap_state_t *st = (util_ldap_state_t *)ap_get_module_config(r->server->module_config, &ldap_module);
+
+ /* get cache entry (or create one) */
+ LDAP_CACHE_LOCK();
+
+ curnode.url = url;
+ curl = util_ald_cache_fetch(st->util_ldap_cache, &curnode);
+ if (curl == NULL) {
+ curl = util_ald_create_caches(st, url);
+ }
+ LDAP_CACHE_UNLOCK();
+
+ /* a simple compare? */
+ if (!compare_dn_on_server) {
+ /* unlock this read lock */
+ if (strcmp(dn, reqdn)) {
+ ldc->reason = "DN Comparison FALSE (direct strcmp())";
+ return LDAP_COMPARE_FALSE;
+ }
+ else {
+ ldc->reason = "DN Comparison TRUE (direct strcmp())";
+ return LDAP_COMPARE_TRUE;
+ }
+ }
+
+ if (curl) {
+ /* no - it's a server side compare */
+ LDAP_CACHE_LOCK();
+
+ /* is it in the compare cache? */
+ newnode.reqdn = (char *)reqdn;
+ node = util_ald_cache_fetch(curl->dn_compare_cache, &newnode);
+ if (node != NULL) {
+ /* If it's in the cache, it's good */
+ /* unlock this read lock */
+ LDAP_CACHE_UNLOCK();
+ ldc->reason = "DN Comparison TRUE (cached)";
+ return LDAP_COMPARE_TRUE;
+ }
+
+ /* unlock this read lock */
+ LDAP_CACHE_UNLOCK();
+ }
+
+start_over:
+ if (failures++ > 10) {
+ /* too many failures */
+ return result;
+ }
+
+ /* make a server connection */
+ if (LDAP_SUCCESS != (result = util_ldap_connection_open(r, ldc))) {
+ /* connect to server failed */
+ return result;
+ }
+
+ /* search for reqdn */
+ if ((result = ldap_search_ext_s(ldc->ldap, const_cast(reqdn), LDAP_SCOPE_BASE,
+ "(objectclass=*)", NULL, 1,
+ NULL, NULL, NULL, -1, &res)) == LDAP_SERVER_DOWN) {
+ ldc->reason = "DN Comparison ldap_search_ext_s() failed with server down";
+ util_ldap_connection_unbind(ldc);
+ goto start_over;
+ }
+ if (result != LDAP_SUCCESS) {
+ /* search for reqdn failed - no match */
+ ldc->reason = "DN Comparison ldap_search_ext_s() failed";
+ return result;
+ }
+
+ entry = ldap_first_entry(ldc->ldap, res);
+ searchdn = ldap_get_dn(ldc->ldap, entry);
+
+ ldap_msgfree(res);
+ if (strcmp(dn, searchdn) != 0) {
+ /* compare unsuccessful */
+ ldc->reason = "DN Comparison FALSE (checked on server)";
+ result = LDAP_COMPARE_FALSE;
+ }
+ else {
+ if (curl) {
+ /* compare successful - add to the compare cache */
+ LDAP_CACHE_LOCK();
+ newnode.reqdn = (char *)reqdn;
+ newnode.dn = (char *)dn;
+
+ node = util_ald_cache_fetch(curl->dn_compare_cache, &newnode);
+ if ((node == NULL) ||
+ (strcmp(reqdn, node->reqdn) != 0) || (strcmp(dn, node->dn) != 0)) {
+
+ util_ald_cache_insert(curl->dn_compare_cache, &newnode);
+ }
+ LDAP_CACHE_UNLOCK();
+ }
+ ldc->reason = "DN Comparison TRUE (checked on server)";
+ result = LDAP_COMPARE_TRUE;
+ }
+ ldap_memfree(searchdn);
+ return result;
+
+}
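+
+/*
+ * Illustrative note (not part of the original sources): a front-end module
+ * such as mod_auth_ldap drives the compare_dn_on_server flag from its own
+ * configuration (e.g. an AuthLDAPCompareDNOnServer directive). With the
+ * flag off, the function above costs a single strcmp(); with it on, every
+ * uncached comparison costs a base-scope ldap_search_ext_s() plus
+ * ldap_get_dn() against the directory server.
+ */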
+
+/*
+ * Does a generic ldap_compare operation. It accepts a cache that it will use
+ * to look up the compare. We cache two kinds of compares
+ * (require group compares) and (require user compares). Each kind has a
+ * different cache node: require group includes the DN; require user does not.
+ */
+LDAP_DECLARE(int) util_ldap_cache_compare(request_rec *r, util_ldap_connection_t *ldc,
+ const char *url, const char *dn,
+ const char *attrib, const char *value)
+{
+ int result = 0;
+ util_url_node_t *curl;
+ util_url_node_t curnode;
+ util_compare_node_t *compare_nodep;
+ util_compare_node_t the_compare_node;
+ apr_time_t curtime = 0; /* silence gcc -Wall */
+ int failures = 0;
+
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(r->server->module_config,
+ &ldap_module);
+
+ /* get cache entry (or create one) */
+ LDAP_CACHE_LOCK();
+ curnode.url = url;
+ curl = util_ald_cache_fetch(st->util_ldap_cache, &curnode);
+ if (curl == NULL) {
+ curl = util_ald_create_caches(st, url);
+ }
+ LDAP_CACHE_UNLOCK();
+
+ if (curl) {
+ /* make a comparison to the cache */
+ LDAP_CACHE_LOCK();
+ curtime = apr_time_now();
+
+ the_compare_node.dn = (char *)dn;
+ the_compare_node.attrib = (char *)attrib;
+ the_compare_node.value = (char *)value;
+ the_compare_node.result = 0;
+
+ compare_nodep = util_ald_cache_fetch(curl->compare_cache, &the_compare_node);
+
+ if (compare_nodep != NULL) {
+ /* found it... */
+ if (curtime - compare_nodep->lastcompare > st->compare_cache_ttl) {
+ /* ...but it is too old */
+ util_ald_cache_remove(curl->compare_cache, compare_nodep);
+ }
+ else {
+ /* ...and it is good */
+ /* unlock this read lock */
+ LDAP_CACHE_UNLOCK();
+ if (LDAP_COMPARE_TRUE == compare_nodep->result) {
+ ldc->reason = "Comparison true (cached)";
+ return compare_nodep->result;
+ }
+ else if (LDAP_COMPARE_FALSE == compare_nodep->result) {
+ ldc->reason = "Comparison false (cached)";
+ return compare_nodep->result;
+ }
+ else if (LDAP_NO_SUCH_ATTRIBUTE == compare_nodep->result) {
+ ldc->reason = "Comparison no such attribute (cached)";
+ return compare_nodep->result;
+ }
+ else {
+ ldc->reason = "Comparison undefined (cached)";
+ return compare_nodep->result;
+ }
+ }
+ }
+ /* unlock this read lock */
+ LDAP_CACHE_UNLOCK();
+ }
+
+start_over:
+ if (failures++ > 10) {
+ /* too many failures */
+ return result;
+ }
+ if (LDAP_SUCCESS != (result = util_ldap_connection_open(r, ldc))) {
+ /* connect failed */
+ return result;
+ }
+
+ if ((result = ldap_compare_s(ldc->ldap, const_cast(dn), const_cast(attrib), const_cast(value)))
+ == LDAP_SERVER_DOWN) {
+ /* connection failed - try again */
+ ldc->reason = "ldap_compare_s() failed with server down";
+ util_ldap_connection_unbind(ldc);
+ goto start_over;
+ }
+
+ ldc->reason = "Comparison complete";
+ if ((LDAP_COMPARE_TRUE == result) ||
+ (LDAP_COMPARE_FALSE == result) ||
+ (LDAP_NO_SUCH_ATTRIBUTE == result)) {
+ if (curl) {
+ /* compare completed; caching result */
+ LDAP_CACHE_LOCK();
+ the_compare_node.lastcompare = curtime;
+ the_compare_node.result = result;
+
+ /* If the node doesn't exist then insert it, otherwise just update it with
+ the last results */
+ compare_nodep = util_ald_cache_fetch(curl->compare_cache, &the_compare_node);
+ if ((compare_nodep == NULL) ||
+ (strcmp(the_compare_node.dn, compare_nodep->dn) != 0) ||
+ (strcmp(the_compare_node.attrib, compare_nodep->attrib) != 0) ||
+ (strcmp(the_compare_node.value, compare_nodep->value) != 0)) {
+
+ util_ald_cache_insert(curl->compare_cache, &the_compare_node);
+ }
+ else {
+ compare_nodep->lastcompare = curtime;
+ compare_nodep->result = result;
+ }
+ LDAP_CACHE_UNLOCK();
+ }
+ if (LDAP_COMPARE_TRUE == result) {
+ ldc->reason = "Comparison true (adding to cache)";
+ return LDAP_COMPARE_TRUE;
+ }
+ else if (LDAP_COMPARE_FALSE == result) {
+ ldc->reason = "Comparison false (adding to cache)";
+ return LDAP_COMPARE_FALSE;
+ }
+ else {
+ ldc->reason = "Comparison no such attribute (adding to cache)";
+ return LDAP_NO_SUCH_ATTRIBUTE;
+ }
+ }
+ return result;
+}
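+
+/*
+ * Illustrative sketch of a hypothetical caller (not part of this file):
+ * a "require group" check in an authorization module would typically
+ * reduce to something like
+ *
+ *     result = util_ldap_cache_compare(r, ldc, sec->url, group_dn,
+ *                                      "member", user_dn);
+ *
+ * where sec->url, group_dn and user_dn are placeholders for the module's
+ * configured cache URL, the group DN from the require line, and the
+ * authenticated user's DN. LDAP_COMPARE_TRUE means the user is a member;
+ * TRUE, FALSE and NO_SUCH_ATTRIBUTE results are all cached for
+ * compare_cache_ttl microseconds, as implemented above.
+ */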
+
+LDAP_DECLARE(int) util_ldap_cache_checkuserid(request_rec *r, util_ldap_connection_t *ldc,
+ const char *url, const char *basedn, int scope, char **attrs,
+ const char *filter, const char *bindpw, const char **binddn,
+ const char ***retvals)
+{
+ const char **vals = NULL;
+ int numvals = 0;
+ int result = 0;
+ LDAPMessage *res, *entry;
+ char *dn;
+ int count;
+ int failures = 0;
+ util_url_node_t *curl; /* Cached URL node */
+ util_url_node_t curnode;
+ util_search_node_t *search_nodep; /* Cached search node */
+ util_search_node_t the_search_node;
+ apr_time_t curtime;
+
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(r->server->module_config,
+ &ldap_module);
+
+ /* Get the cache node for this url */
+ LDAP_CACHE_LOCK();
+ curnode.url = url;
+ curl = (util_url_node_t *)util_ald_cache_fetch(st->util_ldap_cache, &curnode);
+ if (curl == NULL) {
+ curl = util_ald_create_caches(st, url);
+ }
+ LDAP_CACHE_UNLOCK();
+
+ if (curl) {
+ LDAP_CACHE_LOCK();
+ the_search_node.username = filter;
+ search_nodep = util_ald_cache_fetch(curl->search_cache, &the_search_node);
+ if (search_nodep != NULL) {
+
+ /* found entry in search cache... */
+ curtime = apr_time_now();
+
+ /*
+             * Remove this item from the cache if it's expired.
+             * If the sent password doesn't match the stored password,
+             * the entry will be removed and re-added later if the
+             * credentials pass authentication.
+ */
+ if ((curtime - search_nodep->lastbind) > st->search_cache_ttl) {
+ /* ...but entry is too old */
+ util_ald_cache_remove(curl->search_cache, search_nodep);
+ }
+ else if ((search_nodep->bindpw) &&
+ (search_nodep->bindpw[0] != '\0') &&
+ (strcmp(search_nodep->bindpw, bindpw) == 0)) {
+ /* ...and entry is valid */
+ *binddn = search_nodep->dn;
+ *retvals = search_nodep->vals;
+ LDAP_CACHE_UNLOCK();
+ ldc->reason = "Authentication successful (cached)";
+ return LDAP_SUCCESS;
+ }
+ }
+ /* unlock this read lock */
+ LDAP_CACHE_UNLOCK();
+ }
+
+    /*
+     * At this point, there is no valid cached search, so let's do the search.
+     */
+
+ /*
+ * If any LDAP operation fails due to LDAP_SERVER_DOWN, control returns here.
+ */
+start_over:
+ if (failures++ > 10) {
+ return result;
+ }
+ if (LDAP_SUCCESS != (result = util_ldap_connection_open(r, ldc))) {
+ return result;
+ }
+
+    /* try to do the search */
+ if ((result = ldap_search_ext_s(ldc->ldap,
+ const_cast(basedn), scope,
+ const_cast(filter), attrs, 0,
+ NULL, NULL, NULL, -1, &res)) == LDAP_SERVER_DOWN) {
+ ldc->reason = "ldap_search_ext_s() for user failed with server down";
+ util_ldap_connection_unbind(ldc);
+ goto start_over;
+ }
+
+ /* if there is an error (including LDAP_NO_SUCH_OBJECT) return now */
+ if (result != LDAP_SUCCESS) {
+ ldc->reason = "ldap_search_ext_s() for user failed";
+ return result;
+ }
+
+ /*
+ * We should have found exactly one entry; to find a different
+ * number is an error.
+ */
+ count = ldap_count_entries(ldc->ldap, res);
+ if (count != 1)
+ {
+ if (count == 0 )
+ ldc->reason = "User not found";
+ else
+ ldc->reason = "User is not unique (search found two or more matches)";
+ ldap_msgfree(res);
+ return LDAP_NO_SUCH_OBJECT;
+ }
+
+ entry = ldap_first_entry(ldc->ldap, res);
+
+ /* Grab the dn, copy it into the pool, and free it again */
+ dn = ldap_get_dn(ldc->ldap, entry);
+ *binddn = apr_pstrdup(r->pool, dn);
+ ldap_memfree(dn);
+
+ /*
+ * A bind to the server with an empty password always succeeds, so
+ * we check to ensure that the password is not empty. This implies
+ * that users who actually do have empty passwords will never be
+ * able to authenticate with this module. I don't see this as a big
+ * problem.
+ */
+ if (!bindpw || strlen(bindpw) <= 0) {
+ ldap_msgfree(res);
+ ldc->reason = "Empty password not allowed";
+ return LDAP_INVALID_CREDENTIALS;
+ }
+
+ /*
+ * Attempt to bind with the retrieved dn and the password. If the bind
+ * fails, it means that the password is wrong (the dn obviously
+ * exists, since we just retrieved it)
+ */
+ if ((result =
+ ldap_simple_bind_s(ldc->ldap, const_cast(*binddn), const_cast(bindpw))) ==
+ LDAP_SERVER_DOWN) {
+ ldc->reason = "ldap_simple_bind_s() to check user credentials failed with server down";
+ ldap_msgfree(res);
+ util_ldap_connection_unbind(ldc);
+ goto start_over;
+ }
+
+ /* failure? if so - return */
+ if (result != LDAP_SUCCESS) {
+ ldc->reason = "ldap_simple_bind_s() to check user credentials failed";
+ ldap_msgfree(res);
+ util_ldap_connection_unbind(ldc);
+ return result;
+ }
+ else {
+ /*
+ * We have just bound the connection to a different user and password
+ * combination, which might be reused unintentionally next time this
+ * connection is used from the connection pool. To ensure no confusion,
+ * we mark the connection as unbound.
+ */
+ ldc->bound = 0;
+ }
+
+ /*
+ * Get values for the provided attributes.
+ */
+ if (attrs) {
+ int k = 0;
+ int i = 0;
+ while (attrs[k++]);
+ vals = apr_pcalloc(r->pool, sizeof(char *) * (k+1));
+ numvals = k;
+ while (attrs[i]) {
+ char **values;
+ int j = 0;
+ char *str = NULL;
+ /* get values */
+ values = ldap_get_values(ldc->ldap, entry, attrs[i]);
+ while (values && values[j]) {
+ str = str ? apr_pstrcat(r->pool, str, "; ", values[j], NULL) : apr_pstrdup(r->pool, values[j]);
+ j++;
+ }
+ ldap_value_free(values);
+ vals[i] = str;
+ i++;
+ }
+ *retvals = vals;
+ }
+
+ /*
+ * Add the new username to the search cache.
+ */
+ if (curl) {
+ LDAP_CACHE_LOCK();
+ the_search_node.username = filter;
+ the_search_node.dn = *binddn;
+ the_search_node.bindpw = bindpw;
+ the_search_node.lastbind = apr_time_now();
+ the_search_node.vals = vals;
+ the_search_node.numvals = numvals;
+
+        /* Search again to make sure that another thread didn't already insert
+           this node into the cache before we got here. If it does exist, just
+           update the lastbind time. */
+ search_nodep = util_ald_cache_fetch(curl->search_cache, &the_search_node);
+ if ((search_nodep == NULL) ||
+ (strcmp(*binddn, search_nodep->dn) != 0)) {
+
+ /* Nothing in cache, insert new entry */
+ util_ald_cache_insert(curl->search_cache, &the_search_node);
+ }
+ else if ((!search_nodep->bindpw) ||
+ (strcmp(bindpw, search_nodep->bindpw) != 0)) {
+
+ /* Entry in cache is invalid, remove it and insert new one */
+ util_ald_cache_remove(curl->search_cache, search_nodep);
+ util_ald_cache_insert(curl->search_cache, &the_search_node);
+ }
+ else {
+ /* Cache entry is valid, update lastbind */
+ search_nodep->lastbind = the_search_node.lastbind;
+ }
+ LDAP_CACHE_UNLOCK();
+ }
+ ldap_msgfree(res);
+
+ ldc->reason = "Authentication successful";
+ return LDAP_SUCCESS;
+}
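+
+/*
+ * Illustrative sketch of a hypothetical caller (not part of this file):
+ * an authentication front end such as mod_auth_ldap would typically call
+ *
+ *     const char *binddn;
+ *     const char **vals;
+ *     result = util_ldap_cache_checkuserid(r, ldc, sec->url, sec->basedn,
+ *                                          LDAP_SCOPE_SUBTREE,
+ *                                          sec->attributes, filter,
+ *                                          sent_password, &binddn, &vals);
+ *
+ * where the sec->* fields, filter and sent_password are placeholders for
+ * the per-directory configuration and the credentials taken from the
+ * request. LDAP_SUCCESS means the search matched exactly one entry and a
+ * bind with the supplied password succeeded; the result is then cached
+ * for search_cache_ttl microseconds.
+ */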
+
+/*
+ * This function will return the DN of the entry matching userid.
+ * It is used to get the DN in case some other module than mod_auth_ldap
+ * has authenticated the user.
+ * The function is basically a copy of util_ldap_cache_checkuserid
+ * with password checking removed.
+ */
+LDAP_DECLARE(int) util_ldap_cache_getuserdn(request_rec *r, util_ldap_connection_t *ldc,
+ const char *url, const char *basedn, int scope, char **attrs,
+ const char *filter, const char **binddn,
+ const char ***retvals)
+{
+ const char **vals = NULL;
+ int numvals = 0;
+ int result = 0;
+ LDAPMessage *res, *entry;
+ char *dn;
+ int count;
+ int failures = 0;
+ util_url_node_t *curl; /* Cached URL node */
+ util_url_node_t curnode;
+ util_search_node_t *search_nodep; /* Cached search node */
+ util_search_node_t the_search_node;
+ apr_time_t curtime;
+
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(r->server->module_config,
+ &ldap_module);
+
+ /* Get the cache node for this url */
+ LDAP_CACHE_LOCK();
+ curnode.url = url;
+ curl = (util_url_node_t *)util_ald_cache_fetch(st->util_ldap_cache, &curnode);
+ if (curl == NULL) {
+ curl = util_ald_create_caches(st, url);
+ }
+ LDAP_CACHE_UNLOCK();
+
+ if (curl) {
+ LDAP_CACHE_LOCK();
+ the_search_node.username = filter;
+ search_nodep = util_ald_cache_fetch(curl->search_cache, &the_search_node);
+ if (search_nodep != NULL) {
+
+ /* found entry in search cache... */
+ curtime = apr_time_now();
+
+ /*
+             * Remove this item from the cache if it's expired.
+ */
+ if ((curtime - search_nodep->lastbind) > st->search_cache_ttl) {
+ /* ...but entry is too old */
+ util_ald_cache_remove(curl->search_cache, search_nodep);
+ }
+ else {
+ /* ...and entry is valid */
+ *binddn = search_nodep->dn;
+ *retvals = search_nodep->vals;
+ LDAP_CACHE_UNLOCK();
+ ldc->reason = "Search successful (cached)";
+ return LDAP_SUCCESS;
+ }
+ }
+ /* unlock this read lock */
+ LDAP_CACHE_UNLOCK();
+ }
+
+    /*
+     * At this point, there is no valid cached search, so let's do the search.
+     */
+
+ /*
+ * If any LDAP operation fails due to LDAP_SERVER_DOWN, control returns here.
+ */
+start_over:
+ if (failures++ > 10) {
+ return result;
+ }
+ if (LDAP_SUCCESS != (result = util_ldap_connection_open(r, ldc))) {
+ return result;
+ }
+
+    /* try to do the search */
+ if ((result = ldap_search_ext_s(ldc->ldap,
+ const_cast(basedn), scope,
+ const_cast(filter), attrs, 0,
+ NULL, NULL, NULL, -1, &res)) == LDAP_SERVER_DOWN) {
+ ldc->reason = "ldap_search_ext_s() for user failed with server down";
+ util_ldap_connection_unbind(ldc);
+ goto start_over;
+ }
+
+ /* if there is an error (including LDAP_NO_SUCH_OBJECT) return now */
+ if (result != LDAP_SUCCESS) {
+ ldc->reason = "ldap_search_ext_s() for user failed";
+ return result;
+ }
+
+ /*
+ * We should have found exactly one entry; to find a different
+ * number is an error.
+ */
+ count = ldap_count_entries(ldc->ldap, res);
+ if (count != 1)
+ {
+ if (count == 0 )
+ ldc->reason = "User not found";
+ else
+ ldc->reason = "User is not unique (search found two or more matches)";
+ ldap_msgfree(res);
+ return LDAP_NO_SUCH_OBJECT;
+ }
+
+ entry = ldap_first_entry(ldc->ldap, res);
+
+ /* Grab the dn, copy it into the pool, and free it again */
+ dn = ldap_get_dn(ldc->ldap, entry);
+ *binddn = apr_pstrdup(r->pool, dn);
+ ldap_memfree(dn);
+
+ /*
+ * Get values for the provided attributes.
+ */
+ if (attrs) {
+ int k = 0;
+ int i = 0;
+ while (attrs[k++]);
+ vals = apr_pcalloc(r->pool, sizeof(char *) * (k+1));
+ numvals = k;
+ while (attrs[i]) {
+ char **values;
+ int j = 0;
+ char *str = NULL;
+ /* get values */
+ values = ldap_get_values(ldc->ldap, entry, attrs[i]);
+ while (values && values[j]) {
+ str = str ? apr_pstrcat(r->pool, str, "; ", values[j], NULL) : apr_pstrdup(r->pool, values[j]);
+ j++;
+ }
+ ldap_value_free(values);
+ vals[i] = str;
+ i++;
+ }
+ *retvals = vals;
+ }
+
+ /*
+ * Add the new username to the search cache.
+ */
+ if (curl) {
+ LDAP_CACHE_LOCK();
+ the_search_node.username = filter;
+ the_search_node.dn = *binddn;
+ the_search_node.bindpw = NULL;
+ the_search_node.lastbind = apr_time_now();
+ the_search_node.vals = vals;
+ the_search_node.numvals = numvals;
+
+        /* Search again to make sure that another thread didn't already insert
+           this node into the cache before we got here. If it does exist, just
+           update the lastbind time. */
+ search_nodep = util_ald_cache_fetch(curl->search_cache, &the_search_node);
+ if ((search_nodep == NULL) ||
+ (strcmp(*binddn, search_nodep->dn) != 0)) {
+
+ /* Nothing in cache, insert new entry */
+ util_ald_cache_insert(curl->search_cache, &the_search_node);
+ }
+ /*
+ * Don't update lastbind on entries with bindpw because
+ * we haven't verified that password. It's OK to update
+ * the entry if there is no password in it.
+ */
+ else if (!search_nodep->bindpw) {
+ /* Cache entry is valid, update lastbind */
+ search_nodep->lastbind = the_search_node.lastbind;
+ }
+ LDAP_CACHE_UNLOCK();
+ }
+ ldap_msgfree(res);
+
+ ldc->reason = "Search successful";
+ return LDAP_SUCCESS;
+}
+
+/*
+ * Reports if ssl support is enabled
+ *
+ * 1 = enabled, 0 = not enabled
+ */
+LDAP_DECLARE(int) util_ldap_ssl_supported(request_rec *r)
+{
+ util_ldap_state_t *st = (util_ldap_state_t *)ap_get_module_config(
+ r->server->module_config, &ldap_module);
+
+ return(st->ssl_support);
+}
+
+
+/* ---------------------------------------- */
+/* config directives */
+
+
+static const char *util_ldap_set_cache_bytes(cmd_parms *cmd, void *dummy, const char *bytes)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+
+ st->cache_bytes = atol(bytes);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "[%" APR_PID_T_FMT "] ldap cache: Setting shared memory "
+                  "cache size to %" APR_SIZE_T_FMT " bytes.",
+ getpid(), st->cache_bytes);
+
+ return NULL;
+}
+
+static const char *util_ldap_set_cache_file(cmd_parms *cmd, void *dummy, const char *file)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+
+ if (file) {
+ st->cache_file = ap_server_root_relative(st->pool, file);
+ }
+ else {
+ st->cache_file = NULL;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+                  "LDAP cache: Setting shared memory cache file to %s.",
+ st->cache_file);
+
+ return NULL;
+}
+
+static const char *util_ldap_set_cache_ttl(cmd_parms *cmd, void *dummy, const char *ttl)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+
+ st->search_cache_ttl = atol(ttl) * 1000000;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "[%d] ldap cache: Setting cache TTL to %ld microseconds.",
+ getpid(), st->search_cache_ttl);
+
+ return NULL;
+}
+
+static const char *util_ldap_set_cache_entries(cmd_parms *cmd, void *dummy, const char *size)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+
+
+ st->search_cache_size = atol(size);
+ if (st->search_cache_size < 0) {
+ st->search_cache_size = 0;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "[%d] ldap cache: Setting search cache size to %ld entries.",
+ getpid(), st->search_cache_size);
+
+ return NULL;
+}
+
+static const char *util_ldap_set_opcache_ttl(cmd_parms *cmd, void *dummy, const char *ttl)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+
+ st->compare_cache_ttl = atol(ttl) * 1000000;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "[%d] ldap cache: Setting operation cache TTL to %ld microseconds.",
+ getpid(), st->compare_cache_ttl);
+
+ return NULL;
+}
+
+static const char *util_ldap_set_opcache_entries(cmd_parms *cmd, void *dummy, const char *size)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+
+ st->compare_cache_size = atol(size);
+ if (st->compare_cache_size < 0) {
+ st->compare_cache_size = 0;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "[%d] ldap cache: Setting operation cache size to %ld entries.",
+ getpid(), st->compare_cache_size);
+
+ return NULL;
+}
+
+static const char *util_ldap_set_cert_auth(cmd_parms *cmd, void *dummy, const char *file)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+ const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ apr_finfo_t finfo;
+ apr_status_t rv;
+
+ if (err != NULL) {
+ return err;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "LDAP: SSL trusted certificate authority file - %s",
+ file);
+
+ st->cert_auth_file = ap_server_root_relative(cmd->pool, file);
+
+ if (st->cert_auth_file &&
+ ((rv = apr_stat (&finfo, st->cert_auth_file, APR_FINFO_MIN, cmd->pool)) != APR_SUCCESS))
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, cmd->server,
+ "LDAP: Could not open SSL trusted certificate authority file - %s",
+ st->cert_auth_file == NULL ? file : st->cert_auth_file);
+ return "Invalid file path";
+ }
+
+ return(NULL);
+}
+
+
+static const char *util_ldap_set_cert_type(cmd_parms *cmd, void *dummy, const char *Type)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+ const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (err != NULL) {
+ return err;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "LDAP: SSL trusted certificate authority file type - %s",
+ Type);
+
+ if (0 == strcmp("DER_FILE", Type))
+ st->cert_file_type = LDAP_CA_TYPE_DER;
+
+ else if (0 == strcmp("BASE64_FILE", Type))
+ st->cert_file_type = LDAP_CA_TYPE_BASE64;
+
+ else if (0 == strcmp("CERT7_DB_PATH", Type))
+ st->cert_file_type = LDAP_CA_TYPE_CERT7_DB;
+
+ else
+ st->cert_file_type = LDAP_CA_TYPE_UNKNOWN;
+
+ return(NULL);
+}
+
+static const char *util_ldap_set_connection_timeout(cmd_parms *cmd, void *dummy, const char *ttl)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+ const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+
+ if (err != NULL) {
+ return err;
+ }
+
+#ifdef LDAP_OPT_NETWORK_TIMEOUT
+ st->connectionTimeout = atol(ttl);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "[%d] ldap connection: Setting connection timeout to %ld seconds.",
+ getpid(), st->connectionTimeout);
+#else
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, cmd->server,
+                 "LDAP: Connection timeout option not supported by the LDAP SDK in use." );
+#endif
+
+ return NULL;
+}
+
+void *util_ldap_create_config(apr_pool_t *p, server_rec *s)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)apr_pcalloc(p, sizeof(util_ldap_state_t));
+
+ /* Create a per vhost pool for mod_ldap to use, serialized with
+ * st->mutex (also one per vhost)
+ */
+ apr_pool_create(&st->pool, p);
+#if APR_HAS_THREADS
+ apr_thread_mutex_create(&st->mutex, APR_THREAD_MUTEX_DEFAULT, st->pool);
+#endif
+
+ st->cache_bytes = 100000;
+ st->search_cache_ttl = 600000000;
+ st->search_cache_size = 1024;
+ st->compare_cache_ttl = 600000000;
+ st->compare_cache_size = 1024;
+ st->connections = NULL;
+ st->cert_auth_file = NULL;
+ st->cert_file_type = LDAP_CA_TYPE_UNKNOWN;
+ st->ssl_support = 0;
+ st->connectionTimeout = 10;
+
+ return st;
+}
+
+static apr_status_t util_ldap_cleanup_module(void *data)
+{
+#if APR_HAS_LDAP_SSL && APR_HAS_NOVELL_LDAPSDK
+ server_rec *s = data;
+ util_ldap_state_t *st = (util_ldap_state_t *)ap_get_module_config(
+ s->module_config, &ldap_module);
+
+ if (st->ssl_support)
+ ldapssl_client_deinit();
+
+#endif
+ return APR_SUCCESS;
+}
+
+static int util_ldap_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ int rc = LDAP_SUCCESS;
+ apr_status_t result;
+ char buf[MAX_STRING_LEN];
+ server_rec *s_vhost;
+ util_ldap_state_t *st_vhost;
+
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(s->module_config, &ldap_module);
+
+ void *data;
+ const char *userdata_key = "util_ldap_init";
+
+    /* util_ldap_post_config() will be called twice. Don't bother
+     * going through all of the initialization on the first call
+     * because it will just be thrown away. */
+ apr_pool_userdata_get(&data, userdata_key, s->process->pool);
+ if (!data) {
+ apr_pool_userdata_set((const void *)1, userdata_key,
+ apr_pool_cleanup_null, s->process->pool);
+
+#if APR_HAS_SHARED_MEMORY
+ /* If the cache file already exists then delete it. Otherwise we are
+ * going to run into problems creating the shared memory. */
+ if (st->cache_file) {
+ char *lck_file = apr_pstrcat (ptemp, st->cache_file, ".lck", NULL);
+ apr_file_remove(st->cache_file, ptemp);
+ apr_file_remove(lck_file, ptemp);
+ }
+#endif
+ return OK;
+ }
+
+#if APR_HAS_SHARED_MEMORY
+    /* Initialize the cache if the shared memory size is not zero and we don't already have a shm address */
+ if (!st->cache_shm && st->cache_bytes > 0) {
+#endif
+ result = util_ldap_cache_init(p, st);
+ if (result != APR_SUCCESS) {
+ apr_strerror(result, buf, sizeof(buf));
+ ap_log_error(APLOG_MARK, APLOG_ERR, result, s,
+ "LDAP cache: error while creating a shared memory segment: %s", buf);
+ }
+
+
+#if APR_HAS_SHARED_MEMORY
+ if (st->cache_file) {
+ st->lock_file = apr_pstrcat (st->pool, st->cache_file, ".lck", NULL);
+ }
+ else
+#endif
+ st->lock_file = ap_server_root_relative(st->pool, tmpnam(NULL));
+
+ result = apr_global_mutex_create(&st->util_ldap_cache_lock, st->lock_file, APR_LOCK_DEFAULT, st->pool);
+ if (result != APR_SUCCESS) {
+ return result;
+ }
+
+#ifdef UTIL_LDAP_SET_MUTEX_PERMS
+ result = unixd_set_global_mutex_perms(st->util_ldap_cache_lock);
+ if (result != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, result, s,
+ "LDAP cache: failed to set mutex permissions");
+ return result;
+ }
+#endif
+
+    /* merge config into all vhosts */
+ s_vhost = s->next;
+ while (s_vhost) {
+ st_vhost = (util_ldap_state_t *)ap_get_module_config(s_vhost->module_config, &ldap_module);
+
+#if APR_HAS_SHARED_MEMORY
+ st_vhost->cache_shm = st->cache_shm;
+ st_vhost->cache_rmm = st->cache_rmm;
+ st_vhost->cache_file = st->cache_file;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, result, s,
+ "LDAP merging Shared Cache conf: shm=0x%pp rmm=0x%pp for VHOST: %s",
+ st->cache_shm, st->cache_rmm, s_vhost->server_hostname);
+#endif
+ st_vhost->lock_file = st->lock_file;
+ s_vhost = s_vhost->next;
+ }
+#if APR_HAS_SHARED_MEMORY
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "LDAP cache: LDAPSharedCacheSize is zero, disabling shared memory cache");
+ }
+#endif
+
+ /* log the LDAP SDK used
+ */
+ #if APR_HAS_NETSCAPE_LDAPSDK
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: Built with Netscape LDAP SDK" );
+
+ #elif APR_HAS_NOVELL_LDAPSDK
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: Built with Novell LDAP SDK" );
+
+ #elif APR_HAS_OPENLDAP_LDAPSDK
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: Built with OpenLDAP LDAP SDK" );
+
+ #elif APR_HAS_MICROSOFT_LDAPSDK
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: Built with Microsoft LDAP SDK" );
+ #else
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: Built with unknown LDAP SDK" );
+
+ #endif /* APR_HAS_NETSCAPE_LDAPSDK */
+
+
+
+ apr_pool_cleanup_register(p, s, util_ldap_cleanup_module,
+ util_ldap_cleanup_module);
+
+ /* initialize SSL support if requested
+ */
+ if (st->cert_auth_file)
+ {
+ #if APR_HAS_LDAP_SSL /* compiled with ssl support */
+
+ #if APR_HAS_NETSCAPE_LDAPSDK
+
+        /* The Netscape SDK only supports a cert7.db file
+ */
+ if (st->cert_file_type == LDAP_CA_TYPE_CERT7_DB)
+ {
+ rc = ldapssl_client_init(st->cert_auth_file, NULL);
+ }
+ else
+ {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
+ "LDAP: Invalid LDAPTrustedCAType directive - "
+ "CERT7_DB_PATH type required");
+ rc = -1;
+ }
+
+ #elif APR_HAS_NOVELL_LDAPSDK
+
+ /* Novell SDK supports DER or BASE64 files
+ */
+ if (st->cert_file_type == LDAP_CA_TYPE_DER ||
+ st->cert_file_type == LDAP_CA_TYPE_BASE64 )
+ {
+ rc = ldapssl_client_init(NULL, NULL);
+ if (LDAP_SUCCESS == rc)
+ {
+ if (st->cert_file_type == LDAP_CA_TYPE_BASE64)
+ rc = ldapssl_add_trusted_cert(st->cert_auth_file,
+ LDAPSSL_CERT_FILETYPE_B64);
+ else
+ rc = ldapssl_add_trusted_cert(st->cert_auth_file,
+ LDAPSSL_CERT_FILETYPE_DER);
+
+ if (LDAP_SUCCESS != rc)
+ ldapssl_client_deinit();
+ }
+ }
+ else
+ {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
+ "LDAP: Invalid LDAPTrustedCAType directive - "
+ "DER_FILE or BASE64_FILE type required");
+ rc = -1;
+ }
+
+ #elif APR_HAS_OPENLDAP_LDAPSDK
+
+ /* OpenLDAP SDK supports BASE64 files
+ */
+ if (st->cert_file_type == LDAP_CA_TYPE_BASE64)
+ {
+ rc = ldap_set_option(NULL, LDAP_OPT_X_TLS_CACERTFILE, st->cert_auth_file);
+ }
+ else
+ {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
+ "LDAP: Invalid LDAPTrustedCAType directive - "
+ "BASE64_FILE type required");
+ rc = -1;
+ }
+
+
+ #elif APR_HAS_MICROSOFT_LDAPSDK
+
+        /* The Microsoft SDK uses the registry certificate store - always
+         * assume support is available
+         */
+ rc = LDAP_SUCCESS;
+
+ #else
+ rc = -1;
+ #endif /* APR_HAS_NETSCAPE_LDAPSDK */
+
+ #else /* not compiled with SSL Support */
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: Not built with SSL support." );
+ rc = -1;
+
+ #endif /* APR_HAS_LDAP_SSL */
+
+ if (LDAP_SUCCESS == rc)
+ {
+ st->ssl_support = 1;
+ }
+ else
+ {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "LDAP: SSL initialization failed");
+ st->ssl_support = 0;
+ }
+ }
+
+ /* The Microsoft SDK uses the registry certificate store -
+ * always assume support is available
+ */
+ #if APR_HAS_MICROSOFT_LDAPSDK
+ st->ssl_support = 1;
+ #endif
+
+
+ /* log SSL status - If SSL isn't available it isn't necessarily
+ * an error because the modules asking for LDAP connections
+ * may not ask for SSL support
+ */
+ if (st->ssl_support)
+ {
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: SSL support available" );
+ }
+ else
+ {
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: SSL support unavailable" );
+ }
+
+ return(OK);
+}
+
+static void util_ldap_child_init(apr_pool_t *p, server_rec *s)
+{
+ apr_status_t sts;
+ util_ldap_state_t *st = ap_get_module_config(s->module_config, &ldap_module);
+
+ if (!st->util_ldap_cache_lock) return;
+
+ sts = apr_global_mutex_child_init(&st->util_ldap_cache_lock, st->lock_file, p);
+ if (sts != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, sts, s,
+ "Failed to initialise global mutex %s in child process %"
+ APR_PID_T_FMT
+ ".",
+ st->lock_file, getpid());
+ return;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, s,
+ "Initialisation of global mutex %s in child process %"
+ APR_PID_T_FMT
+ " successful.",
+ st->lock_file, getpid());
+ }
+}
+
+command_rec util_ldap_cmds[] = {
+ AP_INIT_TAKE1("LDAPSharedCacheSize", util_ldap_set_cache_bytes, NULL, RSRC_CONF,
+ "Sets the size of the shared memory cache in bytes. "
+ "Zero means disable the shared memory cache. Defaults to 100KB."),
+
+ AP_INIT_TAKE1("LDAPSharedCacheFile", util_ldap_set_cache_file, NULL, RSRC_CONF,
+                  "Sets the file of the shared memory cache. "
+                  "Nothing means disable the shared memory cache."),
+
+ AP_INIT_TAKE1("LDAPCacheEntries", util_ldap_set_cache_entries, NULL, RSRC_CONF,
+ "Sets the maximum number of entries that are possible in the LDAP "
+ "search cache. "
+ "Zero means no limit; -1 disables the cache. Defaults to 1024 entries."),
+
+ AP_INIT_TAKE1("LDAPCacheTTL", util_ldap_set_cache_ttl, NULL, RSRC_CONF,
+ "Sets the maximum time (in seconds) that an item can be cached in the LDAP "
+ "search cache. Zero means no limit. Defaults to 600 seconds (10 minutes)."),
+
+ AP_INIT_TAKE1("LDAPOpCacheEntries", util_ldap_set_opcache_entries, NULL, RSRC_CONF,
+ "Sets the maximum number of entries that are possible in the LDAP "
+ "compare cache. "
+ "Zero means no limit; -1 disables the cache. Defaults to 1024 entries."),
+
+ AP_INIT_TAKE1("LDAPOpCacheTTL", util_ldap_set_opcache_ttl, NULL, RSRC_CONF,
+ "Sets the maximum time (in seconds) that an item is cached in the LDAP "
+ "operation cache. Zero means no limit. Defaults to 600 seconds (10 minutes)."),
+
+ AP_INIT_TAKE1("LDAPTrustedCA", util_ldap_set_cert_auth, NULL, RSRC_CONF,
+ "Sets the file containing the trusted Certificate Authority certificate. "
+ "Used to validate the LDAP server certificate for SSL connections."),
+
+ AP_INIT_TAKE1("LDAPTrustedCAType", util_ldap_set_cert_type, NULL, RSRC_CONF,
+ "Specifies the type of the Certificate Authority file. "
+ "The following types are supported: "
+ " DER_FILE - file in binary DER format "
+ " BASE64_FILE - file in Base64 format "
+ " CERT7_DB_PATH - Netscape certificate database file "),
+
+ AP_INIT_TAKE1("LDAPConnectionTimeout", util_ldap_set_connection_timeout, NULL, RSRC_CONF,
+ "Specifies the LDAP socket connection timeout in seconds. "
+ "Default is 10 seconds. "),
+
+ {NULL}
+};
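+
+/*
+ * Illustrative httpd.conf fragment (not part of this file); the values
+ * simply restate the documented defaults from the directive table above:
+ *
+ *     LDAPSharedCacheSize   100000
+ *     LDAPCacheEntries      1024
+ *     LDAPCacheTTL          600
+ *     LDAPOpCacheEntries    1024
+ *     LDAPOpCacheTTL        600
+ *     LDAPConnectionTimeout 10
+ */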
+
+static void util_ldap_register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(util_ldap_post_config,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_handler(util_ldap_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_child_init(util_ldap_child_init, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module ldap_module = {
+ STANDARD20_MODULE_STUFF,
+    NULL,                        /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ util_ldap_create_config, /* server config */
+ NULL, /* merge server config */
+ util_ldap_cmds, /* command table */
+ util_ldap_register_hooks, /* set up request processing hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.def b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.def
new file mode 100644
index 00000000..f3ca3264
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.def
@@ -0,0 +1,7 @@
+EXPORT ldap_module
+EXPORT util_ldap_connection_find
+EXPORT util_ldap_connection_close
+EXPORT util_ldap_cache_checkuserid
+EXPORT util_ldap_cache_getuserdn
+EXPORT util_ldap_cache_compare
+EXPORT util_ldap_cache_comparedn
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.dsp
new file mode 100644
index 00000000..ad33d824
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.dsp
@@ -0,0 +1,140 @@
+# Microsoft Developer Studio Project File - Name="util_ldap" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=util_ldap - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "util_ldap.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "util_ldap.mak" CFG="util_ldap - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "util_ldap - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "util_ldap - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "util_ldap - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "LDAP_DECLARE_EXPORT" /Fd"Release\util_ldap_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/util_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,util_ldap.so
+# ADD LINK32 kernel32.lib wldap32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/util_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,util_ldap.so /opt:ref
+
+!ELSEIF "$(CFG)" == "util_ldap - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "LDAP_DECLARE_EXPORT" /Fd"Debug\util_ldap_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/util_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,util_ldap.so
+# ADD LINK32 kernel32.lib wldap32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/util_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,util_ldap.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "util_ldap - Win32 Release"
+# Name "util_ldap - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\util_ldap.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\util_ldap.rc
+# End Source File
+# Begin Source File
+
+SOURCE=.\util_ldap_cache.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\util_ldap_cache.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\util_ldap_cache_mgr.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "util_ldap - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\util_ldap.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk util_ldap.so "LDAP Utility Module for Apache" ../../include/ap_release.h > .\util_ldap.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "util_ldap - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\util_ldap.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk util_ldap.so "LDAP Utility Module for Apache" ../../include/ap_release.h > .\util_ldap.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.c b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.c
new file mode 100644
index 00000000..8f6062bb
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.c
@@ -0,0 +1,450 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * util_ldap_cache.c: LDAP cache things
+ *
+ * Original code from auth_ldap module for Apache v1.3:
+ * Copyright 1998, 1999 Enbridge Pipelines Inc.
+ * Copyright 1999-2001 Dave Carrigan
+ */
+
+#include <apr_ldap.h>
+#include <apr_strings.h>
+#include "util_ldap.h"
+#include "util_ldap_cache.h"
+
+#ifdef APU_HAS_LDAP
+
+#if APR_HAS_SHARED_MEMORY
+#define MODLDAP_SHMEM_CACHE "/tmp/mod_ldap_cache"
+#endif
+
+/* ------------------------------------------------------------------ */
+
+unsigned long util_ldap_url_node_hash(void *n)
+{
+ util_url_node_t *node = (util_url_node_t *)n;
+ return util_ald_hash_string(1, node->url);
+}
+
+int util_ldap_url_node_compare(void *a, void *b)
+{
+ util_url_node_t *na = (util_url_node_t *)a;
+ util_url_node_t *nb = (util_url_node_t *)b;
+
+ return(strcmp(na->url, nb->url) == 0);
+}
+
+void *util_ldap_url_node_copy(util_ald_cache_t *cache, void *c)
+{
+ util_url_node_t *n = (util_url_node_t *)c;
+ util_url_node_t *node = (util_url_node_t *)util_ald_alloc(cache, sizeof(util_url_node_t));
+
+ if (node) {
+ if (!(node->url = util_ald_strdup(cache, n->url))) {
+ util_ald_free(cache, node->url);
+ return NULL;
+ }
+ node->search_cache = n->search_cache;
+ node->compare_cache = n->compare_cache;
+ node->dn_compare_cache = n->dn_compare_cache;
+ return node;
+ }
+ else {
+ return NULL;
+ }
+}
+
+void util_ldap_url_node_free(util_ald_cache_t *cache, void *n)
+{
+ util_url_node_t *node = (util_url_node_t *)n;
+
+ util_ald_free(cache, node->url);
+ util_ald_destroy_cache(node->search_cache);
+ util_ald_destroy_cache(node->compare_cache);
+ util_ald_destroy_cache(node->dn_compare_cache);
+ util_ald_free(cache, node);
+}
+
+void util_ldap_url_node_display(request_rec *r, util_ald_cache_t *cache, void *n)
+{
+ util_url_node_t *node = (util_url_node_t *)n;
+ char date_str[APR_CTIME_LEN+1];
+ char *buf;
+ const char *type_str;
+ util_ald_cache_t *cache_node;
+ int x;
+
+ for (x=0;x<3;x++) {
+ switch (x) {
+ case 0:
+ cache_node = node->search_cache;
+ type_str = "Searches";
+ break;
+ case 1:
+ cache_node = node->compare_cache;
+ type_str = "Compares";
+ break;
+ case 2:
+ cache_node = node->dn_compare_cache;
+ type_str = "DN Compares";
+ break;
+ }
+
+ if (cache_node->marktime) {
+ apr_ctime(date_str, cache_node->marktime);
+ }
+ else
+ date_str[0] = 0;
+
+ buf = apr_psprintf(r->pool,
+ "<tr valign='top'>"
+ "<td nowrap>%s (%s)</td>"
+ "<td nowrap>%ld</td>"
+ "<td nowrap>%ld</td>"
+ "<td nowrap>%ld</td>"
+ "<td nowrap>%ld</td>"
+ "<td nowrap>%s</td>"
+ "<tr>",
+ node->url,
+ type_str,
+ cache_node->size,
+ cache_node->maxentries,
+ cache_node->numentries,
+ cache_node->fullmark,
+ date_str);
+
+ ap_rputs(buf, r);
+ }
+
+}
+
+/* ------------------------------------------------------------------ */
+
+/* Cache functions for search nodes */
+unsigned long util_ldap_search_node_hash(void *n)
+{
+ util_search_node_t *node = (util_search_node_t *)n;
+ return util_ald_hash_string(1, ((util_search_node_t *)(node))->username);
+}
+
+int util_ldap_search_node_compare(void *a, void *b)
+{
+ return(strcmp(((util_search_node_t *)a)->username,
+ ((util_search_node_t *)b)->username) == 0);
+}
+
+void *util_ldap_search_node_copy(util_ald_cache_t *cache, void *c)
+{
+ util_search_node_t *node = (util_search_node_t *)c;
+ util_search_node_t *newnode = util_ald_alloc(cache, sizeof(util_search_node_t));
+
+ /* safety check */
+ if (newnode) {
+
+ /* copy vals */
+ if (node->vals) {
+ int k = node->numvals;
+ int i = 0;
+ if (!(newnode->vals = util_ald_alloc(cache, sizeof(char *) * (k+1)))) {
+ util_ldap_search_node_free(cache, newnode);
+ return NULL;
+ }
+ newnode->numvals = node->numvals;
+ for (;k;k--) {
+ if (node->vals[i]) {
+ if (!(newnode->vals[i] = util_ald_strdup(cache, node->vals[i]))) {
+ util_ldap_search_node_free(cache, newnode);
+ return NULL;
+ }
+ }
+ else
+ newnode->vals[i] = NULL;
+ i++;
+ }
+ }
+ else {
+ newnode->vals = NULL;
+ }
+ if (!(newnode->username = util_ald_strdup(cache, node->username)) ||
+ !(newnode->dn = util_ald_strdup(cache, node->dn)) ) {
+ util_ldap_search_node_free(cache, newnode);
+ return NULL;
+ }
+ if(node->bindpw) {
+ if(!(newnode->bindpw = util_ald_strdup(cache, node->bindpw))) {
+ util_ldap_search_node_free(cache, newnode);
+ return NULL;
+ }
+ } else {
+ newnode->bindpw = NULL;
+ }
+ newnode->lastbind = node->lastbind;
+
+ }
+ return (void *)newnode;
+}
+
+void util_ldap_search_node_free(util_ald_cache_t *cache, void *n)
+{
+ int i = 0;
+ util_search_node_t *node = (util_search_node_t *)n;
+ int k = node->numvals;
+
+ if (node->vals) {
+ for (;k;k--,i++) {
+ if (node->vals[i]) {
+ util_ald_free(cache, node->vals[i]);
+ }
+ }
+ util_ald_free(cache, node->vals);
+ }
+ util_ald_free(cache, node->username);
+ util_ald_free(cache, node->dn);
+ util_ald_free(cache, node->bindpw);
+ util_ald_free(cache, node);
+}
+
+void util_ldap_search_node_display(request_rec *r, util_ald_cache_t *cache, void *n)
+{
+ util_search_node_t *node = (util_search_node_t *)n;
+ char date_str[APR_CTIME_LEN+1];
+ char *buf;
+
+ apr_ctime(date_str, node->lastbind);
+
+ buf = apr_psprintf(r->pool,
+ "<tr valign='top'>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<tr>",
+ node->username,
+ node->dn,
+ date_str);
+
+ ap_rputs(buf, r);
+}
+
+/* ------------------------------------------------------------------ */
+
+unsigned long util_ldap_compare_node_hash(void *n)
+{
+ util_compare_node_t *node = (util_compare_node_t *)n;
+ return util_ald_hash_string(3, node->dn, node->attrib, node->value);
+}
+
+int util_ldap_compare_node_compare(void *a, void *b)
+{
+ util_compare_node_t *na = (util_compare_node_t *)a;
+ util_compare_node_t *nb = (util_compare_node_t *)b;
+ return (strcmp(na->dn, nb->dn) == 0 &&
+ strcmp(na->attrib, nb->attrib) == 0 &&
+ strcmp(na->value, nb->value) == 0);
+}
+
+void *util_ldap_compare_node_copy(util_ald_cache_t *cache, void *c)
+{
+ util_compare_node_t *n = (util_compare_node_t *)c;
+ util_compare_node_t *node = (util_compare_node_t *)util_ald_alloc(cache, sizeof(util_compare_node_t));
+
+ if (node) {
+ if (!(node->dn = util_ald_strdup(cache, n->dn)) ||
+ !(node->attrib = util_ald_strdup(cache, n->attrib)) ||
+ !(node->value = util_ald_strdup(cache, n->value))) {
+ util_ldap_compare_node_free(cache, node);
+ return NULL;
+ }
+ node->lastcompare = n->lastcompare;
+ node->result = n->result;
+ return node;
+ }
+ else {
+ return NULL;
+ }
+}
+
+void util_ldap_compare_node_free(util_ald_cache_t *cache, void *n)
+{
+ util_compare_node_t *node = (util_compare_node_t *)n;
+ util_ald_free(cache, node->dn);
+ util_ald_free(cache, node->attrib);
+ util_ald_free(cache, node->value);
+ util_ald_free(cache, node);
+}
+
+void util_ldap_compare_node_display(request_rec *r, util_ald_cache_t *cache, void *n)
+{
+ util_compare_node_t *node = (util_compare_node_t *)n;
+ char date_str[APR_CTIME_LEN+1];
+ char *buf, *cmp_result;
+
+ apr_ctime(date_str, node->lastcompare);
+
+ if (node->result == LDAP_COMPARE_TRUE) {
+ cmp_result = "LDAP_COMPARE_TRUE";
+ }
+ else if (node->result == LDAP_COMPARE_FALSE) {
+ cmp_result = "LDAP_COMPARE_FALSE";
+ }
+ else {
+ cmp_result = apr_itoa(r->pool, node->result);
+ }
+
+ buf = apr_psprintf(r->pool,
+ "<tr valign='top'>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<tr>",
+ node->dn,
+ node->attrib,
+ node->value,
+ date_str,
+ cmp_result);
+
+ ap_rputs(buf, r);
+}
+
+/* ------------------------------------------------------------------ */
+
+unsigned long util_ldap_dn_compare_node_hash(void *n)
+{
+ return util_ald_hash_string(1, ((util_dn_compare_node_t *)n)->reqdn);
+}
+
+int util_ldap_dn_compare_node_compare(void *a, void *b)
+{
+ return (strcmp(((util_dn_compare_node_t *)a)->reqdn,
+ ((util_dn_compare_node_t *)b)->reqdn) == 0);
+}
+
+void *util_ldap_dn_compare_node_copy(util_ald_cache_t *cache, void *c)
+{
+ util_dn_compare_node_t *n = (util_dn_compare_node_t *)c;
+ util_dn_compare_node_t *node = (util_dn_compare_node_t *)util_ald_alloc(cache, sizeof(util_dn_compare_node_t));
+ if (node) {
+ if (!(node->reqdn = util_ald_strdup(cache, n->reqdn)) ||
+ !(node->dn = util_ald_strdup(cache, n->dn))) {
+ util_ldap_dn_compare_node_free(cache, node);
+ return NULL;
+ }
+ return node;
+ }
+ else {
+ return NULL;
+ }
+}
+
+void util_ldap_dn_compare_node_free(util_ald_cache_t *cache, void *n)
+{
+ util_dn_compare_node_t *node = (util_dn_compare_node_t *)n;
+ util_ald_free(cache, node->reqdn);
+ util_ald_free(cache, node->dn);
+ util_ald_free(cache, node);
+}
+
+void util_ldap_dn_compare_node_display(request_rec *r, util_ald_cache_t *cache, void *n)
+{
+ util_dn_compare_node_t *node = (util_dn_compare_node_t *)n;
+ char *buf;
+
+ buf = apr_psprintf(r->pool,
+ "<tr valign='top'>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<tr>",
+ node->reqdn,
+ node->dn);
+
+ ap_rputs(buf, r);
+}
+
+
+/* ------------------------------------------------------------------ */
+apr_status_t util_ldap_cache_child_kill(void *data);
+apr_status_t util_ldap_cache_module_kill(void *data);
+
+apr_status_t util_ldap_cache_module_kill(void *data)
+{
+ util_ldap_state_t *st = (util_ldap_state_t *)data;
+
+ util_ald_destroy_cache(st->util_ldap_cache);
+#if APR_HAS_SHARED_MEMORY
+ if (st->cache_rmm != NULL) {
+ apr_rmm_destroy (st->cache_rmm);
+ st->cache_rmm = NULL;
+ }
+ if (st->cache_shm != NULL) {
+ apr_status_t result = apr_shm_destroy(st->cache_shm);
+ st->cache_shm = NULL;
+ apr_file_remove(st->cache_file, st->pool);
+ return result;
+ }
+#endif
+ return APR_SUCCESS;
+}
+
+apr_status_t util_ldap_cache_init(apr_pool_t *pool, util_ldap_state_t *st)
+{
+#if APR_HAS_SHARED_MEMORY
+ apr_status_t result;
+ apr_size_t size;
+
+ size = APR_ALIGN_DEFAULT(st->cache_bytes);
+
+ result = apr_shm_create(&st->cache_shm, size, st->cache_file, st->pool);
+ if (result == APR_EEXIST) {
+ /*
+ * The cache could have already been created (i.e. we may be a child process). See
+ * if we can attach to the existing shared memory
+ */
+ result = apr_shm_attach(&st->cache_shm, st->cache_file, st->pool);
+ }
+ if (result != APR_SUCCESS) {
+ return result;
+ }
+
+ /* Determine the usable size of the shm segment. */
+ size = apr_shm_size_get(st->cache_shm);
+
+ /* This will create a rmm "handler" to get into the shared memory area */
+ result = apr_rmm_init(&st->cache_rmm, NULL,
+ apr_shm_baseaddr_get(st->cache_shm), size,
+ st->pool);
+ if (result != APR_SUCCESS) {
+ return result;
+ }
+
+#endif
+
+ apr_pool_cleanup_register(st->pool, st , util_ldap_cache_module_kill, apr_pool_cleanup_null);
+
+ st->util_ldap_cache =
+ util_ald_create_cache(st,
+ util_ldap_url_node_hash,
+ util_ldap_url_node_compare,
+ util_ldap_url_node_copy,
+ util_ldap_url_node_free,
+ util_ldap_url_node_display);
+ return APR_SUCCESS;
+}
+
+
+#endif /* APU_HAS_LDAP */
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.h b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.h
new file mode 100644
index 00000000..2c1c09c1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.h
@@ -0,0 +1,193 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef APU_LDAP_CACHE_H
+#define APU_LDAP_CACHE_H
+
+/*
+ * This switches LDAP support on or off.
+ */
+
+/* this whole thing disappears if LDAP is not enabled */
+#ifdef APU_HAS_LDAP
+
+
+/*
+ * LDAP Cache Manager
+ */
+
+#if APR_HAS_SHARED_MEMORY
+#include <apr_shm.h>
+#include <apr_rmm.h> /* EDD */
+#endif
+
+typedef struct util_cache_node_t {
+ void *payload; /* Pointer to the payload */
+ apr_time_t add_time; /* Time node was added to cache */
+ struct util_cache_node_t *next;
+} util_cache_node_t;
+
+typedef struct util_ald_cache util_ald_cache_t;
+
+struct util_ald_cache {
+ unsigned long size; /* Size of cache array */
+ unsigned long maxentries; /* Maximum number of cache entries */
+ unsigned long numentries; /* Current number of cache entries */
+ unsigned long fullmark; /* Used to keep track of when cache becomes 3/4 full */
+ apr_time_t marktime; /* Time that the cache became 3/4 full */
+ unsigned long (*hash)(void *); /* Func to hash the payload */
+ int (*compare)(void *, void *); /* Func to compare two payloads */
+ void * (*copy)(util_ald_cache_t *cache, void *); /* Func to alloc mem and copy payload to new mem */
+ void (*free)(util_ald_cache_t *cache, void *); /* Func to free mem used by the payload */
+ void (*display)(request_rec *r, util_ald_cache_t *cache, void *); /* Func to display the payload contents */
+ util_cache_node_t **nodes;
+
+ unsigned long numpurges; /* No. of times the cache has been purged */
+ double avg_purgetime; /* Average time to purge the cache */
+ apr_time_t last_purge; /* Time of the last purge */
+ unsigned long npurged; /* Number of elements purged in last purge. This is not
+ obvious: it won't be 3/4 the size of the cache if
+ there were a lot of expired entries. */
+
+ unsigned long fetches; /* Number of fetches */
+ unsigned long hits; /* Number of cache hits */
+ unsigned long inserts; /* Number of inserts */
+ unsigned long removes; /* Number of removes */
+
+#if APR_HAS_SHARED_MEMORY
+ apr_shm_t *shm_addr;
+ apr_rmm_t *rmm_addr;
+#endif
+
+};
+
+#ifndef WIN32
+#define ALD_MM_FILE_MODE ( S_IRUSR|S_IWUSR )
+#else
+#define ALD_MM_FILE_MODE ( _S_IREAD|_S_IWRITE )
+#endif
+
+
+/*
+ * LDAP Cache
+ */
+
+/*
+ * Maintain a cache of LDAP URLs that the server handles. Each node in
+ * the cache contains the search cache for that URL, and a compare cache
+ * for the URL. The compare cache is populated when doing require group
+ * compares.
+ */
+typedef struct util_url_node_t {
+ const char *url;
+ util_ald_cache_t *search_cache;
+ util_ald_cache_t *compare_cache;
+ util_ald_cache_t *dn_compare_cache;
+} util_url_node_t;
+
+/*
+ * We cache every successful search and bind operation, using the username
+ * as the key. Each node in the cache contains the returned DN, plus the
+ * password used to bind.
+ */
+typedef struct util_search_node_t {
+ const char *username; /* Cache key */
+ const char *dn; /* DN returned from search */
+ const char *bindpw; /* The most recently used bind password;
+ NULL if the bind failed */
+ apr_time_t lastbind; /* Time of last successful bind */
+ const char **vals; /* Values of queried attributes */
+ int numvals; /* Number of queried attributes */
+} util_search_node_t;
+
+/*
+ * We cache every successful compare operation, using the DN, attrib, and
+ * value as the key.
+ */
+typedef struct util_compare_node_t {
+ const char *dn; /* DN, attrib and value combine to be the key */
+ const char *attrib;
+ const char *value;
+ apr_time_t lastcompare;
+ int result;
+} util_compare_node_t;
+
+/*
+ * We cache every successful compare dn operation, using the dn in the require
+ * statement and the dn fetched based on the client-provided username.
+ */
+typedef struct util_dn_compare_node_t {
+ const char *reqdn; /* The DN in the require dn statement */
+ const char *dn; /* The DN found in the search */
+} util_dn_compare_node_t;
+
+
+/*
+ * Function prototypes for LDAP cache
+ */
+
+/* util_ldap_cache.c */
+unsigned long util_ldap_url_node_hash(void *n);
+int util_ldap_url_node_compare(void *a, void *b);
+void *util_ldap_url_node_copy(util_ald_cache_t *cache, void *c);
+void util_ldap_url_node_free(util_ald_cache_t *cache, void *n);
+void util_ldap_url_node_display(request_rec *r, util_ald_cache_t *cache, void *n);
+
+unsigned long util_ldap_search_node_hash(void *n);
+int util_ldap_search_node_compare(void *a, void *b);
+void *util_ldap_search_node_copy(util_ald_cache_t *cache, void *c);
+void util_ldap_search_node_free(util_ald_cache_t *cache, void *n);
+void util_ldap_search_node_display(request_rec *r, util_ald_cache_t *cache, void *n);
+
+unsigned long util_ldap_compare_node_hash(void *n);
+int util_ldap_compare_node_compare(void *a, void *b);
+void *util_ldap_compare_node_copy(util_ald_cache_t *cache, void *c);
+void util_ldap_compare_node_free(util_ald_cache_t *cache, void *n);
+void util_ldap_compare_node_display(request_rec *r, util_ald_cache_t *cache, void *n);
+
+unsigned long util_ldap_dn_compare_node_hash(void *n);
+int util_ldap_dn_compare_node_compare(void *a, void *b);
+void *util_ldap_dn_compare_node_copy(util_ald_cache_t *cache, void *c);
+void util_ldap_dn_compare_node_free(util_ald_cache_t *cache, void *n);
+void util_ldap_dn_compare_node_display(request_rec *r, util_ald_cache_t *cache, void *n);
+
+
+/* util_ldap_cache_mgr.c */
+
+/* Cache alloc and free functions, which use shared memory when it is available */
+void util_ald_free(util_ald_cache_t *cache, const void *ptr);
+void *util_ald_alloc(util_ald_cache_t *cache, unsigned long size);
+const char *util_ald_strdup(util_ald_cache_t *cache, const char *s);
+
+/* Cache management functions */
+unsigned long util_ald_hash_string(int nstr, ...);
+void util_ald_cache_purge(util_ald_cache_t *cache);
+util_url_node_t *util_ald_create_caches(util_ldap_state_t *s, const char *url);
+util_ald_cache_t *util_ald_create_cache(util_ldap_state_t *st,
+ unsigned long (*hashfunc)(void *),
+ int (*comparefunc)(void *, void *),
+ void * (*copyfunc)(util_ald_cache_t *cache, void *),
+ void (*freefunc)(util_ald_cache_t *cache, void *),
+ void (*displayfunc)(request_rec *r, util_ald_cache_t *cache, void *));
+
+void util_ald_destroy_cache(util_ald_cache_t *cache);
+void *util_ald_cache_fetch(util_ald_cache_t *cache, void *payload);
+void *util_ald_cache_insert(util_ald_cache_t *cache, void *payload);
+void util_ald_cache_remove(util_ald_cache_t *cache, void *payload);
+char *util_ald_cache_display_stats(request_rec *r, util_ald_cache_t *cache, char *name, char *id);
+
+#endif /* APU_HAS_LDAP */
+#endif /* APU_LDAP_CACHE_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache_mgr.c b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache_mgr.c
new file mode 100644
index 00000000..178ac185
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache_mgr.c
@@ -0,0 +1,762 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * util_ldap_cache_mgr.c: LDAP cache manager routines
+ *
+ * Original code from auth_ldap module for Apache v1.3:
+ * Copyright 1998, 1999 Enbridge Pipelines Inc.
+ * Copyright 1999-2001 Dave Carrigan
+ */
+
+#include <apr_ldap.h>
+#include "util_ldap.h"
+#include "util_ldap_cache.h"
+#include <apr_strings.h>
+
+#ifdef APU_HAS_LDAP
+
+/* only here until strdup is gone */
+#include <string.h>
+
+/* only here until malloc is gone */
+#include <stdlib.h>
+
+static const unsigned long primes[] =
+{
+ 11,
+ 19,
+ 37,
+ 73,
+ 109,
+ 163,
+ 251,
+ 367,
+ 557,
+ 823,
+ 1237,
+ 1861,
+ 2777,
+ 4177,
+ 6247,
+ 9371,
+ 14057,
+ 21089,
+ 31627,
+ 47431,
+ 71143,
+ 106721,
+ 160073,
+ 240101,
+ 360163,
+ 540217,
+ 810343,
+ 1215497,
+ 1823231,
+ 2734867,
+ 4102283,
+ 6153409,
+ 9230113,
+ 13845163,
+ 0
+};
+
+void util_ald_free(util_ald_cache_t *cache, const void *ptr)
+{
+#if APR_HAS_SHARED_MEMORY
+ if (cache->rmm_addr) {
+ if (ptr)
+ /* Free in shared memory */
+ apr_rmm_free(cache->rmm_addr, apr_rmm_offset_get(cache->rmm_addr, (void *)ptr));
+ }
+ else {
+ if (ptr)
+ /* Cache shm is not used */
+ free((void *)ptr);
+ }
+#else
+ if (ptr)
+ free((void *)ptr);
+#endif
+}
+
+void *util_ald_alloc(util_ald_cache_t *cache, unsigned long size)
+{
+ if (0 == size)
+ return NULL;
+#if APR_HAS_SHARED_MEMORY
+ if (cache->rmm_addr) {
+ /* allocate from shared memory */
+ apr_rmm_off_t block = apr_rmm_calloc(cache->rmm_addr, size);
+ return block ? (void *)apr_rmm_addr_get(cache->rmm_addr, block) : NULL;
+ }
+ else {
+ /* Cache shm is not used */
+ return (void *)calloc(sizeof(char), size);
+ }
+#else
+ return (void *)calloc(sizeof(char), size);
+#endif
+}
+
+const char *util_ald_strdup(util_ald_cache_t *cache, const char *s)
+{
+#if APR_HAS_SHARED_MEMORY
+ if (cache->rmm_addr) {
+ /* allocate from shared memory */
+ apr_rmm_off_t block = apr_rmm_calloc(cache->rmm_addr, strlen(s)+1);
+ char *buf = block ? (char *)apr_rmm_addr_get(cache->rmm_addr, block) : NULL;
+ if (buf) {
+ strcpy(buf, s);
+ return buf;
+ }
+ else {
+ return NULL;
+ }
+ } else {
+ /* Cache shm is not used */
+ return strdup(s);
+ }
+#else
+ return strdup(s);
+#endif
+}
+
+
+/*
+ * Computes a hash over a set of strings. The first argument is the number
+ * of strings to hash; the remaining arguments are the strings themselves.
+ * The algorithm is taken from glibc.
+ */
+unsigned long util_ald_hash_string(int nstr, ...)
+{
+ int i;
+ va_list args;
+ unsigned long h=0, g;
+ char *str, *p;
+
+ va_start(args, nstr);
+ for (i=0; i < nstr; ++i) {
+ str = va_arg(args, char *);
+ for (p = str; *p; ++p) {
+ h = ( h << 4 ) + *p;
+ if ( ( g = h & 0xf0000000 ) ) {
+ h = h ^ (g >> 24);
+ h = h ^ g;
+ }
+ }
+ }
+ va_end(args);
+
+ return h;
+}
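+
+/*
+ * Usage sketch (illustrative): the per-node hash callbacks are expected to
+ * feed their key fields through this function; for example, a compare node
+ * keyed on DN, attribute and value might compute
+ *
+ *   h = util_ald_hash_string(3, node->dn, node->attrib, node->value);
+ */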
+
+
+/*
+  Purges a cache that has become full. We record the time at which the
+  entry that made the cache 3/4 full was added, then delete every entry
+  added before that time. It is simplistic, but the purge itself is only
+  O(n), which matters more.
+*/
+void util_ald_cache_purge(util_ald_cache_t *cache)
+{
+ unsigned long i;
+ util_cache_node_t *p, *q, **pp;
+ apr_time_t t;
+
+ if (!cache)
+ return;
+
+ cache->last_purge = apr_time_now();
+ cache->npurged = 0;
+ cache->numpurges++;
+
+ for (i=0; i < cache->size; ++i) {
+ pp = cache->nodes + i;
+ p = *pp;
+ while (p != NULL) {
+ if (p->add_time < cache->marktime) {
+ q = p->next;
+ (*cache->free)(cache, p->payload);
+ util_ald_free(cache, p);
+ cache->numentries--;
+ cache->npurged++;
+ p = *pp = q;
+ }
+ else {
+ pp = &(p->next);
+ p = *pp;
+ }
+ }
+ }
+
+ t = apr_time_now();
+ cache->avg_purgetime =
+ ((t - cache->last_purge) + (cache->avg_purgetime * (cache->numpurges-1))) /
+ cache->numpurges;
+}
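+
+/*
+ * Worked example: with maxentries = 1024 the full mark is 1024 / 4 * 3 = 768.
+ * The insert that brings the cache to 768 entries records marktime, and a
+ * later purge drops every node whose add_time predates that marktime in a
+ * single O(n) sweep over the hash chains.
+ */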
+
+
+/*
+ * create caches
+ */
+util_url_node_t *util_ald_create_caches(util_ldap_state_t *st, const char *url)
+{
+ util_url_node_t curl, *newcurl = NULL;
+ util_ald_cache_t *search_cache;
+ util_ald_cache_t *compare_cache;
+ util_ald_cache_t *dn_compare_cache;
+
+ /* create the three caches */
+ search_cache = util_ald_create_cache(st,
+ util_ldap_search_node_hash,
+ util_ldap_search_node_compare,
+ util_ldap_search_node_copy,
+ util_ldap_search_node_free,
+ util_ldap_search_node_display);
+ compare_cache = util_ald_create_cache(st,
+ util_ldap_compare_node_hash,
+ util_ldap_compare_node_compare,
+ util_ldap_compare_node_copy,
+ util_ldap_compare_node_free,
+ util_ldap_compare_node_display);
+ dn_compare_cache = util_ald_create_cache(st,
+ util_ldap_dn_compare_node_hash,
+ util_ldap_dn_compare_node_compare,
+ util_ldap_dn_compare_node_copy,
+ util_ldap_dn_compare_node_free,
+ util_ldap_dn_compare_node_display);
+
+ /* check that all the caches initialised successfully */
+ if (search_cache && compare_cache && dn_compare_cache) {
+
+        /* The contents of this structure will be duplicated in shared
+           memory during the insert, so use stack memory rather than
+           pool memory to avoid a memory leak. */
+ memset (&curl, 0, sizeof(util_url_node_t));
+ curl.url = url;
+ curl.search_cache = search_cache;
+ curl.compare_cache = compare_cache;
+ curl.dn_compare_cache = dn_compare_cache;
+
+ newcurl = util_ald_cache_insert(st->util_ldap_cache, &curl);
+
+ }
+
+ return newcurl;
+}
+
+
+util_ald_cache_t *util_ald_create_cache(util_ldap_state_t *st,
+ unsigned long (*hashfunc)(void *),
+ int (*comparefunc)(void *, void *),
+ void * (*copyfunc)(util_ald_cache_t *cache, void *),
+ void (*freefunc)(util_ald_cache_t *cache, void *),
+ void (*displayfunc)(request_rec *r, util_ald_cache_t *cache, void *))
+{
+ util_ald_cache_t *cache;
+ unsigned long i;
+
+ if (st->search_cache_size <= 0)
+ return NULL;
+
+#if APR_HAS_SHARED_MEMORY
+ if (!st->cache_rmm) {
+ return NULL;
+ }
+ else {
+ apr_rmm_off_t block = apr_rmm_calloc(st->cache_rmm, sizeof(util_ald_cache_t));
+ cache = block ? (util_ald_cache_t *)apr_rmm_addr_get(st->cache_rmm, block) : NULL;
+ }
+#else
+ cache = (util_ald_cache_t *)calloc(sizeof(util_ald_cache_t), 1);
+#endif
+ if (!cache)
+ return NULL;
+
+#if APR_HAS_SHARED_MEMORY
+ cache->rmm_addr = st->cache_rmm;
+ cache->shm_addr = st->cache_shm;
+#endif
+ cache->maxentries = st->search_cache_size;
+ cache->numentries = 0;
+ cache->size = st->search_cache_size / 3;
+ if (cache->size < 64) cache->size = 64;
+ for (i = 0; primes[i] && primes[i] < cache->size; ++i) ;
+ cache->size = primes[i]? primes[i] : primes[i-1];
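+    /* Example: a search_cache_size of 1024 gives an initial size of 341,
+     * which the loop above rounds up to the next prime in the table, 367. */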
+
+ cache->nodes = (util_cache_node_t **)util_ald_alloc(cache, cache->size * sizeof(util_cache_node_t *));
+ if (!cache->nodes) {
+ util_ald_free(cache, cache);
+ return NULL;
+ }
+
+ for (i=0; i < cache->size; ++i)
+ cache->nodes[i] = NULL;
+
+ cache->hash = hashfunc;
+ cache->compare = comparefunc;
+ cache->copy = copyfunc;
+ cache->free = freefunc;
+ cache->display = displayfunc;
+
+ cache->fullmark = cache->maxentries / 4 * 3;
+ cache->marktime = 0;
+ cache->avg_purgetime = 0.0;
+ cache->numpurges = 0;
+ cache->last_purge = 0;
+ cache->npurged = 0;
+
+ cache->fetches = 0;
+ cache->hits = 0;
+ cache->inserts = 0;
+ cache->removes = 0;
+
+ return cache;
+}
+
+void util_ald_destroy_cache(util_ald_cache_t *cache)
+{
+ unsigned long i;
+ util_cache_node_t *p, *q;
+
+ if (cache == NULL)
+ return;
+
+ for (i = 0; i < cache->size; ++i) {
+ p = cache->nodes[i];
+ q = NULL;
+ while (p != NULL) {
+ q = p->next;
+ (*cache->free)(cache, p->payload);
+ util_ald_free(cache, p);
+ p = q;
+ }
+ }
+ util_ald_free(cache, cache->nodes);
+ util_ald_free(cache, cache);
+}
+
+void *util_ald_cache_fetch(util_ald_cache_t *cache, void *payload)
+{
+ int hashval;
+ util_cache_node_t *p;
+
+ if (cache == NULL)
+ return NULL;
+
+ cache->fetches++;
+
+ hashval = (*cache->hash)(payload) % cache->size;
+ for (p = cache->nodes[hashval];
+ p && !(*cache->compare)(p->payload, payload);
+ p = p->next) ;
+
+ if (p != NULL) {
+ cache->hits++;
+ return p->payload;
+ }
+ else {
+ return NULL;
+ }
+}
+
+/*
+ * Insert an item into the cache.
+ * *** Does not catch duplicates!!! ***
+ */
+void *util_ald_cache_insert(util_ald_cache_t *cache, void *payload)
+{
+ int hashval;
+ util_cache_node_t *node;
+
+ /* sanity check */
+ if (cache == NULL || payload == NULL) {
+ return NULL;
+ }
+
+ /* check if we are full - if so, try purge */
+ if (cache->numentries >= cache->maxentries) {
+ util_ald_cache_purge(cache);
+ if (cache->numentries >= cache->maxentries) {
+ /* if the purge was not effective, we leave now to avoid an overflow */
+ return NULL;
+ }
+ }
+
+ /* should be safe to add an entry */
+ if ((node = (util_cache_node_t *)util_ald_alloc(cache, sizeof(util_cache_node_t))) == NULL) {
+ return NULL;
+ }
+
+    /* Take a copy of the payload before proceeding. */
+ payload = (*cache->copy)(cache, payload);
+ if (!payload) {
+ util_ald_free(cache, node);
+ return NULL;
+ }
+
+ /* populate the entry */
+ cache->inserts++;
+ hashval = (*cache->hash)(payload) % cache->size;
+ node->add_time = apr_time_now();
+ node->payload = payload;
+ node->next = cache->nodes[hashval];
+ cache->nodes[hashval] = node;
+
+ /* if we reach the full mark, note the time we did so
+ * for the benefit of the purge function
+ */
+ if (++cache->numentries == cache->fullmark) {
+ cache->marktime=apr_time_now();
+ }
+
+ return node->payload;
+}
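+
+/*
+ * Typical usage (illustrative sketch): since duplicates are not detected,
+ * callers are expected to fetch first and insert only on a miss, roughly:
+ *
+ *   node = util_ald_cache_fetch(cache, &key);
+ *   if (node == NULL) {
+ *       node = util_ald_cache_insert(cache, &newnode);
+ *   }
+ */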
+
+void util_ald_cache_remove(util_ald_cache_t *cache, void *payload)
+{
+ int hashval;
+ util_cache_node_t *p, *q;
+
+ if (cache == NULL)
+ return;
+
+ cache->removes++;
+ hashval = (*cache->hash)(payload) % cache->size;
+ for (p = cache->nodes[hashval], q=NULL;
+ p && !(*cache->compare)(p->payload, payload);
+ p = p->next) {
+ q = p;
+ }
+
+    /* If p is NULL, the node was not found, so just return */
+ if (p == NULL)
+ return;
+
+ if (q == NULL) {
+ /* We found the node, and it's the first in the list */
+ cache->nodes[hashval] = p->next;
+ }
+ else {
+ /* We found the node and it's not the first in the list */
+ q->next = p->next;
+ }
+ (*cache->free)(cache, p->payload);
+ util_ald_free(cache, p);
+ cache->numentries--;
+}
+
+char *util_ald_cache_display_stats(request_rec *r, util_ald_cache_t *cache, char *name, char *id)
+{
+ unsigned long i;
+ int totchainlen = 0;
+ int nchains = 0;
+ double chainlen;
+ util_cache_node_t *n;
+ char *buf, *buf2;
+ apr_pool_t *p = r->pool;
+
+ if (cache == NULL) {
+ return "";
+ }
+
+ for (i=0; i < cache->size; ++i) {
+ if (cache->nodes[i] != NULL) {
+ nchains++;
+ for (n = cache->nodes[i];
+ n != NULL && n != n->next;
+ n = n->next) {
+ totchainlen++;
+ }
+ }
+ }
+ chainlen = nchains? (double)totchainlen / (double)nchains : 0;
+
+ if (id) {
+ buf2 = apr_psprintf(p,
+ "<a href=\"%s?%s\">%s</a>",
+ r->uri,
+ id,
+ name);
+ }
+ else {
+ buf2 = name;
+ }
+
+ buf = apr_psprintf(p,
+ "<tr valign='top'>"
+ "<td nowrap>%s</td>"
+ "<td align='right' nowrap>%lu (%.0f%% full)</td>"
+ "<td align='right'>%.1f</td>"
+ "<td align='right'>%lu/%lu</td>"
+ "<td align='right'>%.0f%%</td>"
+ "<td align='right'>%lu/%lu</td>",
+ buf2,
+ cache->numentries,
+ (double)cache->numentries / (double)cache->maxentries * 100.0,
+ chainlen,
+ cache->hits,
+ cache->fetches,
+ (cache->fetches > 0 ? (double)(cache->hits) / (double)(cache->fetches) * 100.0 : 100.0),
+ cache->inserts,
+ cache->removes);
+
+ if (cache->numpurges) {
+ char str_ctime[APR_CTIME_LEN];
+
+ apr_ctime(str_ctime, cache->last_purge);
+ buf = apr_psprintf(p,
+ "%s"
+ "<td align='right'>%lu</td>\n"
+ "<td align='right' nowrap>%s</td>\n",
+ buf,
+ cache->numpurges,
+ str_ctime);
+ }
+ else {
+ buf = apr_psprintf(p,
+ "%s<td colspan='2' align='center'>(none)</td>\n",
+ buf);
+ }
+
+ buf = apr_psprintf(p, "%s<td align='right'>%.2g</td>\n</tr>", buf, cache->avg_purgetime);
+
+ return buf;
+}
+
+char *util_ald_cache_display(request_rec *r, util_ldap_state_t *st)
+{
+ unsigned long i,j;
+ char *buf, *t1, *t2, *t3;
+ char *id1, *id2, *id3;
+ char *argfmt = "cache=%s&id=%d&off=%d";
+ char *scanfmt = "cache=%4s&id=%u&off=%u%1s";
+ apr_pool_t *pool = r->pool;
+ util_cache_node_t *p = NULL;
+ util_url_node_t *n = NULL;
+
+ util_ald_cache_t *util_ldap_cache = st->util_ldap_cache;
+
+
+ if (!util_ldap_cache) {
+ return "<tr valign='top'><td nowrap colspan=7>Cache has not been enabled/initialised.</td></tr>";
+ }
+
+ if (r->args && strlen(r->args)) {
+ char cachetype[5], lint[2];
+ unsigned int id, off;
+ char date_str[APR_CTIME_LEN+1];
+
+ if ((3 == sscanf(r->args, scanfmt, cachetype, &id, &off, lint)) &&
+ (id < util_ldap_cache->size)) {
+
+ if ((p = util_ldap_cache->nodes[id]) != NULL) {
+ n = (util_url_node_t *)p->payload;
+ buf = (char*)n->url;
+ }
+ else {
+ buf = "";
+ }
+
+ ap_rputs(apr_psprintf(r->pool,
+ "<p>\n"
+ "<table border='0'>\n"
+ "<tr>\n"
+ "<td bgcolor='#000000'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Cache Name:</b></font></td>"
+ "<td bgcolor='#ffffff'><font size='-1' face='Arial,Helvetica' color='#000000'><b>%s (%s)</b></font></td>"
+ "</tr>\n"
+ "</table>\n</p>\n",
+ buf,
+ cachetype[0] == 'm'? "Main" :
+ (cachetype[0] == 's' ? "Search" :
+ (cachetype[0] == 'c' ? "Compares" : "DNCompares"))), r);
+
+ switch (cachetype[0]) {
+ case 'm':
+ if (util_ldap_cache->marktime) {
+ apr_ctime(date_str, util_ldap_cache->marktime);
+ }
+ else
+ date_str[0] = 0;
+
+ ap_rputs(apr_psprintf(r->pool,
+ "<p>\n"
+ "<table border='0'>\n"
+ "<tr>\n"
+ "<td bgcolor='#000000'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Size:</b></font></td>"
+ "<td bgcolor='#ffffff'><font size='-1' face='Arial,Helvetica' color='#000000'><b>%ld</b></font></td>"
+ "</tr>\n"
+ "<tr>\n"
+ "<td bgcolor='#000000'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Max Entries:</b></font></td>"
+ "<td bgcolor='#ffffff'><font size='-1' face='Arial,Helvetica' color='#000000'><b>%ld</b></font></td>"
+ "</tr>\n"
+ "<tr>\n"
+ "<td bgcolor='#000000'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b># Entries:</b></font></td>"
+ "<td bgcolor='#ffffff'><font size='-1' face='Arial,Helvetica' color='#000000'><b>%ld</b></font></td>"
+ "</tr>\n"
+ "<tr>\n"
+ "<td bgcolor='#000000'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Full Mark:</b></font></td>"
+ "<td bgcolor='#ffffff'><font size='-1' face='Arial,Helvetica' color='#000000'><b>%ld</b></font></td>"
+ "</tr>\n"
+ "<tr>\n"
+ "<td bgcolor='#000000'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Full Mark Time:</b></font></td>"
+ "<td bgcolor='#ffffff'><font size='-1' face='Arial,Helvetica' color='#000000'><b>%s</b></font></td>"
+ "</tr>\n"
+ "</table>\n</p>\n",
+ util_ldap_cache->size,
+ util_ldap_cache->maxentries,
+ util_ldap_cache->numentries,
+ util_ldap_cache->fullmark,
+ date_str), r);
+
+ ap_rputs("<p>\n"
+ "<table border='0'>\n"
+ "<tr bgcolor='#000000'>\n"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>LDAP URL</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Size</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Max Entries</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b># Entries</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Full Mark</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Full Mark Time</b></font></td>"
+ "</tr>\n", r
+ );
+ for (i=0; i < util_ldap_cache->size; ++i) {
+ for (p = util_ldap_cache->nodes[i]; p != NULL; p = p->next) {
+
+ (*util_ldap_cache->display)(r, util_ldap_cache, p->payload);
+ }
+ }
+ ap_rputs("</table>\n</p>\n", r);
+
+
+ break;
+ case 's':
+ ap_rputs("<p>\n"
+ "<table border='0'>\n"
+ "<tr bgcolor='#000000'>\n"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>LDAP Filter</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>User Name</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Last Bind</b></font></td>"
+ "</tr>\n", r
+ );
+ if (n) {
+ for (i=0; i < n->search_cache->size; ++i) {
+ for (p = n->search_cache->nodes[i]; p != NULL; p = p->next) {
+
+ (*n->search_cache->display)(r, n->search_cache, p->payload);
+ }
+ }
+ }
+ ap_rputs("</table>\n</p>\n", r);
+ break;
+ case 'c':
+ ap_rputs("<p>\n"
+ "<table border='0'>\n"
+ "<tr bgcolor='#000000'>\n"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>DN</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Attribute</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Value</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Last Compare</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Result</b></font></td>"
+ "</tr>\n", r
+ );
+ if (n) {
+ for (i=0; i < n->compare_cache->size; ++i) {
+ for (p = n->compare_cache->nodes[i]; p != NULL; p = p->next) {
+
+ (*n->compare_cache->display)(r, n->compare_cache, p->payload);
+ }
+ }
+ }
+ ap_rputs("</table>\n</p>\n", r);
+ break;
+ case 'd':
+ ap_rputs("<p>\n"
+ "<table border='0'>\n"
+ "<tr bgcolor='#000000'>\n"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Require DN</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Actual DN</b></font></td>"
+ "</tr>\n", r
+ );
+ if (n) {
+ for (i=0; i < n->dn_compare_cache->size; ++i) {
+ for (p = n->dn_compare_cache->nodes[i]; p != NULL; p = p->next) {
+
+ (*n->dn_compare_cache->display)(r, n->dn_compare_cache, p->payload);
+ }
+ }
+ }
+ ap_rputs("</table>\n</p>\n", r);
+ break;
+ default:
+ break;
+ }
+
+ }
+ else {
+ buf = "";
+ }
+ }
+ else {
+ ap_rputs("<p>\n"
+ "<table border='0'>\n"
+ "<tr bgcolor='#000000'>\n"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Cache Name</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Entries</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Avg. Chain Len.</b></font></td>"
+ "<td colspan='2'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Hits</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Ins/Rem</b></font></td>"
+ "<td colspan='2'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Purges</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Avg Purge Time</b></font></td>"
+ "</tr>\n", r
+ );
+
+
+ id1 = apr_psprintf(pool, argfmt, "main", 0, 0);
+ buf = util_ald_cache_display_stats(r, st->util_ldap_cache, "LDAP URL Cache", id1);
+
+ for (i=0; i < util_ldap_cache->size; ++i) {
+ for (p = util_ldap_cache->nodes[i],j=0; p != NULL; p = p->next,j++) {
+
+ n = (util_url_node_t *)p->payload;
+
+ t1 = apr_psprintf(pool, "%s (Searches)", n->url);
+ t2 = apr_psprintf(pool, "%s (Compares)", n->url);
+ t3 = apr_psprintf(pool, "%s (DNCompares)", n->url);
+ id1 = apr_psprintf(pool, argfmt, "srch", i, j);
+ id2 = apr_psprintf(pool, argfmt, "cmpr", i, j);
+ id3 = apr_psprintf(pool, argfmt, "dncp", i, j);
+
+ buf = apr_psprintf(pool, "%s\n\n"
+ "%s\n\n"
+ "%s\n\n"
+ "%s\n\n",
+ buf,
+ util_ald_cache_display_stats(r, n->search_cache, t1, id1),
+ util_ald_cache_display_stats(r, n->compare_cache, t2, id2),
+ util_ald_cache_display_stats(r, n->dn_compare_cache, t3, id3)
+ );
+ }
+ }
+ ap_rputs(buf, r);
+ ap_rputs("</table>\n</p>\n", r);
+ }
+
+ return buf;
+}
+
+#endif /* APU_HAS_LDAP */