Diffstat (limited to 'rubbos/app/httpd-2.0.64/modules')
-rw-r--r--rubbos/app/httpd-2.0.64/modules/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/Makefile11
-rw-r--r--rubbos/app/httpd-2.0.64/modules/Makefile.in6
-rw-r--r--rubbos/app/httpd-2.0.64/modules/NWGNUmakefile52
-rw-r--r--rubbos/app/httpd-2.0.64/modules/README54
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/.indent.pro54
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_access.abin0 -> 42550 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_access.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_access.obin0 -> 42400 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_auth.abin0 -> 45548 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_auth.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_auth.obin0 -> 45400 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/NWGNUauthanon248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/NWGNUauthdbm248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/NWGNUdigest248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/NWGNUmakefile246
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/config.m423
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_access.c304
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_access.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_access.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_access.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_access.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_access.obin0 -> 42400 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.c315
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.obin0 -> 45400 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_anon.c238
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_anon.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_anon.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_dbm.c293
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_dbm.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_dbm.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_digest.c2108
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_digest.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/aaa/modules.mk7
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/libprews.c70
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_anon.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_basic.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_dbm.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_digest.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_cache.def5
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_cern_meta.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_dav.def3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_disk_cache.def3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_echo.def2
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_expires.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_file_cache.def2
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_headers.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_info.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_logio.def2
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_mem_cache.def3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_mime_magic.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_netware.c194
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_nw_ssl.c1151
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy.def6
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy_connect.def4
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy_ftp.def4
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy_http.def7
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_rewrite.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_speling.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_status.def2
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_unique_id.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_usertrack.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/mod_vhost_alias.def2
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/netware/moddavfs.def1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/win32/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/win32/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/win32/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/win32/config.m411
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/win32/mod_isapi.c1760
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/win32/mod_isapi.dsp132
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/win32/mod_isapi.h271
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/win32/mod_win32.c553
-rw-r--r--rubbos/app/httpd-2.0.64/modules/arch/win32/modules.mk3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/cache/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/cache/.indent.pro54
-rw-r--r--rubbos/app/httpd-2.0.64/modules/cache/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/cache/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/cache/config.m411
-rw-r--r--rubbos/app/httpd-2.0.64/modules/cache/mod_file_cache.c416
-rw-r--r--rubbos/app/httpd-2.0.64/modules/cache/mod_file_cache.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/cache/mod_file_cache.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/cache/modules.mk3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/config5.m456
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/fs/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/fs/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/fs/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/fs/NWGNUmakefile266
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/fs/config6.m423
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/fs/dbm.c753
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/fs/lock.c1517
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/fs/mod_dav_fs.c108
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/fs/mod_dav_fs.dsp152
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/fs/modules.mk3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/fs/repos.c2130
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/fs/repos.h78
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/NWGNUmakefile268
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/config5.m422
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/dav.imp64
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/liveprop.c140
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/mod_dav.c4834
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/mod_dav.dsp164
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/mod_dav.h2420
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/modules.mk3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/props.c1116
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/providers.c33
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/std_liveprop.c194
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/util.c2021
-rw-r--r--rubbos/app/httpd-2.0.64/modules/dav/main/util_lock.c791
-rw-r--r--rubbos/app/httpd-2.0.64/modules/echo/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/echo/.indent.pro54
-rw-r--r--rubbos/app/httpd-2.0.64/modules/echo/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/echo/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/echo/NWGNUmakefile257
-rw-r--r--rubbos/app/httpd-2.0.64/modules/echo/config.m411
-rw-r--r--rubbos/app/httpd-2.0.64/modules/echo/mod_echo.c102
-rw-r--r--rubbos/app/httpd-2.0.64/modules/echo/mod_echo.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/echo/modules.mk3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/.indent.pro54
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/NWGNUauthldap262
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/NWGNUcharsetl257
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/NWGNUdsk_cach261
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/NWGNUexample256
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmakefile256
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmem_cach265
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmod_cach264
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmoddumpio248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/NWGNUutilldap266
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/README41
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/README.ldap47
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.c171
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.h112
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.c290
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.h161
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.c290
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.h160
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/cache_storage.c311
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/cache_util.c575
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/charset.conv55
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/config.m439
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.c1117
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.def6
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.c1006
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.dsp168
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.h319
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.imp10
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter.c137
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter_in.c160
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.c1082
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.dsp124
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.c963
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.c215
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_example.c1313
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.c1198
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/modules.mk3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.c1758
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.def7
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.dsp140
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.c450
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.h193
-rw-r--r--rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache_mgr.c762
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/.indent.pro54
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/.libs/mod_include.abin0 -> 204368 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/.libs/mod_include.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/.libs/mod_include.obin0 -> 204216 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/NWGNUdeflate278
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/NWGNUextfiltr248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/NWGNUmakefile255
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/config.m465
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/mod_deflate.c875
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/mod_deflate.dsp127
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/mod_deflate.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/mod_ext_filter.c890
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/mod_ext_filter.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/mod_ext_filter.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/mod_include.c3751
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/mod_include.dsp132
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/mod_include.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/mod_include.h206
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/mod_include.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/mod_include.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/mod_include.obin0 -> 204216 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/filters/modules.mk5
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.indent.pro54
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_asis.abin0 -> 33444 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_asis.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_asis.obin0 -> 33296 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_autoindex.abin0 -> 150722 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_autoindex.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_autoindex.obin0 -> 150568 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_cgid.abin0 -> 153612 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_cgid.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_cgid.obin0 -> 153464 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_status.abin0 -> 97328 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_status.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_status.obin0 -> 97120 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/NWGNUinfo248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/NWGNUmakefile247
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/NWGNUstatus248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/config5.m466
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_asis.c145
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_asis.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_asis.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_asis.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_asis.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_asis.obin0 -> 33296 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.c2252
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.obin0 -> 150568 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.c1235
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.dsp132
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.h62
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.c1744
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.obin0 -> 153464 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_info.c533
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_info.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_info.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_status.c857
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_status.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_status.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_status.h54
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_status.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_status.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_status.obin0 -> 97120 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_suexec.c138
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/mod_suexec.h23
-rw-r--r--rubbos/app/httpd-2.0.64/modules/generators/modules.mk11
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/.indent.pro54
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/.libs/http_core.obin0 -> 47552 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/.libs/http_protocol.obin0 -> 188976 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/.libs/http_request.obin0 -> 47576 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/.libs/mod_http.abin0 -> 285478 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/.libs/mod_http.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/.libs/mod_mime.abin0 -> 75212 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/.libs/mod_mime.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/.libs/mod_mime.obin0 -> 75064 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/config2.m420
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/http_core.c322
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/http_core.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/http_core.obin0 -> 47552 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/http_protocol.c3212
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/http_protocol.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/http_protocol.obin0 -> 188976 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/http_request.c548
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/http_request.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/http_request.obin0 -> 47576 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/mod_core.h80
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/mod_http.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/mod_mime.c987
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/mod_mime.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/mod_mime.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/mod_mime.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/mod_mime.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/mod_mime.obin0 -> 75064 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/http/modules.mk7
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/.indent.pro54
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/.libs/mod_log_config.abin0 -> 121544 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/.libs/mod_log_config.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/.libs/mod_log_config.obin0 -> 121312 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/NWGNUforensic257
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/NWGNUmakefile247
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/NWGNUmodlogio257
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/config.m418
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.c1519
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.h63
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.obin0 -> 121312 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/mod_log_forensic.c288
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/mod_log_forensic.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/mod_log_forensic.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/mod_logio.c192
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/mod_logio.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/loggers/modules.mk5
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.indent.pro54
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_actions.abin0 -> 36840 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_actions.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_actions.obin0 -> 36688 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_alias.abin0 -> 59550 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_alias.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_alias.obin0 -> 59400 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_dir.abin0 -> 37596 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_dir.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_dir.obin0 -> 37448 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_imap.abin0 -> 72260 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_imap.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_imap.obin0 -> 72112 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_negotiation.abin0 -> 149884 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_negotiation.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_negotiation.obin0 -> 149648 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_so.abin0 -> 29882 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_so.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_so.obin0 -> 29736 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_userdir.abin0 -> 40776 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_userdir.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_userdir.obin0 -> 40624 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/NWGNUmakefile247
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/NWGNUrewrite248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/NWGNUspeling248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/NWGNUvhost248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/config9.m457
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.c198
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.obin0 -> 36688 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.c484
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.obin0 -> 59400 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.c247
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.obin0 -> 37448 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.c897
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.obin0 -> 72112 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.c3096
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.obin0 -> 149648 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.c4670
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.h446
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_so.c368
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_so.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_so.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_so.obin0 -> 29736 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_speling.c532
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_speling.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_speling.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.c366
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.obin0 -> 40624 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_vhost_alias.c457
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_vhost_alias.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/mod_vhost_alias.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/mappers/modules.mk17
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/.indent.pro54
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_env.abin0 -> 37500 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_env.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_env.obin0 -> 37352 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_setenvif.abin0 -> 55920 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_setenvif.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_setenvif.obin0 -> 55768 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/NWGNUcernmeta248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/NWGNUexpires248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/NWGNUheaders248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/NWGNUmakefile252
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/NWGNUmimemagi248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/NWGNUmodversion248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/NWGNUuniqueid254
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/NWGNUusertrk248
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/config.m424
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_cern_meta.c372
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_cern_meta.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_cern_meta.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_env.c179
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_env.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_env.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_env.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_env.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_env.obin0 -> 37352 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_expires.c566
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_expires.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_expires.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_headers.c620
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_headers.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_headers.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_mime_magic.c2477
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_mime_magic.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_mime_magic.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.c586
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.la35
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.lo12
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.obin0 -> 55768 bytes
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_unique_id.c367
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_unique_id.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_unique_id.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_usertrack.c454
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_usertrack.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_usertrack.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_version.c312
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_version.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/mod_version.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/metadata/modules.mk7
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/.indent.pro58
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/CHANGES223
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/NWGNUmakefile247
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxy261
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxycon254
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxyftp260
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxyhtp263
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/config.m434
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/libproxy.exp1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy.c1181
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy.dsp140
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy.h255
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy_connect.dsp136
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy_ftp.dsp136
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy_http.dsp136
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/modules.mk3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/proxy_connect.c377
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/proxy_ftp.c1936
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/proxy_http.c1824
-rw-r--r--rubbos/app/httpd-2.0.64/modules/proxy/proxy_util.c1120
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/Makefile43
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/Makefile.in38
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/README129
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/README.dsov.fig346
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/README.dsov.ps1138
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/config.m454
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/mod_ssl.c428
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/mod_ssl.dsp328
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/mod_ssl.h724
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/modules.mk3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_config.c1420
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_dh.c207
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_init.c1243
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_io.c1746
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_kernel.c1876
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_log.c101
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_mutex.c120
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_pphrase.c789
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_rand.c179
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_vars.c687
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr.c82
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr.h104
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_eval.c254
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_parse.c1081
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_parse.h27
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_parse.y148
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_scan.c1969
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_scan.l225
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache.c199
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache_dbm.c462
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache_shmcb.c1362
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache_shmht.c351
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_toolkit_compat.h239
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_util.c449
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_ssl.c574
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_ssl.h93
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_table.c2518
-rw-r--r--rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_table.h152
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/.deps0
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/.indent.pro54
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/Makefile8
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/Makefile.in3
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/README1
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/config.m410
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/mod_bucketeer.c181
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/mod_bucketeer.dsp128
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/mod_optional_fn_export.c48
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/mod_optional_fn_export.h19
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/mod_optional_fn_import.c55
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/mod_optional_hook_export.c44
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/mod_optional_hook_export.h24
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/mod_optional_hook_import.c45
-rw-r--r--rubbos/app/httpd-2.0.64/modules/test/modules.mk3
523 files changed, 124447 insertions, 0 deletions
diff --git a/rubbos/app/httpd-2.0.64/modules/.deps b/rubbos/app/httpd-2.0.64/modules/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/Makefile b/rubbos/app/httpd-2.0.64/modules/Makefile
new file mode 100644
index 00000000..3ca7d787
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/Makefile
@@ -0,0 +1,11 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules
+
+SUBDIRS = $(MODULE_DIRS)
+CLEAN_SUBDIRS = $(MODULE_CLEANDIRS)
+
+include $(top_builddir)/build/rules.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/Makefile.in b/rubbos/app/httpd-2.0.64/modules/Makefile.in
new file mode 100644
index 00000000..1320ec26
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/Makefile.in
@@ -0,0 +1,6 @@
+
+SUBDIRS = $(MODULE_DIRS)
+CLEAN_SUBDIRS = $(MODULE_CLEANDIRS)
+
+include $(top_builddir)/build/rules.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/NWGNUmakefile b/rubbos/app/httpd-2.0.64/modules/NWGNUmakefile
new file mode 100644
index 00000000..fd795a4b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/NWGNUmakefile
@@ -0,0 +1,52 @@
+#
+# Declare the sub-directories to be built here
+#
+# To build with experimental modules, set the environment
+# variable EXPERIMENTAL=1
+
+SUBDIRS = \
+ aaa \
+ dav\main \
+ dav\fs \
+ echo \
+ generators \
+ loggers \
+ mappers \
+ metadata \
+ proxy \
+ filters \
+ $(EOLIST)
+
+#If the mod_edir directory exists then build the mod_edir module
+ifeq "$(wildcard $(AP_WORK)\modules\mod_edir)" "$(AP_WORK)\modules\mod_edir"
+SUBDIRS += mod_edir \
+ $(EOLIST)
+endif
+
+# Allow the experimental modules to be built if EXPERIMENTAL is defined
+ifdef EXPERIMENTAL
+SUBDIRS += experimental \
+ $(EOLIST)
+endif
+
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+ifeq "$(wildcard NWGNUmakefile.mak)" "NWGNUmakefile.mak"
+include NWGNUmakefile.mak
+endif
+
+#
+# You can use this target if all that is needed is to copy files to the
+# installation area
+#
+install :: nlms FORCE
+
diff --git a/rubbos/app/httpd-2.0.64/modules/README b/rubbos/app/httpd-2.0.64/modules/README
new file mode 100644
index 00000000..eab7067e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/README
@@ -0,0 +1,54 @@
+The directory structure for this level is as follows:
+
+aaa/
+ This directory contains modules dealing with authorization and
+ authentication.
+
+arch/
+
+cache/
+ This directory houses modules that implement file and data caching
+ capability.
+
+dav/
+ This directory houses modules that implement WebDAV functionality.
+
+echo/
+
+experimental/
+ In this directory we've placed some modules which we think
+ provide some pretty interesting functionality, but which
+ are still in the early stages of development and could
+ evolve radically in the future. This code isn't supported
+ officially.
+
+filters/
+ This directory houses modules that perform general inline data filtering.
+
+generators/
+ This directory houses modules that perform data generation functions.
+
+http/
+ This directory houses modules that implement the basic HTTP protocol.
+
+loggers/
+ This directory houses modules that handle logging functions.
+
+mappers/
+ This directory houses modules that handle URL mapping and
+ rewriting.
+
+metadata/
+ This directory houses modules that deal with Header metadata.
+
+proxy/
+ This houses the code for the proxy module for Apache.
+
+ssl/
+ This directory houses code for OpenSSL functionality.
+
+test/
+ This directory houses modules which test various components
+ of Apache. You should not compile these into a production
+ server.
+
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/.deps b/rubbos/app/httpd-2.0.64/modules/aaa/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/.indent.pro b/rubbos/app/httpd-2.0.64/modules/aaa/.indent.pro
new file mode 100644
index 00000000..a9fbe9f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_access.a b/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_access.a
new file mode 100644
index 00000000..a27c9f74
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_access.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_access.la b/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_access.la
new file mode 100644
index 00000000..5a1eaebf
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_access.la
@@ -0,0 +1,35 @@
+# mod_access.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_access.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_access.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_access.o b/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_access.o
new file mode 100644
index 00000000..0ec5a0db
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_access.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_auth.a b/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_auth.a
new file mode 100644
index 00000000..6f42abe3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_auth.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_auth.la b/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_auth.la
new file mode 100644
index 00000000..5ffae20b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_auth.la
@@ -0,0 +1,35 @@
+# mod_auth.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_auth.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_auth.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_auth.o b/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_auth.o
new file mode 100644
index 00000000..8a0feeac
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/.libs/mod_auth.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/Makefile b/rubbos/app/httpd-2.0.64/modules/aaa/Makefile
new file mode 100644
index 00000000..e6fa8f2f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/aaa
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/aaa
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/aaa
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/Makefile.in b/rubbos/app/httpd-2.0.64/modules/aaa/Makefile.in
new file mode 100644
index 00000000..167b343d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/Makefile.in
@@ -0,0 +1,3 @@
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/NWGNUauthanon b/rubbos/app/httpd-2.0.64/modules/aaa/NWGNUauthanon
new file mode 100644
index 00000000..6881d8a6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/NWGNUauthanon
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = authanon
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Authentication Anonymous Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = AuthAnon Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/authanon.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_auth_anon.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ auth_anon_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Update this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/NWGNUauthdbm b/rubbos/app/httpd-2.0.64/modules/aaa/NWGNUauthdbm
new file mode 100644
index 00000000..fe05c3d1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/NWGNUauthdbm
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = authdbm
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Database Authentication Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = AuthDBM Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/authdbm.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_auth_dbm.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ auth_dbm_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Update this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/NWGNUdigest b/rubbos/app/httpd-2.0.64/modules/aaa/NWGNUdigest
new file mode 100644
index 00000000..cf41db72
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/NWGNUdigest
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = digest
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Digest Authentication Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Digest Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified they will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/digest.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_auth_digest.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported go here
+#
+FILES_nlm_exports = \
+ auth_digest_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/NWGNUmakefile b/rubbos/app/httpd-2.0.64/modules/aaa/NWGNUmakefile
new file mode 100644
index 00000000..fc72c735
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/NWGNUmakefile
@@ -0,0 +1,246 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME =
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION =
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME =
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE =
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified they will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/authanon.nlm \
+ $(OBJDIR)/authdbm.nlm \
+ $(OBJDIR)/digest.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ $(EOLIST)
+
+#
+# Any symbols exported go here
+#
+FILES_nlm_exports = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/config.m4 b/rubbos/app/httpd-2.0.64/modules/aaa/config.m4
new file mode 100644
index 00000000..4440f67e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/config.m4
@@ -0,0 +1,23 @@
+dnl modules enabled in this directory by default
+
+dnl APACHE_MODULE(name, helptext[, objects[, structname[, default[, config]]]])
+
+APACHE_MODPATH_INIT(aaa)
+
+APACHE_MODULE(access, host-based access control, , , yes)
+APACHE_MODULE(auth, user-based access control, , , yes)
+APACHE_MODULE(auth_anon, anonymous user access, , , most)
+APACHE_MODULE(auth_dbm, DBM-based access databases, , , most)
+
+APACHE_MODULE(auth_digest, RFC2617 Digest authentication, , , most, [
+ APR_CHECK_APR_DEFINE(APR_HAS_RANDOM)
+ if test $ac_cv_define_APR_HAS_RANDOM = "no"; then
+ echo "You need APR random support to use mod_auth_digest."
+ echo "Look at APR configure options --with-egd and --with-devrandom."
+ enable_auth_digest="no"
+ fi
+])
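+
+dnl Illustrative note, not part of the upstream file: with the defaults above,
+dnl mod_access and mod_auth are built unless disabled, while the "most"
+dnl modules are typically selected with something like
+dnl   ./configure --enable-mods-shared=most
+dnl or individually, e.g. --enable-auth-digest (assuming the usual mapping of
+dnl APACHE_MODULE names to --enable-<name-with-dashes> configure options).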
+
+APR_ADDTO(LT_LDFLAGS,-export-dynamic)
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.c b/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.c
new file mode 100644
index 00000000..348289ab
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.c
@@ -0,0 +1,304 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Security options etc.
+ *
+ * Module derived from code originally written by Rob McCool
+ *
+ */
+
+#include "apr_strings.h"
+#include "apr_network_io.h"
+#include "apr_lib.h"
+
+#define APR_WANT_STRFUNC
+#define APR_WANT_BYTEFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_core.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "http_request.h"
+
+#if APR_HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+enum allowdeny_type {
+ T_ENV,
+ T_ALL,
+ T_IP,
+ T_HOST,
+ T_FAIL
+};
+
+typedef struct {
+ apr_int64_t limited;
+ union {
+ char *from;
+ apr_ipsubnet_t *ip;
+ } x;
+ enum allowdeny_type type;
+} allowdeny;
+
+/* things in the 'order' array */
+#define DENY_THEN_ALLOW 0
+#define ALLOW_THEN_DENY 1
+#define MUTUAL_FAILURE 2
+
+typedef struct {
+ int order[METHODS];
+ apr_array_header_t *allows;
+ apr_array_header_t *denys;
+} access_dir_conf;
+
+module AP_MODULE_DECLARE_DATA access_module;
+
+static void *create_access_dir_config(apr_pool_t *p, char *dummy)
+{
+ int i;
+ access_dir_conf *conf =
+ (access_dir_conf *)apr_pcalloc(p, sizeof(access_dir_conf));
+
+ for (i = 0; i < METHODS; ++i) {
+ conf->order[i] = DENY_THEN_ALLOW;
+ }
+ conf->allows = apr_array_make(p, 1, sizeof(allowdeny));
+ conf->denys = apr_array_make(p, 1, sizeof(allowdeny));
+
+ return (void *)conf;
+}
+
+static const char *order(cmd_parms *cmd, void *dv, const char *arg)
+{
+ access_dir_conf *d = (access_dir_conf *) dv;
+ int i, o;
+
+ if (!strcasecmp(arg, "allow,deny"))
+ o = ALLOW_THEN_DENY;
+ else if (!strcasecmp(arg, "deny,allow"))
+ o = DENY_THEN_ALLOW;
+ else if (!strcasecmp(arg, "mutual-failure"))
+ o = MUTUAL_FAILURE;
+ else
+ return "unknown order";
+
+ for (i = 0; i < METHODS; ++i)
+ if (cmd->limited & (AP_METHOD_BIT << i))
+ d->order[i] = o;
+
+ return NULL;
+}
+
+static const char *allow_cmd(cmd_parms *cmd, void *dv, const char *from,
+ const char *where_c)
+{
+ access_dir_conf *d = (access_dir_conf *) dv;
+ allowdeny *a;
+ char *where = apr_pstrdup(cmd->pool, where_c);
+ char *s;
+ char msgbuf[120];
+ apr_status_t rv;
+
+ if (strcasecmp(from, "from"))
+ return "allow and deny must be followed by 'from'";
+
+ a = (allowdeny *) apr_array_push(cmd->info ? d->allows : d->denys);
+ a->x.from = where;
+ a->limited = cmd->limited;
+
+ if (!strncasecmp(where, "env=", 4)) {
+ a->type = T_ENV;
+ a->x.from += 4;
+
+ }
+ else if (!strcasecmp(where, "all")) {
+ a->type = T_ALL;
+ }
+ else if ((s = strchr(where, '/'))) {
+ *s++ = '\0';
+ rv = apr_ipsubnet_create(&a->x.ip, where, s, cmd->pool);
+ if(APR_STATUS_IS_EINVAL(rv)) {
+ /* looked nothing like an IP address */
+ return "An IP address was expected";
+ }
+ else if (rv != APR_SUCCESS) {
+ apr_strerror(rv, msgbuf, sizeof msgbuf);
+ return apr_pstrdup(cmd->pool, msgbuf);
+ }
+ a->type = T_IP;
+ }
+ else if (!APR_STATUS_IS_EINVAL(rv = apr_ipsubnet_create(&a->x.ip, where, NULL, cmd->pool))) {
+ if (rv != APR_SUCCESS) {
+ apr_strerror(rv, msgbuf, sizeof msgbuf);
+ return apr_pstrdup(cmd->pool, msgbuf);
+ }
+ a->type = T_IP;
+ }
+ else { /* no slash, didn't look like an IP address => must be a host */
+ a->type = T_HOST;
+ }
+
+ return NULL;
+}
+
+static char its_an_allow;
+
+static const command_rec access_cmds[] =
+{
+ AP_INIT_TAKE1("order", order, NULL, OR_LIMIT,
+ "'allow,deny', 'deny,allow', or 'mutual-failure'"),
+ AP_INIT_ITERATE2("allow", allow_cmd, &its_an_allow, OR_LIMIT,
+ "'from' followed by hostnames or IP-address wildcards"),
+ AP_INIT_ITERATE2("deny", allow_cmd, NULL, OR_LIMIT,
+ "'from' followed by hostnames or IP-address wildcards"),
+ {NULL}
+};
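+
+/*
+ * Illustrative usage, not part of the original source (directory path and
+ * environment variable name are made up):
+ *
+ *   <Directory "/usr/local/apache2/htdocs/private">
+ *       Order deny,allow
+ *       Deny from all
+ *       Allow from 10.0.0.0/8 example.com env=trusted_client
+ *   </Directory>
+ *
+ * With 'deny,allow' the Deny list is evaluated first and a matching Allow
+ * overrides it (see check_dir_access() below); the env=, CIDR and hostname
+ * forms correspond to the T_ENV, T_IP and T_HOST cases parsed by allow_cmd()
+ * above.
+ */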
+
+static int in_domain(const char *domain, const char *what)
+{
+ int dl = strlen(domain);
+ int wl = strlen(what);
+
+ if ((wl - dl) >= 0) {
+ if (strcasecmp(domain, &what[wl - dl]) != 0)
+ return 0;
+
+ /* Make sure we matched an *entire* subdomain --- if the user
+ * said 'allow from good.com', we don't want people from nogood.com
+ * to be able to get in.
+ */
+
+ if (wl == dl)
+ return 1; /* matched whole thing */
+ else
+ return (domain[0] == '.' || what[wl - dl - 1] == '.');
+ }
+ else
+ return 0;
+}
+
+static int find_allowdeny(request_rec *r, apr_array_header_t *a, int method)
+{
+
+ allowdeny *ap = (allowdeny *) a->elts;
+ apr_int64_t mmask = (AP_METHOD_BIT << method);
+ int i;
+ int gothost = 0;
+ const char *remotehost = NULL;
+
+ for (i = 0; i < a->nelts; ++i) {
+ if (!(mmask & ap[i].limited))
+ continue;
+
+ switch (ap[i].type) {
+ case T_ENV:
+ if (apr_table_get(r->subprocess_env, ap[i].x.from)) {
+ return 1;
+ }
+ break;
+
+ case T_ALL:
+ return 1;
+
+ case T_IP:
+ if (apr_ipsubnet_test(ap[i].x.ip, r->connection->remote_addr)) {
+ return 1;
+ }
+ break;
+
+ case T_HOST:
+ if (!gothost) {
+ int remotehost_is_ip;
+
+ remotehost = ap_get_remote_host(r->connection, r->per_dir_config,
+ REMOTE_DOUBLE_REV, &remotehost_is_ip);
+
+ if ((remotehost == NULL) || remotehost_is_ip)
+ gothost = 1;
+ else
+ gothost = 2;
+ }
+
+ if ((gothost == 2) && in_domain(ap[i].x.from, remotehost))
+ return 1;
+ break;
+
+ case T_FAIL:
+ /* do nothing? */
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int check_dir_access(request_rec *r)
+{
+ int method = r->method_number;
+ int ret = OK;
+ access_dir_conf *a = (access_dir_conf *)
+ ap_get_module_config(r->per_dir_config, &access_module);
+
+ if (a->order[method] == ALLOW_THEN_DENY) {
+ ret = HTTP_FORBIDDEN;
+ if (find_allowdeny(r, a->allows, method))
+ ret = OK;
+ if (find_allowdeny(r, a->denys, method))
+ ret = HTTP_FORBIDDEN;
+ }
+ else if (a->order[method] == DENY_THEN_ALLOW) {
+ if (find_allowdeny(r, a->denys, method))
+ ret = HTTP_FORBIDDEN;
+ if (find_allowdeny(r, a->allows, method))
+ ret = OK;
+ }
+ else {
+ if (find_allowdeny(r, a->allows, method)
+ && !find_allowdeny(r, a->denys, method))
+ ret = OK;
+ else
+ ret = HTTP_FORBIDDEN;
+ }
+
+ if (ret == HTTP_FORBIDDEN
+ && (ap_satisfies(r) != SATISFY_ANY || !ap_some_auth_required(r))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "client denied by server configuration: %s",
+ r->filename);
+ }
+
+ return ret;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_access_checker(check_dir_access,NULL,NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA access_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_access_dir_config, /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ access_cmds,
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.dsp b/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.dsp
new file mode 100644
index 00000000..eae26a6d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_access" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_access - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_access.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_access.mak" CFG="mod_access - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_access - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_access - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_access - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_access_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_access.so" /base:@..\..\os\win32\BaseAddr.ref,mod_access.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_access.so" /base:@..\..\os\win32\BaseAddr.ref,mod_access.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_access - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_access_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_access.so" /base:@..\..\os\win32\BaseAddr.ref,mod_access.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_access.so" /base:@..\..\os\win32\BaseAddr.ref,mod_access.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_access - Win32 Release"
+# Name "mod_access - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_access.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_access.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_access - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_access.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_access.so "access_module for Apache" ../../include/ap_release.h > .\mod_access.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_access - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_access.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_access.so "access_module for Apache" ../../include/ap_release.h > .\mod_access.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.exp b/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.exp
new file mode 100644
index 00000000..f8aff339
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.exp
@@ -0,0 +1 @@
+access_module
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.la b/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.la
new file mode 100644
index 00000000..5a1eaebf
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.la
@@ -0,0 +1,35 @@
+# mod_access.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_access.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_access.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.lo b/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.lo
new file mode 100644
index 00000000..c09fcac0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.lo
@@ -0,0 +1,12 @@
+# mod_access.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_access.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_access.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.o b/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.o
new file mode 100644
index 00000000..0ec5a0db
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_access.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.c b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.c
new file mode 100644
index 00000000..43f65306
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.c
@@ -0,0 +1,315 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_auth: authentication
+ *
+ * Rob McCool
+ *
+ * Adapted to Apache by rst.
+ *
+ * dirkx - Added Authoritative control to allow passing on to lower
+ * modules if and only if the userid is not known to this
+ * module. A known user with a faulty or absent password still
+ * causes an AuthRequired. The default is 'Authoritative', i.e.
+ * no control is passed along.
+ */
+
+#include "apr_strings.h"
+#include "apr_md5.h" /* for apr_password_validate */
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_protocol.h"
+#include "http_request.h"
+
+
+typedef struct {
+ char *auth_pwfile;
+ char *auth_grpfile;
+ int auth_authoritative;
+} auth_config_rec;
+
+static void *create_auth_dir_config(apr_pool_t *p, char *d)
+{
+ auth_config_rec *conf = apr_palloc(p, sizeof(*conf));
+
+ conf->auth_pwfile = NULL; /* just to illustrate the default really */
+ conf->auth_grpfile = NULL; /* unless you have a broken HP cc */
+ conf->auth_authoritative = 1; /* keep the fortress secure by default */
+ return conf;
+}
+
+static const char *set_auth_slot(cmd_parms *cmd, void *offset, const char *f,
+ const char *t)
+{
+ if (t && strcmp(t, "standard")) {
+ return apr_pstrcat(cmd->pool, "Invalid auth file type: ", t, NULL);
+ }
+
+ return ap_set_file_slot(cmd, offset, f);
+}
+
+static const command_rec auth_cmds[] =
+{
+ AP_INIT_TAKE12("AuthUserFile", set_auth_slot,
+ (void *)APR_OFFSETOF(auth_config_rec, auth_pwfile),
+ OR_AUTHCFG, "text file containing user IDs and passwords"),
+ AP_INIT_TAKE12("AuthGroupFile", set_auth_slot,
+ (void *)APR_OFFSETOF(auth_config_rec, auth_grpfile),
+ OR_AUTHCFG,
+ "text file containing group names and member user IDs"),
+ AP_INIT_FLAG("AuthAuthoritative", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(auth_config_rec, auth_authoritative),
+ OR_AUTHCFG,
+ "Set to 'no' to allow access control to be passed along to "
+ "lower modules if the UserID is not known to this module"),
+ {NULL}
+};
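+
+/*
+ * Illustrative usage, not part of the original source (file names are made
+ * up); a minimal Basic authentication setup driving this module might be:
+ *
+ *   AuthType Basic
+ *   AuthName "Restricted area"
+ *   AuthUserFile  conf/htpasswd
+ *   AuthGroupFile conf/htgroup
+ *   Require group staff
+ *
+ * The user file holds "user:hashed-password" lines (see get_pw() below), the
+ * group file holds "group: member1 member2" lines (see groups_for_user()),
+ * and check_user_access() evaluates the Require directive.
+ */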
+
+module AP_MODULE_DECLARE_DATA auth_module;
+
+static char *get_pw(request_rec *r, char *user, char *auth_pwfile)
+{
+ ap_configfile_t *f;
+ char l[MAX_STRING_LEN];
+ const char *rpw, *w;
+ apr_status_t status;
+
+ if ((status = ap_pcfg_openfile(&f, r->pool, auth_pwfile)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r,
+ "Could not open password file: %s", auth_pwfile);
+ return NULL;
+ }
+ while (!(ap_cfg_getline(l, MAX_STRING_LEN, f))) {
+ if ((l[0] == '#') || (!l[0])) {
+ continue;
+ }
+ rpw = l;
+ w = ap_getword(r->pool, &rpw, ':');
+
+ if (!strcmp(user, w)) {
+ ap_cfg_closefile(f);
+ return ap_getword(r->pool, &rpw, ':');
+ }
+ }
+ ap_cfg_closefile(f);
+ return NULL;
+}
+
+static apr_table_t *groups_for_user(request_rec *r, char *user, char *grpfile)
+{
+ apr_pool_t *p = r->pool;
+ ap_configfile_t *f;
+ apr_table_t *grps = apr_table_make(p, 15);
+ apr_pool_t *sp;
+ char l[MAX_STRING_LEN];
+ const char *group_name, *ll, *w;
+ apr_status_t status;
+
+ if ((status = ap_pcfg_openfile(&f, p, grpfile)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r,
+ "Could not open group file: %s", grpfile);
+ return NULL;
+ }
+
+ apr_pool_create(&sp, p);
+
+ while (!(ap_cfg_getline(l, MAX_STRING_LEN, f))) {
+ if ((l[0] == '#') || (!l[0])) {
+ continue;
+ }
+ ll = l;
+ apr_pool_clear(sp);
+
+ group_name = ap_getword(sp, &ll, ':');
+
+ while (ll[0]) {
+ w = ap_getword_conf(sp, &ll);
+ if (!strcmp(w, user)) {
+ apr_table_setn(grps, apr_pstrdup(p, group_name), "in");
+ break;
+ }
+ }
+ }
+ ap_cfg_closefile(f);
+ apr_pool_destroy(sp);
+ return grps;
+}
+
+/* These functions return 0 if client is OK, and proper error status
+ * if not... either HTTP_UNAUTHORIZED, if we made a check, and it failed, or
+ * HTTP_INTERNAL_SERVER_ERROR, if things are so totally confused that we
+ * couldn't figure out how to tell if the client is authorized or not.
+ *
+ * If they return DECLINED, and all other modules also decline, that's
+ * treated by the server core as a configuration error, logged and
+ * reported as such.
+ */
+
+/* Determine user ID, and check if it really is that user, for HTTP
+ * basic authentication...
+ */
+
+static int authenticate_basic_user(request_rec *r)
+{
+ auth_config_rec *conf = ap_get_module_config(r->per_dir_config,
+ &auth_module);
+ const char *sent_pw;
+ char *real_pw;
+ apr_status_t invalid_pw;
+ int res;
+
+ if ((res = ap_get_basic_auth_pw(r, &sent_pw))) {
+ return res;
+ }
+
+ if (!conf->auth_pwfile) {
+ return DECLINED;
+ }
+
+ if (!(real_pw = get_pw(r, r->user, conf->auth_pwfile))) {
+ if (!(conf->auth_authoritative)) {
+ return DECLINED;
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "user %s not found: %s", r->user, r->uri);
+ ap_note_basic_auth_failure(r);
+ return HTTP_UNAUTHORIZED;
+ }
+ invalid_pw = apr_password_validate(sent_pw, real_pw);
+ if (invalid_pw != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "user %s: authentication failure for \"%s\": "
+ "Password Mismatch",
+ r->user, r->uri);
+ ap_note_basic_auth_failure(r);
+ return HTTP_UNAUTHORIZED;
+ }
+ return OK;
+}
+
+/* Checking ID */
+
+static int check_user_access(request_rec *r)
+{
+ auth_config_rec *conf = ap_get_module_config(r->per_dir_config,
+ &auth_module);
+ char *user = r->user;
+ int m = r->method_number;
+ int method_restricted = 0;
+ register int x;
+ const char *t, *w;
+ apr_table_t *grpstatus;
+ const apr_array_header_t *reqs_arr = ap_requires(r);
+ require_line *reqs;
+
+ /* BUG FIX: tadc, 11-Nov-1995. If there is no "requires" directive,
+ * then any user will do.
+ */
+ if (!reqs_arr) {
+ return OK;
+ }
+ reqs = (require_line *)reqs_arr->elts;
+
+ if (conf->auth_grpfile) {
+ grpstatus = groups_for_user(r, user, conf->auth_grpfile);
+ }
+ else {
+ grpstatus = NULL;
+ }
+
+ for (x = 0; x < reqs_arr->nelts; x++) {
+
+ if (!(reqs[x].method_mask & (AP_METHOD_BIT << m))) {
+ continue;
+ }
+
+ method_restricted = 1;
+
+ t = reqs[x].requirement;
+ w = ap_getword_white(r->pool, &t);
+ if (!strcmp(w, "valid-user")) {
+ return OK;
+ }
+ if (!strcmp(w, "user")) {
+ while (t[0]) {
+ w = ap_getword_conf(r->pool, &t);
+ if (!strcmp(user, w)) {
+ return OK;
+ }
+ }
+ }
+ else if (!strcmp(w, "group")) {
+ if (!grpstatus) {
+ return DECLINED; /* DBM group? Something else? */
+ }
+
+ while (t[0]) {
+ w = ap_getword_conf(r->pool, &t);
+ if (apr_table_get(grpstatus, w)) {
+ return OK;
+ }
+ }
+ }
+ else if (conf->auth_authoritative) {
+ /* if we aren't authoritative, any require directive could be
+ * valid even if we don't grok it. However, if we are
+ * authoritative, we can warn the user they did something wrong.
+ * That something could be a missing "AuthAuthoritative off", but
+ * more likely is a typo in the require directive.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "access to %s failed, reason: unknown require "
+ "directive:\"%s\"", r->uri, reqs[x].requirement);
+ }
+ }
+
+ if (!method_restricted) {
+ return OK;
+ }
+
+ if (!(conf->auth_authoritative)) {
+ return DECLINED;
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "access to %s failed, reason: user %s not allowed access",
+ r->uri, user);
+
+ ap_note_basic_auth_failure(r);
+ return HTTP_UNAUTHORIZED;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_check_user_id(authenticate_basic_user,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_auth_checker(check_user_access,NULL,NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA auth_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_auth_dir_config, /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ auth_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.dsp b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.dsp
new file mode 100644
index 00000000..ce7d0c77
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_auth" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_auth - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_auth.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_auth.mak" CFG="mod_auth - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_auth - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_auth - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_auth - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_auth_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_auth.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_auth.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_auth - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_auth" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_auth.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_auth.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_auth - Win32 Release"
+# Name "mod_auth - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_auth.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_auth.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_auth - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_auth.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_auth.so "auth_module for Apache" ../../include/ap_release.h > .\mod_auth.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_auth - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_auth.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_auth.so "auth_module for Apache" ../../include/ap_release.h > .\mod_auth.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.exp b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.exp
new file mode 100644
index 00000000..76adad0a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.exp
@@ -0,0 +1 @@
+auth_module
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.la b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.la
new file mode 100644
index 00000000..5ffae20b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.la
@@ -0,0 +1,35 @@
+# mod_auth.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_auth.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_auth.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.lo b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.lo
new file mode 100644
index 00000000..907d0402
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.lo
@@ -0,0 +1,12 @@
+# mod_auth.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_auth.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_auth.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.o b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.o
new file mode 100644
index 00000000..8a0feeac
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_anon.c b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_anon.c
new file mode 100644
index 00000000..b5137d5f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_anon.c
@@ -0,0 +1,238 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_auth: authentication
+ *
+ * Rob McCool & Brian Behlendorf.
+ *
+ * Adapted to Apache by rst.
+ *
+ * Version 0.5 May 1996
+ *
+ * Modified by Dirk.vanGulik@jrc.it to
+ *
+ * Adapted to allow anonymous logins, just like with Anon-FTP, when
+ * one gives the magic user name 'anonymous' and one's email address
+ * as the password.
+ *
+ * Just add the following tokens to your <directory> setup:
+ *
+ * Anonymous magic-userid [magic-userid]...
+ *
+ * Anonymous_MustGiveEmail [ on | off ] default = on
+ * Anonymous_LogEmail [ on | off ] default = on
+ * Anonymous_VerifyEmail [ on | off ] default = off
+ * Anonymous_NoUserId [ on | off ] default = off
+ * Anonymous_Authoritative [ on | off ] default = off
+ *
+ * The magic user id is something like 'anonymous'; it is NOT case sensitive.
+ *
+ * The MustGiveEmail flag can be used to force users to enter something
+ * in the password field (like an email address). Default is on.
+ *
+ * Furthermore, the 'NoUserID' flag can be set to allow completely empty
+ * usernames as well; this is convenient because broken GUIs like W95 often
+ * send just a single return as the user name. The default is off.
+ *
+ * Dirk.vanGulik@jrc.it; http://ewse.ceo.org; http://me-www.jrc.it/~dirkx
+ *
+ */
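+
+/*
+ * Illustrative only, not part of the original header (path and user ids are
+ * made up); a typical anonymous setup combining the directives above:
+ *
+ *   <Directory "/usr/local/apache2/htdocs/public">
+ *       AuthType Basic
+ *       AuthName "Guest area"
+ *       Anonymous anonymous guest
+ *       Anonymous_MustGiveEmail on
+ *       Anonymous_VerifyEmail on
+ *       Anonymous_LogEmail on
+ *       Require valid-user
+ *   </Directory>
+ *
+ * A login as 'anonymous' (or 'guest') with an address-looking password is
+ * then accepted by anon_authenticate_basic_user() below.
+ */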
+
+#include "apr_strings.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_request.h"
+#include "http_protocol.h"
+
+typedef struct anon_auth {
+ char *password;
+ struct anon_auth *next;
+} anon_auth;
+
+typedef struct {
+ anon_auth *anon_auth_passwords;
+ int anon_auth_nouserid;
+ int anon_auth_logemail;
+ int anon_auth_verifyemail;
+ int anon_auth_mustemail;
+ int anon_auth_authoritative;
+} anon_auth_config_rec;
+
+static void *create_anon_auth_dir_config(apr_pool_t *p, char *d)
+{
+ anon_auth_config_rec *conf = apr_palloc(p, sizeof(*conf));
+
+ /* just to illustrate the defaults really. */
+ conf->anon_auth_passwords = NULL;
+
+ conf->anon_auth_nouserid = 0;
+ conf->anon_auth_logemail = 1;
+ conf->anon_auth_verifyemail = 0;
+ conf->anon_auth_mustemail = 1;
+ conf->anon_auth_authoritative = 0;
+ return conf;
+}
+
+static const char *anon_set_string_slots(cmd_parms *cmd,
+ void *my_config, const char *arg)
+{
+ anon_auth_config_rec *conf = my_config;
+ anon_auth *first;
+
+ if (!(*arg))
+ return "Anonymous string cannot be empty, use Anonymous_NoUserId instead";
+
+ /* squeeze in a record */
+ first = conf->anon_auth_passwords;
+
+ if (!(conf->anon_auth_passwords = apr_palloc(cmd->pool, sizeof(anon_auth))) ||
+ !(conf->anon_auth_passwords->password = apr_pstrdup(cmd->pool, arg)))
+ return "Failed to claim memory for an anonymous password...";
+
+ /* and repair the next */
+ conf->anon_auth_passwords->next = first;
+
+ return NULL;
+}
+
+static const command_rec anon_auth_cmds[] =
+{
+ AP_INIT_ITERATE("Anonymous", anon_set_string_slots, NULL, OR_AUTHCFG,
+ "a space-separated list of user IDs"),
+ AP_INIT_FLAG("Anonymous_MustGiveEmail", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(anon_auth_config_rec, anon_auth_mustemail),
+ OR_AUTHCFG, "Limited to 'on' or 'off'"),
+ AP_INIT_FLAG("Anonymous_NoUserId", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(anon_auth_config_rec, anon_auth_nouserid),
+ OR_AUTHCFG, "Limited to 'on' or 'off'"),
+ AP_INIT_FLAG("Anonymous_VerifyEmail", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(anon_auth_config_rec, anon_auth_verifyemail),
+ OR_AUTHCFG, "Limited to 'on' or 'off'"),
+ AP_INIT_FLAG("Anonymous_LogEmail", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(anon_auth_config_rec, anon_auth_logemail),
+ OR_AUTHCFG, "Limited to 'on' or 'off'"),
+ AP_INIT_FLAG("Anonymous_Authoritative", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(anon_auth_config_rec, anon_auth_authoritative),
+ OR_AUTHCFG, "Limited to 'on' or 'off'"),
+ {NULL}
+};
+
+module AP_MODULE_DECLARE_DATA auth_anon_module;
+
+static int anon_authenticate_basic_user(request_rec *r)
+{
+ anon_auth_config_rec *conf = ap_get_module_config(r->per_dir_config,
+ &auth_anon_module);
+ const char *sent_pw;
+ int res = DECLINED;
+
+ if ((res = ap_get_basic_auth_pw(r, &sent_pw))) {
+ return res;
+ }
+
+ /* Ignore if we are not configured */
+ if (!conf->anon_auth_passwords) {
+ return DECLINED;
+ }
+
+ /* Do we allow an empty userID and/or is it the magic one?
+ */
+
+ if ((!(r->user[0])) && (conf->anon_auth_nouserid)) {
+ res = OK;
+ }
+ else {
+ anon_auth *p = conf->anon_auth_passwords;
+ res = DECLINED;
+ while ((res == DECLINED) && (p != NULL)) {
+ if (!(strcasecmp(r->user, p->password))) {
+ res = OK;
+ }
+ p = p->next;
+ }
+ }
+ if (
+ /* username is OK */
+ (res == OK)
+ /* password been filled out ? */
+ && ((!conf->anon_auth_mustemail) || strlen(sent_pw))
+ /* does the password look like an email address ? */
+ && ((!conf->anon_auth_verifyemail)
+ || ((strpbrk("@", sent_pw) != NULL)
+ && (strpbrk(".", sent_pw) != NULL)))) {
+ if (conf->anon_auth_logemail && ap_is_initial_req(r)) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, APR_SUCCESS, r,
+ "Anonymous: Passwd <%s> Accepted",
+ sent_pw ? sent_pw : "\'none\'");
+ }
+ return OK;
+ }
+ else {
+ if (conf->anon_auth_authoritative) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_SUCCESS, r,
+ "Anonymous: Authoritative, Passwd <%s> not accepted",
+ sent_pw ? sent_pw : "\'none\'");
+ return HTTP_UNAUTHORIZED;
+ }
+ /* Drop out the bottom to return DECLINED */
+ }
+
+ return DECLINED;
+}
+
+static int check_anon_access(request_rec *r)
+{
+#ifdef NOTYET
+ conn_rec *c = r->connection;
+ anon_auth_config_rec *conf = ap_get_module_config(r->per_dir_config,
+ &auth_anon_module);
+
+ if (!conf->anon_auth) {
+ return DECLINED;
+ }
+
+ if (strcasecmp(r->connection->user, conf->anon_auth)) {
+ return DECLINED;
+ }
+
+ return OK;
+#endif
+ return DECLINED;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_check_user_id(anon_authenticate_basic_user,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_auth_checker(check_anon_access,NULL,NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA auth_anon_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_anon_auth_dir_config, /* dir config creator */
+ NULL, /* dir merger ensure strictness */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ anon_auth_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_anon.dsp b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_anon.dsp
new file mode 100644
index 00000000..9ac81797
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_anon.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_auth_anon" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_auth_anon - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_auth_anon.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_auth_anon.mak" CFG="mod_auth_anon - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_auth_anon - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_auth_anon - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_auth_anon - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_auth_anon_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_auth_anon.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_anon.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_auth_anon.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_anon.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_auth_anon - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_auth_anon_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_auth_anon.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_anon.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_auth_anon.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_anon.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_auth_anon - Win32 Release"
+# Name "mod_auth_anon - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_auth_anon.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_auth_anon.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_auth_anon - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_auth_anon.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_auth_anon.so "auth_anon_module for Apache" ../../include/ap_release.h > .\mod_auth_anon.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_auth_anon - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_auth_anon.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_auth_anon.so "auth_anon_module for Apache" ../../include/ap_release.h > .\mod_auth_anon.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_anon.exp b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_anon.exp
new file mode 100644
index 00000000..63282532
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_anon.exp
@@ -0,0 +1 @@
+auth_anon_module
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_dbm.c b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_dbm.c
new file mode 100644
index 00000000..e7c2cc9a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_dbm.c
@@ -0,0 +1,293 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_auth: authentication
+ *
+ * Rob McCool & Brian Behlendorf.
+ *
+ * Adapted to Apache by rst.
+ *
+ * dirkx - Added Authoritative control to allow passing on to lower
+ * modules if and only if the userid is not known to this
+ * module. A known user with a faulty or absent password still
+ * causes an AuthRequired. The default is 'Authoritative', i.e.
+ * no control is passed along.
+ */
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+#include "apr_strings.h"
+#include "apr_dbm.h"
+#include "apr_md5.h" /* for apr_password_validate */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_protocol.h"
+#include "http_request.h" /* for ap_hook_(check_user_id | auth_checker)*/
+
+
+typedef struct {
+ char *auth_dbmpwfile;
+ char *auth_dbmgrpfile;
+ char *auth_dbmtype;
+ int auth_dbmauthoritative;
+} dbm_auth_config_rec;
+
+static void *create_dbm_auth_dir_config(apr_pool_t *p, char *d)
+{
+ dbm_auth_config_rec *conf = apr_palloc(p, sizeof(*conf));
+
+ conf->auth_dbmpwfile = NULL;
+ conf->auth_dbmgrpfile = NULL;
+ conf->auth_dbmtype = "default";
+ conf->auth_dbmauthoritative = 1; /* fortress is secure by default */
+
+ return conf;
+}
+
+static const char *set_dbm_slot(cmd_parms *cmd, void *offset,
+ const char *f, const char *t)
+{
+ if (!t || strcmp(t, "dbm"))
+ return DECLINE_CMD;
+
+ return ap_set_file_slot(cmd, offset, f);
+}
+
+static const char *set_dbm_type(cmd_parms *cmd,
+ void *dir_config,
+ const char *arg)
+{
+ dbm_auth_config_rec *conf = dir_config;
+
+ conf->auth_dbmtype = apr_pstrdup(cmd->pool, arg);
+ return NULL;
+}
+
+static const command_rec dbm_auth_cmds[] =
+{
+ AP_INIT_TAKE1("AuthDBMUserFile", ap_set_file_slot,
+ (void *)APR_OFFSETOF(dbm_auth_config_rec, auth_dbmpwfile),
+ OR_AUTHCFG, "dbm database file containing user IDs and passwords"),
+ AP_INIT_TAKE1("AuthDBMGroupFile", ap_set_file_slot,
+ (void *)APR_OFFSETOF(dbm_auth_config_rec, auth_dbmgrpfile),
+ OR_AUTHCFG, "dbm database file containing group names and member user IDs"),
+ AP_INIT_TAKE12("AuthUserFile", set_dbm_slot,
+ (void *)APR_OFFSETOF(dbm_auth_config_rec, auth_dbmpwfile),
+ OR_AUTHCFG, NULL),
+ AP_INIT_TAKE12("AuthGroupFile", set_dbm_slot,
+ (void *)APR_OFFSETOF(dbm_auth_config_rec, auth_dbmgrpfile),
+ OR_AUTHCFG, NULL),
+ AP_INIT_TAKE1("AuthDBMType", set_dbm_type,
+ NULL,
+ OR_AUTHCFG, "what type of DBM file the user file is"),
+ AP_INIT_FLAG("AuthDBMAuthoritative", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(dbm_auth_config_rec, auth_dbmauthoritative),
+ OR_AUTHCFG, "Set to 'no' to allow access control to be passed along to lower modules, if the UserID is not known in this module"),
+ {NULL}
+};
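+
+/*
+ * Illustrative usage, not part of the original source (file names are made
+ * up); the DBM variant mirrors the plain-text setup of mod_auth:
+ *
+ *   AuthType Basic
+ *   AuthName "Restricted area"
+ *   AuthDBMUserFile  conf/dbmpasswd
+ *   AuthDBMGroupFile conf/dbmgroups
+ *   AuthDBMType default
+ *   Require group staff
+ *
+ * The databases are usually maintained with the dbmmanage script shipped with
+ * httpd; dbm_authenticate_basic_user() and dbm_check_auth() below consume them.
+ */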
+
+module AP_MODULE_DECLARE_DATA auth_dbm_module;
+
+static char *get_dbm_pw(request_rec *r,
+ char *user,
+ char *auth_dbmpwfile,
+ char *dbtype)
+{
+ apr_dbm_t *f;
+ apr_datum_t d, q;
+ char *pw = NULL;
+ apr_status_t retval;
+ q.dptr = user;
+#ifndef NETSCAPE_DBM_COMPAT
+ q.dsize = strlen(q.dptr);
+#else
+ q.dsize = strlen(q.dptr) + 1;
+#endif
+
+ retval = apr_dbm_open_ex(&f, dbtype, auth_dbmpwfile, APR_DBM_READONLY,
+ APR_OS_DEFAULT, r->pool);
+ if (retval != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, retval, r,
+ "could not open dbm (type %s) auth file: %s", dbtype,
+ auth_dbmpwfile);
+ return NULL;
+ }
+ if (apr_dbm_fetch(f, q, &d) == APR_SUCCESS && d.dptr) {
+ pw = apr_palloc(r->pool, d.dsize + 1);
+ strncpy(pw, d.dptr, d.dsize);
+ pw[d.dsize] = '\0'; /* Terminate the string */
+ }
+
+ apr_dbm_close(f);
+ return pw;
+}
+
+/* We do something strange with the group file. If the group file
+ * contains any : we assume the format is
+ * key=username value=":"groupname [":"anything here is ignored]
+ * otherwise we now (0.8.14+) assume that the format is
+ * key=username value=groupname
+ * The first allows the password and group files to be the same
+ * physical DBM file; key=username value=password":"groupname[":"anything]
+ *
+ * mark@telescope.org, 22Sep95
+ */
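+
+/*
+ * Illustrative records, not from the original source, for the two layouts:
+ *
+ *   separate group file:          key = "bob"  value = "staff,admins"
+ *   combined password+group file: key = "bob"  value = "<hash>:staff,admins"
+ *
+ * get_dbm_grp() returns "staff,admins" in both cases and dbm_check_auth()
+ * splits that result on ',' when matching a 'require group' line.
+ */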
+
+static char *get_dbm_grp(request_rec *r, char *user, char *auth_dbmgrpfile,
+ char *dbtype)
+{
+ char *grp_data = get_dbm_pw(r, user, auth_dbmgrpfile,dbtype);
+ char *grp_colon;
+ char *grp_colon2;
+
+ if (grp_data == NULL)
+ return NULL;
+
+ if ((grp_colon = strchr(grp_data, ':')) != NULL) {
+ grp_colon2 = strchr(++grp_colon, ':');
+ if (grp_colon2)
+ *grp_colon2 = '\0';
+ return grp_colon;
+ }
+ return grp_data;
+}
+
+static int dbm_authenticate_basic_user(request_rec *r)
+{
+ dbm_auth_config_rec *conf = ap_get_module_config(r->per_dir_config,
+ &auth_dbm_module);
+ const char *sent_pw;
+ char *real_pw, *colon_pw;
+ apr_status_t invalid_pw;
+ int res;
+
+ if ((res = ap_get_basic_auth_pw(r, &sent_pw)))
+ return res;
+
+ if (!conf->auth_dbmpwfile)
+ return DECLINED;
+
+ if (!(real_pw = get_dbm_pw(r, r->user, conf->auth_dbmpwfile,
+ conf->auth_dbmtype))) {
+ if (!(conf->auth_dbmauthoritative))
+ return DECLINED;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "DBM user %s not found: %s", r->user, r->filename);
+ ap_note_basic_auth_failure(r);
+ return HTTP_UNAUTHORIZED;
+ }
+ /* Password is up to first : if exists */
+ colon_pw = strchr(real_pw, ':');
+ if (colon_pw) {
+ *colon_pw = '\0';
+ }
+ invalid_pw = apr_password_validate(sent_pw, real_pw);
+ if (invalid_pw != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "DBM user %s: authentication failure for \"%s\": "
+ "Password Mismatch",
+ r->user, r->uri);
+ ap_note_basic_auth_failure(r);
+ return HTTP_UNAUTHORIZED;
+ }
+ return OK;
+}
+
+/* Checking ID */
+
+static int dbm_check_auth(request_rec *r)
+{
+ dbm_auth_config_rec *conf = ap_get_module_config(r->per_dir_config,
+ &auth_dbm_module);
+ char *user = r->user;
+ int m = r->method_number;
+
+ const apr_array_header_t *reqs_arr = ap_requires(r);
+ require_line *reqs = reqs_arr ? (require_line *) reqs_arr->elts : NULL;
+
+ register int x;
+ const char *t;
+ char *w;
+
+ if (!conf->auth_dbmgrpfile)
+ return DECLINED;
+ if (!reqs_arr)
+ return DECLINED;
+
+ for (x = 0; x < reqs_arr->nelts; x++) {
+
+ if (!(reqs[x].method_mask & (AP_METHOD_BIT << m)))
+ continue;
+
+ t = reqs[x].requirement;
+ w = ap_getword_white(r->pool, &t);
+
+ if (!strcmp(w, "group") && conf->auth_dbmgrpfile) {
+ const char *orig_groups, *groups;
+ char *v;
+
+ if (!(groups = get_dbm_grp(r, user, conf->auth_dbmgrpfile,
+ conf->auth_dbmtype))) {
+ if (!(conf->auth_dbmauthoritative))
+ return DECLINED;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "user %s not in DBM group file %s: %s",
+ user, conf->auth_dbmgrpfile, r->filename);
+ ap_note_basic_auth_failure(r);
+ return HTTP_UNAUTHORIZED;
+ }
+ orig_groups = groups;
+ while (t[0]) {
+ w = ap_getword_white(r->pool, &t);
+ groups = orig_groups;
+ while (groups[0]) {
+ v = ap_getword(r->pool, &groups, ',');
+ if (!strcmp(v, w))
+ return OK;
+ }
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "user %s not in right group: %s",
+ user, r->filename);
+ ap_note_basic_auth_failure(r);
+ return HTTP_UNAUTHORIZED;
+ }
+ }
+
+ return DECLINED;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_check_user_id(dbm_authenticate_basic_user, NULL, NULL,
+ APR_HOOK_MIDDLE);
+ ap_hook_auth_checker(dbm_check_auth, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA auth_dbm_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_dbm_auth_dir_config, /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ dbm_auth_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_dbm.dsp b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_dbm.dsp
new file mode 100644
index 00000000..d55e0c9b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_dbm.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_auth_dbm" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_auth_dbm - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_auth_dbm.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_auth_dbm.mak" CFG="mod_auth_dbm - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_auth_dbm - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_auth_dbm - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_auth_dbm - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "AP_AUTH_DBM_USE_APR" /Fd"Release\mod_auth_dbm_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_auth_dbm.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_dbm.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_auth_dbm.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_dbm.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_auth_dbm - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "AP_AUTH_DBM_USE_APR" /Fd"Debug\mod_auth_dbm_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_auth_dbm.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_dbm.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_auth_dbm.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_dbm.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_auth_dbm - Win32 Release"
+# Name "mod_auth_dbm - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_auth_dbm.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_auth_dbm.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_auth_dbm - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_auth_dbm.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_auth_dbm.so "auth_dbm_module for Apache" ../../include/ap_release.h > .\mod_auth_dbm.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_auth_dbm - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_auth_dbm.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_auth_dbm.so "auth_dbm_module for Apache" ../../include/ap_release.h > .\mod_auth_dbm.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_dbm.exp b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_dbm.exp
new file mode 100644
index 00000000..7038e804
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_dbm.exp
@@ -0,0 +1 @@
+auth_dbm_module
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_digest.c b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_digest.c
new file mode 100644
index 00000000..9f4c655a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_digest.c
@@ -0,0 +1,2108 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_auth_digest: MD5 digest authentication
+ *
+ * Originally by Alexei Kosut <akosut@nueva.pvt.k12.ca.us>
+ * Updated to RFC-2617 by Ronald Tschalär <ronald@innovation.ch>
+ * based on mod_auth, by Rob McCool and Robert S. Thau
+ *
+ * This module is an updated version of modules/standard/mod_digest.c
+ * It is still fairly new and problems may turn up - submit problem
+ * reports to the Apache bug-database, or send them directly to me
+ * at ronald@innovation.ch.
+ *
+ * Requires either /dev/random (or equivalent) or the truerand library,
+ * available for instance from
+ * ftp://research.att.com/dist/mab/librand.shar
+ *
+ * Open Issues:
+ * - qop=auth-int (when streams and trailer support available)
+ * - nonce-format configurability
+ * - Proxy-Authorization-Info header is set by this module, but is
+ * currently ignored by mod_proxy (needs patch to mod_proxy)
+ * - generating the secret takes a while (~ 8 seconds) if using the
+ * truerand library
+ * - The source of the secret should be a run-time directive (with server
+ * scope: RSRC_CONF). However, that could be tricky when trying to
+ * choose truerand vs. file...
+ * - shared-mem not completely tested yet. Seems to work ok for me,
+ * but... (definitely won't work on Windoze)
+ * - Sharing a realm among multiple servers has following problems:
+ * o Server name and port can't be included in nonce-hash
+ * (we need two nonce formats, which must be configured explicitly)
+ * o The nonce-count check can't require exact equality, or else nonce-count
+ * checking must be disabled. What we could do is the following:
+ * (expected < received) ? set expected = received : issue error
+ * The only problem is that it allows replay attacks when somebody
+ * captures a packet sent to one server and sends it to another
+ * one. Should we add "AuthDigestNcCheck Strict"?
+ * - expired nonces give amaya fits.
+ */
+
+#include "apr_sha1.h"
+#include "apr_base64.h"
+#include "apr_lib.h"
+#include "apr_time.h"
+#include "apr_errno.h"
+#include "apr_global_mutex.h"
+#include "apr_strings.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_request.h"
+#include "http_log.h"
+#include "http_protocol.h"
+#include "apr_uri.h"
+#include "util_md5.h"
+#include "apr_shm.h"
+#include "apr_rmm.h"
+
+/* Disable shmem until pools/init gets sorted out
+ * remove following two lines when fixed
+ */
+#undef APR_HAS_SHARED_MEMORY
+#define APR_HAS_SHARED_MEMORY 0
+
+/* struct to hold the configuration info */
+
+typedef struct digest_config_struct {
+ const char *dir_name;
+ const char *pwfile;
+ const char *grpfile;
+ const char *realm;
+ char **qop_list;
+ apr_sha1_ctx_t nonce_ctx;
+ apr_time_t nonce_lifetime;
+ const char *nonce_format;
+ int check_nc;
+ const char *algorithm;
+ char *uri_list;
+ const char *ha1;
+} digest_config_rec;
+
+
+#define DFLT_ALGORITHM "MD5"
+
+#define DFLT_NONCE_LIFE apr_time_from_sec(300)
+#define NEXTNONCE_DELTA apr_time_from_sec(30)
+
+
+#define NONCE_TIME_LEN (((sizeof(apr_time_t)+2)/3)*4)
+#define NONCE_HASH_LEN (2*APR_SHA1_DIGESTSIZE)
+#define NONCE_LEN (int )(NONCE_TIME_LEN + NONCE_HASH_LEN)
+
+#define SECRET_LEN 20
+
+
+/* client list definitions */
+
+typedef struct hash_entry {
+ unsigned long key; /* the key for this entry */
+ struct hash_entry *next; /* next entry in the bucket */
+ unsigned long nonce_count; /* for nonce-count checking */
+ char ha1[2*MD5_DIGESTSIZE+1]; /* for algorithm=MD5-sess */
+ char last_nonce[NONCE_LEN+1]; /* for one-time nonce's */
+} client_entry;
+
+static struct hash_table {
+ client_entry **table;
+ unsigned long tbl_len;
+ unsigned long num_entries;
+ unsigned long num_created;
+ unsigned long num_removed;
+ unsigned long num_renewed;
+} *client_list;
+
+
+/* struct to hold a parsed Authorization header */
+
+enum hdr_sts { NO_HEADER, NOT_DIGEST, INVALID, VALID };
+
+typedef struct digest_header_struct {
+ const char *scheme;
+ const char *realm;
+ const char *username;
+ char *nonce;
+ const char *uri;
+ const char *method;
+ const char *digest;
+ const char *algorithm;
+ const char *cnonce;
+ const char *opaque;
+ unsigned long opaque_num;
+ const char *message_qop;
+ const char *nonce_count;
+ /* the following fields are not (directly) from the header */
+ apr_time_t nonce_time;
+ enum hdr_sts auth_hdr_sts;
+ const char *raw_request_uri;
+ apr_uri_t *psd_request_uri;
+ int needed_auth;
+ client_entry *client;
+} digest_header_rec;
+
+
+/* (mostly) nonce stuff */
+
+typedef union time_union {
+ apr_time_t time;
+ unsigned char arr[sizeof(apr_time_t)];
+} time_rec;
+
+static unsigned char secret[SECRET_LEN];
+
+/* client-list, opaque, and one-time-nonce stuff */
+
+static apr_shm_t *client_shm = NULL;
+static apr_rmm_t *client_rmm = NULL;
+static unsigned long *opaque_cntr;
+static apr_time_t *otn_counter; /* one-time-nonce counter */
+static apr_global_mutex_t *client_lock = NULL;
+static apr_global_mutex_t *opaque_lock = NULL;
+static char client_lock_name[L_tmpnam];
+static char opaque_lock_name[L_tmpnam];
+
+#define DEF_SHMEM_SIZE 1000L /* ~ 12 entries */
+#define DEF_NUM_BUCKETS 15L
+#define HASH_DEPTH 5
+
+static long shmem_size = DEF_SHMEM_SIZE;
+static long num_buckets = DEF_NUM_BUCKETS;
+
+
+module AP_MODULE_DECLARE_DATA auth_digest_module;
+
+/*
+ * initialization code
+ */
+
+static apr_status_t cleanup_tables(void *not_used)
+{
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "Digest: cleaning up shared memory");
+ fflush(stderr);
+
+ if (client_shm) {
+ apr_shm_destroy(client_shm);
+ client_shm = NULL;
+ }
+
+ if (client_lock) {
+ apr_global_mutex_destroy(client_lock);
+ client_lock = NULL;
+ }
+
+ if (opaque_lock) {
+ apr_global_mutex_destroy(opaque_lock);
+ opaque_lock = NULL;
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t initialize_secret(server_rec *s)
+{
+ apr_status_t status;
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "Digest: generating secret for digest authentication ...");
+
+#if APR_HAS_RANDOM
+ status = apr_generate_random_bytes(secret, sizeof(secret));
+#else
+#error APR random number support is missing; you probably need to install the truerand library.
+#endif
+
+ if (status != APR_SUCCESS) {
+ char buf[120];
+ ap_log_error(APLOG_MARK, APLOG_CRIT, status, s,
+ "Digest: error generating secret: %s",
+ apr_strerror(status, buf, sizeof(buf)));
+ return status;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s, "Digest: done");
+
+ return APR_SUCCESS;
+}
+
+static void log_error_and_cleanup(char *msg, apr_status_t sts, server_rec *s)
+{
+ ap_log_error(APLOG_MARK, APLOG_ERR, sts, s,
+ "Digest: %s - all nonce-count checking, one-time nonces, and "
+ "MD5-sess algorithm disabled", msg);
+
+ cleanup_tables(NULL);
+}
+
+#if APR_HAS_SHARED_MEMORY
+
+static void initialize_tables(server_rec *s, apr_pool_t *ctx)
+{
+ unsigned long idx;
+ apr_status_t sts;
+
+ /* set up client list */
+
+ sts = apr_shm_create(&client_shm, shmem_size, tmpnam(NULL), ctx);
+ if (sts != APR_SUCCESS) {
+ log_error_and_cleanup("failed to create shared memory segments", sts, s);
+ return;
+ }
+
+ client_list = apr_rmm_malloc(client_rmm, sizeof(*client_list) +
+ sizeof(client_entry*)*num_buckets);
+ if (!client_list) {
+ log_error_and_cleanup("failed to allocate shared memory", -1, s);
+ return;
+ }
+ client_list->table = (client_entry**) (client_list + 1);
+ for (idx = 0; idx < num_buckets; idx++) {
+ client_list->table[idx] = NULL;
+ }
+ client_list->tbl_len = num_buckets;
+ client_list->num_entries = 0;
+
+ tmpnam(client_lock_name);
+ /* FIXME: get the client_lock_name from a directive so we're portable
+ * to non-process-inheriting operating systems, like Win32. */
+ sts = apr_global_mutex_create(&client_lock, client_lock_name,
+ APR_LOCK_DEFAULT, ctx);
+ if (sts != APR_SUCCESS) {
+ log_error_and_cleanup("failed to create lock (client_lock)", sts, s);
+ return;
+ }
+
+
+ /* setup opaque */
+
+ opaque_cntr = apr_rmm_malloc(client_rmm, sizeof(*opaque_cntr));
+ if (opaque_cntr == NULL) {
+ log_error_and_cleanup("failed to allocate shared memory", -1, s);
+ return;
+ }
+ *opaque_cntr = 1UL;
+
+ tmpnam(opaque_lock_name);
+ /* FIXME: get the opaque_lock_name from a directive so we're portable
+ * to non-process-inheriting operating systems, like Win32. */
+ sts = apr_global_mutex_create(&opaque_lock, opaque_lock_name,
+ APR_LOCK_DEFAULT, ctx);
+ if (sts != APR_SUCCESS) {
+ log_error_and_cleanup("failed to create lock (opaque_lock)", sts, s);
+ return;
+ }
+
+
+ /* setup one-time-nonce counter */
+
+ otn_counter = apr_rmm_malloc(client_rmm, sizeof(*otn_counter));
+ if (otn_counter == NULL) {
+ log_error_and_cleanup("failed to allocate shared memory", -1, s);
+ return;
+ }
+ *otn_counter = 0;
+ /* no lock here */
+
+
+ /* success */
+ return;
+}
+
+#endif /* APR_HAS_SHARED_MEMORY */
+
+
+static int initialize_module(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ void *data;
+ const char *userdata_key = "auth_digest_init";
+
+ /* initialize_module() will be called twice, and if it's a DSO
+ * then all static data from the first call will be lost. Only
+ * set up our static data on the second call. */
+ apr_pool_userdata_get(&data, userdata_key, s->process->pool);
+ if (!data) {
+ apr_pool_userdata_set((const void *)1, userdata_key,
+ apr_pool_cleanup_null, s->process->pool);
+ return OK;
+ }
+ if (initialize_secret(s) != APR_SUCCESS) {
+ return !OK;
+ }
+
+#if APR_HAS_SHARED_MEMORY
+ /* Note: this stuff is currently fixed for the lifetime of the server,
+ * i.e. even across restarts. This means that A) any shmem-size
+ * configuration changes are ignored, and B) certain optimizations,
+ * such as only allocating the smallest necessary entry for each
+ * client, can't be done. However, the alternative is a nightmare:
+ * we can't call apr_shm_destroy on a graceful restart because there
+ * will be children using the tables, and we also don't know when the
+ * last child dies. Therefore we can never clean up the old stuff,
+ * creating a creeping memory leak.
+ */
+ initialize_tables(s, p);
+ apr_pool_cleanup_register(p, NULL, cleanup_tables, apr_pool_cleanup_null);
+#endif /* APR_HAS_SHARED_MEMORY */
+ return OK;
+}
+
+static void initialize_child(apr_pool_t *p, server_rec *s)
+{
+ apr_status_t sts;
+
+ if (!client_shm) {
+ return;
+ }
+
+ /* FIXME: get the client_lock_name from a directive so we're portable
+ * to non-process-inheriting operating systems, like Win32. */
+ sts = apr_global_mutex_child_init(&client_lock, client_lock_name, p);
+ if (sts != APR_SUCCESS) {
+ log_error_and_cleanup("failed to create lock (client_lock)", sts, s);
+ return;
+ }
+ /* FIXME: get the opaque_lock_name from a directive so we're portable
+ * to non-process-inheriting operating systems, like Win32. */
+ sts = apr_global_mutex_child_init(&opaque_lock, opaque_lock_name, p);
+ if (sts != APR_SUCCESS) {
+ log_error_and_cleanup("failed to create lock (opaque_lock)", sts, s);
+ return;
+ }
+}
+
+/*
+ * configuration code
+ */
+
+static void *create_digest_dir_config(apr_pool_t *p, char *dir)
+{
+ digest_config_rec *conf;
+
+ if (dir == NULL) {
+ return NULL;
+ }
+
+ conf = (digest_config_rec *) apr_pcalloc(p, sizeof(digest_config_rec));
+ if (conf) {
+ conf->qop_list = apr_palloc(p, sizeof(char*));
+ conf->qop_list[0] = NULL;
+ conf->nonce_lifetime = DFLT_NONCE_LIFE;
+ conf->dir_name = apr_pstrdup(p, dir);
+ conf->algorithm = DFLT_ALGORITHM;
+ }
+
+ return conf;
+}
+
+static const char *set_realm(cmd_parms *cmd, void *config, const char *realm)
+{
+ digest_config_rec *conf = (digest_config_rec *) config;
+
+ /* The core already handles the realm, but it's just too convenient to
+ * grab it ourselves too and cache some setups. However, we need to
+ * let the core get at it too, which is why we decline at the end -
+ * this relies on the fact that http_core is last in the list.
+ */
+ conf->realm = realm;
+
+ /* we precompute the part of the nonce hash that is constant (well,
+ * the host:port would be too, but that varies for .htaccess files
+ * and directives outside a virtual host section)
+ */
+ apr_sha1_init(&conf->nonce_ctx);
+ apr_sha1_update_binary(&conf->nonce_ctx, secret, sizeof(secret));
+ apr_sha1_update_binary(&conf->nonce_ctx, (const unsigned char *) realm,
+ strlen(realm));
+
+ return DECLINE_CMD;
+}
+
+static const char *set_digest_file(cmd_parms *cmd, void *config,
+ const char *file)
+{
+ ((digest_config_rec *) config)->pwfile = file;
+ return NULL;
+}
+
+static const char *set_group_file(cmd_parms *cmd, void *config,
+ const char *file)
+{
+ ((digest_config_rec *) config)->grpfile = file;
+ return NULL;
+}
+
+static const char *set_qop(cmd_parms *cmd, void *config, const char *op)
+{
+ digest_config_rec *conf = (digest_config_rec *) config;
+ char **tmp;
+ int cnt;
+
+ if (!strcasecmp(op, "none")) {
+ if (conf->qop_list[0] == NULL) {
+ conf->qop_list = apr_palloc(cmd->pool, 2 * sizeof(char*));
+ conf->qop_list[1] = NULL;
+ }
+ conf->qop_list[0] = "none";
+ return NULL;
+ }
+
+ if (!strcasecmp(op, "auth-int")) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server,
+ "Digest: WARNING: qop `auth-int' currently only works "
+ "correctly for responses with no entity");
+ }
+ else if (strcasecmp(op, "auth")) {
+ return apr_pstrcat(cmd->pool, "Unrecognized qop: ", op, NULL);
+ }
+
+ for (cnt = 0; conf->qop_list[cnt] != NULL; cnt++)
+ ;
+
+ tmp = apr_palloc(cmd->pool, (cnt + 2) * sizeof(char*));
+ memcpy(tmp, conf->qop_list, cnt*sizeof(char*));
+ tmp[cnt] = apr_pstrdup(cmd->pool, op);
+ tmp[cnt+1] = NULL;
+ conf->qop_list = tmp;
+
+ return NULL;
+}
+
+static const char *set_nonce_lifetime(cmd_parms *cmd, void *config,
+ const char *t)
+{
+ char *endptr;
+ long lifetime;
+
+ lifetime = strtol(t, &endptr, 10);
+ if (endptr < (t+strlen(t)) && !apr_isspace(*endptr)) {
+ return apr_pstrcat(cmd->pool,
+ "Invalid time in AuthDigestNonceLifetime: ",
+ t, NULL);
+ }
+
+ ((digest_config_rec *) config)->nonce_lifetime = apr_time_from_sec(lifetime);
+ return NULL;
+}
+
+static const char *set_nonce_format(cmd_parms *cmd, void *config,
+ const char *fmt)
+{
+ ((digest_config_rec *) config)->nonce_format = fmt;
+ return "AuthDigestNonceFormat is not implemented (yet)";
+}
+
+static const char *set_nc_check(cmd_parms *cmd, void *config, int flag)
+{
+ if (flag && !client_shm)
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0,
+ cmd->server, "Digest: WARNING: nonce-count checking "
+ "is not supported on platforms without shared-memory "
+ "support - disabling check");
+
+ ((digest_config_rec *) config)->check_nc = flag;
+ return NULL;
+}
+
+static const char *set_algorithm(cmd_parms *cmd, void *config, const char *alg)
+{
+ if (!strcasecmp(alg, "MD5-sess")) {
+ if (!client_shm) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0,
+ cmd->server, "Digest: WARNING: algorithm `MD5-sess' "
+ "is not supported on platforms without shared-memory "
+ "support - reverting to MD5");
+ alg = "MD5";
+ }
+ }
+ else if (strcasecmp(alg, "MD5")) {
+ return apr_pstrcat(cmd->pool, "Invalid algorithm in AuthDigestAlgorithm: ", alg, NULL);
+ }
+
+ ((digest_config_rec *) config)->algorithm = alg;
+ return NULL;
+}
+
+static const char *set_uri_list(cmd_parms *cmd, void *config, const char *uri)
+{
+ digest_config_rec *c = (digest_config_rec *) config;
+ if (c->uri_list) {
+ c->uri_list[strlen(c->uri_list)-1] = '\0';
+ c->uri_list = apr_pstrcat(cmd->pool, c->uri_list, " ", uri, "\"", NULL);
+ }
+ else {
+ c->uri_list = apr_pstrcat(cmd->pool, ", domain=\"", uri, "\"", NULL);
+ }
+ return NULL;
+}
+
+static const char *set_shmem_size(cmd_parms *cmd, void *config,
+ const char *size_str)
+{
+ char *endptr;
+ long size, min;
+
+ size = strtol(size_str, &endptr, 10);
+ while (apr_isspace(*endptr)) endptr++;
+ if (*endptr == '\0' || *endptr == 'b' || *endptr == 'B') {
+ ;
+ }
+ else if (*endptr == 'k' || *endptr == 'K') {
+ size *= 1024;
+ }
+ else if (*endptr == 'm' || *endptr == 'M') {
+ size *= 1048576;
+ }
+ else {
+ return apr_pstrcat(cmd->pool, "Invalid size in AuthDigestShmemSize: ",
+ size_str, NULL);
+ }
+
+ min = sizeof(*client_list) + sizeof(client_entry*) + sizeof(client_entry);
+ if (size < min) {
+ return apr_psprintf(cmd->pool, "size in AuthDigestShmemSize too small: "
+ "%ld < %ld", size, min);
+ }
+
+ shmem_size = size;
+ num_buckets = (size - sizeof(*client_list)) /
+ (sizeof(client_entry*) + HASH_DEPTH * sizeof(client_entry));
+ if (num_buckets == 0) {
+ num_buckets = 1;
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server,
+ "Digest: Set shmem-size: %ld, num-buckets: %ld", shmem_size,
+ num_buckets);
+
+ return NULL;
+}
+
+static const command_rec digest_cmds[] =
+{
+ AP_INIT_TAKE1("AuthName", set_realm, NULL, OR_AUTHCFG,
+ "The authentication realm (e.g. \"Members Only\")"),
+ AP_INIT_TAKE1("AuthDigestFile", set_digest_file, NULL, OR_AUTHCFG,
+ "The name of the file containing the usernames and password hashes"),
+ AP_INIT_TAKE1("AuthDigestGroupFile", set_group_file, NULL, OR_AUTHCFG,
+ "The name of the file containing the group names and members"),
+ AP_INIT_ITERATE("AuthDigestQop", set_qop, NULL, OR_AUTHCFG,
+ "A list of quality-of-protection options"),
+ AP_INIT_TAKE1("AuthDigestNonceLifetime", set_nonce_lifetime, NULL, OR_AUTHCFG,
+ "Maximum lifetime of the server nonce (seconds)"),
+ AP_INIT_TAKE1("AuthDigestNonceFormat", set_nonce_format, NULL, OR_AUTHCFG,
+ "The format to use when generating the server nonce"),
+ AP_INIT_FLAG("AuthDigestNcCheck", set_nc_check, NULL, OR_AUTHCFG,
+ "Whether or not to check the nonce-count sent by the client"),
+ AP_INIT_TAKE1("AuthDigestAlgorithm", set_algorithm, NULL, OR_AUTHCFG,
+ "The algorithm used for the hash calculation"),
+ AP_INIT_ITERATE("AuthDigestDomain", set_uri_list, NULL, OR_AUTHCFG,
+ "A list of URI's which belong to the same protection space as the current URI"),
+ AP_INIT_TAKE1("AuthDigestShmemSize", set_shmem_size, NULL, RSRC_CONF,
+ "The amount of shared memory to allocate for keeping track of clients"),
+ {NULL}
+};
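+
+/* Illustrative use only: a minimal httpd.conf sketch for the directives
+ * registered above (realm, file path and domain URIs are placeholders):
+ *
+ *     AuthType Digest
+ *     AuthName "private area"
+ *     AuthDigestFile /usr/local/apache2/conf/digest_pw
+ *     AuthDigestQop auth
+ *     AuthDigestNonceLifetime 300
+ *     AuthDigestDomain /private/ http://mirror.example.com/private/
+ *     require valid-user
+ *
+ * AuthName must match the realm stored in the AuthDigestFile entries.
+ */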
+
+
+/*
+ * client list code
+ *
+ * Each client is assigned a number, which is transferred in the opaque
+ * field of the WWW-Authenticate and Authorization headers. The number
+ * is just a simple counter which is incremented for each new client.
+ * Clients can't forge this number because it is hashed up into the
+ * server nonce, and that is checked.
+ *
+ * The clients are kept in a simple hash table, which consists of an
+ * array of client_entry's, each with a linked list of entries hanging
+ * off it. The client's number modulo the size of the array gives the
+ * bucket number.
+ *
+ * The clients are garbage collected whenever a new client is allocated
+ * but there is not enough space left in the shared memory segment. A
+ * simple semi-LRU is used for this: whenever a client entry is accessed
+ * it is moved to the beginning of the linked list in its bucket (this
+ * also makes for faster lookups for current clients). The garbage
+ * collector then just removes the oldest entry (i.e. the one at the
+ * end of the list) in each bucket.
+ *
+ * The main advantages of the above scheme are that it's easy to implement
+ * and it keeps the hash table evenly balanced (i.e. same number of entries
+ * in each bucket). The major disadvantage is that you may be throwing
+ * entries out which are in active use. This is not tragic, as these
+ * clients will just be sent a new client id (opaque field) and nonce
+ * with a stale=true (i.e. it will just look like the nonce expired,
+ * thereby forcing an extra round trip). If the shared memory segment
+ * has enough headroom over the current client set size then this should
+ * not occur too often.
+ *
+ * To help tune the size of the shared memory segment (and see if the
+ * above algorithm is really sufficient) a set of counters is kept
+ * indicating the number of clients held, the number of garbage collected
+ * clients, and the number of erroneously purged clients. These are printed
+ * out at each garbage collection run. Note that access to the counters is
+ * not synchronized because they are just indicators, and whether they are
+ * off by a few doesn't matter; and for the same reason no attempt is made
+ * to guarantee the num_renewed is correct in the face of clients spoofing
+ * the opaque field.
+ */
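+
+/* A small illustration of the scheme (numbers are made up): with the default
+ * DEF_NUM_BUCKETS = 15 buckets, a client assigned opaque number 47 hashes to
+ * bucket 47 % 15 = 2 and is inserted at the head of that bucket's list.
+ */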
+
+/*
+ * Get the client given its client number (the key). Returns the entry,
+ * or NULL if it's not found.
+ *
+ * Access to the list itself is synchronized via locks. However, access
+ * to the entry returned by get_client() is NOT synchronized. This means
+ * that there are potentially problems if a client uses multiple,
+ * simultaneous connections to access URLs within the same protection
+ * space. However, these problems are not new: when using multiple
+ * connections you have no guarantee of the order the requests are
+ * processed anyway, so you have problems with the nonce-count and
+ * one-time nonces anyway.
+ */
+static client_entry *get_client(unsigned long key, const request_rec *r)
+{
+ int bucket;
+ client_entry *entry, *prev = NULL;
+
+
+ if (!key || !client_shm) return NULL;
+
+ bucket = key % client_list->tbl_len;
+ entry = client_list->table[bucket];
+
+ apr_global_mutex_lock(client_lock);
+
+ while (entry && key != entry->key) {
+ prev = entry;
+ entry = entry->next;
+ }
+
+ if (entry && prev) { /* move entry to front of list */
+ prev->next = entry->next;
+ entry->next = client_list->table[bucket];
+ client_list->table[bucket] = entry;
+ }
+
+ apr_global_mutex_unlock(client_lock);
+
+ if (entry) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "get_client(): client %lu found", key);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "get_client(): client %lu not found", key);
+ }
+
+ return entry;
+}
+
+
+/* A simple garbage-collector to remove unused clients. It removes the
+ * last entry in each bucket and updates the counters. Returns the
+ * number of removed entries.
+ */
+static long gc(void)
+{
+ client_entry *entry, *prev;
+ unsigned long num_removed = 0, idx;
+
+ /* garbage collect all last entries */
+
+ for (idx = 0; idx < client_list->tbl_len; idx++) {
+ entry = client_list->table[idx];
+ prev = NULL;
+ while (entry->next) { /* find last entry */
+ prev = entry;
+ entry = entry->next;
+ }
+ if (prev) {
+ prev->next = NULL; /* cut list */
+ }
+ else {
+ client_list->table[idx] = NULL;
+ }
+ if (entry) { /* remove entry */
+ apr_rmm_free(client_rmm, (apr_rmm_off_t)entry);
+ num_removed++;
+ }
+ }
+
+ /* update counters and log */
+
+ client_list->num_entries -= num_removed;
+ client_list->num_removed += num_removed;
+
+ return num_removed;
+}
+
+
+/*
+ * Add a new client to the list. Returns the entry if successful, NULL
+ * otherwise. This triggers the garbage collection if memory is low.
+ */
+static client_entry *add_client(unsigned long key, client_entry *info,
+ server_rec *s)
+{
+ int bucket;
+ client_entry *entry;
+
+
+ if (!key || !client_shm) {
+ return NULL;
+ }
+
+ bucket = key % client_list->tbl_len;
+ entry = client_list->table[bucket];
+
+ apr_global_mutex_lock(client_lock);
+
+ /* try to allocate a new entry */
+
+ entry = (client_entry *)apr_rmm_malloc(client_rmm, sizeof(client_entry));
+ if (!entry) {
+ long num_removed = gc();
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Digest: gc'd %ld client entries. Total new clients: "
+ "%ld; Total removed clients: %ld; Total renewed clients: "
+ "%ld", num_removed,
+ client_list->num_created - client_list->num_renewed,
+ client_list->num_removed, client_list->num_renewed);
+ entry = (client_entry *)apr_rmm_malloc(client_rmm, sizeof(client_entry));
+ if (!entry) {
+ return NULL; /* give up */
+ }
+ }
+
+ /* now add the entry */
+
+ memcpy(entry, info, sizeof(client_entry));
+ entry->key = key;
+ entry->next = client_list->table[bucket];
+ client_list->table[bucket] = entry;
+ client_list->num_created++;
+ client_list->num_entries++;
+
+ apr_global_mutex_unlock(client_lock);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "allocated new client %lu", key);
+
+ return entry;
+}
+
+
+/*
+ * Authorization header parser code
+ */
+
+/* Parse the Authorization header, if it exists */
+static int get_digest_rec(request_rec *r, digest_header_rec *resp)
+{
+ const char *auth_line;
+ apr_size_t l;
+ int vk = 0, vv = 0;
+ char *key, *value;
+
+ auth_line = apr_table_get(r->headers_in,
+ (PROXYREQ_PROXY == r->proxyreq)
+ ? "Proxy-Authorization"
+ : "Authorization");
+ if (!auth_line) {
+ resp->auth_hdr_sts = NO_HEADER;
+ return !OK;
+ }
+
+ resp->scheme = ap_getword_white(r->pool, &auth_line);
+ if (strcasecmp(resp->scheme, "Digest")) {
+ resp->auth_hdr_sts = NOT_DIGEST;
+ return !OK;
+ }
+
+ l = strlen(auth_line);
+
+ key = apr_palloc(r->pool, l+1);
+ value = apr_palloc(r->pool, l+1);
+
+ while (auth_line[0] != '\0') {
+
+ /* find key */
+
+ while (apr_isspace(auth_line[0])) {
+ auth_line++;
+ }
+ vk = 0;
+ while (auth_line[0] != '=' && auth_line[0] != ','
+ && auth_line[0] != '\0' && !apr_isspace(auth_line[0])) {
+ key[vk++] = *auth_line++;
+ }
+ key[vk] = '\0';
+ while (apr_isspace(auth_line[0])) {
+ auth_line++;
+ }
+
+ /* find value */
+
+ if (auth_line[0] == '=') {
+ auth_line++;
+ while (apr_isspace(auth_line[0])) {
+ auth_line++;
+ }
+
+ vv = 0;
+ if (auth_line[0] == '\"') { /* quoted string */
+ auth_line++;
+ while (auth_line[0] != '\"' && auth_line[0] != '\0') {
+ if (auth_line[0] == '\\' && auth_line[1] != '\0') {
+ auth_line++; /* escaped char */
+ }
+ value[vv++] = *auth_line++;
+ }
+ if (auth_line[0] != '\0') {
+ auth_line++;
+ }
+ }
+ else { /* token */
+ while (auth_line[0] != ',' && auth_line[0] != '\0'
+ && !apr_isspace(auth_line[0])) {
+ value[vv++] = *auth_line++;
+ }
+ }
+ value[vv] = '\0';
+ }
+
+ while (auth_line[0] != ',' && auth_line[0] != '\0') {
+ auth_line++;
+ }
+ if (auth_line[0] != '\0') {
+ auth_line++;
+ }
+
+ if (!strcasecmp(key, "username"))
+ resp->username = apr_pstrdup(r->pool, value);
+ else if (!strcasecmp(key, "realm"))
+ resp->realm = apr_pstrdup(r->pool, value);
+ else if (!strcasecmp(key, "nonce"))
+ resp->nonce = apr_pstrdup(r->pool, value);
+ else if (!strcasecmp(key, "uri"))
+ resp->uri = apr_pstrdup(r->pool, value);
+ else if (!strcasecmp(key, "response"))
+ resp->digest = apr_pstrdup(r->pool, value);
+ else if (!strcasecmp(key, "algorithm"))
+ resp->algorithm = apr_pstrdup(r->pool, value);
+ else if (!strcasecmp(key, "cnonce"))
+ resp->cnonce = apr_pstrdup(r->pool, value);
+ else if (!strcasecmp(key, "opaque"))
+ resp->opaque = apr_pstrdup(r->pool, value);
+ else if (!strcasecmp(key, "qop"))
+ resp->message_qop = apr_pstrdup(r->pool, value);
+ else if (!strcasecmp(key, "nc"))
+ resp->nonce_count = apr_pstrdup(r->pool, value);
+ }
+
+ if (!resp->username || !resp->realm || !resp->nonce || !resp->uri
+ || !resp->digest
+ || (resp->message_qop && (!resp->cnonce || !resp->nonce_count))) {
+ resp->auth_hdr_sts = INVALID;
+ return !OK;
+ }
+
+ if (resp->opaque) {
+ resp->opaque_num = (unsigned long) strtol(resp->opaque, NULL, 16);
+ }
+
+ resp->auth_hdr_sts = VALID;
+ return OK;
+}
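+
+/* For reference, a hypothetical Authorization header this parser accepts
+ * (all field values below are made up and wrapped for readability):
+ *
+ *   Authorization: Digest username="jane", realm="private area",
+ *       nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", uri="/private/index.html",
+ *       algorithm=MD5, qop=auth, nc=00000001, cnonce="0a4f113b",
+ *       response="6629fae49393a05397450978507c4ef1",
+ *       opaque="5ccc069c403ebaf9f0171e9517f40e41"
+ */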
+
+
+/* Because the browser may preemptively send auth info, incrementing the
+ * nonce-count when it does, and because the client does not get notified
+ * if the URI didn't need authentication after all, we need to be sure to
+ * update the nonce-count each time we receive an Authorization header no
+ * matter what the final outcome of the request. Furthermore this is a
+ * convenient place to get the request-uri (before any subrequests etc
+ * are initiated) and to initialize the request_config.
+ *
+ * Note that this must be called after mod_proxy had its go so that
+ * r->proxyreq is set correctly.
+ */
+static int parse_hdr_and_update_nc(request_rec *r)
+{
+ digest_header_rec *resp;
+ int res;
+
+ if (!ap_is_initial_req(r)) {
+ return DECLINED;
+ }
+
+ resp = apr_pcalloc(r->pool, sizeof(digest_header_rec));
+ resp->raw_request_uri = r->unparsed_uri;
+ resp->psd_request_uri = &r->parsed_uri;
+ resp->needed_auth = 0;
+ resp->method = r->method;
+ ap_set_module_config(r->request_config, &auth_digest_module, resp);
+
+ res = get_digest_rec(r, resp);
+ resp->client = get_client(resp->opaque_num, r);
+ if (res == OK && resp->client) {
+ resp->client->nonce_count++;
+ }
+
+ return DECLINED;
+}
+
+
+/*
+ * Nonce generation code
+ */
+
+/* The hash part of the nonce is a SHA-1 hash of the time, realm, server host
+ * and port, opaque, and our secret.
+ */
+static void gen_nonce_hash(char *hash, const char *timestr, const char *opaque,
+ const server_rec *server,
+ const digest_config_rec *conf)
+{
+ const char *hex = "0123456789abcdef";
+ unsigned char sha1[APR_SHA1_DIGESTSIZE];
+ apr_sha1_ctx_t ctx;
+ int idx;
+
+ memcpy(&ctx, &conf->nonce_ctx, sizeof(ctx));
+ /*
+ apr_sha1_update_binary(&ctx, (const unsigned char *) server->server_hostname,
+ strlen(server->server_hostname));
+ apr_sha1_update_binary(&ctx, (const unsigned char *) &server->port,
+ sizeof(server->port));
+ */
+ apr_sha1_update_binary(&ctx, (const unsigned char *) timestr, strlen(timestr));
+ if (opaque) {
+ apr_sha1_update_binary(&ctx, (const unsigned char *) opaque,
+ strlen(opaque));
+ }
+ apr_sha1_final(sha1, &ctx);
+
+ for (idx=0; idx<APR_SHA1_DIGESTSIZE; idx++) {
+ *hash++ = hex[sha1[idx] >> 4];
+ *hash++ = hex[sha1[idx] & 0xF];
+ }
+
+ *hash++ = '\0';
+}
+
+
+/* The nonce has the format b64(time)+hash .
+ */
+static const char *gen_nonce(apr_pool_t *p, apr_time_t now, const char *opaque,
+ const server_rec *server,
+ const digest_config_rec *conf)
+{
+ char *nonce = apr_palloc(p, NONCE_LEN+1);
+ int len;
+ time_rec t;
+
+ if (conf->nonce_lifetime != 0) {
+ t.time = now;
+ }
+ else if (otn_counter) {
+ /* this counter is not synch'd, because it doesn't really matter
+ * if it counts exactly.
+ */
+ t.time = (*otn_counter)++;
+ }
+ else {
+ /* XXX: WHAT IS THIS CONSTANT? */
+ t.time = 42;
+ }
+ len = apr_base64_encode_binary(nonce, t.arr, sizeof(t.arr));
+ gen_nonce_hash(nonce+NONCE_TIME_LEN, nonce, opaque, server, conf);
+
+ return nonce;
+}
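+
+/* Nonce layout sketch (assuming a 64-bit apr_time_t): the first
+ * NONCE_TIME_LEN (12) characters hold the base64-encoded timestamp (or
+ * counter) and the following NONCE_HASH_LEN (40) characters hold the hex
+ * SHA-1 hash, for NONCE_LEN = 52 characters in total.
+ */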
+
+
+/*
+ * Opaque and hash-table management
+ */
+
+/*
+ * Generate a new client entry, add it to the list, and return the
+ * entry. Returns NULL if failed.
+ */
+static client_entry *gen_client(const request_rec *r)
+{
+ unsigned long op;
+ client_entry new_entry = { 0, NULL, 0, "", "" }, *entry;
+
+ if (!opaque_cntr) {
+ return NULL;
+ }
+
+ apr_global_mutex_lock(opaque_lock);
+ op = (*opaque_cntr)++;
+ apr_global_mutex_unlock(opaque_lock);
+
+ if (!(entry = add_client(op, &new_entry, r->server))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: failed to allocate client entry - ignoring "
+ "client");
+ return NULL;
+ }
+
+ return entry;
+}
+
+
+/*
+ * MD5-sess code.
+ *
+ * If you want to use algorithm=MD5-sess you must write get_userpw_hash()
+ * yourself (see below). The dummy provided here just uses the hash from
+ * the auth-file, i.e. it is only useful for testing client implementations
+ * of MD5-sess .
+ */
+
+/*
+ * get_userpw_hash() will be called each time a new session needs to be
+ * generated and is expected to return the equivalent of
+ *
+ * h_urp = ap_md5(r->pool,
+ * apr_pstrcat(r->pool, username, ":", ap_auth_name(r), ":", passwd))
+ * ap_md5(r->pool,
+ * (unsigned char *) apr_pstrcat(r->pool, h_urp, ":", resp->nonce, ":",
+ * resp->cnonce, NULL));
+ *
+ * or put differently, it must return
+ *
+ * MD5(MD5(username ":" realm ":" password) ":" nonce ":" cnonce)
+ *
+ * If something goes wrong, the failure must be logged and NULL returned.
+ *
+ * You must implement this yourself, which will probably consist of code
+ * contacting the password server with the necessary information (typically
+ * the username, realm, nonce, and cnonce) and receiving the hash from it.
+ *
+ * TBD: This function should probably be in a separate source file so that
+ * people need not modify mod_auth_digest.c each time they install a new
+ * version of apache.
+ */
+static const char *get_userpw_hash(const request_rec *r,
+ const digest_header_rec *resp,
+ const digest_config_rec *conf)
+{
+ return ap_md5(r->pool,
+ (unsigned char *) apr_pstrcat(r->pool, conf->ha1, ":", resp->nonce,
+ ":", resp->cnonce, NULL));
+}
+
+
+/* Retrieve current session H(A1). If there is none and "generate" is
+ * true then a new session for MD5-sess is generated and stored in the
+ * client struct; if generate is false, or a new session could not be
+ * generated then NULL is returned (in case of failure to generate the
+ * failure reason will have been logged already).
+ */
+static const char *get_session_HA1(const request_rec *r,
+ digest_header_rec *resp,
+ const digest_config_rec *conf,
+ int generate)
+{
+ const char *ha1 = NULL;
+
+ /* return the current session if there is one */
+ if (resp->opaque && resp->client && resp->client->ha1[0]) {
+ return resp->client->ha1;
+ }
+ else if (!generate) {
+ return NULL;
+ }
+
+ /* generate a new session */
+ if (!resp->client) {
+ resp->client = gen_client(r);
+ }
+ if (resp->client) {
+ ha1 = get_userpw_hash(r, resp, conf);
+ if (ha1) {
+ memcpy(resp->client->ha1, ha1, sizeof(resp->client->ha1));
+ }
+ }
+
+ return ha1;
+}
+
+
+static void clear_session(const digest_header_rec *resp)
+{
+ if (resp->client) {
+ resp->client->ha1[0] = '\0';
+ }
+}
+
+/*
+ * Authorization challenge generation code (for WWW-Authenticate)
+ */
+
+static const char *ltox(apr_pool_t *p, unsigned long num)
+{
+ if (num != 0) {
+ return apr_psprintf(p, "%lx", num);
+ }
+ else {
+ return "";
+ }
+}
+
+static void note_digest_auth_failure(request_rec *r,
+ const digest_config_rec *conf,
+ digest_header_rec *resp, int stale)
+{
+ const char *qop, *opaque, *opaque_param, *domain, *nonce;
+ int cnt;
+
+ /* Setup qop */
+
+ if (conf->qop_list[0] == NULL) {
+ qop = ", qop=\"auth\"";
+ }
+ else if (!strcasecmp(conf->qop_list[0], "none")) {
+ qop = "";
+ }
+ else {
+ qop = apr_pstrcat(r->pool, ", qop=\"", conf->qop_list[0], NULL);
+ for (cnt = 1; conf->qop_list[cnt] != NULL; cnt++) {
+ qop = apr_pstrcat(r->pool, qop, ",", conf->qop_list[cnt], NULL);
+ }
+ qop = apr_pstrcat(r->pool, qop, "\"", NULL);
+ }
+
+ /* Setup opaque */
+
+ if (resp->opaque == NULL) {
+ /* new client */
+ if ((conf->check_nc || conf->nonce_lifetime == 0
+ || !strcasecmp(conf->algorithm, "MD5-sess"))
+ && (resp->client = gen_client(r)) != NULL) {
+ opaque = ltox(r->pool, resp->client->key);
+ }
+ else {
+ opaque = ""; /* opaque not needed */
+ }
+ }
+ else if (resp->client == NULL) {
+ /* client info was gc'd */
+ resp->client = gen_client(r);
+ if (resp->client != NULL) {
+ opaque = ltox(r->pool, resp->client->key);
+ stale = 1;
+ client_list->num_renewed++;
+ }
+ else {
+ opaque = ""; /* ??? */
+ }
+ }
+ else {
+ opaque = resp->opaque;
+ /* we're generating a new nonce, so reset the nonce-count */
+ resp->client->nonce_count = 0;
+ }
+
+ if (opaque[0]) {
+ opaque_param = apr_pstrcat(r->pool, ", opaque=\"", opaque, "\"", NULL);
+ }
+ else {
+ opaque_param = NULL;
+ }
+
+ /* Setup nonce */
+
+ nonce = gen_nonce(r->pool, r->request_time, opaque, r->server, conf);
+ if (resp->client && conf->nonce_lifetime == 0) {
+ memcpy(resp->client->last_nonce, nonce, NONCE_LEN+1);
+ }
+
+ /* Setup MD5-sess stuff. Note that we just clear out the session
+ * info here, since we can't generate a new session until the request
+ * from the client comes in with the cnonce.
+ */
+
+ if (!strcasecmp(conf->algorithm, "MD5-sess")) {
+ clear_session(resp);
+ }
+
+ /* setup domain attribute. We want to send this attribute wherever
+ * possible so that the client won't send the Authorization header
+ * unnecessarily (it's usually > 200 bytes!).
+ */
+
+ /* don't send domain
+ * - for proxy requests
+ * - if it's not specified
+ */
+ if (r->proxyreq || !conf->uri_list) {
+ domain = NULL;
+ }
+ else {
+ domain = conf->uri_list;
+ }
+
+ apr_table_mergen(r->err_headers_out,
+ (PROXYREQ_PROXY == r->proxyreq)
+ ? "Proxy-Authenticate" : "WWW-Authenticate",
+ apr_psprintf(r->pool, "Digest realm=\"%s\", "
+ "nonce=\"%s\", algorithm=%s%s%s%s%s",
+ ap_auth_name(r), nonce, conf->algorithm,
+ opaque_param ? opaque_param : "",
+ domain ? domain : "",
+ stale ? ", stale=true" : "", qop));
+
+}
+
+
+/*
+ * Authorization header verification code
+ */
+
+static const char *get_hash(request_rec *r, const char *user,
+ const char *realm, const char *auth_pwfile)
+{
+ ap_configfile_t *f;
+ char l[MAX_STRING_LEN];
+ const char *rpw;
+ char *w, *x;
+ apr_status_t sts;
+
+ if ((sts = ap_pcfg_openfile(&f, r->pool, auth_pwfile)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, sts, r,
+ "Digest: Could not open password file: %s", auth_pwfile);
+ return NULL;
+ }
+ while (!(ap_cfg_getline(l, MAX_STRING_LEN, f))) {
+ if ((l[0] == '#') || (!l[0])) {
+ continue;
+ }
+ rpw = l;
+ w = ap_getword(r->pool, &rpw, ':');
+ x = ap_getword(r->pool, &rpw, ':');
+
+ if (x && w && !strcmp(user, w) && !strcmp(realm, x)) {
+ ap_cfg_closefile(f);
+ return apr_pstrdup(r->pool, rpw);
+ }
+ }
+ ap_cfg_closefile(f);
+ return NULL;
+}
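+
+/* The password file read above is expected to hold one "user:realm:hash"
+ * entry per line, where the hash is H(A1) = MD5(user ":" realm ":" password)
+ * as written by a tool such as htdigest, e.g. (hash value is illustrative):
+ *
+ *     jane:private area:0902bb49f0b04d5f2e7ba8bbd6bc1223
+ *
+ * Empty lines and lines starting with '#' are skipped.
+ */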
+
+static int check_nc(const request_rec *r, const digest_header_rec *resp,
+ const digest_config_rec *conf)
+{
+ unsigned long nc;
+ const char *snc = resp->nonce_count;
+ char *endptr;
+
+ if (!conf->check_nc || !client_shm) {
+ return OK;
+ }
+
+ nc = strtol(snc, &endptr, 16);
+ if (endptr < (snc+strlen(snc)) && !apr_isspace(*endptr)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: invalid nc %s received - not a number", snc);
+ return !OK;
+ }
+
+ if (!resp->client) {
+ return !OK;
+ }
+
+ if (nc != resp->client->nonce_count) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: Warning, possible replay attack: nonce-count "
+ "check failed: %lu != %lu", nc,
+ resp->client->nonce_count);
+ return !OK;
+ }
+
+ return OK;
+}
+
+static int check_nonce(request_rec *r, digest_header_rec *resp,
+ const digest_config_rec *conf)
+{
+ apr_time_t dt;
+ int len;
+ time_rec nonce_time;
+ char tmp, hash[NONCE_HASH_LEN+1];
+
+ if (strlen(resp->nonce) != NONCE_LEN) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: invalid nonce %s received - length is not %d",
+ resp->nonce, NONCE_LEN);
+ note_digest_auth_failure(r, conf, resp, 1);
+ return HTTP_UNAUTHORIZED;
+ }
+
+ tmp = resp->nonce[NONCE_TIME_LEN];
+ resp->nonce[NONCE_TIME_LEN] = '\0';
+ len = apr_base64_decode_binary(nonce_time.arr, resp->nonce);
+ gen_nonce_hash(hash, resp->nonce, resp->opaque, r->server, conf);
+ resp->nonce[NONCE_TIME_LEN] = tmp;
+ resp->nonce_time = nonce_time.time;
+
+ if (strcmp(hash, resp->nonce+NONCE_TIME_LEN)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: invalid nonce %s received - hash is not %s",
+ resp->nonce, hash);
+ note_digest_auth_failure(r, conf, resp, 1);
+ return HTTP_UNAUTHORIZED;
+ }
+
+ dt = r->request_time - nonce_time.time;
+ if (conf->nonce_lifetime > 0 && dt < 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: invalid nonce %s received - user attempted "
+ "time travel", resp->nonce);
+ note_digest_auth_failure(r, conf, resp, 1);
+ return HTTP_UNAUTHORIZED;
+ }
+
+ if (conf->nonce_lifetime > 0) {
+ if (dt > conf->nonce_lifetime) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0,r,
+ "Digest: user %s: nonce expired (%.2f seconds old "
+ "- max lifetime %.2f) - sending new nonce",
+ r->user, (double)apr_time_sec(dt),
+ (double)apr_time_sec(conf->nonce_lifetime));
+ note_digest_auth_failure(r, conf, resp, 1);
+ return HTTP_UNAUTHORIZED;
+ }
+ }
+ else if (conf->nonce_lifetime == 0 && resp->client) {
+ if (memcmp(resp->client->last_nonce, resp->nonce, NONCE_LEN)) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
+ "Digest: user %s: one-time-nonce mismatch - sending "
+ "new nonce", r->user);
+ note_digest_auth_failure(r, conf, resp, 1);
+ return HTTP_UNAUTHORIZED;
+ }
+ }
+ /* else (lifetime < 0) => never expires */
+
+ return OK;
+}
+
+/* The actual MD5 code... whee */
+
+/* RFC-2069 */
+static const char *old_digest(const request_rec *r,
+ const digest_header_rec *resp, const char *ha1)
+{
+ const char *ha2;
+
+ ha2 = ap_md5(r->pool, (unsigned char *)apr_pstrcat(r->pool, resp->method, ":",
+ resp->uri, NULL));
+ return ap_md5(r->pool,
+ (unsigned char *)apr_pstrcat(r->pool, ha1, ":", resp->nonce,
+ ":", ha2, NULL));
+}
+
+/* RFC-2617 */
+static const char *new_digest(const request_rec *r,
+ digest_header_rec *resp,
+ const digest_config_rec *conf)
+{
+ const char *ha1, *ha2, *a2;
+
+ if (resp->algorithm && !strcasecmp(resp->algorithm, "MD5-sess")) {
+ ha1 = get_session_HA1(r, resp, conf, 1);
+ if (!ha1) {
+ return NULL;
+ }
+ }
+ else {
+ ha1 = conf->ha1;
+ }
+
+ if (resp->message_qop && !strcasecmp(resp->message_qop, "auth-int")) {
+ a2 = apr_pstrcat(r->pool, resp->method, ":", resp->uri, ":",
+ ap_md5(r->pool, (const unsigned char*) ""), NULL);
+ /* TBD */
+ }
+ else {
+ a2 = apr_pstrcat(r->pool, resp->method, ":", resp->uri, NULL);
+ }
+ ha2 = ap_md5(r->pool, (const unsigned char *)a2);
+
+ return ap_md5(r->pool,
+ (unsigned char *)apr_pstrcat(r->pool, ha1, ":", resp->nonce,
+ ":", resp->nonce_count, ":",
+ resp->cnonce, ":",
+ resp->message_qop, ":", ha2,
+ NULL));
+}
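+
+/* In the common qop=auth case the function above therefore computes
+ *
+ *     HA2      = MD5(method ":" uri)
+ *     response = MD5(HA1 ":" nonce ":" nc ":" cnonce ":" qop ":" HA2)
+ *
+ * and the caller compares this against the "response" value sent by the
+ * client.
+ */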
+
+
+static void copy_uri_components(apr_uri_t *dst,
+ apr_uri_t *src, request_rec *r) {
+ if (src->scheme && src->scheme[0] != '\0') {
+ dst->scheme = src->scheme;
+ }
+ else {
+ dst->scheme = (char *) "http";
+ }
+
+ if (src->hostname && src->hostname[0] != '\0') {
+ dst->hostname = apr_pstrdup(r->pool, src->hostname);
+ ap_unescape_url(dst->hostname);
+ }
+ else {
+ dst->hostname = (char *) ap_get_server_name(r);
+ }
+
+ if (src->port_str && src->port_str[0] != '\0') {
+ dst->port = src->port;
+ }
+ else {
+ dst->port = ap_get_server_port(r);
+ }
+
+ if (src->path && src->path[0] != '\0') {
+ dst->path = apr_pstrdup(r->pool, src->path);
+ ap_unescape_url(dst->path);
+ }
+ else {
+ dst->path = src->path;
+ }
+
+ if (src->query && src->query[0] != '\0') {
+ dst->query = apr_pstrdup(r->pool, src->query);
+ ap_unescape_url(dst->query);
+ }
+ else {
+ dst->query = src->query;
+ }
+
+ dst->hostinfo = src->hostinfo;
+}
+
+/* These functions return 0 if client is OK, and proper error status
+ * if not... either HTTP_UNAUTHORIZED, if we made a check, and it failed, or
+ * HTTP_INTERNAL_SERVER_ERROR, if things are so totally confused that we
+ * couldn't figure out how to tell if the client is authorized or not.
+ *
+ * If they return DECLINED, and all other modules also decline, that's
+ * treated by the server core as a configuration error, logged and
+ * reported as such.
+ */
+
+/* Determine user ID, and check if the attributes are correct, if it
+ * really is that user, if the nonce is correct, etc.
+ */
+
+static int authenticate_digest_user(request_rec *r)
+{
+ digest_config_rec *conf;
+ digest_header_rec *resp;
+ request_rec *mainreq;
+ const char *t;
+ int res;
+
+ /* do we require Digest auth for this URI? */
+
+ if (!(t = ap_auth_type(r)) || strcasecmp(t, "Digest")) {
+ return DECLINED;
+ }
+
+ if (!ap_auth_name(r)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: need AuthName: %s", r->uri);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+
+ /* get the client response and mark */
+
+ mainreq = r;
+ while (mainreq->main != NULL) {
+ mainreq = mainreq->main;
+ }
+ while (mainreq->prev != NULL) {
+ mainreq = mainreq->prev;
+ }
+ resp = (digest_header_rec *) ap_get_module_config(mainreq->request_config,
+ &auth_digest_module);
+ resp->needed_auth = 1;
+
+
+ /* get our conf */
+
+ conf = (digest_config_rec *) ap_get_module_config(r->per_dir_config,
+ &auth_digest_module);
+
+
+ /* check for existence and syntax of Auth header */
+
+ if (resp->auth_hdr_sts != VALID) {
+ if (resp->auth_hdr_sts == NOT_DIGEST) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: client used wrong authentication scheme "
+ "`%s': %s", resp->scheme, r->uri);
+ }
+ else if (resp->auth_hdr_sts == INVALID) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: missing user, realm, nonce, uri, digest, "
+ "cnonce, or nonce_count in authorization header: %s",
+ r->uri);
+ }
+ /* else (resp->auth_hdr_sts == NO_HEADER) */
+ note_digest_auth_failure(r, conf, resp, 0);
+ return HTTP_UNAUTHORIZED;
+ }
+
+ r->user = (char *) resp->username;
+ r->ap_auth_type = (char *) "Digest";
+
+ /* check the auth attributes */
+
+ if (strcmp(resp->uri, resp->raw_request_uri)) {
+ /* Hmm, the simple match didn't work (probably a proxy modified the
+ * request-uri), so let's do a more sophisticated match
+ */
+ apr_uri_t r_uri, d_uri;
+
+ copy_uri_components(&r_uri, resp->psd_request_uri, r);
+ if (apr_uri_parse(r->pool, resp->uri, &d_uri) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: invalid uri <%s> in Authorization header",
+ resp->uri);
+ return HTTP_BAD_REQUEST;
+ }
+
+ if (d_uri.hostname) {
+ ap_unescape_url(d_uri.hostname);
+ }
+ if (d_uri.path) {
+ ap_unescape_url(d_uri.path);
+ }
+ if (d_uri.query) {
+ ap_unescape_url(d_uri.query);
+ }
+ else if (r_uri.query) {
+ /* MSIE compatibility hack. MSIE has some RFC issues - doesn't
+ * include the query string in the uri Authorization component
+ * or when computing the response component. the second part
+ * works out ok, since we can hash the header and get the same
+ * result. however, the uri from the request line won't match
+ * the uri Authorization component since the header lacks the
+ * query string, leaving us incompatible with a (broken) MSIE.
+ *
+ * the workaround is to fake a query string match if in the proper
+ * environment - BrowserMatch MSIE, for example. the cool thing
+ * is that if MSIE ever fixes itself the simple match ought to
+ * work and this code won't be reached anyway, even if the
+ * environment is set.
+ */
+
+ if (apr_table_get(r->subprocess_env,
+ "AuthDigestEnableQueryStringHack")) {
+ d_uri.query = r_uri.query;
+ }
+ }
+
+ if (r->method_number == M_CONNECT) {
+ if (!r_uri.hostinfo || strcmp(resp->uri, r_uri.hostinfo)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: uri mismatch - <%s> does not match "
+ "request-uri <%s>", resp->uri, r_uri.hostinfo);
+ return HTTP_BAD_REQUEST;
+ }
+ }
+ else if (
+ /* check hostname matches, if present */
+ (d_uri.hostname && d_uri.hostname[0] != '\0'
+ && strcasecmp(d_uri.hostname, r_uri.hostname))
+ /* check port matches, if present */
+ || (d_uri.port_str && d_uri.port != r_uri.port)
+ /* check that server-port is default port if no port present */
+ || (d_uri.hostname && d_uri.hostname[0] != '\0'
+ && !d_uri.port_str && r_uri.port != ap_default_port(r))
+ /* check that path matches */
+ || (d_uri.path != r_uri.path
+ /* either exact match */
+ && (!d_uri.path || !r_uri.path
+ || strcmp(d_uri.path, r_uri.path))
+ /* or '*' matches empty path in scheme://host */
+ && !(d_uri.path && !r_uri.path && resp->psd_request_uri->hostname
+ && d_uri.path[0] == '*' && d_uri.path[1] == '\0'))
+ /* check that query matches */
+ || (d_uri.query != r_uri.query
+ && (!d_uri.query || !r_uri.query
+ || strcmp(d_uri.query, r_uri.query)))
+ ) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: uri mismatch - <%s> does not match "
+ "request-uri <%s>", resp->uri, resp->raw_request_uri);
+ return HTTP_BAD_REQUEST;
+ }
+ }
+
+ if (resp->opaque && resp->opaque_num == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: received invalid opaque - got `%s'",
+ resp->opaque);
+ note_digest_auth_failure(r, conf, resp, 0);
+ return HTTP_UNAUTHORIZED;
+ }
+
+ if (strcmp(resp->realm, conf->realm)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: realm mismatch - got `%s' but expected `%s'",
+ resp->realm, conf->realm);
+ note_digest_auth_failure(r, conf, resp, 0);
+ return HTTP_UNAUTHORIZED;
+ }
+
+ if (resp->algorithm != NULL
+ && strcasecmp(resp->algorithm, "MD5")
+ && strcasecmp(resp->algorithm, "MD5-sess")) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: unknown algorithm `%s' received: %s",
+ resp->algorithm, r->uri);
+ note_digest_auth_failure(r, conf, resp, 0);
+ return HTTP_UNAUTHORIZED;
+ }
+
+ if (!conf->pwfile) {
+ return DECLINED;
+ }
+
+ if (!(conf->ha1 = get_hash(r, r->user, conf->realm, conf->pwfile))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: user `%s' in realm `%s' not found: %s",
+ r->user, conf->realm, r->uri);
+ note_digest_auth_failure(r, conf, resp, 0);
+ return HTTP_UNAUTHORIZED;
+ }
+
+
+ if (resp->message_qop == NULL) {
+ /* old (rfc-2069) style digest */
+ if (strcmp(resp->digest, old_digest(r, resp, conf->ha1))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: user %s: password mismatch: %s", r->user,
+ r->uri);
+ note_digest_auth_failure(r, conf, resp, 0);
+ return HTTP_UNAUTHORIZED;
+ }
+ }
+ else {
+ const char *exp_digest;
+ int match = 0, idx;
+ for (idx = 0; conf->qop_list[idx] != NULL; idx++) {
+ if (!strcasecmp(conf->qop_list[idx], resp->message_qop)) {
+ match = 1;
+ break;
+ }
+ }
+
+ if (!match
+ && !(conf->qop_list[0] == NULL
+ && !strcasecmp(resp->message_qop, "auth"))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: invalid qop `%s' received: %s",
+ resp->message_qop, r->uri);
+ note_digest_auth_failure(r, conf, resp, 0);
+ return HTTP_UNAUTHORIZED;
+ }
+
+ exp_digest = new_digest(r, resp, conf);
+ if (!exp_digest) {
+ /* we failed to allocate a client struct */
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ if (strcmp(resp->digest, exp_digest)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: user %s: password mismatch: %s", r->user,
+ r->uri);
+ note_digest_auth_failure(r, conf, resp, 0);
+ return HTTP_UNAUTHORIZED;
+ }
+ }
+
+ if (check_nc(r, resp, conf) != OK) {
+ note_digest_auth_failure(r, conf, resp, 0);
+ return HTTP_UNAUTHORIZED;
+ }
+
+ /* Note: this check is done last so that a "stale=true" can be
+ generated if the nonce is old */
+ if ((res = check_nonce(r, resp, conf))) {
+ return res;
+ }
+
+ return OK;
+}
+
+
+/*
+ * Checking ID
+ */
+
+static apr_table_t *groups_for_user(request_rec *r, const char *user,
+ const char *grpfile)
+{
+ ap_configfile_t *f;
+ apr_table_t *grps = apr_table_make(r->pool, 15);
+ apr_pool_t *sp;
+ char l[MAX_STRING_LEN];
+ const char *group_name, *ll, *w;
+ apr_status_t sts;
+
+ if ((sts = ap_pcfg_openfile(&f, r->pool, grpfile)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, sts, r,
+ "Digest: Could not open group file: %s", grpfile);
+ return NULL;
+ }
+
+ if (apr_pool_create(&sp, r->pool) != APR_SUCCESS) {
+ return NULL;
+ }
+
+ while (!(ap_cfg_getline(l, MAX_STRING_LEN, f))) {
+ if ((l[0] == '#') || (!l[0])) {
+ continue;
+ }
+ ll = l;
+ apr_pool_clear(sp);
+
+ group_name = ap_getword(sp, &ll, ':');
+
+ while (ll[0]) {
+ w = ap_getword_conf(sp, &ll);
+ if (!strcmp(w, user)) {
+ apr_table_setn(grps, apr_pstrdup(r->pool, group_name), "in");
+ break;
+ }
+ }
+ }
+
+ ap_cfg_closefile(f);
+ apr_pool_destroy(sp);
+ return grps;
+}
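+/* For reference, the group file parsed above uses one group per line in the
+ * form "group-name: member member ...", e.g. (hypothetical names):
+ *
+ *   admins: alice bob
+ *   staff: carol dave
+ *
+ * Empty lines and lines starting with '#' are skipped, as groups_for_user()
+ * shows.
+ */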
+
+
+static int digest_check_auth(request_rec *r)
+{
+ const digest_config_rec *conf =
+ (digest_config_rec *) ap_get_module_config(r->per_dir_config,
+ &auth_digest_module);
+ const char *user = r->user;
+ int m = r->method_number;
+ int method_restricted = 0;
+ register int x;
+ const char *t, *w;
+ apr_table_t *grpstatus;
+ const apr_array_header_t *reqs_arr;
+ require_line *reqs;
+
+ if (!(t = ap_auth_type(r)) || strcasecmp(t, "Digest")) {
+ return DECLINED;
+ }
+
+ reqs_arr = ap_requires(r);
+ /* If there is no "requires" directive, then any user will do.
+ */
+ if (!reqs_arr) {
+ return OK;
+ }
+ reqs = (require_line *) reqs_arr->elts;
+
+ if (conf->grpfile) {
+ grpstatus = groups_for_user(r, user, conf->grpfile);
+ }
+ else {
+ grpstatus = NULL;
+ }
+
+ for (x = 0; x < reqs_arr->nelts; x++) {
+
+ if (!(reqs[x].method_mask & (AP_METHOD_BIT << m))) {
+ continue;
+ }
+
+ method_restricted = 1;
+
+ t = reqs[x].requirement;
+ w = ap_getword_white(r->pool, &t);
+ if (!strcasecmp(w, "valid-user")) {
+ return OK;
+ }
+ else if (!strcasecmp(w, "user")) {
+ while (t[0]) {
+ w = ap_getword_conf(r->pool, &t);
+ if (!strcmp(user, w)) {
+ return OK;
+ }
+ }
+ }
+ else if (!strcasecmp(w, "group")) {
+ if (!grpstatus) {
+ return DECLINED;
+ }
+
+ while (t[0]) {
+ w = ap_getword_conf(r->pool, &t);
+ if (apr_table_get(grpstatus, w)) {
+ return OK;
+ }
+ }
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: access to %s failed, reason: unknown "
+ "require directive \"%s\"",
+ r->uri, reqs[x].requirement);
+ return DECLINED;
+ }
+ }
+
+ if (!method_restricted) {
+ return OK;
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: access to %s failed, reason: user %s not "
+ "allowed access", r->uri, user);
+
+ note_digest_auth_failure(r, conf,
+ (digest_header_rec *) ap_get_module_config(r->request_config,
+ &auth_digest_module),
+ 0);
+ return HTTP_UNAUTHORIZED;
+}
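+/* digest_check_auth() above honours the usual require forms ("valid-user",
+ * "user ...", "group ..."). A per-directory configuration sketch, with
+ * illustrative realm/group names and assuming the AuthDigestFile and
+ * AuthDigestGroupFile directives from this module's command table:
+ *
+ *   AuthType Digest
+ *   AuthName "private area"
+ *   AuthDigestFile conf/digest_pw
+ *   AuthDigestGroupFile conf/digest_groups
+ *   Require group admins
+ */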
+
+
+/*
+ * Authorization-Info header code
+ */
+
+#ifdef SEND_DIGEST
+static const char *hdr(const apr_table_t *tbl, const char *name)
+{
+ const char *val = apr_table_get(tbl, name);
+ if (val) {
+ return val;
+ }
+ else {
+ return "";
+ }
+}
+#endif
+
+static int add_auth_info(request_rec *r)
+{
+ const digest_config_rec *conf =
+ (digest_config_rec *) ap_get_module_config(r->per_dir_config,
+ &auth_digest_module);
+ digest_header_rec *resp =
+ (digest_header_rec *) ap_get_module_config(r->request_config,
+ &auth_digest_module);
+ const char *ai = NULL, *digest = NULL, *nextnonce = "";
+
+ if (resp == NULL || !resp->needed_auth || conf == NULL) {
+ return OK;
+ }
+
+
+ /* rfc-2069 digest
+ */
+ if (resp->message_qop == NULL) {
+ /* old client, so calc rfc-2069 digest */
+
+#ifdef SEND_DIGEST
+ /* Most of this is totally bogus because the handlers don't set the
+ * headers until the final handler phase (I wonder why this phase
+ * is called fixup when there's almost nothing you can fix up...)
+ *
+ * Because it's basically impossible to get this right (e.g. the
+ * Content-Length is never set yet when we get here, and we can't
+ * calc the entity hash), it's best to just leave this #def'd out.
+ */
+ char date[APR_RFC822_DATE_LEN];
+ apr_rfc822_date(date, r->request_time);
+ char *entity_info =
+ ap_md5(r->pool,
+ (unsigned char *) apr_pstrcat(r->pool, resp->raw_request_uri,
+ ":",
+ r->content_type ? r->content_type : ap_default_type(r), ":",
+ hdr(r->headers_out, "Content-Length"), ":",
+ r->content_encoding ? r->content_encoding : "", ":",
+ hdr(r->headers_out, "Last-Modified"), ":",
+ r->no_cache && !apr_table_get(r->headers_out, "Expires") ?
+ date :
+ hdr(r->headers_out, "Expires"),
+ NULL));
+ digest =
+ ap_md5(r->pool,
+ (unsigned char *)apr_pstrcat(r->pool, conf->ha1, ":",
+ resp->nonce, ":",
+ r->method, ":",
+ date, ":",
+ entity_info, ":",
+ ap_md5(r->pool, (unsigned char *) ""), /* H(entity) - TBD */
+ NULL));
+#endif
+ }
+
+
+ /* setup nextnonce
+ */
+ if (conf->nonce_lifetime > 0) {
+ /* send nextnonce if current nonce will expire in less than 30 secs */
+ if ((r->request_time - resp->nonce_time) > (conf->nonce_lifetime-NEXTNONCE_DELTA)) {
+ nextnonce = apr_pstrcat(r->pool, ", nextnonce=\"",
+ gen_nonce(r->pool, r->request_time,
+ resp->opaque, r->server, conf),
+ "\"", NULL);
+ if (resp->client)
+ resp->client->nonce_count = 0;
+ }
+ }
+ else if (conf->nonce_lifetime == 0 && resp->client) {
+ const char *nonce = gen_nonce(r->pool, 0, resp->opaque, r->server,
+ conf);
+ nextnonce = apr_pstrcat(r->pool, ", nextnonce=\"", nonce, "\"", NULL);
+ memcpy(resp->client->last_nonce, nonce, NONCE_LEN+1);
+ }
+ /* else nonce never expires, hence no nextnonce */
+
+
+ /* do rfc-2069 digest
+ */
+ if (conf->qop_list[0] && !strcasecmp(conf->qop_list[0], "none")
+ && resp->message_qop == NULL) {
+ /* use only RFC-2069 format */
+ if (digest) {
+ ai = apr_pstrcat(r->pool, "digest=\"", digest, "\"", nextnonce,NULL);
+ }
+ else {
+ ai = nextnonce;
+ }
+ }
+ else {
+ const char *resp_dig, *ha1, *a2, *ha2;
+
+ /* calculate rspauth attribute
+ */
+ if (resp->algorithm && !strcasecmp(resp->algorithm, "MD5-sess")) {
+ ha1 = get_session_HA1(r, resp, conf, 0);
+ if (!ha1) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Digest: internal error: couldn't find session "
+ "info for user %s", resp->username);
+ return !OK;
+ }
+ }
+ else {
+ ha1 = conf->ha1;
+ }
+
+ if (resp->message_qop && !strcasecmp(resp->message_qop, "auth-int")) {
+ a2 = apr_pstrcat(r->pool, ":", resp->uri, ":",
+ ap_md5(r->pool,(const unsigned char *) ""), NULL);
+ /* TBD */
+ }
+ else {
+ a2 = apr_pstrcat(r->pool, ":", resp->uri, NULL);
+ }
+ ha2 = ap_md5(r->pool, (const unsigned char *)a2);
+
+ resp_dig = ap_md5(r->pool,
+ (unsigned char *)apr_pstrcat(r->pool, ha1, ":",
+ resp->nonce, ":",
+ resp->nonce_count, ":",
+ resp->cnonce, ":",
+ resp->message_qop ?
+ resp->message_qop : "",
+ ":", ha2, NULL));
+
+ /* assemble Authentication-Info header
+ */
+ ai = apr_pstrcat(r->pool,
+ "rspauth=\"", resp_dig, "\"",
+ nextnonce,
+ resp->cnonce ? ", cnonce=\"" : "",
+ resp->cnonce
+ ? ap_escape_quotes(r->pool, resp->cnonce)
+ : "",
+ resp->cnonce ? "\"" : "",
+ resp->nonce_count ? ", nc=" : "",
+ resp->nonce_count ? resp->nonce_count : "",
+ resp->message_qop ? ", qop=" : "",
+ resp->message_qop ? resp->message_qop : "",
+ digest ? "digest=\"" : "",
+ digest ? digest : "",
+ digest ? "\"" : "",
+ NULL);
+ }
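+ /* The assembled value, merged below into Authentication-Info (or
+ * Proxy-Authentication-Info for proxy requests), ends up looking roughly
+ * like this - hex digests and counts are only placeholders:
+ *
+ *   Authentication-Info: rspauth="94e3...", nextnonce="f3a9...",
+ *       cnonce="0a4f...", nc=00000001, qop=auth
+ */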
+
+ if (ai && ai[0]) {
+ apr_table_mergen(r->headers_out,
+ (PROXYREQ_PROXY == r->proxyreq)
+ ? "Proxy-Authentication-Info"
+ : "Authentication-Info",
+ ai);
+ }
+
+ return OK;
+}
+
+
+static void register_hooks(apr_pool_t *p)
+{
+ static const char * const cfgPost[]={ "http_core.c", NULL };
+ static const char * const parsePre[]={ "mod_proxy.c", NULL };
+
+ ap_hook_post_config(initialize_module, NULL, cfgPost, APR_HOOK_MIDDLE);
+ ap_hook_child_init(initialize_child, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_read_request(parse_hdr_and_update_nc, parsePre, NULL, APR_HOOK_MIDDLE);
+ ap_hook_check_user_id(authenticate_digest_user, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_auth_checker(digest_check_auth, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_fixups(add_auth_info, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA auth_digest_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_digest_dir_config, /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ digest_cmds, /* command table */
+ register_hooks /* register hooks */
+};
+
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_digest.dsp b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_digest.dsp
new file mode 100644
index 00000000..74daf60b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/mod_auth_digest.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_auth_digest" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_auth_digest - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_auth_digest.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_auth_digest.mak" CFG="mod_auth_digest - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_auth_digest - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_auth_digest - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_auth_digest - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_auth_digest_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o /win32 "NUL"
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o /win32 "NUL"
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_auth_digest.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_digest.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_auth_digest.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_digest.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_auth_digest - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_auth_digest_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o /win32 "NUL"
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o /win32 "NUL"
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_auth_digest.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_digest.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_auth_digest.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_digest.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_auth_digest - Win32 Release"
+# Name "mod_auth_digest - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_auth_digest.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_auth_digest.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_auth_digest - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_auth_digest.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_auth_digest.so "auth_digest_module for Apache" ../../include/ap_release.h > .\mod_auth_digest.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_auth_digest - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_auth_digest.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_auth_digest.so "auth_digest_module for Apache" ../../include/ap_release.h > .\mod_auth_digest.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/aaa/modules.mk b/rubbos/app/httpd-2.0.64/modules/aaa/modules.mk
new file mode 100644
index 00000000..79900dea
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/aaa/modules.mk
@@ -0,0 +1,7 @@
+mod_access.la: mod_access.lo
+ $(MOD_LINK) mod_access.lo $(MOD_ACCESS_LDADD)
+mod_auth.la: mod_auth.lo
+ $(MOD_LINK) mod_auth.lo $(MOD_AUTH_LDADD)
+DISTCLEAN_TARGETS = modules.mk
+static = mod_access.la mod_auth.la
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/libprews.c b/rubbos/app/httpd-2.0.64/modules/arch/netware/libprews.c
new file mode 100644
index 00000000..5ac5e54c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/libprews.c
@@ -0,0 +1,70 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*------------------------------------------------------------------
+ These functions are called when the shared NLM starts and
+ stops. By using them instead of defining a main() and calling
+ ExitThread(TSR_THREAD, 0), the shared NLM loads faster and
+ uses less memory.
+
+ You may also want to override these in your own Apache module
+ to do any cleanup beyond the mechanisms that Apache modules
+ already provide.
+------------------------------------------------------------------*/
+#include <netware.h>
+//#include "stddef.h"
+#include "novsock2.h"
+
+int _NonAppStart
+(
+ void *NLMHandle,
+ void *errorScreen,
+ const char *cmdLine,
+ const char *loadDirPath,
+ size_t uninitializedDataLength,
+ void *NLMFileHandle,
+ int (*readRoutineP)( int conn, void *fileHandle, size_t offset,
+ size_t nbytes, size_t *bytesRead, void *buffer ),
+ size_t customDataOffset,
+ size_t customDataSize,
+ int messageCount,
+ const char **messages
+)
+{
+#pragma unused(cmdLine)
+#pragma unused(loadDirPath)
+#pragma unused(uninitializedDataLength)
+#pragma unused(NLMFileHandle)
+#pragma unused(readRoutineP)
+#pragma unused(customDataOffset)
+#pragma unused(customDataSize)
+#pragma unused(messageCount)
+#pragma unused(messages)
+
+ WSADATA wsaData;
+
+ return WSAStartup((WORD) MAKEWORD(2, 0), &wsaData);
+}
+
+void _NonAppStop( void )
+{
+ WSACleanup();
+}
+
+int _NonAppCheckUnload( void )
+{
+ return 0;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_anon.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_anon.def
new file mode 100644
index 00000000..ab6b138f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_anon.def
@@ -0,0 +1 @@
+EXPORT auth_anon_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_basic.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_basic.def
new file mode 100644
index 00000000..0a6f81aa
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_basic.def
@@ -0,0 +1 @@
+EXPORT auth_basic_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_dbm.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_dbm.def
new file mode 100644
index 00000000..830f194d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_dbm.def
@@ -0,0 +1 @@
+EXPORT auth_dbm_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_digest.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_digest.def
new file mode 100644
index 00000000..6a3aa085
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_auth_digest.def
@@ -0,0 +1 @@
+EXPORT auth_digest_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_cache.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_cache.def
new file mode 100644
index 00000000..6fd6423b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_cache.def
@@ -0,0 +1,5 @@
+EXPORT cache_module
+EXPORT @mod_cache.imp
+
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_cern_meta.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_cern_meta.def
new file mode 100644
index 00000000..5638325b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_cern_meta.def
@@ -0,0 +1 @@
+EXPORT cern_meta_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_dav.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_dav.def
new file mode 100644
index 00000000..fb56c92f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_dav.def
@@ -0,0 +1,3 @@
+EXPORT dav_module
+EXPORT @dav.imp
+
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_disk_cache.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_disk_cache.def
new file mode 100644
index 00000000..0a9440ad
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_disk_cache.def
@@ -0,0 +1,3 @@
+IMPORT @mod_cache.imp
+EXPORT disk_cache_module
+
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_echo.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_echo.def
new file mode 100644
index 00000000..694135a5
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_echo.def
@@ -0,0 +1,2 @@
+EXPORT echo_module
+
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_expires.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_expires.def
new file mode 100644
index 00000000..bc416630
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_expires.def
@@ -0,0 +1 @@
+EXPORT expires_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_file_cache.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_file_cache.def
new file mode 100644
index 00000000..8ab98cfb
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_file_cache.def
@@ -0,0 +1,2 @@
+EXPORT file_cache_module
+
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_headers.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_headers.def
new file mode 100644
index 00000000..2fe35a85
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_headers.def
@@ -0,0 +1 @@
+EXPORT headers_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_info.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_info.def
new file mode 100644
index 00000000..ce71cb37
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_info.def
@@ -0,0 +1 @@
+EXPORT info_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_logio.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_logio.def
new file mode 100644
index 00000000..68c70891
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_logio.def
@@ -0,0 +1,2 @@
+EXPORT logio_module
+
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_mem_cache.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_mem_cache.def
new file mode 100644
index 00000000..531d6871
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_mem_cache.def
@@ -0,0 +1,3 @@
+IMPORT @mod_cache.imp
+EXPORT mem_cache_module
+
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_mime_magic.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_mime_magic.def
new file mode 100644
index 00000000..95307476
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_mime_magic.def
@@ -0,0 +1 @@
+EXPORT mime_magic_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_netware.c b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_netware.c
new file mode 100644
index 00000000..3fec8cc4
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_netware.c
@@ -0,0 +1,194 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_strings.h"
+#include "apr_portable.h"
+#include "apr_buckets.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "http_log.h"
+#include "util_script.h"
+#include "mod_core.h"
+#include "apr_optional.h"
+#include "apr_lib.h"
+#include "mod_cgi.h"
+
+#ifdef NETWARE
+
+
+module AP_MODULE_DECLARE_DATA netware_module;
+
+typedef struct {
+ apr_table_t *file_type_handlers; /* CGI map from file types to CGI modules */
+ apr_table_t *file_handler_mode; /* CGI module mode (spawn in same address space or not) */
+ apr_table_t *extra_env_vars; /* Environment variables to be added to the CGI environment */
+} netware_dir_config;
+
+
+static void *create_netware_dir_config(apr_pool_t *p, char *dir)
+{
+ netware_dir_config *new = (netware_dir_config*) apr_palloc(p, sizeof(netware_dir_config));
+
+ new->file_type_handlers = apr_table_make(p, 10);
+ new->file_handler_mode = apr_table_make(p, 10);
+ new->extra_env_vars = apr_table_make(p, 10);
+
+ apr_table_set(new->file_type_handlers, "NLM", "OS");
+
+ return new;
+}
+
+static void *merge_netware_dir_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ netware_dir_config *base = (netware_dir_config *) basev;
+ netware_dir_config *add = (netware_dir_config *) addv;
+ netware_dir_config *new = (netware_dir_config *) apr_palloc(p, sizeof(netware_dir_config));
+
+ new->file_type_handlers = apr_table_overlay(p, add->file_type_handlers, base->file_type_handlers);
+ new->file_handler_mode = apr_table_overlay(p, add->file_handler_mode, base->file_handler_mode);
+ new->extra_env_vars = apr_table_overlay(p, add->extra_env_vars, base->extra_env_vars);
+
+ return new;
+}
+
+static const char *set_extension_map(cmd_parms *cmd, netware_dir_config *m,
+ char *CGIhdlr, char *ext, char *detach)
+{
+ int i, len;
+
+ if (*ext == '.')
+ ++ext;
+
+ if (CGIhdlr != NULL) {
+ len = strlen(CGIhdlr);
+ for (i=0; i<len; i++) {
+ if (CGIhdlr[i] == '\\') {
+ CGIhdlr[i] = '/';
+ }
+ }
+ }
+
+ apr_table_set(m->file_type_handlers, ext, CGIhdlr);
+ if (detach) {
+ apr_table_set(m->file_handler_mode, ext, "y");
+ }
+
+ return NULL;
+}
+
+static apr_status_t ap_cgi_build_command(const char **cmd, const char ***argv,
+ request_rec *r, apr_pool_t *p,
+ cgi_exec_info_t *e_info)
+{
+ char *ext = NULL;
+ char *cmd_only, *ptr;
+ const char *new_cmd;
+ netware_dir_config *d;
+ apr_file_t *fh;
+ const char *args = "";
+
+ d = (netware_dir_config *)ap_get_module_config(r->per_dir_config,
+ &netware_module);
+
+ if (e_info->process_cgi) {
+ /* Handle the complete file name; we DON'T want to follow suexec, since
+ * an unrooted command is as predictable as shooting craps on Win32.
+ *
+ * Notice that unlike most mime extension parsing, we have to use the
+ * win32 parsing here; therefore the final extension is the only one
+ * we will consider.
+ */
+ *cmd = r->filename;
+ if (r->args && r->args[0] && !ap_strchr_c(r->args, '=')) {
+ args = r->args;
+ }
+ }
+
+ cmd_only = apr_pstrdup(p, *cmd);
+ e_info->cmd_type = APR_PROGRAM;
+
+ /* truncate any arguments from the cmd */
+ for (ptr = cmd_only; *ptr && (*ptr != ' '); ptr++);
+ *ptr = '\0';
+
+ /* Figure out what the extension is so that we can match it. */
+ ext = strrchr(apr_filepath_name_get(cmd_only), '.');
+
+ /* If there isn't an extension then give it an empty string */
+ if (!ext) {
+ ext = "";
+ }
+
+ /* eliminate the '.' if there is one */
+ if (*ext == '.')
+ ++ext;
+
+ /* check if we have a registered command for the extension */
+ new_cmd = apr_table_get(d->file_type_handlers, ext);
+ e_info->detached = AP_PROC_DETACHED;
+ if (new_cmd == NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Could not find a command associated with the %s extension", ext);
+ return APR_EBADF;
+ }
+ if (stricmp(new_cmd, "OS")) {
+ /* If we have a registered command then add the file that was passed in as a
+ parameter to the registered command. */
+ *cmd = apr_pstrcat (p, new_cmd, " ", cmd_only, NULL);
+
+ /* Run in its own address space if specified */
+ if(apr_table_get(d->file_handler_mode, ext))
+ e_info->detached |= AP_PROC_NEWADDRSPACE;
+ }
+
+ /* Tokenize the full command string into its arguments */
+ apr_tokenize_to_argv(*cmd, (char***)argv, p);
+
+ /* The first argument should be the executable */
+ *cmd = ap_server_root_relative(p, *argv[0]);
+
+ return APR_SUCCESS;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ APR_REGISTER_OPTIONAL_FN(ap_cgi_build_command);
+}
+
+static const command_rec netware_cmds[] = {
+AP_INIT_TAKE23("CGIMapExtension", set_extension_map, NULL, OR_FILEINFO,
+ "Full path to the CGI NLM module followed by a file extension. If the "
+ "first parameter is set to \"OS\" then the following file extension is "
+ "treated as NLM. The optional parameter \"detach\" can be specified if "
+ "the NLM should be launched in its own address space."),
+{ NULL }
+};
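+/* A configuration sketch for the directive registered above; the NLM path and
+ * extensions are purely illustrative:
+ *
+ *   CGIMapExtension sys:/perl/perl.nlm .pl
+ *   CGIMapExtension OS .nlm
+ *
+ * The second form runs .nlm files directly, matching the "OS" handling in
+ * ap_cgi_build_command() and the default set in create_netware_dir_config().
+ */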
+
+module AP_MODULE_DECLARE_DATA netware_module = {
+ STANDARD20_MODULE_STUFF,
+ create_netware_dir_config, /* create per-dir config */
+ merge_netware_dir_configs, /* merge per-dir config */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ netware_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
+
+#endif
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_nw_ssl.c b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_nw_ssl.c
new file mode 100644
index 00000000..3d45f149
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_nw_ssl.c
@@ -0,0 +1,1151 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_tls.c - Apache SSL/TLS module for NetWare by Mike Gardiner.
+ *
+ * This module gives Apache the ability to do SSL/TLS with a minimum amount
+ * of effort. All of the SSL/TLS logic is already present on NetWare versions
+ * 5 and above and is interfaced through WinSock on NetWare. As you can see in
+ * the code below, SSL/TLS sockets can be created with three WinSock calls.
+ *
+ * To load, simply place the module in the modules directory under the main
+ * apache tree. Then add a "SecureListen" directive with two arguments. The first
+ * argument is an address and/or port. The second argument is the key pair
+ * name as created in ConsoleOne.
+ *
+ * Examples:
+ *
+ * SecureListen 443 "SSL CertificateIP"
+ * SecureListen 123.45.67.89:443 mycert
+ */
+
+#define WS_SSL
+
+#define MAX_ADDRESS 512
+#define MAX_KEY 80
+
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "http_protocol.h"
+#include "http_core.h"
+#include "ap_listen.h"
+#include "apr_strings.h"
+#include "apr_portable.h"
+#include "apr_optional.h"
+
+#include <unilib.h>
+
+#ifndef SO_TLS_UNCLEAN_SHUTDOWN
+#define SO_TLS_UNCLEAN_SHUTDOWN 0
+#endif
+
+/* The ssl_var_lookup() optional function retrieves SSL environment
+ * variables. */
+APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup,
+ (apr_pool_t *, server_rec *,
+ conn_rec *, request_rec *,
+ char *));
+
+/* An optional function which returns non-zero if the given connection
+ * is using SSL/TLS. */
+APR_DECLARE_OPTIONAL_FN(int, ssl_is_https, (conn_rec *));
+
+/* The ssl_proxy_enable() and ssl_engine_disable() optional functions
+ * are used by mod_proxy to enable use of SSL for outgoing
+ * connections. */
+APR_DECLARE_OPTIONAL_FN(int, ssl_proxy_enable, (conn_rec *));
+APR_DECLARE_OPTIONAL_FN(int, ssl_engine_disable, (conn_rec *));
+
+#define strEQ(s1,s2) (strcmp(s1,s2) == 0)
+#define strNE(s1,s2) (strcmp(s1,s2) != 0)
+#define strEQn(s1,s2,n) (strncmp(s1,s2,n) == 0)
+#define strNEn(s1,s2,n) (strncmp(s1,s2,n) != 0)
+
+#define strcEQ(s1,s2) (strcasecmp(s1,s2) == 0)
+#define strcNE(s1,s2) (strcasecmp(s1,s2) != 0)
+#define strcEQn(s1,s2,n) (strncasecmp(s1,s2,n) == 0)
+#define strcNEn(s1,s2,n) (strncasecmp(s1,s2,n) != 0)
+
+#define strIsEmpty(s) (s == NULL || s[0] == NUL)
+
+
+module AP_MODULE_DECLARE_DATA nwssl_module;
+
+typedef struct NWSSLSrvConfigRec NWSSLSrvConfigRec;
+typedef struct seclisten_rec seclisten_rec;
+typedef struct seclistenup_rec seclistenup_rec;
+typedef struct secsocket_data secsocket_data;
+
+struct seclisten_rec {
+ seclisten_rec *next;
+ struct sockaddr_in local_addr; /* local IP address and port */
+ int fd;
+ int used; /* Only used during restart */
+ char key[MAX_KEY];
+ int mutual;
+ char *addr;
+ apr_port_t port;
+};
+
+struct seclistenup_rec {
+ seclistenup_rec *next;
+ char key[MAX_KEY];
+ char *addr;
+ apr_port_t port;
+};
+
+struct NWSSLSrvConfigRec {
+ apr_table_t *sltable;
+ apr_table_t *slutable;
+ apr_pool_t *pPool;
+};
+
+struct secsocket_data {
+ apr_socket_t* csd;
+ int is_secure;
+};
+
+static apr_array_header_t *certlist = NULL;
+static unicode_t** certarray = NULL;
+static int numcerts = 0;
+static seclisten_rec* ap_seclisteners = NULL;
+static seclistenup_rec* ap_seclistenersup = NULL;
+
+#define get_nwssl_cfg(srv) (NWSSLSrvConfigRec *) ap_get_module_config(srv->module_config, &nwssl_module)
+
+
+static void build_cert_list (apr_pool_t *p)
+{
+ int i;
+ char **rootcerts = (char **)certlist->elts;
+
+ numcerts = certlist->nelts;
+ certarray = apr_palloc(p, sizeof(unicode_t*)*numcerts);
+
+ for (i = 0; i < numcerts; ++i) {
+ unicode_t *unistr;
+ unistr = (unicode_t*)apr_palloc(p, strlen(rootcerts[i])*4);
+ loc2uni (UNI_LOCAL_DEFAULT, unistr, rootcerts[i], 0, 2);
+ certarray[i] = unistr;
+ }
+}
+
+/*
+ * Parses a host of the form <address>[:port]
+ * :port is permitted if 'port' is not NULL
+ */
+static unsigned long parse_addr(const char *w, unsigned short *ports)
+{
+ struct hostent *hep;
+ unsigned long my_addr;
+ char *p;
+
+ p = strchr(w, ':');
+ if (ports != NULL) {
+ *ports = 0;
+ if (p != NULL && strcmp(p + 1, "*") != 0)
+ *ports = atoi(p + 1);
+ }
+
+ if (p != NULL)
+ *p = '\0';
+ if (strcmp(w, "*") == 0) {
+ if (p != NULL)
+ *p = ':';
+ return htonl(INADDR_ANY);
+ }
+
+ my_addr = apr_inet_addr((char *)w);
+ if (my_addr != INADDR_NONE) {
+ if (p != NULL)
+ *p = ':';
+ return my_addr;
+ }
+
+ hep = gethostbyname(w);
+
+ if ((!hep) || (hep->h_addrtype != AF_INET || !hep->h_addr_list[0])) {
+ /* XXX Should echo the actual failure via h_errno, no?
+ * ap_log_error would be good here. Better yet - APRize.
+ */
+ fprintf(stderr, "Cannot resolve host name %s --- exiting!\n", w);
+ exit(1);
+ }
+
+ if (hep->h_addr_list[1]) {
+ fprintf(stderr, "Host %s has multiple addresses ---\n", w);
+ fprintf(stderr, "you must choose one explicitly for use as\n");
+ fprintf(stderr, "a secure port. Exiting!!!\n");
+ exit(1);
+ }
+
+ if (p != NULL)
+ *p = ':';
+
+ return ((struct in_addr *) (hep->h_addr))->s_addr;
+}
+
+static int find_secure_listener(seclisten_rec *lr)
+{
+ seclisten_rec *sl;
+
+ for (sl = ap_seclisteners; sl; sl = sl->next) {
+ if (!memcmp(&sl->local_addr, &lr->local_addr, sizeof(sl->local_addr))) {
+ sl->used = 1;
+ return sl->fd;
+ }
+ }
+ return -1;
+}
+
+static char *get_port_key(conn_rec *c)
+{
+ seclistenup_rec *sl;
+
+ for (sl = ap_seclistenersup; sl; sl = sl->next) {
+ if ((sl->port == (c->local_addr)->port) &&
+ ((strcmp(sl->addr, "0.0.0.0") == 0) || (strcmp(sl->addr, c->local_ip) == 0))) {
+ return sl->key;
+ }
+ }
+ return NULL;
+}
+
+static int make_secure_socket(apr_pool_t *pconf, const struct sockaddr_in *server,
+ char* key, int mutual, server_rec *sconf)
+{
+ int s;
+ int one = 1;
+ char addr[MAX_ADDRESS];
+ struct sslserveropts opts;
+ unsigned int optParam;
+ WSAPROTOCOL_INFO SecureProtoInfo;
+ int no = 1;
+
+ if (server->sin_addr.s_addr != htonl(INADDR_ANY))
+ apr_snprintf(addr, sizeof(addr), "address %s port %d",
+ inet_ntoa(server->sin_addr), ntohs(server->sin_port));
+ else
+ apr_snprintf(addr, sizeof(addr), "port %d", ntohs(server->sin_port));
+
+ /* note that because we're about to slack we don't use psocket */
+ memset(&SecureProtoInfo, 0, sizeof(WSAPROTOCOL_INFO));
+
+ SecureProtoInfo.iAddressFamily = AF_INET;
+ SecureProtoInfo.iSocketType = SOCK_STREAM;
+ SecureProtoInfo.iProtocol = IPPROTO_TCP;
+ SecureProtoInfo.iSecurityScheme = SECURITY_PROTOCOL_SSL;
+
+ s = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP,
+ (LPWSAPROTOCOL_INFO)&SecureProtoInfo, 0, 0);
+
+ if (s == INVALID_SOCKET) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, WSAGetLastError(), sconf,
+ "make_secure_socket: failed to get a socket for %s",
+ addr);
+ return -1;
+ }
+
+ if (!mutual) {
+ optParam = SO_SSL_ENABLE | SO_SSL_SERVER;
+
+ if (WSAIoctl(s, SO_SSL_SET_FLAGS, (char *)&optParam,
+ sizeof(optParam), NULL, 0, NULL, NULL, NULL)) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, WSAGetLastError(), sconf,
+ "make_secure_socket: for %s, WSAIoctl: "
+ "(SO_SSL_SET_FLAGS)", addr);
+ return -1;
+ }
+ }
+
+ opts.cert = key;
+ opts.certlen = strlen(key);
+ opts.sidtimeout = 0;
+ opts.sidentries = 0;
+ opts.siddir = NULL;
+
+ if (WSAIoctl(s, SO_SSL_SET_SERVER, (char *)&opts, sizeof(opts),
+ NULL, 0, NULL, NULL, NULL) != 0) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, WSAGetLastError(), sconf,
+ "make_secure_socket: for %s, WSAIoctl: "
+ "(SO_SSL_SET_SERVER)", addr);
+ return -1;
+ }
+
+ if (mutual) {
+ optParam = 0x07; // SO_SSL_AUTH_CLIENT
+
+ if(WSAIoctl(s, SO_SSL_SET_FLAGS, (char*)&optParam,
+ sizeof(optParam), NULL, 0, NULL, NULL, NULL)) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, WSAGetLastError(), sconf,
+ "make_secure_socket: for %s, WSAIoctl: "
+ "(SO_SSL_SET_FLAGS)", addr);
+ return -1;
+ }
+ }
+
+ optParam = SO_TLS_UNCLEAN_SHUTDOWN;
+ WSAIoctl(s, SO_SSL_SET_FLAGS, (char *)&optParam, sizeof(optParam),
+ NULL, 0, NULL, NULL, NULL);
+
+ return s;
+}
+
+int convert_secure_socket(conn_rec *c, apr_socket_t *csd)
+{
+ int rcode;
+ struct tlsclientopts sWS2Opts;
+ struct nwtlsopts sNWTLSOpts;
+ struct sslserveropts opts;
+ unsigned long ulFlags;
+ SOCKET sock;
+ unicode_t keyFileName[60];
+
+ apr_os_sock_get(&sock, csd);
+
+ /* zero out buffers */
+ memset((char *)&sWS2Opts, 0, sizeof(struct tlsclientopts));
+ memset((char *)&sNWTLSOpts, 0, sizeof(struct nwtlsopts));
+
+ /* turn on ssl for the socket */
+ ulFlags = (numcerts ? SO_TLS_ENABLE : SO_TLS_ENABLE | SO_TLS_BLIND_ACCEPT);
+ rcode = WSAIoctl(sock, SO_TLS_SET_FLAGS, &ulFlags, sizeof(unsigned long),
+ NULL, 0, NULL, NULL, NULL);
+ if (SOCKET_ERROR == rcode)
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, c->base_server,
+ "Error: %d with ioctlsocket(flag SO_TLS_ENABLE)", WSAGetLastError());
+ return rcode;
+ }
+
+ ulFlags = SO_TLS_UNCLEAN_SHUTDOWN;
+ WSAIoctl(sock, SO_TLS_SET_FLAGS, &ulFlags, sizeof(unsigned long),
+ NULL, 0, NULL, NULL, NULL);
+
+ /* setup the socket for SSL */
+ memset (&sWS2Opts, 0, sizeof(sWS2Opts));
+ memset (&sNWTLSOpts, 0, sizeof(sNWTLSOpts));
+ sWS2Opts.options = &sNWTLSOpts;
+
+ if (numcerts) {
+ sNWTLSOpts.walletProvider = WAL_PROV_DER; //the wallet provider defined in wdefs.h
+ sNWTLSOpts.TrustedRootList = certarray; //array of certs in UNICODE format
+ sNWTLSOpts.numElementsInTRList = numcerts; //number of certs in TRList
+ }
+ else {
+ /* setup the socket for SSL */
+ unicpy(keyFileName, L"SSL CertificateIP");
+ sWS2Opts.wallet = keyFileName; /* no client certificate */
+ sWS2Opts.walletlen = unilen(keyFileName);
+
+ sNWTLSOpts.walletProvider = WAL_PROV_KMO; //the wallet provider defined in wdefs.h
+ }
+
+ /* make the IOCTL call */
+ rcode = WSAIoctl(sock, SO_TLS_SET_CLIENT, &sWS2Opts,
+ sizeof(struct tlsclientopts), NULL, 0, NULL,
+ NULL, NULL);
+
+ /* make sure that it was successful */
+ if(SOCKET_ERROR == rcode ){
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, c->base_server,
+ "Error: %d with ioctl (SO_TLS_SET_CLIENT)", WSAGetLastError());
+ }
+ return rcode;
+}
+
+int SSLize_Socket(SOCKET socketHnd, char *key, request_rec *r)
+{
+ int rcode;
+ struct tlsserveropts sWS2Opts;
+ struct nwtlsopts sNWTLSOpts;
+ unicode_t SASKey[512];
+ unsigned long ulFlag;
+
+ memset((char *)&sWS2Opts, 0, sizeof(struct tlsserveropts));
+ memset((char *)&sNWTLSOpts, 0, sizeof(struct nwtlsopts));
+
+
+ ulFlag = SO_TLS_ENABLE;
+ rcode = WSAIoctl(socketHnd, SO_TLS_SET_FLAGS, &ulFlag, sizeof(unsigned long), NULL, 0, NULL, NULL, NULL);
+ if(rcode)
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "Error: %d with WSAIoctl(SO_TLS_SET_FLAGS, SO_TLS_ENABLE)", WSAGetLastError());
+ goto ERR;
+ }
+
+
+ ulFlag = SO_TLS_SERVER;
+ rcode = WSAIoctl(socketHnd, SO_TLS_SET_FLAGS, &ulFlag, sizeof(unsigned long),NULL, 0, NULL, NULL, NULL);
+
+ if(rcode)
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "Error: %d with WSAIoctl(SO_TLS_SET_FLAGS, SO_TLS_SERVER)", WSAGetLastError());
+ goto ERR;
+ }
+
+ loc2uni(UNI_LOCAL_DEFAULT, SASKey, key, 0, 0);
+
+ //setup the tlsserveropts struct
+ sWS2Opts.wallet = SASKey;
+ sWS2Opts.walletlen = unilen(SASKey);
+ sWS2Opts.sidtimeout = 0;
+ sWS2Opts.sidentries = 0;
+ sWS2Opts.siddir = NULL;
+ sWS2Opts.options = &sNWTLSOpts;
+
+ //setup the nwtlsopts structure
+
+ sNWTLSOpts.walletProvider = WAL_PROV_KMO;
+ sNWTLSOpts.keysList = NULL;
+ sNWTLSOpts.numElementsInKeyList = 0;
+ sNWTLSOpts.reservedforfutureuse = NULL;
+ sNWTLSOpts.reservedforfutureCRL = NULL;
+ sNWTLSOpts.reservedforfutureCRLLen = 0;
+ sNWTLSOpts.reserved1 = NULL;
+ sNWTLSOpts.reserved2 = NULL;
+ sNWTLSOpts.reserved3 = NULL;
+
+
+ rcode = WSAIoctl(socketHnd,
+ SO_TLS_SET_SERVER,
+ &sWS2Opts,
+ sizeof(struct tlsserveropts),
+ NULL,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+ if(SOCKET_ERROR == rcode) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "Error: %d with WSAIoctl(SO_TLS_SET_SERVER)", WSAGetLastError());
+ goto ERR;
+ }
+
+ERR:
+ return rcode;
+}
+
+static const char *set_secure_listener(cmd_parms *cmd, void *dummy,
+ const char *ips, const char* key,
+ const char* mutual)
+{
+ NWSSLSrvConfigRec* sc = get_nwssl_cfg(cmd->server);
+ const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ char *ports, *addr;
+ unsigned short port;
+ seclisten_rec *new;
+
+
+ if (err != NULL)
+ return err;
+
+ ports = strchr(ips, ':');
+
+ if (ports != NULL) {
+ if (ports == ips)
+ return "Missing IP address";
+ else if (ports[1] == '\0')
+ return "Address must end in :<port-number>";
+
+ *(ports++) = '\0';
+ }
+ else {
+ ports = (char*)ips;
+ }
+
+ new = apr_pcalloc(cmd->pool, sizeof(seclisten_rec));
+ new->local_addr.sin_family = AF_INET;
+
+ if (ports == ips) {
+ new->local_addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr = apr_pstrdup(cmd->pool, "0.0.0.0");
+ }
+ else {
+ new->local_addr.sin_addr.s_addr = parse_addr(ips, NULL);
+ addr = apr_pstrdup(cmd->pool, ips);
+ }
+
+ port = atoi(ports);
+
+ if (!port)
+ return "Port must be numeric";
+
+ apr_table_add(sc->sltable, ports, addr);
+
+ new->local_addr.sin_port = htons(port);
+ new->fd = -1;
+ new->used = 0;
+ new->next = ap_seclisteners;
+ strcpy(new->key, key);
+ new->mutual = (mutual) ? 1 : 0;
+ new->addr = addr;
+ new->port = port;
+ ap_seclisteners = new;
+ return NULL;
+}
+
+static const char *set_secure_upgradeable_listener(cmd_parms *cmd, void *dummy,
+ const char *ips, const char* key)
+{
+ NWSSLSrvConfigRec* sc = get_nwssl_cfg(cmd->server);
+ seclistenup_rec *listen_node;
+ const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ char *ports, *addr;
+ unsigned short port;
+ seclistenup_rec *new;
+
+ if (err != NULL)
+ return err;
+
+ ports = strchr(ips, ':');
+
+ if (ports != NULL) {
+ if (ports == ips)
+ return "Missing IP address";
+ else if (ports[1] == '\0')
+ return "Address must end in :<port-number>";
+
+ *(ports++) = '\0';
+ }
+ else {
+ ports = (char*)ips;
+ }
+
+ if (ports == ips) {
+ addr = apr_pstrdup(cmd->pool, "0.0.0.0");
+ }
+ else {
+ addr = apr_pstrdup(cmd->pool, ips);
+ }
+
+ port = atoi(ports);
+
+ if (!port)
+ return "Port must be numeric";
+
+ apr_table_set(sc->slutable, ports, addr);
+
+ new = apr_pcalloc(cmd->pool, sizeof(seclistenup_rec));
+ new->next = ap_seclistenersup;
+ strcpy(new->key, key);
+ new->addr = addr;
+ new->port = port;
+ ap_seclistenersup = new;
+
+ return err;
+}
+
+static apr_status_t nwssl_socket_cleanup(void *data)
+{
+ ap_listen_rec* slr = (ap_listen_rec*)data;
+ ap_listen_rec* lr;
+
+ /* Remove our secure listener from the listener list */
+ for (lr = ap_listeners; lr; lr = lr->next) {
+ /* slr is at the head of the list */
+ if (lr == slr) {
+ ap_listeners = slr->next;
+ break;
+ }
+ /* slr is somewhere in between or at the end*/
+ if (lr->next == slr) {
+ lr->next = slr->next;
+ break;
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static const char *set_trusted_certs(cmd_parms *cmd, void *dummy, char *arg)
+{
+ char **ptr = (char **)apr_array_push(certlist);
+
+ *ptr = apr_pstrdup(cmd->pool, arg);
+ return NULL;
+}
+
+static int nwssl_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
+ apr_pool_t *ptemp)
+{
+ ap_seclisteners = NULL;
+ ap_seclistenersup = NULL;
+ certlist = apr_array_make(pconf, 1, sizeof(char *));
+
+ return OK;
+}
+
+static int nwssl_pre_connection(conn_rec *c, void *csd)
+{
+
+ if (apr_table_get(c->notes, "nwconv-ssl")) {
+ convert_secure_socket(c, (apr_socket_t*)csd);
+ }
+ else {
+ secsocket_data *csd_data = apr_palloc(c->pool, sizeof(secsocket_data));
+
+ csd_data->csd = (apr_socket_t*)csd;
+ csd_data->is_secure = 0;
+ ap_set_module_config(c->conn_config, &nwssl_module, (void*)csd_data);
+ }
+
+ return OK;
+}
+
+static int nwssl_post_config(apr_pool_t *pconf, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ seclisten_rec* sl;
+ ap_listen_rec* lr;
+ apr_socket_t* sd;
+ apr_status_t status;
+ seclistenup_rec *slu;
+ int found;
+
+ for (sl = ap_seclisteners; sl != NULL; sl = sl->next) {
+ sl->fd = find_secure_listener(sl);
+
+ if (sl->fd < 0)
+ sl->fd = make_secure_socket(pconf, &sl->local_addr, sl->key, sl->mutual, s);
+
+ if (sl->fd >= 0) {
+ apr_os_sock_info_t sock_info;
+
+ sock_info.os_sock = &(sl->fd);
+ sock_info.local = (struct sockaddr*)&(sl->local_addr);
+ sock_info.remote = NULL;
+ sock_info.family = APR_INET;
+ sock_info.type = SOCK_STREAM;
+
+ apr_os_sock_make(&sd, &sock_info, pconf);
+
+ lr = apr_pcalloc(pconf, sizeof(ap_listen_rec));
+
+ if (lr) {
+ lr->sd = sd;
+ if ((status = apr_sockaddr_info_get(&lr->bind_addr, sl->addr, APR_UNSPEC, sl->port, 0,
+ pconf)) != APR_SUCCESS) {
+ ap_log_perror(APLOG_MARK, APLOG_CRIT, status, pconf,
+ "alloc_listener: failed to set up sockaddr for %s:%d", sl->addr, sl->port);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ lr->next = ap_listeners;
+ ap_listeners = lr;
+ apr_pool_cleanup_register(pconf, lr, nwssl_socket_cleanup, apr_pool_cleanup_null);
+ }
+ } else {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+
+ for (slu = ap_seclistenersup; slu; slu = slu->next) {
+ /* Check the listener list for a matching upgradeable listener */
+ found = 0;
+ for (lr = ap_listeners; lr; lr = lr->next) {
+ if (slu->port == lr->bind_addr->port) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, plog,
+ "No Listen directive found for upgradeable listener %s:%d", slu->addr, slu->port);
+ }
+ }
+
+ build_cert_list(pconf);
+
+ return OK;
+}
+
+static void *nwssl_config_server_create(apr_pool_t *p, server_rec *s)
+{
+ NWSSLSrvConfigRec *new = apr_palloc(p, sizeof(NWSSLSrvConfigRec));
+ new->sltable = apr_table_make(p, 5);
+ new->slutable = apr_table_make(p, 5);
+ return new;
+}
+
+static void *nwssl_config_server_merge(apr_pool_t *p, void *basev, void *addv)
+{
+ NWSSLSrvConfigRec *base = (NWSSLSrvConfigRec *)basev;
+ NWSSLSrvConfigRec *add = (NWSSLSrvConfigRec *)addv;
+ NWSSLSrvConfigRec *merged = (NWSSLSrvConfigRec *)apr_palloc(p, sizeof(NWSSLSrvConfigRec));
+ return merged;
+}
+
+static int compare_ipports(void *rec, const char *key, const char *value)
+{
+ conn_rec *c = (conn_rec*)rec;
+
+ if (value &&
+ ((strcmp(value, "0.0.0.0") == 0) || (strcmp(value, c->local_ip) == 0)))
+ {
+ return 0;
+ }
+ return 1;
+}
+
+static int isSecureConnEx (const server_rec *s, const conn_rec *c, const apr_table_t *t)
+{
+ char port[8];
+
+ itoa((c->local_addr)->port, port, 10);
+ if (!apr_table_do(compare_ipports, (void*)c, t, port, NULL))
+ {
+ return 1;
+ }
+
+ return 0;
+}
+
+static int isSecureConn (const server_rec *s, const conn_rec *c)
+{
+ NWSSLSrvConfigRec *sc = get_nwssl_cfg(s);
+
+ return isSecureConnEx (s, c, sc->sltable);
+}
+
+static int isSecureConnUpgradeable (const server_rec *s, const conn_rec *c)
+{
+ NWSSLSrvConfigRec *sc = get_nwssl_cfg(s);
+
+ return isSecureConnEx (s, c, sc->slutable);
+}
+
+static int isSecure (const request_rec *r)
+{
+ return isSecureConn (r->server, r->connection);
+}
+
+static int isSecureUpgradeable (const request_rec *r)
+{
+ return isSecureConnUpgradeable (r->server, r->connection);
+}
+
+static int isSecureUpgraded (const request_rec *r)
+{
+ secsocket_data *csd_data = (secsocket_data*)ap_get_module_config(r->connection->conn_config, &nwssl_module);
+
+ return csd_data->is_secure;
+}
+
+static int nwssl_hook_Fixup(request_rec *r)
+{
+ int i;
+
+ if (!isSecure(r) && !isSecureUpgraded(r))
+ return DECLINED;
+
+ apr_table_set(r->subprocess_env, "HTTPS", "on");
+
+ return DECLINED;
+}
+
+static const char *nwssl_hook_http_method (const request_rec *r)
+{
+ if (isSecure(r) && !isSecureUpgraded(r))
+ return "https";
+
+ return NULL;
+}
+
+static apr_port_t nwssl_hook_default_port(const request_rec *r)
+{
+ if (isSecure(r))
+ return DEFAULT_HTTPS_PORT;
+
+ return 0;
+}
+
+int ssl_proxy_enable(conn_rec *c)
+{
+ apr_table_set(c->notes, "nwconv-ssl", "Y");
+
+ return 1;
+}
+
+int ssl_engine_disable(conn_rec *c)
+{
+ return 1;
+}
+
+static int ssl_is_https(conn_rec *c)
+{
+ secsocket_data *csd_data = (secsocket_data*)ap_get_module_config(c->conn_config, &nwssl_module);
+
+ return isSecureConn (c->base_server, c) || (csd_data && csd_data->is_secure);
+}
+
+/* This function must remain safe to use for a non-SSL connection. */
+char *ssl_var_lookup(apr_pool_t *p, server_rec *s, conn_rec *c, request_rec *r, char *var)
+{
+ NWSSLSrvConfigRec *mc = get_nwssl_cfg(s);
+ const char *result;
+ BOOL resdup;
+ apr_time_exp_t tm;
+
+ result = NULL;
+ resdup = TRUE;
+
+ /*
+ * When no pool is given try to find one
+ */
+ if (p == NULL) {
+ if (r != NULL)
+ p = r->pool;
+ else if (c != NULL)
+ p = c->pool;
+ else
+ p = mc->pPool;
+ }
+
+ /*
+ * Request dependent stuff
+ */
+ if (r != NULL) {
+ switch (var[0]) {
+ case 'H':
+ case 'h':
+ if (strcEQ(var, "HTTP_USER_AGENT"))
+ result = apr_table_get(r->headers_in, "User-Agent");
+ else if (strcEQ(var, "HTTP_REFERER"))
+ result = apr_table_get(r->headers_in, "Referer");
+ else if (strcEQ(var, "HTTP_COOKIE"))
+ result = apr_table_get(r->headers_in, "Cookie");
+ else if (strcEQ(var, "HTTP_FORWARDED"))
+ result = apr_table_get(r->headers_in, "Forwarded");
+ else if (strcEQ(var, "HTTP_HOST"))
+ result = apr_table_get(r->headers_in, "Host");
+ else if (strcEQ(var, "HTTP_PROXY_CONNECTION"))
+ result = apr_table_get(r->headers_in, "Proxy-Connection");
+ else if (strcEQ(var, "HTTP_ACCEPT"))
+ result = apr_table_get(r->headers_in, "Accept");
+ else if (strcEQ(var, "HTTPS")) {
+ if (isSecure(r) || isSecureUpgraded(r))
+ result = "on";
+ else
+ result = "off";
+ }
+ else if (strlen(var) > 5 && strcEQn(var, "HTTP:", 5))
+ /* all other headers that we don't otherwise know about */
+ result = apr_table_get(r->headers_in, var+5);
+ break;
+
+ case 'R':
+ case 'r':
+ if (strcEQ(var, "REQUEST_METHOD"))
+ result = r->method;
+ else if (strcEQ(var, "REQUEST_SCHEME"))
+ result = ap_http_method(r);
+ else if (strcEQ(var, "REQUEST_URI"))
+ result = r->uri;
+ else if (strcEQ(var, "REQUEST_FILENAME"))
+ result = r->filename;
+ else if (strcEQ(var, "REMOTE_HOST"))
+ result = ap_get_remote_host(r->connection, r->per_dir_config,
+ REMOTE_NAME, NULL);
+ else if (strcEQ(var, "REMOTE_IDENT"))
+ result = ap_get_remote_logname(r);
+ else if (strcEQ(var, "REMOTE_USER"))
+ result = r->user;
+ break;
+
+ case 'S':
+ case 's':
+ if (strcEQn(var, "SSL", 3)) break; /* shortcut common case */
+
+ if (strcEQ(var, "SERVER_ADMIN"))
+ result = r->server->server_admin;
+ else if (strcEQ(var, "SERVER_NAME"))
+ result = ap_get_server_name(r);
+ else if (strcEQ(var, "SERVER_PORT"))
+ result = apr_psprintf(p, "%u", ap_get_server_port(r));
+ else if (strcEQ(var, "SERVER_PROTOCOL"))
+ result = r->protocol;
+ else if (strcEQ(var, "SCRIPT_FILENAME"))
+ result = r->filename;
+ break;
+
+ default:
+ if (strcEQ(var, "PATH_INFO"))
+ result = r->path_info;
+ else if (strcEQ(var, "QUERY_STRING"))
+ result = r->args;
+ else if (strcEQ(var, "IS_SUBREQ"))
+ result = (r->main != NULL ? "true" : "false");
+ else if (strcEQ(var, "DOCUMENT_ROOT"))
+ result = ap_document_root(r);
+ else if (strcEQ(var, "AUTH_TYPE"))
+ result = r->ap_auth_type;
+ else if (strcEQ(var, "THE_REQUEST"))
+ result = r->the_request;
+ break;
+ }
+ }
+
+ /*
+ * Connection stuff
+ */
+ if (result == NULL && c != NULL) {
+
+ /* XXX-Can't get specific SSL info from NetWare */
+ /* SSLConnRec *sslconn = myConnConfig(c);
+ if (strlen(var) > 4 && strcEQn(var, "SSL_", 4)
+ && sslconn && sslconn->ssl)
+ result = ssl_var_lookup_ssl(p, c, var+4);*/
+
+ if (strlen(var) > 4 && strcEQn(var, "SSL_", 4))
+ result = NULL;
+ else if (strcEQ(var, "REMOTE_ADDR"))
+ result = c->remote_ip;
+ }
+
+ /*
+ * Totally independent stuff
+ */
+ if (result == NULL) {
+ if (strlen(var) > 12 && strcEQn(var, "SSL_VERSION_", 12))
+ result = NULL;
+ /* XXX-Can't get specific SSL info from NetWare */
+ /*result = ssl_var_lookup_ssl_version(p, var+12);*/
+ else if (strcEQ(var, "SERVER_SOFTWARE"))
+ result = ap_get_server_version();
+ else if (strcEQ(var, "API_VERSION")) {
+ result = apr_itoa(p, MODULE_MAGIC_NUMBER);
+ resdup = FALSE;
+ }
+ else if (strcEQ(var, "TIME_YEAR")) {
+ apr_time_exp_lt(&tm, apr_time_now());
+ result = apr_psprintf(p, "%02d%02d",
+ (tm.tm_year / 100) + 19, tm.tm_year % 100);
+ resdup = FALSE;
+ }
+#define MKTIMESTR(format, tmfield) \
+ apr_time_exp_lt(&tm, apr_time_now()); \
+ result = apr_psprintf(p, format, tm.tmfield); \
+ resdup = FALSE;
+ else if (strcEQ(var, "TIME_MON")) {
+ MKTIMESTR("%02d", tm_mon+1)
+ }
+ else if (strcEQ(var, "TIME_DAY")) {
+ MKTIMESTR("%02d", tm_mday)
+ }
+ else if (strcEQ(var, "TIME_HOUR")) {
+ MKTIMESTR("%02d", tm_hour)
+ }
+ else if (strcEQ(var, "TIME_MIN")) {
+ MKTIMESTR("%02d", tm_min)
+ }
+ else if (strcEQ(var, "TIME_SEC")) {
+ MKTIMESTR("%02d", tm_sec)
+ }
+ else if (strcEQ(var, "TIME_WDAY")) {
+ MKTIMESTR("%d", tm_wday)
+ }
+ else if (strcEQ(var, "TIME")) {
+ apr_time_exp_lt(&tm, apr_time_now());
+ result = apr_psprintf(p,
+ "%02d%02d%02d%02d%02d%02d%02d", (tm.tm_year / 100) + 19,
+ (tm.tm_year % 100), tm.tm_mon+1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+ resdup = FALSE;
+ }
+ /* all other env-variables from the parent Apache process */
+ else if (strlen(var) > 4 && strcEQn(var, "ENV:", 4)) {
+ result = apr_table_get(r->notes, var+4);
+ if (result == NULL)
+ result = apr_table_get(r->subprocess_env, var+4);
+ if (result == NULL)
+ result = getenv(var+4);
+ }
+ }
+
+ if (result != NULL && resdup)
+ result = apr_pstrdup(p, result);
+ if (result == NULL)
+ result = "";
+ return (char *)result;
+}
+
+static apr_status_t ssl_io_filter_Upgrade(ap_filter_t *f,
+ apr_bucket_brigade *bb)
+
+{
+#define SWITCH_STATUS_LINE "HTTP/1.1 101 Switching Protocols"
+#define UPGRADE_HEADER "Upgrade: TLS/1.0, HTTP/1.1"
+#define CONNECTION_HEADER "Connection: Upgrade"
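+ /* For orientation: the RFC 2817 style exchange this filter handles looks
+ * roughly like (host name is only illustrative)
+ *
+ *   OPTIONS * HTTP/1.1
+ *   Host: www.example.com
+ *   Upgrade: TLS/1.0
+ *   Connection: Upgrade
+ *
+ * answered with the 101 status line and headers defined above before the
+ * socket is SSLized.
+ */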
+ const char *upgrade;
+ const char *connection;
+ apr_bucket_brigade *upgradebb;
+ request_rec *r = f->r;
+ apr_socket_t *csd = NULL;
+ char *key;
+ unicode_t keyFileName[512];
+ int ret;
+ char *token_string;
+ char *token;
+ char *token_state;
+ secsocket_data *csd_data;
+
+ /* Just remove the filter; if it doesn't work the first time, it won't
+ * work at all for this request.
+ */
+ ap_remove_output_filter(f);
+
+ /* No need to ensure that this is a server with optional SSL, the filter
+ * is only inserted if that is true.
+ */
+
+ upgrade = apr_table_get(r->headers_in, "Upgrade");
+ if (upgrade == NULL) {
+ return ap_pass_brigade(f->next, bb);
+ }
+ token_string = apr_pstrdup(r->pool,upgrade);
+ token = apr_strtok(token_string,", ",&token_state);
+ while (token && strcmp(token,"TLS/1.0")) {
+        token = apr_strtok(NULL,", ",&token_state);
+ }
+    /* "Upgrade: TLS/1.0" header not found, don't do Upgrade */
+ if (!token) {
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ connection = apr_table_get(r->headers_in, "Connection");
+ token_string = apr_pstrdup(r->pool,connection);
+ token = apr_strtok(token_string,",",&token_state);
+ while (token && strcmp(token,"Upgrade")) {
+        token = apr_strtok(NULL,",",&token_state);
+ }
+    /* "Connection: Upgrade" header not found, don't do Upgrade */
+ if (!token) {
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ apr_table_unset(r->headers_out, "Upgrade");
+
+ if (r) {
+ csd_data = (secsocket_data*)ap_get_module_config(r->connection->conn_config, &nwssl_module);
+ csd = csd_data->csd;
+ }
+ else {
+        ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL,
+ "Unable to get upgradeable socket handle");
+ return ap_pass_brigade(f->next, bb);
+ }
+
+
+ if (r->method_number == M_OPTIONS) {
+ apr_bucket *b = NULL;
+ /* This is a mandatory SSL upgrade. */
+
+ upgradebb = apr_brigade_create(r->pool, f->c->bucket_alloc);
+
+ ap_fputstrs(f->next, upgradebb, SWITCH_STATUS_LINE, CRLF,
+ UPGRADE_HEADER, CRLF, CONNECTION_HEADER, CRLF, CRLF, NULL);
+
+ b = apr_bucket_flush_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(upgradebb, b);
+ ap_pass_brigade(f->next, upgradebb);
+ }
+ else {
+        /* This is optional and should be configurable; for now don't bother
+ * doing anything.
+ */
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ key = get_port_key(r->connection);
+
+ if (csd && key) {
+ int sockdes;
+ apr_os_sock_get(&sockdes, csd);
+
+
+ ret = SSLize_Socket(sockdes, key, r);
+ if (!ret) {
+ csd_data->is_secure = 1;
+ }
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "Upgradeable socket handle not found");
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ "Awaiting re-negotiation handshake");
+
+ return ap_pass_brigade(f->next, bb);
+}
+
+static void ssl_hook_Insert_Filter(request_rec *r)
+{
+ NWSSLSrvConfigRec *sc = get_nwssl_cfg(r->server);
+
+ if (isSecureUpgradeable (r)) {
+ ap_add_output_filter("UPGRADE_FILTER", NULL, r, r->connection);
+ }
+}
+
+static const command_rec nwssl_module_cmds[] =
+{
+ AP_INIT_TAKE23("SecureListen", set_secure_listener, NULL, RSRC_CONF,
+ "specify an address and/or port with a key pair name.\n"
+ "Optional third parameter of MUTUAL configures the port for mutual authentication."),
+ AP_INIT_TAKE2("NWSSLUpgradeable", set_secure_upgradeable_listener, NULL, RSRC_CONF,
+ "specify an address and/or port with a key pair name, that can be upgraded to an SSL connection.\n"
+      "The address and/or port must already have been defined using a Listen directive."),
+ AP_INIT_ITERATE("NWSSLTrustedCerts", set_trusted_certs, NULL, RSRC_CONF,
+ "Adds trusted certificates that are used to create secure connections to proxied servers"),
+ {NULL}
+};
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_register_output_filter ("UPGRADE_FILTER", ssl_io_filter_Upgrade, NULL, AP_FTYPE_PROTOCOL + 5);
+
+ ap_hook_pre_config(nwssl_pre_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_pre_connection(nwssl_pre_connection, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_config(nwssl_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_fixups(nwssl_hook_Fixup, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_http_method(nwssl_hook_http_method, NULL,NULL, APR_HOOK_MIDDLE);
+ ap_hook_default_port (nwssl_hook_default_port, NULL,NULL, APR_HOOK_MIDDLE);
+ ap_hook_insert_filter (ssl_hook_Insert_Filter, NULL,NULL, APR_HOOK_MIDDLE);
+
+ APR_REGISTER_OPTIONAL_FN(ssl_is_https);
+ APR_REGISTER_OPTIONAL_FN(ssl_var_lookup);
+
+ APR_REGISTER_OPTIONAL_FN(ssl_proxy_enable);
+ APR_REGISTER_OPTIONAL_FN(ssl_engine_disable);
+}
+
+module AP_MODULE_DECLARE_DATA nwssl_module =
+{
+ STANDARD20_MODULE_STUFF,
+    NULL,                        /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ nwssl_config_server_create, /* server config */
+ nwssl_config_server_merge, /* merge server config */
+ nwssl_module_cmds, /* command apr_table_t */
+ register_hooks
+};
+
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy.def
new file mode 100644
index 00000000..ab02a53c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy.def
@@ -0,0 +1,6 @@
+EXPORT proxy_module
+EXPORT proxy_hook_scheme_handler
+EXPORT proxy_hook_canon_handler
+EXPORT ap_proxy_ssl_enable
+EXPORT ap_proxy_ssl_disable
+EXPORT proxy_run_fixups
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy_connect.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy_connect.def
new file mode 100644
index 00000000..13611140
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy_connect.def
@@ -0,0 +1,4 @@
+EXPORT proxy_connect_module
+IMPORT proxy_module
+IMPORT proxy_hook_scheme_handler
+IMPORT proxy_hook_canon_handler
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy_ftp.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy_ftp.def
new file mode 100644
index 00000000..f2dba7d6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy_ftp.def
@@ -0,0 +1,4 @@
+EXPORT proxy_ftp_module
+IMPORT proxy_module
+IMPORT proxy_hook_scheme_handler
+IMPORT proxy_hook_canon_handler
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy_http.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy_http.def
new file mode 100644
index 00000000..b24358b1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_proxy_http.def
@@ -0,0 +1,7 @@
+EXPORT proxy_http_module
+IMPORT proxy_module
+IMPORT proxy_hook_scheme_handler
+IMPORT proxy_run_fixups
+IMPORT proxy_hook_canon_handler
+IMPORT ap_proxy_ssl_enable
+IMPORT ap_proxy_ssl_disable
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_rewrite.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_rewrite.def
new file mode 100644
index 00000000..cfdcf6b1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_rewrite.def
@@ -0,0 +1 @@
+EXPORT rewrite_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_speling.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_speling.def
new file mode 100644
index 00000000..3d45a6aa
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_speling.def
@@ -0,0 +1 @@
+EXPORT speling_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_status.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_status.def
new file mode 100644
index 00000000..9a5a32d4
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_status.def
@@ -0,0 +1,2 @@
+EXPORT status_module
+
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_unique_id.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_unique_id.def
new file mode 100644
index 00000000..0b72c1ec
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_unique_id.def
@@ -0,0 +1 @@
+EXPORT unique_id_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_usertrack.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_usertrack.def
new file mode 100644
index 00000000..7264c41e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_usertrack.def
@@ -0,0 +1 @@
+EXPORT usertrack_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_vhost_alias.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_vhost_alias.def
new file mode 100644
index 00000000..574b85f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/mod_vhost_alias.def
@@ -0,0 +1,2 @@
+EXPORT vhost_alias_module
+
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/netware/moddavfs.def b/rubbos/app/httpd-2.0.64/modules/arch/netware/moddavfs.def
new file mode 100644
index 00000000..67ec3117
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/netware/moddavfs.def
@@ -0,0 +1 @@
+EXPORT dav_fs_module
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/win32/.deps b/rubbos/app/httpd-2.0.64/modules/arch/win32/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/win32/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/win32/Makefile b/rubbos/app/httpd-2.0.64/modules/arch/win32/Makefile
new file mode 100644
index 00000000..7886ef35
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/win32/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/arch/win32
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/arch/win32
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/arch/win32
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/win32/Makefile.in b/rubbos/app/httpd-2.0.64/modules/arch/win32/Makefile.in
new file mode 100644
index 00000000..7c5c149d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/win32/Makefile.in
@@ -0,0 +1,3 @@
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/win32/config.m4 b/rubbos/app/httpd-2.0.64/modules/arch/win32/config.m4
new file mode 100644
index 00000000..25f7a850
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/win32/config.m4
@@ -0,0 +1,11 @@
+dnl modules enabled in this directory by default
+
+dnl APACHE_MODULE(name, helptext[, objects[, structname[, default[, config]]]])
+
+APACHE_MODPATH_INIT(arch/win32)
+
+APACHE_MODULE(isapi, isapi extension support, , , no)
+
+APR_ADDTO(LT_LDFLAGS,-export-dynamic)
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/win32/mod_isapi.c b/rubbos/app/httpd-2.0.64/modules/arch/win32/mod_isapi.c
new file mode 100644
index 00000000..859d5670
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/win32/mod_isapi.c
@@ -0,0 +1,1760 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_isapi.c - Internet Server Application (ISA) module for Apache
+ * by Alexei Kosut <akosut apache.org>, significant overhauls and
+ * redesign by William Rowe <wrowe covalent.net>, and hints from many
+ * other developer/users who have hit on specific flaws.
+ *
+ * This module implements the ISAPI Handler architecture, allowing
+ * Apache to load Internet Server Applications (ISAPI extensions),
+ * similar to the support in IIS, Zope, O'Reilly's WebSite and others.
+ *
+ * It is a complete implementation of the ISAPI 2.0 specification,
+ * except for "Microsoft extensions" to the API which provide
+ * asynchronous I/O. It is further extended to include additional
+ * "Microsoft extensions" through IIS 5.0, with some deficiencies
+ * where one-to-one mappings don't exist.
+ *
+ * Refer to /manual/mod/mod_isapi.html for additional details on
+ * configuration and use, but check this source for specific support
+ * of the API.
+ */
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "http_log.h"
+#include "util_script.h"
+#include "mod_core.h"
+#include "apr_lib.h"
+#include "apr_strings.h"
+#include "apr_portable.h"
+#include "apr_buckets.h"
+#include "apr_thread_mutex.h"
+#include "apr_thread_rwlock.h"
+#include "apr_hash.h"
+#include "mod_isapi.h"
+
+/* Retry frequency for a failed-to-load isapi .dll */
+#define ISAPI_RETRY apr_time_from_sec(30)
+
+/**********************************************************
+ *
+ * ISAPI Module Configuration
+ *
+ **********************************************************/
+
+module AP_MODULE_DECLARE_DATA isapi_module;
+
+#define ISAPI_UNDEF -1
+
+/* Our isapi per-dir config structure */
+typedef struct isapi_dir_conf {
+ int read_ahead_buflen;
+ int log_unsupported;
+ int log_to_errlog;
+ int log_to_query;
+ int fake_async;
+} isapi_dir_conf;
+
+typedef struct isapi_loaded isapi_loaded;
+
+apr_status_t isapi_lookup(apr_pool_t *p, server_rec *s, request_rec *r,
+ const char *fpath, isapi_loaded** isa);
+
+static void *create_isapi_dir_config(apr_pool_t *p, char *dummy)
+{
+ isapi_dir_conf *dir = apr_palloc(p, sizeof(isapi_dir_conf));
+
+ dir->read_ahead_buflen = ISAPI_UNDEF;
+ dir->log_unsupported = ISAPI_UNDEF;
+ dir->log_to_errlog = ISAPI_UNDEF;
+ dir->log_to_query = ISAPI_UNDEF;
+ dir->fake_async = ISAPI_UNDEF;
+
+ return dir;
+}
+
+static void *merge_isapi_dir_configs(apr_pool_t *p, void *base_, void *add_)
+{
+ isapi_dir_conf *base = (isapi_dir_conf *) base_;
+ isapi_dir_conf *add = (isapi_dir_conf *) add_;
+ isapi_dir_conf *dir = apr_palloc(p, sizeof(isapi_dir_conf));
+
+ dir->read_ahead_buflen = (add->read_ahead_buflen == ISAPI_UNDEF)
+ ? base->read_ahead_buflen
+ : add->read_ahead_buflen;
+ dir->log_unsupported = (add->log_unsupported == ISAPI_UNDEF)
+ ? base->log_unsupported
+ : add->log_unsupported;
+ dir->log_to_errlog = (add->log_to_errlog == ISAPI_UNDEF)
+ ? base->log_to_errlog
+ : add->log_to_errlog;
+ dir->log_to_query = (add->log_to_query == ISAPI_UNDEF)
+ ? base->log_to_query
+ : add->log_to_query;
+ dir->fake_async = (add->fake_async == ISAPI_UNDEF)
+ ? base->fake_async
+ : add->fake_async;
+
+ return dir;
+}
+
+static const char *isapi_cmd_cachefile(cmd_parms *cmd, void *dummy,
+ const char *filename)
+{
+ isapi_loaded *isa;
+ apr_finfo_t tmp;
+ apr_status_t rv;
+ char *fspec;
+
+ /* ### Just an observation ... it would be terribly cool to be
+ * able to use this per-dir, relative to the directory block being
+ * defined. The hash result remains global, but shorthand of
+ * <Directory "c:/webapps/isapi">
+ * ISAPICacheFile myapp.dll anotherapp.dll thirdapp.dll
+ * </Directory>
+     * would be very convenient.
+ */
+ fspec = ap_server_root_relative(cmd->pool, filename);
+ if (!fspec) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, APR_EBADPATH, cmd->server,
+ "ISAPI: invalid module path, skipping %s", filename);
+ return NULL;
+ }
+ if ((rv = apr_stat(&tmp, fspec, APR_FINFO_TYPE,
+ cmd->temp_pool)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, rv, cmd->server,
+ "ISAPI: unable to stat, skipping %s", fspec);
+ return NULL;
+ }
+ if (tmp.filetype != APR_REG) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server,
+ "ISAPI: not a regular file, skipping %s", fspec);
+ return NULL;
+ }
+
+    /* Load the extension as cached (with null request_rec) */
+ rv = isapi_lookup(cmd->pool, cmd->server, NULL, fspec, &isa);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, rv, cmd->server,
+ "ISAPI: unable to cache, skipping %s", fspec);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+static const command_rec isapi_cmds[] = {
+ AP_INIT_TAKE1("ISAPIReadAheadBuffer", ap_set_int_slot,
+ (void *)APR_OFFSETOF(isapi_dir_conf, read_ahead_buflen),
+ OR_FILEINFO, "Maximum client request body to initially pass to the"
+ " ISAPI handler (default: 49152)"),
+ AP_INIT_FLAG("ISAPILogNotSupported", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(isapi_dir_conf, log_unsupported),
+ OR_FILEINFO, "Log requests not supported by the ISAPI server"
+ " on or off (default: off)"),
+ AP_INIT_FLAG("ISAPIAppendLogToErrors", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(isapi_dir_conf, log_to_errlog),
+ OR_FILEINFO, "Send all Append Log requests to the error log"
+ " on or off (default: off)"),
+ AP_INIT_FLAG("ISAPIAppendLogToQuery", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(isapi_dir_conf, log_to_query),
+      OR_FILEINFO, "Append Log requests are concatenated to the query args"
+ " on or off (default: on)"),
+ AP_INIT_FLAG("ISAPIFakeAsync", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(isapi_dir_conf, fake_async),
+ OR_FILEINFO, "Fake Asynchronous support for isapi callbacks"
+ " on or off [Experimental] (default: off)"),
+ AP_INIT_ITERATE("ISAPICacheFile", isapi_cmd_cachefile, NULL,
+ RSRC_CONF, "Cache the specified ISAPI extension in-process"),
+ {NULL}
+};
+
+/**********************************************************
+ *
+ * ISAPI Module Cache handling section
+ *
+ **********************************************************/
+
+/* Our isapi global config values */
+static struct isapi_global_conf {
+ apr_pool_t *pool;
+ apr_thread_mutex_t *lock;
+ apr_hash_t *hash;
+} loaded;
+
+/* Our loaded isapi module description structure */
+struct isapi_loaded {
+ const char *filename;
+ apr_thread_rwlock_t *in_progress;
+ apr_status_t last_load_rv;
+ apr_time_t last_load_time;
+ apr_dso_handle_t *handle;
+ HSE_VERSION_INFO *isapi_version;
+ apr_uint32_t report_version;
+ apr_uint32_t timeout;
+ PFN_GETEXTENSIONVERSION GetExtensionVersion;
+ PFN_HTTPEXTENSIONPROC HttpExtensionProc;
+ PFN_TERMINATEEXTENSION TerminateExtension;
+};
+
+static apr_status_t isapi_unload(isapi_loaded *isa, int force)
+{
+ /* All done with the DLL... get rid of it...
+ *
+ * If optionally cached, and we weren't asked to force the unload,
+ * pass HSE_TERM_ADVISORY_UNLOAD, and if it returns 1, unload,
+ * otherwise, leave it alone (it didn't choose to cooperate.)
+ */
+ if (!isa->handle) {
+ return APR_SUCCESS;
+ }
+ if (isa->TerminateExtension) {
+ if (force) {
+ (*isa->TerminateExtension)(HSE_TERM_MUST_UNLOAD);
+ }
+ else if (!(*isa->TerminateExtension)(HSE_TERM_ADVISORY_UNLOAD)) {
+ return APR_EGENERAL;
+ }
+ }
+ apr_dso_unload(isa->handle);
+ isa->handle = NULL;
+ return APR_SUCCESS;
+}
+
+static apr_status_t cleanup_isapi(void *isa_)
+{
+ isapi_loaded* isa = (isapi_loaded*) isa_;
+
+ /* We must force the module to unload, we are about
+ * to lose the isapi structure's allocation entirely.
+ */
+ return isapi_unload(isa, 1);
+}
+
+static apr_status_t isapi_load(apr_pool_t *p, server_rec *s, isapi_loaded *isa)
+{
+ apr_status_t rv;
+
+ isa->isapi_version = apr_pcalloc(p, sizeof(HSE_VERSION_INFO));
+
+    /* TODO: These ought to become overridable, so that we
+     * can ensure a given isapi can be fooled into behaving well.
+ *
+ * The tricky bit, they aren't really a per-dir sort of
+ * config, they will always be constant across every
+ * reference to the .dll no matter what context (vhost,
+ * location, etc) they apply to.
+ */
+ isa->report_version = 0x500; /* Revision 5.0 */
+ isa->timeout = 300 * 1000000; /* microsecs, not used */
+
+ rv = apr_dso_load(&isa->handle, isa->filename, p);
+ if (rv)
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "ISAPI: failed to load %s", isa->filename);
+ isa->handle = NULL;
+ return rv;
+ }
+
+ rv = apr_dso_sym((void**)&isa->GetExtensionVersion, isa->handle,
+ "GetExtensionVersion");
+ if (rv)
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "ISAPI: missing GetExtensionVersion() in %s",
+ isa->filename);
+ apr_dso_unload(isa->handle);
+ isa->handle = NULL;
+ return rv;
+ }
+
+ rv = apr_dso_sym((void**)&isa->HttpExtensionProc, isa->handle,
+ "HttpExtensionProc");
+ if (rv)
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "ISAPI: missing HttpExtensionProc() in %s",
+ isa->filename);
+ apr_dso_unload(isa->handle);
+ isa->handle = NULL;
+ return rv;
+ }
+
+ /* TerminateExtension() is an optional interface */
+ rv = apr_dso_sym((void**)&isa->TerminateExtension, isa->handle,
+ "TerminateExtension");
+ apr_set_os_error(0);
+
+ /* Run GetExtensionVersion() */
+ if (!(isa->GetExtensionVersion)(isa->isapi_version)) {
+ apr_status_t rv = apr_get_os_error();
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "ISAPI: failed call to GetExtensionVersion() in %s",
+ isa->filename);
+ apr_dso_unload(isa->handle);
+ isa->handle = NULL;
+ return rv;
+ }
+
+ apr_pool_cleanup_register(p, isa, cleanup_isapi,
+ apr_pool_cleanup_null);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t isapi_lookup(apr_pool_t *p, server_rec *s, request_rec *r,
+ const char *fpath, isapi_loaded** isa)
+{
+ apr_status_t rv;
+ const char *key;
+
+ if ((rv = apr_thread_mutex_lock(loaded.lock)) != APR_SUCCESS) {
+ return rv;
+ }
+
+ *isa = apr_hash_get(loaded.hash, fpath, APR_HASH_KEY_STRING);
+
+ if (*isa) {
+
+ /* If we find this lock exists, use a set-aside copy of gainlock
+ * to avoid race conditions on NULLing the in_progress variable
+ * when the load has completed. Release the global isapi hash
+ * lock so other requests can proceed, then rdlock for completion
+ * of loading our desired dll or wrlock if we would like to retry
+ * loading the dll (because last_load_rv failed and retry is up.)
+ */
+ apr_thread_rwlock_t *gainlock = (*isa)->in_progress;
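+        /* In short: the thread that created this hash entry (during a
+         * request) holds a write lock on in_progress while it loads the
+         * dll; waiters take a read lock on the gainlock copy until that
+         * load finishes, or a write lock to own a retry once ISAPI_RETRY
+         * has elapsed since the last failed load.
+         */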
+
+ /* gainlock is NULLed after the module loads successfully.
+ * This free-threaded module can be used without any locking.
+ */
+ if (!gainlock) {
+ rv = (*isa)->last_load_rv;
+ apr_thread_mutex_unlock(loaded.lock);
+ return rv;
+ }
+
+
+ if ((*isa)->last_load_rv == APR_SUCCESS) {
+ apr_thread_mutex_unlock(loaded.lock);
+ if ((rv = apr_thread_rwlock_rdlock(gainlock))
+ != APR_SUCCESS) {
+ return rv;
+ }
+ rv = (*isa)->last_load_rv;
+ apr_thread_rwlock_unlock(gainlock);
+ return rv;
+ }
+
+ if (apr_time_now() > (*isa)->last_load_time + ISAPI_RETRY) {
+
+ /* Remember last_load_time before releasing the global
+ * hash lock to avoid colliding with another thread
+ * that hit this exception at the same time as our
+ * retry attempt, since we unlock the global mutex
+ * before attempting a write lock for this module.
+ */
+ apr_time_t check_time = (*isa)->last_load_time;
+ apr_thread_mutex_unlock(loaded.lock);
+
+ if ((rv = apr_thread_rwlock_wrlock(gainlock))
+ != APR_SUCCESS) {
+ return rv;
+ }
+
+ /* If last_load_time is unchanged, we still own this
+ * retry, otherwise presume another thread provided
+ * our retry (for good or ill). Relock the global
+ * hash for updating last_load_ vars, so their update
+ * is always atomic to the global lock.
+ */
+ if (check_time == (*isa)->last_load_time) {
+
+ rv = isapi_load(loaded.pool, s, *isa);
+
+ apr_thread_mutex_lock(loaded.lock);
+ (*isa)->last_load_rv = rv;
+ (*isa)->last_load_time = apr_time_now();
+ apr_thread_mutex_unlock(loaded.lock);
+ }
+ else {
+ rv = (*isa)->last_load_rv;
+ }
+ apr_thread_rwlock_unlock(gainlock);
+
+ return rv;
+ }
+
+ /* We haven't hit timeup on retry, let's grab the last_rv
+ * within the hash mutex before unlocking.
+ */
+ rv = (*isa)->last_load_rv;
+ apr_thread_mutex_unlock(loaded.lock);
+
+ return rv;
+ }
+
+ /* If the module was not found, it's time to create a hash key entry
+ * before releasing the hash lock to avoid multiple threads from
+ * loading the same module.
+ */
+ key = apr_pstrdup(loaded.pool, fpath);
+ *isa = apr_pcalloc(loaded.pool, sizeof(isapi_loaded));
+ (*isa)->filename = key;
+ if (r) {
+ /* A mutex that exists only long enough to attempt to
+         * load this isapi dll, then release this module to all
+ * other takers that came along during the one-time
+ * load process. Short lifetime for this lock would
+ * be great, however, using r->pool is nasty if those
+ * blocked on the lock haven't all unlocked before we
+ * attempt to destroy. A nastier race condition than
+ * I want to deal with at this moment...
+ */
+ apr_thread_rwlock_create(&(*isa)->in_progress, loaded.pool);
+ apr_thread_rwlock_wrlock((*isa)->in_progress);
+ }
+
+ apr_hash_set(loaded.hash, key, APR_HASH_KEY_STRING, *isa);
+
+ /* Now attempt to load the isapi on our own time,
+ * allow other isapi processing to resume.
+ */
+ apr_thread_mutex_unlock(loaded.lock);
+
+ rv = isapi_load(loaded.pool, s, *isa);
+ (*isa)->last_load_time = apr_time_now();
+ (*isa)->last_load_rv = rv;
+
+ if (r && (rv == APR_SUCCESS)) {
+ /* Let others who are blocked on this particular
+ * module resume their requests, for better or worse.
+ */
+ apr_thread_rwlock_t *unlock = (*isa)->in_progress;
+ (*isa)->in_progress = NULL;
+ apr_thread_rwlock_unlock(unlock);
+ }
+ else if (!r && (rv != APR_SUCCESS)) {
+ /* We must leave a rwlock around for requests to retry
+ * loading this dll after timeup... since we were in
+ * the setup code we had avoided creating this lock.
+ */
+ apr_thread_rwlock_create(&(*isa)->in_progress, loaded.pool);
+ }
+
+ return (*isa)->last_load_rv;
+}
+
+/**********************************************************
+ *
+ * ISAPI Module request callbacks section
+ *
+ **********************************************************/
+
+/* Our "Connection ID" structure */
+struct isapi_cid {
+ EXTENSION_CONTROL_BLOCK *ecb;
+ isapi_dir_conf dconf;
+ isapi_loaded *isa;
+ request_rec *r;
+ int headers_set;
+ int response_sent;
+ PFN_HSE_IO_COMPLETION completion;
+ void *completion_arg;
+ apr_thread_mutex_t *completed;
+};
+
+int APR_THREAD_FUNC GetServerVariable (isapi_cid *cid,
+ char *variable_name,
+ void *buf_ptr,
+ apr_uint32_t *buf_size)
+{
+ request_rec *r = cid->r;
+ const char *result;
+ char *buf_data = (char*)buf_ptr;
+ apr_uint32_t len;
+
+ if (!strcmp(variable_name, "ALL_HTTP"))
+ {
+ /* crlf delimited, colon split, comma separated and
+ * null terminated list of HTTP_ vars
+ */
+ const apr_array_header_t *arr = apr_table_elts(r->subprocess_env);
+ const apr_table_entry_t *elts = (const apr_table_entry_t *)arr->elts;
+ int i;
+
+ for (len = 0, i = 0; i < arr->nelts; i++) {
+ if (!strncmp(elts[i].key, "HTTP_", 5)) {
+ len += strlen(elts[i].key) + strlen(elts[i].val) + 3;
+ }
+ }
+
+ if (*buf_size < len + 1) {
+ *buf_size = len + 1;
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INSUFFICIENT_BUFFER));
+ return 0;
+ }
+
+ for (i = 0; i < arr->nelts; i++) {
+ if (!strncmp(elts[i].key, "HTTP_", 5)) {
+ strcpy(buf_data, elts[i].key);
+ buf_data += strlen(elts[i].key);
+ *(buf_data++) = ':';
+ strcpy(buf_data, elts[i].val);
+ buf_data += strlen(elts[i].val);
+ *(buf_data++) = '\r';
+ *(buf_data++) = '\n';
+ }
+ }
+
+ *(buf_data++) = '\0';
+ *buf_size = len + 1;
+ return 1;
+ }
+
+ if (!strcmp(variable_name, "ALL_RAW"))
+ {
+ /* crlf delimited, colon split, comma separated and
+ * null terminated list of the raw request header
+ */
+ const apr_array_header_t *arr = apr_table_elts(r->headers_in);
+ const apr_table_entry_t *elts = (const apr_table_entry_t *)arr->elts;
+ int i;
+
+ for (len = 0, i = 0; i < arr->nelts; i++) {
+ len += strlen(elts[i].key) + strlen(elts[i].val) + 4;
+ }
+
+ if (*buf_size < len + 1) {
+ *buf_size = len + 1;
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INSUFFICIENT_BUFFER));
+ return 0;
+ }
+
+ for (i = 0; i < arr->nelts; i++) {
+ strcpy(buf_data, elts[i].key);
+ buf_data += strlen(elts[i].key);
+ *(buf_data++) = ':';
+ *(buf_data++) = ' ';
+ strcpy(buf_data, elts[i].val);
+ buf_data += strlen(elts[i].val);
+ *(buf_data++) = '\r';
+ *(buf_data++) = '\n';
+ }
+ *(buf_data++) = '\0';
+ *buf_size = len + 1;
+ return 1;
+ }
+
+ /* Not a special case */
+ result = apr_table_get(r->subprocess_env, variable_name);
+
+ if (result) {
+ len = strlen(result);
+ if (*buf_size < len + 1) {
+ *buf_size = len + 1;
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INSUFFICIENT_BUFFER));
+ return 0;
+ }
+ strcpy(buf_data, result);
+ *buf_size = len + 1;
+ return 1;
+ }
+
+ /* Not Found */
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_INDEX));
+ return 0;
+}
+
+int APR_THREAD_FUNC ReadClient(isapi_cid *cid,
+ void *buf_data,
+ apr_uint32_t *buf_size)
+{
+ request_rec *r = cid->r;
+ apr_uint32_t read = 0;
+    int res = 0;
+
+ if (r->remaining < *buf_size) {
+ *buf_size = (apr_size_t)r->remaining;
+ }
+
+ while (read < *buf_size &&
+ ((res = ap_get_client_block(r, (char*)buf_data + read,
+ *buf_size - read)) > 0)) {
+ read += res;
+ }
+
+ *buf_size = read;
+ if (res < 0) {
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_READ_FAULT));
+ }
+ return (res >= 0);
+}
+
+/* Common code invoked for both HSE_REQ_SEND_RESPONSE_HEADER and
+ * the newer HSE_REQ_SEND_RESPONSE_HEADER_EX ServerSupportFunction(s)
+ * as well as other functions that write responses and presume that
+ * the support functions above are optional.
+ *
+ * Other callers trying to split headers and body bytes should pass
+ * head/headlen alone (leaving stat/statlen NULL/0), so that they
+ * get a proper count of bytes consumed. The argument passed to stat
+ * isn't counted as the head bytes are.
+ */
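+/* Illustrative calls (the literal values here are assumed, not taken from
+ * any specific caller):
+ *
+ *   send_response_header(cid, "200 OK",
+ *                        "Content-Type: text/html\r\n\r\n",
+ *                        strlen("200 OK"),
+ *                        strlen("Content-Type: text/html\r\n\r\n"));
+ *
+ * or, headers only, with the status line (if any) embedded in the buffer:
+ *
+ *   send_response_header(cid, NULL, buf, 0, buflen);
+ *
+ * Callers below treat a negative return as failure and otherwise use the
+ * return value as the count of header bytes consumed from 'head'.
+ */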
+static apr_ssize_t send_response_header(isapi_cid *cid,
+ const char *stat,
+ const char *head,
+ apr_size_t statlen,
+ apr_size_t headlen)
+{
+ int head_present = 1;
+ int termarg;
+ int res;
+ int old_status;
+ const char *termch;
+ apr_size_t ate = 0;
+
+ if (!head || headlen == 0 || !*head) {
+ head = stat;
+ stat = NULL;
+ headlen = statlen;
+ statlen = 0;
+ head_present = 0; /* Don't eat the header */
+ }
+
+ if (!stat || statlen == 0 || !*stat) {
+ if (head && headlen && *head && ((stat = memchr(head, '\r', headlen))
+ || (stat = memchr(head, '\n', headlen))
+ || (stat = memchr(head, '\0', headlen))
+ || (stat = head + headlen))) {
+ statlen = stat - head;
+ if (memchr(head, ':', statlen)) {
+ stat = "Status: 200 OK";
+ statlen = strlen(stat);
+ }
+ else {
+ const char *flip = head;
+ head = stat;
+ stat = flip;
+ headlen -= statlen;
+ ate += statlen;
+ if (*head == '\r' && headlen)
+ ++head, --headlen, ++ate;
+ if (*head == '\n' && headlen)
+ ++head, --headlen, ++ate;
+ }
+ }
+ }
+
+ if (stat && (statlen > 0) && *stat) {
+ char *newstat;
+ if (!apr_isdigit(*stat)) {
+ const char *stattok = stat;
+ int toklen = statlen;
+ while (toklen && *stattok && !apr_isspace(*stattok)) {
+ ++stattok; --toklen;
+ }
+ while (toklen && apr_isspace(*stattok)) {
+ ++stattok; --toklen;
+ }
+ /* Now decide if we follow the xxx message
+ * or the http/x.x xxx message format
+ */
+ if (toklen && apr_isdigit(*stattok)) {
+ statlen = toklen;
+ stat = stattok;
+ }
+ }
+ newstat = apr_palloc(cid->r->pool, statlen + 9);
+ strcpy(newstat, "Status: ");
+ apr_cpystrn(newstat + 8, stat, statlen + 1);
+ stat = newstat;
+ statlen += 8;
+ }
+
+ if (!head || headlen == 0 || !*head) {
+ head = "\r\n";
+ headlen = 2;
+ }
+ else
+ {
+ if (head[headlen - 1] && head[headlen]) {
+ /* Whoops... not NULL terminated */
+ head = apr_pstrndup(cid->r->pool, head, headlen);
+ }
+ }
+
+ /* Seems IIS does not enforce the requirement for \r\n termination
+ * on HSE_REQ_SEND_RESPONSE_HEADER, but we won't panic...
+ * ap_scan_script_header_err_strs handles this aspect for us.
+ *
+ * Parse them out, or die trying
+ */
+ old_status = cid->r->status;
+
+ if (stat) {
+ res = ap_scan_script_header_err_strs(cid->r, NULL, &termch, &termarg,
+ stat, head, NULL);
+ }
+ else {
+ res = ap_scan_script_header_err_strs(cid->r, NULL, &termch, &termarg,
+ head, NULL);
+ }
+
+ /* Set our status. */
+ if (res) {
+ /* This is an immediate error result from the parser
+ */
+ cid->r->status = res;
+ cid->r->status_line = ap_get_status_line(cid->r->status);
+ cid->ecb->dwHttpStatusCode = cid->r->status;
+ }
+ else if (cid->r->status) {
+ /* We have a status in r->status, so let's just use it.
+ * This is likely to be the Status: parsed above, and
+ * may also be a delayed error result from the parser.
+ * If it was filled in, status_line should also have
+ * been filled in.
+ */
+ cid->ecb->dwHttpStatusCode = cid->r->status;
+ }
+ else if (cid->ecb->dwHttpStatusCode
+ && cid->ecb->dwHttpStatusCode != HTTP_OK) {
+ /* Now we fall back on dwHttpStatusCode if it appears
+ * ap_scan_script_header fell back on the default code.
+ * Any other results set dwHttpStatusCode to the decoded
+ * status value.
+ */
+ cid->r->status = cid->ecb->dwHttpStatusCode;
+ cid->r->status_line = ap_get_status_line(cid->r->status);
+ }
+ else if (old_status) {
+ /* Well... either there is no dwHttpStatusCode or it's HTTP_OK.
+ * In any case, we don't have a good status to return yet...
+ * Perhaps the one we came in with will be better. Let's use it,
+         * if we were given one (note this is a pedantic case, it would
+ * normally be covered above unless the scan script code unset
+ * the r->status). Should there be a check here as to whether
+ * we are setting a valid response code?
+ */
+ cid->r->status = old_status;
+ cid->r->status_line = ap_get_status_line(cid->r->status);
+ cid->ecb->dwHttpStatusCode = cid->r->status;
+ }
+ else {
+ /* None of dwHttpStatusCode, the parser's r->status nor the
+ * old value of r->status were helpful, and nothing was decoded
+ * from Status: string passed to us. Let's just say HTTP_OK
+ * and get the data out, this was the isapi dev's oversight.
+ */
+ cid->r->status = HTTP_OK;
+ cid->r->status_line = ap_get_status_line(cid->r->status);
+ cid->ecb->dwHttpStatusCode = cid->r->status;
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, cid->r,
+ "ISAPI: Could not determine HTTP response code; using %d",
+ cid->r->status);
+ }
+
+ if (cid->r->status == HTTP_INTERNAL_SERVER_ERROR) {
+ return -1;
+ }
+
+ /* If only Status was passed, we consumed nothing
+ */
+ if (!head_present)
+ return 0;
+
+ cid->headers_set = 1;
+
+ /* If all went well, tell the caller we consumed the headers complete
+ */
+ if (!termch)
+ return(ate + headlen);
+
+ /* Any data left must be sent directly by the caller, all we
+ * give back is the size of the headers we consumed (which only
+ * happens if the parser got to the head arg, which varies based
+     * on whether we passed stat+head to scan, or only head).
+ */
+ if (termch && (termarg == (stat ? 1 : 0))
+ && head_present && head + headlen > termch) {
+ return ate + termch - head;
+ }
+ return ate;
+}
+
+int APR_THREAD_FUNC WriteClient(isapi_cid *cid,
+ void *buf_ptr,
+ apr_uint32_t *size_arg,
+ apr_uint32_t flags)
+{
+ request_rec *r = cid->r;
+ conn_rec *c = r->connection;
+ apr_uint32_t buf_size = *size_arg;
+ char *buf_data = (char*)buf_ptr;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ apr_status_t rv = APR_SUCCESS;
+
+ if (!cid->headers_set) {
+ /* It appears that the foxisapi module and other clients
+ * presume that WriteClient("headers\n\nbody") will work.
+ * Parse them out, or die trying.
+ */
+ apr_ssize_t ate;
+ ate = send_response_header(cid, NULL, buf_data, 0, buf_size);
+ if (ate < 0) {
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+ }
+
+ buf_data += ate;
+ buf_size -= ate;
+ }
+
+ if (buf_size) {
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ b = apr_bucket_transient_create(buf_data, buf_size, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ rv = ap_pass_brigade(r->output_filters, bb);
+ cid->response_sent = 1;
+ if (rv != APR_SUCCESS)
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
+ "ISAPI: WriteClient ap_pass_brigade "
+ "failed: %s", r->filename);
+ }
+
+ if ((flags & HSE_IO_ASYNC) && cid->completion) {
+ if (rv == APR_SUCCESS) {
+ cid->completion(cid->ecb, cid->completion_arg,
+ *size_arg, ERROR_SUCCESS);
+ }
+ else {
+ cid->completion(cid->ecb, cid->completion_arg,
+ *size_arg, ERROR_WRITE_FAULT);
+ }
+ }
+ return (rv == APR_SUCCESS);
+}
+
+/* A "safe" maximum bucket size, 1Gb */
+#define MAX_BUCKET_SIZE (0x40000000)
+
+apr_bucket *brigade_insert_file(apr_bucket_brigade *bb,
+ apr_file_t *f,
+ apr_off_t start,
+ apr_off_t length,
+ apr_pool_t *p)
+{
+ apr_bucket *e;
+
+ if (sizeof(apr_off_t) == sizeof(apr_size_t) || length < MAX_BUCKET_SIZE) {
+ e = apr_bucket_file_create(f, start, (apr_size_t)length, p,
+ bb->bucket_alloc);
+ }
+ else {
+ /* Several buckets are needed. */
+ e = apr_bucket_file_create(f, start, MAX_BUCKET_SIZE, p,
+ bb->bucket_alloc);
+
+ while (length > MAX_BUCKET_SIZE) {
+ apr_bucket *ce;
+ apr_bucket_copy(e, &ce);
+ APR_BRIGADE_INSERT_TAIL(bb, ce);
+ e->start += MAX_BUCKET_SIZE;
+ length -= MAX_BUCKET_SIZE;
+ }
+ e->length = (apr_size_t)length; /* Resize just the last bucket */
+ }
+
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ return e;
+}
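+/* Worked example (sizes assumed): with a 32-bit apr_size_t and the 1Gb
+ * MAX_BUCKET_SIZE above, a 2.5Gb file becomes two 1Gb file buckets followed
+ * by one 0.5Gb bucket, e->start advancing by MAX_BUCKET_SIZE for each copy;
+ * a single bucket is used when length is below MAX_BUCKET_SIZE or when
+ * apr_off_t and apr_size_t are the same size.
+ */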
+
+int APR_THREAD_FUNC ServerSupportFunction(isapi_cid *cid,
+ apr_uint32_t HSE_code,
+ void *buf_ptr,
+ apr_uint32_t *buf_size,
+ apr_uint32_t *data_type)
+{
+ request_rec *r = cid->r;
+ conn_rec *c = r->connection;
+ char *buf_data = (char*)buf_ptr;
+ request_rec *subreq;
+ apr_status_t rv;
+
+ switch (HSE_code) {
+ case HSE_REQ_SEND_URL_REDIRECT_RESP:
+ /* Set the status to be returned when the HttpExtensionProc()
+ * is done.
+ * WARNING: Microsoft now advertises HSE_REQ_SEND_URL_REDIRECT_RESP
+         * and HSE_REQ_SEND_URL as equivalent per the Jan 2000 SDK.
+         * They most definitely are not, even in their own samples.
+ */
+ apr_table_set (r->headers_out, "Location", buf_data);
+ cid->r->status = cid->ecb->dwHttpStatusCode = HTTP_MOVED_TEMPORARILY;
+ cid->r->status_line = ap_get_status_line(cid->r->status);
+ cid->headers_set = 1;
+ return 1;
+
+ case HSE_REQ_SEND_URL:
+ /* Soak up remaining input */
+ if (r->remaining > 0) {
+ char argsbuffer[HUGE_STRING_LEN];
+ while (ap_get_client_block(r, argsbuffer, HUGE_STRING_LEN));
+ }
+
+ /* Reset the method to GET */
+ r->method = apr_pstrdup(r->pool, "GET");
+ r->method_number = M_GET;
+
+ /* Don't let anyone think there's still data */
+ apr_table_unset(r->headers_in, "Content-Length");
+
+ /* AV fault per PR3598 - redirected path is lost! */
+ buf_data = apr_pstrdup(r->pool, (char*)buf_data);
+ ap_internal_redirect(buf_data, r);
+ return 1;
+
+ case HSE_REQ_SEND_RESPONSE_HEADER:
+ {
+ /* Parse them out, or die trying */
+ apr_size_t statlen = 0, headlen = 0;
+ apr_ssize_t ate;
+ if (buf_data)
+ statlen = strlen((char*) buf_data);
+ if (data_type)
+ headlen = strlen((char*) data_type);
+ ate = send_response_header(cid, (char*) buf_data,
+ (char*) data_type,
+ statlen, headlen);
+ if (ate < 0) {
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+ }
+ else if ((apr_size_t)ate < headlen) {
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ bb = apr_brigade_create(cid->r->pool, c->bucket_alloc);
+ b = apr_bucket_transient_create((char*) data_type + ate,
+ headlen - ate, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ rv = ap_pass_brigade(cid->r->output_filters, bb);
+ cid->response_sent = 1;
+ if (rv != APR_SUCCESS)
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
+ "ISAPI: ServerSupport function "
+ "HSE_REQ_SEND_RESPONSE_HEADER "
+ "ap_pass_brigade failed: %s", r->filename);
+ return (rv == APR_SUCCESS);
+ }
+ /* Deliberately hold off sending 'just the headers' to begin to
+ * accumulate the body and speed up the overall response, or at
+         * least wait for the end of the session.
+ */
+ return 1;
+ }
+
+ case HSE_REQ_DONE_WITH_SESSION:
+ /* Signal to resume the thread completing this request,
+ * leave it to the pool cleanup to dispose of our mutex.
+ */
+ if (cid->completed) {
+ (void)apr_thread_mutex_unlock(cid->completed);
+ return 1;
+ }
+ else if (cid->dconf.log_unsupported) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: ServerSupportFunction "
+ "HSE_REQ_DONE_WITH_SESSION is not supported: %s",
+ r->filename);
+ }
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+
+ case HSE_REQ_MAP_URL_TO_PATH:
+ {
+ /* Map a URL to a filename */
+ char *file = (char *)buf_data;
+ apr_uint32_t len;
+ subreq = ap_sub_req_lookup_uri(
+ apr_pstrndup(cid->r->pool, file, *buf_size), r, NULL);
+
+ if (!subreq->filename) {
+ ap_destroy_sub_req(subreq);
+ return 0;
+ }
+
+ len = (apr_uint32_t)strlen(r->filename);
+
+ if ((subreq->finfo.filetype == APR_DIR)
+ && (!subreq->path_info)
+ && (file[len - 1] != '/'))
+ file = apr_pstrcat(cid->r->pool, subreq->filename, "/", NULL);
+ else
+ file = apr_pstrcat(cid->r->pool, subreq->filename,
+ subreq->path_info, NULL);
+
+ ap_destroy_sub_req(subreq);
+
+#ifdef WIN32
+ /* We need to make this a real Windows path name */
+ apr_filepath_merge(&file, "", file, APR_FILEPATH_NATIVE, r->pool);
+#endif
+
+ *buf_size = apr_cpystrn(buf_data, file, *buf_size) - buf_data;
+
+ return 1;
+ }
+
+ case HSE_REQ_GET_SSPI_INFO:
+ if (cid->dconf.log_unsupported)
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: ServerSupportFunction HSE_REQ_GET_SSPI_INFO "
+ "is not supported: %s", r->filename);
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+
+ case HSE_APPEND_LOG_PARAMETER:
+ /* Log buf_data, of buf_size bytes, in the URI Query (cs-uri-query) field
+ */
+ apr_table_set(r->notes, "isapi-parameter", (char*) buf_data);
+ if (cid->dconf.log_to_query) {
+ if (r->args)
+ r->args = apr_pstrcat(r->pool, r->args, (char*) buf_data, NULL);
+ else
+ r->args = apr_pstrdup(r->pool, (char*) buf_data);
+ }
+ if (cid->dconf.log_to_errlog)
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
+ "ISAPI: %s: %s", cid->r->filename,
+ (char*) buf_data);
+ return 1;
+
+ case HSE_REQ_IO_COMPLETION:
+ /* Emulates a completion port... Record callback address and
+ * user defined arg, we will call this after any async request
+ * (e.g. transmitfile) as if the request executed async.
+ * Per MS docs... HSE_REQ_IO_COMPLETION replaces any prior call
+ * to HSE_REQ_IO_COMPLETION, and buf_data may be set to NULL.
+ */
+ if (cid->dconf.fake_async) {
+ cid->completion = (PFN_HSE_IO_COMPLETION) buf_data;
+ cid->completion_arg = (void *) data_type;
+ return 1;
+ }
+ if (cid->dconf.log_unsupported)
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: ServerSupportFunction HSE_REQ_IO_COMPLETION "
+ "is not supported: %s", r->filename);
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+
+ case HSE_REQ_TRANSMIT_FILE:
+ {
+ /* we do nothing with (tf->dwFlags & HSE_DISCONNECT_AFTER_SEND)
+ */
+ HSE_TF_INFO *tf = (HSE_TF_INFO*)buf_data;
+ apr_uint32_t sent = 0;
+ apr_ssize_t ate = 0;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ apr_file_t *fd;
+ apr_off_t fsize;
+
+ if (!cid->dconf.fake_async && (tf->dwFlags & HSE_IO_ASYNC)) {
+ if (cid->dconf.log_unsupported)
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: ServerSupportFunction HSE_REQ_TRANSMIT_FILE "
+ "as HSE_IO_ASYNC is not supported: %s", r->filename);
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+ }
+
+ /* Presume the handle was opened with the CORRECT semantics
+ * for TransmitFile
+ */
+ if ((rv = apr_os_file_put(&fd, &tf->hFile,
+ APR_READ | APR_XTHREAD, r->pool))
+ != APR_SUCCESS) {
+ return 0;
+ }
+ if (tf->BytesToWrite) {
+ fsize = tf->BytesToWrite;
+ }
+ else {
+ apr_finfo_t fi;
+ if (apr_file_info_get(&fi, APR_FINFO_SIZE, fd) != APR_SUCCESS) {
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+ }
+ fsize = fi.size - tf->Offset;
+ }
+
+ /* apr_dupfile_oshandle (&fd, tf->hFile, r->pool); */
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ /* According to MS: if calling HSE_REQ_TRANSMIT_FILE with the
+ * HSE_IO_SEND_HEADERS flag, then you can't otherwise call any
+ * HSE_SEND_RESPONSE_HEADERS* fn, but if you don't use the flag,
+ * you must have done so. They document that the pHead headers
+ * option is valid only for HSE_IO_SEND_HEADERS - we are a bit
+         * more flexible and assume that with the flag, pHead holds the
+         * response headers, and without it, pHead simply contains text
+ * (handled after this case).
+ */
+ if ((tf->dwFlags & HSE_IO_SEND_HEADERS) && tf->pszStatusCode) {
+ ate = send_response_header(cid, tf->pszStatusCode,
+ (char*)tf->pHead,
+ strlen(tf->pszStatusCode),
+ tf->HeadLength);
+ }
+ else if (!cid->headers_set && tf->pHead && tf->HeadLength
+ && *(char*)tf->pHead) {
+ ate = send_response_header(cid, NULL, (char*)tf->pHead,
+ 0, tf->HeadLength);
+ if (ate < 0)
+ {
+ apr_brigade_destroy(bb);
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+ }
+ }
+
+ if (tf->pHead && (apr_size_t)ate < tf->HeadLength) {
+ b = apr_bucket_transient_create((char*)tf->pHead + ate,
+ tf->HeadLength - ate,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ sent = tf->HeadLength;
+ }
+
+ sent += (apr_uint32_t)fsize;
+ brigade_insert_file(bb, fd, tf->Offset, fsize, r->pool);
+
+ if (tf->pTail && tf->TailLength) {
+ sent += tf->TailLength;
+ b = apr_bucket_transient_create((char*)tf->pTail,
+ tf->TailLength, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ }
+
+ b = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ rv = ap_pass_brigade(r->output_filters, bb);
+ cid->response_sent = 1;
+ if (rv != APR_SUCCESS)
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
+ "ISAPI: ServerSupport function "
+ "HSE_REQ_TRANSMIT_FILE "
+ "ap_pass_brigade failed: %s", r->filename);
+
+        /* Use tf->pfnHseIO with tf->pContext, or if NULL, then use
+         * cid->completion with cid->completion_arg as the HseIO callback.
+ */
+ if (tf->dwFlags & HSE_IO_ASYNC) {
+ if (tf->pfnHseIO) {
+ if (rv == APR_SUCCESS) {
+ tf->pfnHseIO(cid->ecb, tf->pContext,
+ ERROR_SUCCESS, sent);
+ }
+ else {
+ tf->pfnHseIO(cid->ecb, tf->pContext,
+ ERROR_WRITE_FAULT, sent);
+ }
+ }
+ else if (cid->completion) {
+ if (rv == APR_SUCCESS) {
+ cid->completion(cid->ecb, cid->completion_arg,
+ sent, ERROR_SUCCESS);
+ }
+ else {
+ cid->completion(cid->ecb, cid->completion_arg,
+ sent, ERROR_WRITE_FAULT);
+ }
+ }
+ }
+ return (rv == APR_SUCCESS);
+ }
+
+ case HSE_REQ_REFRESH_ISAPI_ACL:
+ if (cid->dconf.log_unsupported)
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: ServerSupportFunction "
+ "HSE_REQ_REFRESH_ISAPI_ACL "
+ "is not supported: %s", r->filename);
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+
+ case HSE_REQ_IS_KEEP_CONN:
+ *((int *)buf_data) = (r->connection->keepalive == AP_CONN_KEEPALIVE);
+ return 1;
+
+ case HSE_REQ_ASYNC_READ_CLIENT:
+ {
+ apr_uint32_t read = 0;
+        int res = 0;
+ if (!cid->dconf.fake_async) {
+ if (cid->dconf.log_unsupported)
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: asynchronous I/O not supported: %s",
+ r->filename);
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+ }
+
+ if (r->remaining < *buf_size) {
+ *buf_size = (apr_size_t)r->remaining;
+ }
+
+ while (read < *buf_size &&
+ ((res = ap_get_client_block(r, (char*)buf_data + read,
+ *buf_size - read)) > 0)) {
+ read += res;
+ }
+
+ if ((*data_type & HSE_IO_ASYNC) && cid->completion) {
+ /* XXX: Many authors issue their next HSE_REQ_ASYNC_READ_CLIENT
+ * within the completion logic. An example is MS's own PSDK
+ * sample web/iis/extensions/io/ASyncRead. This potentially
+ * leads to stack exhaustion. To refactor, the notification
+ * logic needs to move to isapi_handler() - differentiating
+ * the cid->completed event with a new flag to indicate
+ * an async-notice versus the async request completed.
+ */
+ if (res >= 0) {
+ cid->completion(cid->ecb, cid->completion_arg,
+ read, ERROR_SUCCESS);
+ }
+ else {
+ cid->completion(cid->ecb, cid->completion_arg,
+ read, ERROR_READ_FAULT);
+ }
+ }
+ return (res >= 0);
+ }
+
+ case HSE_REQ_GET_IMPERSONATION_TOKEN: /* Added in ISAPI 4.0 */
+ if (cid->dconf.log_unsupported)
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: ServerSupportFunction "
+ "HSE_REQ_GET_IMPERSONATION_TOKEN "
+ "is not supported: %s", r->filename);
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+
+ case HSE_REQ_MAP_URL_TO_PATH_EX:
+ {
+ /* Map a URL to a filename */
+ HSE_URL_MAPEX_INFO *info = (HSE_URL_MAPEX_INFO*)data_type;
+ char* test_uri = apr_pstrndup(r->pool, (char *)buf_data, *buf_size);
+
+ subreq = ap_sub_req_lookup_uri(test_uri, r, NULL);
+ info->cchMatchingURL = strlen(test_uri);
+ info->cchMatchingPath = apr_cpystrn(info->lpszPath, subreq->filename,
+ sizeof(info->lpszPath)) - info->lpszPath;
+
+ /* Mapping started with assuming both strings matched.
+ * Now roll on the path_info as a mismatch and handle
+ * terminating slashes for directory matches.
+ */
+ if (subreq->path_info && *subreq->path_info) {
+ apr_cpystrn(info->lpszPath + info->cchMatchingPath,
+ subreq->path_info,
+ sizeof(info->lpszPath) - info->cchMatchingPath);
+ info->cchMatchingURL -= strlen(subreq->path_info);
+ if (subreq->finfo.filetype == APR_DIR
+ && info->cchMatchingPath < sizeof(info->lpszPath) - 1) {
+ /* roll forward over path_info's first slash */
+ ++info->cchMatchingPath;
+ ++info->cchMatchingURL;
+ }
+ }
+ else if (subreq->finfo.filetype == APR_DIR
+ && info->cchMatchingPath < sizeof(info->lpszPath) - 1) {
+ /* Add a trailing slash for directory */
+ info->lpszPath[info->cchMatchingPath++] = '/';
+ info->lpszPath[info->cchMatchingPath] = '\0';
+ }
+
+ /* If the matched isn't a file, roll match back to the prior slash */
+ if (subreq->finfo.filetype == APR_NOFILE) {
+ while (info->cchMatchingPath && info->cchMatchingURL) {
+ if (info->lpszPath[info->cchMatchingPath - 1] == '/')
+ break;
+ --info->cchMatchingPath;
+ --info->cchMatchingURL;
+ }
+ }
+
+ /* Paths returned with back slashes */
+ for (test_uri = info->lpszPath; *test_uri; ++test_uri)
+ if (*test_uri == '/')
+ *test_uri = '\\';
+
+ /* is a combination of:
+ * HSE_URL_FLAGS_READ 0x001 Allow read
+ * HSE_URL_FLAGS_WRITE 0x002 Allow write
+ * HSE_URL_FLAGS_EXECUTE 0x004 Allow execute
+ * HSE_URL_FLAGS_SSL 0x008 Require SSL
+ * HSE_URL_FLAGS_DONT_CACHE 0x010 Don't cache (VRoot only)
+ * HSE_URL_FLAGS_NEGO_CERT 0x020 Allow client SSL cert
+ * HSE_URL_FLAGS_REQUIRE_CERT 0x040 Require client SSL cert
+ * HSE_URL_FLAGS_MAP_CERT 0x080 Map client SSL cert to account
+ * HSE_URL_FLAGS_SSL128 0x100 Require 128-bit SSL cert
+ * HSE_URL_FLAGS_SCRIPT 0x200 Allow script execution
+ *
+         * XXX: As everywhere, EXEC flags could use some work...
+ * and this could go further with more flags, as desired.
+ */
+ info->dwFlags = (subreq->finfo.protection & APR_UREAD ? 0x001 : 0)
+ | (subreq->finfo.protection & APR_UWRITE ? 0x002 : 0)
+ | (subreq->finfo.protection & APR_UEXECUTE ? 0x204 : 0);
+ return 1;
+ }
+
+ case HSE_REQ_ABORTIVE_CLOSE:
+ if (cid->dconf.log_unsupported)
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: ServerSupportFunction HSE_REQ_ABORTIVE_CLOSE"
+ " is not supported: %s", r->filename);
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+
+ case HSE_REQ_GET_CERT_INFO_EX: /* Added in ISAPI 4.0 */
+ if (cid->dconf.log_unsupported)
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: ServerSupportFunction "
+ "HSE_REQ_GET_CERT_INFO_EX "
+ "is not supported: %s", r->filename);
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+
+ case HSE_REQ_SEND_RESPONSE_HEADER_EX: /* Added in ISAPI 4.0 */
+ {
+ HSE_SEND_HEADER_EX_INFO *shi = (HSE_SEND_HEADER_EX_INFO*)buf_data;
+
+ /* Ignore shi->fKeepConn - we don't want the advise
+ */
+ apr_ssize_t ate = send_response_header(cid, shi->pszStatus,
+ shi->pszHeader,
+ shi->cchStatus,
+ shi->cchHeader);
+ if (ate < 0) {
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+ }
+ else if ((apr_size_t)ate < shi->cchHeader) {
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ bb = apr_brigade_create(cid->r->pool, c->bucket_alloc);
+ b = apr_bucket_transient_create(shi->pszHeader + ate,
+ shi->cchHeader - ate,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ rv = ap_pass_brigade(cid->r->output_filters, bb);
+ cid->response_sent = 1;
+ if (rv != APR_SUCCESS)
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
+ "ISAPI: ServerSupport function "
+ "HSE_REQ_SEND_RESPONSE_HEADER_EX "
+ "ap_pass_brigade failed: %s", r->filename);
+ return (rv == APR_SUCCESS);
+ }
+ /* Deliberately hold off sending 'just the headers' to begin to
+ * accumulate the body and speed up the overall response, or at
+         * least wait for the end of the session.
+ */
+ return 1;
+ }
+
+ case HSE_REQ_CLOSE_CONNECTION: /* Added after ISAPI 4.0 */
+ if (cid->dconf.log_unsupported)
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: ServerSupportFunction "
+ "HSE_REQ_CLOSE_CONNECTION "
+ "is not supported: %s", r->filename);
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+
+ case HSE_REQ_IS_CONNECTED: /* Added after ISAPI 4.0 */
+ /* Returns True if client is connected c.f. MSKB Q188346
+ * assuming the identical return mechanism as HSE_REQ_IS_KEEP_CONN
+ */
+ *((int *)buf_data) = (r->connection->aborted == 0);
+ return 1;
+
+ case HSE_REQ_EXTENSION_TRIGGER: /* Added after ISAPI 4.0 */
+ /* Undocumented - defined by the Microsoft Jan '00 Platform SDK
+ */
+ if (cid->dconf.log_unsupported)
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: ServerSupportFunction "
+ "HSE_REQ_EXTENSION_TRIGGER "
+ "is not supported: %s", r->filename);
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+
+ default:
+ if (cid->dconf.log_unsupported)
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: ServerSupportFunction (%d) not supported: "
+ "%s", HSE_code, r->filename);
+ apr_set_os_error(APR_FROM_OS_ERROR(ERROR_INVALID_PARAMETER));
+ return 0;
+ }
+}
+
+/**********************************************************
+ *
+ * ISAPI Module request invocation section
+ *
+ **********************************************************/
+
+apr_status_t isapi_handler (request_rec *r)
+{
+ isapi_dir_conf *dconf;
+ apr_table_t *e;
+ apr_status_t rv;
+ isapi_loaded *isa;
+ isapi_cid *cid;
+ const char *val;
+ apr_uint32_t read;
+ int res;
+
+ if(strcmp(r->handler, "isapi-isa")
+ && strcmp(r->handler, "isapi-handler")) {
+ /* Hang on to the isapi-isa for compatibility with older docs
+ * (wtf did '-isa' mean in the first place?) but introduce
+ * a newer and clearer "isapi-handler" name.
+ */
+ return DECLINED;
+ }
+ dconf = ap_get_module_config(r->per_dir_config, &isapi_module);
+ e = r->subprocess_env;
+
+ /* Use similar restrictions as CGIs
+ *
+ * If this fails, it's pointless to load the isapi dll.
+ */
+ if (!(ap_allow_options(r) & OPT_EXECCGI)) {
+ return HTTP_FORBIDDEN;
+ }
+ if (r->finfo.filetype == APR_NOFILE) {
+ return HTTP_NOT_FOUND;
+ }
+ if (r->finfo.filetype != APR_REG) {
+ return HTTP_FORBIDDEN;
+ }
+ if ((r->used_path_info == AP_REQ_REJECT_PATH_INFO) &&
+ r->path_info && *r->path_info) {
+ /* default to accept */
+ return HTTP_NOT_FOUND;
+ }
+
+ if (isapi_lookup(r->pool, r->server, r, r->filename, &isa)
+ != APR_SUCCESS) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ /* Set up variables */
+ ap_add_common_vars(r);
+ ap_add_cgi_vars(r);
+ apr_table_setn(e, "UNMAPPED_REMOTE_USER", "REMOTE_USER");
+ if ((val = apr_table_get(e, "HTTPS")) && (strcmp(val, "on") == 0))
+ apr_table_setn(e, "SERVER_PORT_SECURE", "1");
+ else
+ apr_table_setn(e, "SERVER_PORT_SECURE", "0");
+ apr_table_setn(e, "URL", r->uri);
+
+ /* Set up connection structure and ecb,
+ * NULL or zero out most fields.
+ */
+ cid = apr_pcalloc(r->pool, sizeof(isapi_cid));
+
+ /* Fixup defaults for dconf */
+ cid->dconf.read_ahead_buflen = (dconf->read_ahead_buflen == ISAPI_UNDEF)
+ ? 49152 : dconf->read_ahead_buflen;
+ cid->dconf.log_unsupported = (dconf->log_unsupported == ISAPI_UNDEF)
+ ? 0 : dconf->log_unsupported;
+ cid->dconf.log_to_errlog = (dconf->log_to_errlog == ISAPI_UNDEF)
+ ? 0 : dconf->log_to_errlog;
+ cid->dconf.log_to_query = (dconf->log_to_query == ISAPI_UNDEF)
+ ? 1 : dconf->log_to_query;
+ cid->dconf.fake_async = (dconf->fake_async == ISAPI_UNDEF)
+ ? 0 : dconf->fake_async;
+
+ cid->ecb = apr_pcalloc(r->pool, sizeof(EXTENSION_CONTROL_BLOCK));
+ cid->ecb->ConnID = cid;
+ cid->isa = isa;
+ cid->r = r;
+ r->status = 0;
+
+ cid->ecb->cbSize = sizeof(EXTENSION_CONTROL_BLOCK);
+ cid->ecb->dwVersion = isa->report_version;
+ cid->ecb->dwHttpStatusCode = 0;
+ strcpy(cid->ecb->lpszLogData, "");
+ /* TODO: are copies really needed here?
+ */
+ cid->ecb->lpszMethod = (char*) r->method;
+ cid->ecb->lpszQueryString = (char*) apr_table_get(e, "QUERY_STRING");
+ cid->ecb->lpszPathInfo = (char*) apr_table_get(e, "PATH_INFO");
+ cid->ecb->lpszPathTranslated = (char*) apr_table_get(e, "PATH_TRANSLATED");
+ cid->ecb->lpszContentType = (char*) apr_table_get(e, "CONTENT_TYPE");
+
+ /* Set up the callbacks */
+ cid->ecb->GetServerVariable = GetServerVariable;
+ cid->ecb->WriteClient = WriteClient;
+ cid->ecb->ReadClient = ReadClient;
+ cid->ecb->ServerSupportFunction = ServerSupportFunction;
+
+ /* Set up client input */
+ res = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR);
+ if (res) {
+ return res;
+ }
+
+ if (ap_should_client_block(r)) {
+ /* Time to start reading the appropriate amount of data,
+ * and allow the administrator to tweak the number
+ */
+ if (r->remaining) {
+ cid->ecb->cbTotalBytes = (apr_size_t)r->remaining;
+ if (cid->ecb->cbTotalBytes > (apr_uint32_t)cid->dconf.read_ahead_buflen)
+ cid->ecb->cbAvailable = cid->dconf.read_ahead_buflen;
+ else
+ cid->ecb->cbAvailable = cid->ecb->cbTotalBytes;
+ }
+ else
+ {
+ cid->ecb->cbTotalBytes = 0xffffffff;
+ cid->ecb->cbAvailable = cid->dconf.read_ahead_buflen;
+ }
+
+ cid->ecb->lpbData = apr_pcalloc(r->pool, cid->ecb->cbAvailable + 1);
+
+ read = 0;
+ while (read < cid->ecb->cbAvailable &&
+ ((res = ap_get_client_block(r, (char*)cid->ecb->lpbData + read,
+ cid->ecb->cbAvailable - read)) > 0)) {
+ read += res;
+ }
+
+ if (res < 0) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /* Although it's not to spec, IIS seems to null-terminate
+ * its lpbData string. So we will too.
+ */
+ if (res == 0)
+ cid->ecb->cbAvailable = cid->ecb->cbTotalBytes = read;
+ else
+ cid->ecb->cbAvailable = read;
+ cid->ecb->lpbData[read] = '\0';
+ }
+ else {
+ cid->ecb->cbTotalBytes = 0;
+ cid->ecb->cbAvailable = 0;
+ cid->ecb->lpbData = NULL;
+ }
+
+ /* To emulate async behavior...
+ *
+ * We create a cid->completed mutex and lock on it so that the
+ * app can believe it is running async.
+ *
+ * This request completes upon a notification through
+ * ServerSupportFunction(HSE_REQ_DONE_WITH_SESSION), which
+ * unlocks this mutex. If the HttpExtensionProc() returns
+ * HSE_STATUS_PENDING, we will attempt to gain this lock again
+ * which may *only* happen once HSE_REQ_DONE_WITH_SESSION has
+ * unlocked the mutex.
+ */
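+ /* Sketch of the emulated flow: we lock 'completed' here, the extension
+  * returns HSE_STATUS_PENDING from HttpExtensionProc(), and when it later
+  * calls ServerSupportFunction(HSE_REQ_DONE_WITH_SESSION) the mutex is
+  * unlocked, letting the blocked lock attempt below proceed and the
+  * request complete.
+  */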
+ if (cid->dconf.fake_async) {
+ rv = apr_thread_mutex_create(&cid->completed,
+ APR_THREAD_MUTEX_UNNESTED,
+ r->pool);
+ if (cid->completed && (rv == APR_SUCCESS)) {
+ rv = apr_thread_mutex_lock(cid->completed);
+ }
+
+ if (!cid->completed || (rv != APR_SUCCESS)) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: Failed to create completion mutex");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+
+ /* All right... try and run the sucker */
+ rv = (*isa->HttpExtensionProc)(cid->ecb);
+
+ /* Check for a log message - and log it */
+ if (cid->ecb->lpszLogData && *cid->ecb->lpszLogData)
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
+ "ISAPI: %s: %s", r->filename, cid->ecb->lpszLogData);
+
+ switch(rv) {
+ case 0: /* Strange, but MS isapi accepts this as success */
+ case HSE_STATUS_SUCCESS:
+ case HSE_STATUS_SUCCESS_AND_KEEP_CONN:
+ /* Ignore the keepalive stuff; Apache handles it just fine without
+ * the ISAPI Handler's "advice".
+ * Per Microsoft: "In IIS versions 4.0 and later, the return
+ * values HSE_STATUS_SUCCESS and HSE_STATUS_SUCCESS_AND_KEEP_CONN
+ * are functionally identical: Keep-Alive connections are
+ * maintained, if supported by the client."
+ * ... so we had it right all along.
+ */
+ break;
+
+ case HSE_STATUS_PENDING:
+ /* emulating async behavior...
+ */
+ if (cid->completed) {
+ /* The completion mutex was locked prior to invoking
+ * HttpExtensionProc(). We regain the lock only when the
+ * extension calls ServerSupportFunction(HSE_REQ_DONE_WITH_SESSION)
+ * to release it; at that point we may finally destroy
+ * the request.
+ */
+ (void)apr_thread_mutex_lock(cid->completed);
+ break;
+ }
+ else if (cid->dconf.log_unsupported) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "ISAPI: asynch I/O result HSE_STATUS_PENDING "
+ "from HttpExtensionProc() is not supported: %s",
+ r->filename);
+ r->status = HTTP_INTERNAL_SERVER_ERROR;
+ }
+ break;
+
+ case HSE_STATUS_ERROR:
+ /* end response if we have yet to do so.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, apr_get_os_error(), r,
+ "ISAPI: HSE_STATUS_ERROR result from "
+ "HttpExtensionProc(): %s", r->filename);
+ r->status = HTTP_INTERNAL_SERVER_ERROR;
+ break;
+
+ default:
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, apr_get_os_error(), r,
+ "ISAPI: unrecognized result code %d "
+ "from HttpExtensionProc(): %s ",
+ rv, r->filename);
+ r->status = HTTP_INTERNAL_SERVER_ERROR;
+ break;
+ }
+
+ /* Flush the response now, including headers-only responses */
+ if (cid->headers_set || cid->response_sent) {
+ conn_rec *c = r->connection;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ apr_status_t rv;
+
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ rv = ap_pass_brigade(r->output_filters, bb);
+ cid->response_sent = 1;
+
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
+ "ISAPI: ap_pass_brigade failed to "
+ "complete the response: %s ", r->filename);
+ }
+
+ return OK; /* NOT r->status, even if it has changed. */
+ }
+
+ /* As the extension returned no error, and if we did not error out
+ * ourselves, trust dwHttpStatusCode to say something relevant.
+ */
+ if (!ap_is_HTTP_SERVER_ERROR(r->status) && cid->ecb->dwHttpStatusCode) {
+ r->status = cid->ecb->dwHttpStatusCode;
+ }
+
+ /* For all missing-response situations simply return the status,
+ * and let the core respond to the client.
+ */
+ return r->status;
+}
+
+/**********************************************************
+ *
+ * ISAPI Module Setup Hooks
+ *
+ **********************************************************/
+
+static int isapi_pre_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp)
+{
+ apr_status_t rv;
+
+ apr_pool_create_ex(&loaded.pool, pconf, NULL, NULL);
+ if (!loaded.pool) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, APR_EGENERAL, NULL,
+ "ISAPI: could not create the isapi cache pool");
+ return APR_EGENERAL;
+ }
+
+ loaded.hash = apr_hash_make(loaded.pool);
+ if (!loaded.hash) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL,
+ "ISAPI: Failed to create module cache");
+ return APR_EGENERAL;
+ }
+
+ rv = apr_thread_mutex_create(&loaded.lock, APR_THREAD_MUTEX_DEFAULT,
+ loaded.pool);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, NULL,
+ "ISAPI: Failed to create module cache lock");
+ return rv;
+ }
+ return OK;
+}
+
+static void isapi_hooks(apr_pool_t *cont)
+{
+ ap_hook_pre_config(isapi_pre_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_handler(isapi_handler, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA isapi_module = {
+ STANDARD20_MODULE_STUFF,
+ create_isapi_dir_config, /* create per-dir config */
+ merge_isapi_dir_configs, /* merge per-dir config */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ isapi_cmds, /* command apr_table_t */
+ isapi_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/win32/mod_isapi.dsp b/rubbos/app/httpd-2.0.64/modules/arch/win32/mod_isapi.dsp
new file mode 100644
index 00000000..08cbda21
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/win32/mod_isapi.dsp
@@ -0,0 +1,132 @@
+# Microsoft Developer Studio Project File - Name="mod_isapi" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_isapi - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_isapi.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_isapi.mak" CFG="mod_isapi - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_isapi - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_isapi - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_isapi - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../../include" /I "../../../srclib/apr/include" /I "../../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_isapi_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /out:"Release/mod_isapi.so" /base:@..\..\..\os\win32\BaseAddr.ref,mod_isapi.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_isapi.so" /base:@..\..\..\os\win32\BaseAddr.ref,mod_isapi.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_isapi - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../../include" /I "../../../srclib/apr/include" /I "../../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_isapi_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_isapi.so" /base:@..\..\..\os\win32\BaseAddr.ref,mod_isapi.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_isapi.so" /base:@..\..\..\os\win32\BaseAddr.ref,mod_isapi.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_isapi - Win32 Release"
+# Name "mod_isapi - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_isapi.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_isapi.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_isapi.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_isapi - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\..\build\win32\win32ver.awk
+
+".\mod_isapi.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../../build/win32/win32ver.awk mod_isapi.so "isapi_module for Apache" ../../../include/ap_release.h > .\mod_isapi.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_isapi - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\..\build\win32\win32ver.awk
+
+".\mod_isapi.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../../build/win32/win32ver.awk mod_isapi.so "isapi_module for Apache" ../../../include/ap_release.h > .\mod_isapi.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/win32/mod_isapi.h b/rubbos/app/httpd-2.0.64/modules/arch/win32/mod_isapi.h
new file mode 100644
index 00000000..33524bcb
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/win32/mod_isapi.h
@@ -0,0 +1,271 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file mod_isapi.h
+ * @brief ISAPI module extension to Apache
+ *
+ * @defgroup MOD_ISAPI mod_isapi
+ * @ingroup APACHE_MODS
+ * @{
+ */
+
+#ifndef MOD_ISAPI_H
+#define MOD_ISAPI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* The Version Information storage passed to a module on startup
+ * via the GetExtensionVersion() entry point.
+ */
+typedef struct HSE_VERSION_INFO {
+ apr_uint32_t dwExtensionVersion;
+ char lpszExtensionDesc[256];
+} HSE_VERSION_INFO;
+
+/* The startup entry point that must be exported by every ISAPI handler
+ */
+int APR_THREAD_FUNC GetExtensionVersion(HSE_VERSION_INFO *ver_info);
+typedef int (APR_THREAD_FUNC *PFN_GETEXTENSIONVERSION)(HSE_VERSION_INFO *ver_info);
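+
+/* Illustrative only (not part of this header): a minimal extension
+ * might implement the entry point roughly as
+ *
+ *   int APR_THREAD_FUNC GetExtensionVersion(HSE_VERSION_INFO *ver_info)
+ *   {
+ *       ver_info->dwExtensionVersion = (1 << 16);   -- major 1, minor 0
+ *       apr_cpystrn(ver_info->lpszExtensionDesc, "Example extension",
+ *                   sizeof(ver_info->lpszExtensionDesc));
+ *       return 1;
+ *   }
+ */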
+
+/* Our internal 'HCONN' representation, always opaque to the user.
+ */
+typedef struct isapi_cid isapi_cid;
+typedef struct isapi_cid *HCONN;
+
+/* Prototypes of the essential functions exposed by mod_isapi
+ * for the module to communicate with Apache.
+ */
+typedef int (APR_THREAD_FUNC
+ *PFN_GETSERVERVARIABLE)(HCONN cid,
+ char *variable_name,
+ void *buf_data,
+ apr_uint32_t *buf_size);
+typedef int (APR_THREAD_FUNC
+ *PFN_WRITECLIENT)(HCONN cid,
+ void *buf_data,
+ apr_uint32_t *buf_size,
+ apr_uint32_t flags);
+typedef int (APR_THREAD_FUNC
+ *PFN_READCLIENT)(HCONN cid,
+ void *buf_data,
+ apr_uint32_t *buf_size);
+typedef int (APR_THREAD_FUNC
+ *PFN_SERVERSUPPORTFUNCTION)(HCONN cid,
+ apr_uint32_t HSE_code,
+ void *buf_data,
+ apr_uint32_t *buf_size,
+ apr_uint32_t *flags);
+
+/* The ecb structure is passed on each invocation of the module
+ */
+typedef struct EXTENSION_CONTROL_BLOCK {
+ apr_uint32_t cbSize;
+ apr_uint32_t dwVersion;
+ HCONN ConnID;
+ apr_uint32_t dwHttpStatusCode;
+ char lpszLogData[80];
+ char *lpszMethod;
+ char *lpszQueryString;
+ char *lpszPathInfo;
+ char *lpszPathTranslated;
+ apr_uint32_t cbTotalBytes;
+ apr_uint32_t cbAvailable;
+ unsigned char *lpbData;
+ char *lpszContentType;
+
+ PFN_GETSERVERVARIABLE GetServerVariable;
+ PFN_WRITECLIENT WriteClient;
+ PFN_READCLIENT ReadClient;
+ PFN_SERVERSUPPORTFUNCTION ServerSupportFunction;
+} EXTENSION_CONTROL_BLOCK;
+
+/* Status/Headers structure to pass to HSE_SEND_HEADER_EX,
+ * an MS extension to ServerSupportFunction
+ */
+typedef struct HSE_SEND_HEADER_EX_INFO {
+ const char * pszStatus; /* HTTP status text, such as "200 OK" */
+ const char * pszHeader; /* HTTP header lines text, such as
+ * "Content-type: text/plain\r\n"
+ * "Content-Language: en\r\n"
+ * Note that (in spite of cchFoo lengths below)
+ * NULL characters will interfere in headers.
+ */
+ apr_uint32_t cchStatus; /* length of pszStatus text */
+ apr_uint32_t cchHeader; /* length of pszHeader text */
+ int fKeepConn; /* Ignored: used to set keep-alive status,
+ * but Apache follows the client's negotiated
+ * HTTP contract to decide.
+ */
+} HSE_SEND_HEADER_EX_INFO;
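+
+/* Hypothetical usage from an extension (values are illustrative):
+ *
+ *   HSE_SEND_HEADER_EX_INFO shei = { 0 };
+ *   shei.pszStatus = "200 OK";
+ *   shei.cchStatus = strlen(shei.pszStatus);
+ *   shei.pszHeader = "Content-Type: text/plain\r\n\r\n";
+ *   shei.cchHeader = strlen(shei.pszHeader);
+ *   ecb->ServerSupportFunction(ecb->ConnID, HSE_REQ_SEND_RESPONSE_HEADER_EX,
+ *                              &shei, NULL, NULL);
+ */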
+
+/* Our only 'supported' MS extended flag bit for TransmitFile,
+ * HSE_IO_SEND_HEADERS indicates that Status+Headers are present
+ * in the pszStatusCode member of the HSE_TF_INFO structure.
+ */
+#define HSE_IO_SEND_HEADERS 8
+
+/* The remaining flags are MS extended flag bits that bear little
+ * relation to Apache; the rules that the Apache server obeys follow
+ * its own design and HTTP protocol filter rules.
+ *
+ * We do not support async; however, we fake it. If HSE_IO_SYNC is
+ * not passed, and a completion context was defined, we will invoke the
+ * completion function immediately following the transfer, and then
+ * return to the caller. If HSE_IO_SYNC is passed, no call to the
+ * completion function is necessary.
+ */
+#define HSE_IO_SYNC 1
+#define HSE_IO_ASYNC 2
+#define HSE_IO_DISCONNECT_AFTER_SEND 4
+#define HSE_IO_NODELAY 4096
+
+/* The Completion function prototype. This callback may be fixed with
+ * the HSE_REQ_IO_COMPLETION ServerSupportFunction call, or overridden
+ * for the HSE_REQ_TRANSMIT_FILE call.
+ */
+typedef void (APR_THREAD_FUNC *PFN_HSE_IO_COMPLETION)
+ (EXTENSION_CONTROL_BLOCK *ecb,
+ void *ctxt,
+ apr_uint32_t cbIO,
+ apr_uint32_t dwError);
+
+/* TransmitFile structure to pass to HSE_REQ_TRANSMIT_FILE, an MS extension
+ */
+typedef struct HSE_TF_INFO {
+ PFN_HSE_IO_COMPLETION pfnHseIO; /* Overrides the default setting of
+ * HSE_REQ_IO_COMPLETION if not NULL
+ */
+ void *pContext;
+ apr_os_file_t hFile; /* HANDLE/fd to transmit */
+ const char *pszStatusCode; /* Ignored if HSE_IO_SEND_HEADERS is
+ * not set. Includes HTTP status text
+ * plus header text lines, such as
+ * "200 OK\r\n"
+ * "Content-type: text/plain\r\n"
+ */
+ apr_uint32_t BytesToWrite; /* 0 is write-all */
+ apr_uint32_t Offset; /* File Offset */
+ void *pHead; /* Prefix with *pHead body text */
+ apr_uint32_t HeadLength; /* Length of *pHead body text */
+ void *pTail; /* Prefix with *pTail body text */
+ apr_uint32_t TailLength; /* Length of *pTail body text */
+ apr_uint32_t dwFlags; /* bit flags described above */
+} HSE_TF_INFO;
+
+typedef struct HSE_URL_MAPEX_INFO {
+ char lpszPath[260];
+ apr_uint32_t dwFlags;
+ apr_uint32_t cchMatchingPath;
+ apr_uint32_t cchMatchingURL;
+ apr_uint32_t dwReserved1;
+ apr_uint32_t dwReserved2;
+} HSE_URL_MAPEX_INFO;
+
+/* Original ISAPI ServerSupportFunction() HSE_code methods */
+#define HSE_REQ_SEND_URL_REDIRECT_RESP 1
+#define HSE_REQ_SEND_URL 2
+#define HSE_REQ_SEND_RESPONSE_HEADER 3
+#define HSE_REQ_DONE_WITH_SESSION 4
+
+/* MS extended methods to the ISAPI ServerSupportFunction() HSE_code */
+#define HSE_REQ_MAP_URL_TO_PATH 1001 /* Emulated */
+#define HSE_REQ_GET_SSPI_INFO 1002 /* Not Supported */
+#define HSE_APPEND_LOG_PARAMETER 1003 /* Supported */
+#define HSE_REQ_IO_COMPLETION 1005 /* Emulated */
+#define HSE_REQ_TRANSMIT_FILE 1006 /* Async Emulated */
+#define HSE_REQ_REFRESH_ISAPI_ACL 1007 /* Not Supported */
+#define HSE_REQ_IS_KEEP_CONN 1008 /* Supported */
+#define HSE_REQ_ASYNC_READ_CLIENT 1010 /* Emulated */
+/* Added with ISAPI 4.0 */
+#define HSE_REQ_GET_IMPERSONATION_TOKEN 1011 /* Not Supported */
+#define HSE_REQ_MAP_URL_TO_PATH_EX 1012 /* Emulated */
+#define HSE_REQ_ABORTIVE_CLOSE 1014 /* Ignored */
+/* Added after ISAPI 4.0 in IIS 5.0 */
+#define HSE_REQ_GET_CERT_INFO_EX 1015 /* Not Supported */
+#define HSE_REQ_SEND_RESPONSE_HEADER_EX 1016 /* Supported (no nulls!) */
+#define HSE_REQ_CLOSE_CONNECTION 1017 /* Ignored */
+#define HSE_REQ_IS_CONNECTED 1018 /* Supported */
+#define HSE_REQ_EXTENSION_TRIGGER 1020 /* Not Supported */
+
+/* The request entry point that must be exported by every ISAPI handler
+ */
+apr_uint32_t APR_THREAD_FUNC HttpExtensionProc(EXTENSION_CONTROL_BLOCK *ecb);
+typedef apr_uint32_t (APR_THREAD_FUNC
+ *PFN_HTTPEXTENSIONPROC)(EXTENSION_CONTROL_BLOCK *ecb);
+
+/* Allowable return values from HttpExtensionProc (apparently 0 is also
+ * accepted by MS IIS, and we will respect it as Success.)
+ * If the HttpExtensionProc returns HSE_STATUS_PENDING, we will create
+ * a wait mutex and lock on it, until HSE_REQ_DONE_WITH_SESSION is called.
+ */
+#define HSE_STATUS_SUCCESS 1
+#define HSE_STATUS_SUCCESS_AND_KEEP_CONN 2 /* 1 vs 2 Ignored, we choose */
+#define HSE_STATUS_PENDING 3 /* Emulated (thread lock) */
+#define HSE_STATUS_ERROR 4
+
+/* Anticipated error code for common faults within mod_isapi itself
+ */
+#ifndef ERROR_INSUFFICIENT_BUFFER
+#define ERROR_INSUFFICIENT_BUFFER ENOBUFS
+#endif
+#ifndef ERROR_INVALID_INDEX
+#define ERROR_INVALID_INDEX EINVAL
+#endif
+#ifndef ERROR_INVALID_PARAMETER
+#define ERROR_INVALID_PARAMETER EINVAL
+#endif
+#ifndef ERROR_READ_FAULT
+#define ERROR_READ_FAULT EIO
+#endif
+#ifndef ERROR_WRITE_FAULT
+#define ERROR_WRITE_FAULT EIO
+#endif
+#ifndef ERROR_SUCCESS
+#define ERROR_SUCCESS 0
+#endif
+
+/* Valid flags passed with TerminateExtension()
+ */
+#define HSE_TERM_MUST_UNLOAD 1
+#define HSE_TERM_ADVISORY_UNLOAD 2
+
+/* The shutdown entry point optionally exported by an ISAPI handler, passed
+ * HSE_TERM_MUST_UNLOAD or HSE_TERM_ADVISORY_UNLOAD. The module may return 0
+ * if passed HSE_TERM_ADVISORY_UNLOAD, and the module will remain loaded.
+ * If the module returns 1 to HSE_TERM_ADVISORY_UNLOAD it is immediately
+ * unloaded. If the module is passed HSE_TERM_MUST_UNLOAD, its return value
+ * is ignored.
+ */
+int APR_THREAD_FUNC TerminateExtension(apr_uint32_t flags);
+typedef int (APR_THREAD_FUNC *PFN_TERMINATEEXTENSION)(apr_uint32_t flags);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !MOD_ISAPI_H */
+/** @} */
+
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/win32/mod_win32.c b/rubbos/app/httpd-2.0.64/modules/arch/win32/mod_win32.c
new file mode 100644
index 00000000..38b7b3b0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/win32/mod_win32.c
@@ -0,0 +1,553 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef WIN32
+
+#include "apr_strings.h"
+#include "apr_portable.h"
+#include "apr_buckets.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "http_log.h"
+#include "util_script.h"
+#include "mod_core.h"
+#include "mod_cgi.h"
+#include "apr_lib.h"
+#include "ap_regkey.h"
+
+extern OSVERSIONINFO osver; /* hiding in mpm_winnt.c */
+static int win_nt;
+
+/*
+ * CGI Script stuff for Win32...
+ */
+typedef enum { eFileTypeUNKNOWN, eFileTypeBIN, eFileTypeEXE16, eFileTypeEXE32,
+ eFileTypeSCRIPT } file_type_e;
+typedef enum { INTERPRETER_SOURCE_UNSET, INTERPRETER_SOURCE_REGISTRY_STRICT,
+ INTERPRETER_SOURCE_REGISTRY, INTERPRETER_SOURCE_SHEBANG
+ } interpreter_source_e;
+AP_DECLARE(file_type_e) ap_get_win32_interpreter(const request_rec *,
+ char **interpreter,
+ char **arguments);
+
+module AP_MODULE_DECLARE_DATA win32_module;
+
+typedef struct {
+ /* Where to find interpreter to run scripts */
+ interpreter_source_e script_interpreter_source;
+} win32_dir_conf;
+
+static void *create_win32_dir_config(apr_pool_t *p, char *dir)
+{
+ win32_dir_conf *conf;
+ conf = (win32_dir_conf*)apr_palloc(p, sizeof(win32_dir_conf));
+ conf->script_interpreter_source = INTERPRETER_SOURCE_UNSET;
+ return conf;
+}
+
+static void *merge_win32_dir_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ win32_dir_conf *new;
+ win32_dir_conf *base = (win32_dir_conf *) basev;
+ win32_dir_conf *add = (win32_dir_conf *) addv;
+
+ new = (win32_dir_conf *) apr_pcalloc(p, sizeof(win32_dir_conf));
+ new->script_interpreter_source = (add->script_interpreter_source
+ != INTERPRETER_SOURCE_UNSET)
+ ? add->script_interpreter_source
+ : base->script_interpreter_source;
+ return new;
+}
+
+static const char *set_interpreter_source(cmd_parms *cmd, void *dv,
+ char *arg)
+{
+ win32_dir_conf *d = (win32_dir_conf *)dv;
+ if (!strcasecmp(arg, "registry")) {
+ d->script_interpreter_source = INTERPRETER_SOURCE_REGISTRY;
+ }
+ else if (!strcasecmp(arg, "registry-strict")) {
+ d->script_interpreter_source = INTERPRETER_SOURCE_REGISTRY_STRICT;
+ }
+ else if (!strcasecmp(arg, "script")) {
+ d->script_interpreter_source = INTERPRETER_SOURCE_SHEBANG;
+ }
+ else {
+ return apr_pstrcat(cmd->temp_pool, "ScriptInterpreterSource \"", arg,
+ "\" must be \"registry\", \"registry-strict\" or "
+ "\"script\"", NULL);
+ }
+ return NULL;
+}
+
+/* XXX: prep_string should translate the string into unicode,
+ * such that it is compatible with whatever codepage the client
+ * will use to read characters 0x80-0xff. For the moment, use the
+ * unicode values 0x0080-0x00ff. This isn't trivial, since the code page
+ * varies between msdos and Windows applications.
+ * For subsystem 2 [GUI] the default is the system Ansi CP.
+ * For subsystem 3 [CLI] the default is the system OEM CP.
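+ *
+ * As currently implemented, each byte in 0x80-0xff is widened to the
+ * two-byte UTF-8 encoding of U+0080-U+00FF; e.g. 0xE9 becomes 0xC3 0xA9.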
+ */
+static void prep_string(const char ** str, apr_pool_t *p)
+{
+ const char *ch = *str;
+ char *ch2;
+ int widen = 0;
+
+ if (!ch) {
+ return;
+ }
+ while (*ch) {
+ if (*(ch++) & 0x80) {
+ ++widen;
+ }
+ }
+ if (!widen) {
+ return;
+ }
+ widen += (ch - *str) + 1;
+ ch = *str;
+ *str = ch2 = apr_palloc(p, widen);
+ while (*ch) {
+ if (*ch & 0x80) {
+ /* sign extension won't hurt us here */
+ *(ch2++) = 0xC0 | ((*ch >> 6) & 0x03);
+ *(ch2++) = 0x80 | (*(ch++) & 0x3f);
+ }
+ else {
+ *(ch2++) = *(ch++);
+ }
+ }
+ *(ch2++) = '\0';
+}
+
+/* Somewhat more exciting ... figure out where the registry has stashed the
+ * ExecCGI or Open command - it may be nested one level deep (or more???)
+ */
+static char* get_interpreter_from_win32_registry(apr_pool_t *p,
+ const char* ext,
+ int strict)
+{
+ apr_status_t rv;
+ ap_regkey_t *name_key = NULL;
+ ap_regkey_t *type_key;
+ ap_regkey_t *key;
+ char execcgi_path[] = "SHELL\\EXECCGI\\COMMAND";
+ char execopen_path[] = "SHELL\\OPEN\\COMMAND";
+ char *type_name;
+ char *buffer;
+
+ if (!ext) {
+ return NULL;
+ }
+ /*
+ * Future optimization:
+ * When the registry is successfully searched, store the strings for
+ * interpreter and arguments in an ext hash to speed up subsequent look-ups
+ */
+
+ /* Open the key associated with the script filetype extension */
+ rv = ap_regkey_open(&type_key, AP_REGKEY_CLASSES_ROOT, ext, APR_READ, p);
+
+ if (rv != APR_SUCCESS) {
+ return NULL;
+ }
+
+ /* Retrieve the file type name associated with this extension */
+ rv = ap_regkey_value_get(&type_name, type_key, "", p);
+
+ if (rv == APR_SUCCESS && type_name[0]) {
+ /* Open the key associated with that file type name */
+ rv = ap_regkey_open(&name_key, AP_REGKEY_CLASSES_ROOT, type_name,
+ APR_READ, p);
+ }
+
+ /* Open the key for the script command path by:
+ *
+ * 1) the 'named' filetype key for ExecCGI/Command
+ * 2) the extension's type key for ExecCGI/Command
+ *
+ * and if the strict arg is false, then continue trying:
+ *
+ * 3) the 'named' filetype key for Open/Command
+ * 4) the extension's type key for Open/Command
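+ *
+ * For illustration only (names are hypothetical): a ".pl" lookup could
+ * read HKEY_CLASSES_ROOT\.pl, find a type name such as "PerlScript",
+ * and then fetch the interpreter command line from
+ * HKEY_CLASSES_ROOT\PerlScript\SHELL\EXECCGI\COMMAND.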
+ */
+
+ if (name_key) {
+ if ((rv = ap_regkey_open(&key, name_key, execcgi_path, APR_READ, p))
+ == APR_SUCCESS) {
+ rv = ap_regkey_value_get(&buffer, key, "", p);
+ ap_regkey_close(name_key);
+ }
+ }
+
+ if (!name_key || (rv != APR_SUCCESS)) {
+ if ((rv = ap_regkey_open(&key, type_key, execcgi_path, APR_READ, p))
+ == APR_SUCCESS) {
+ rv = ap_regkey_value_get(&buffer, key, "", p);
+ ap_regkey_close(type_key);
+ }
+ }
+
+ if (!strict && name_key && (rv != APR_SUCCESS)) {
+ if ((rv = ap_regkey_open(&key, name_key, execopen_path, APR_READ, p))
+ == APR_SUCCESS) {
+ rv = ap_regkey_value_get(&buffer, key, "", p);
+ ap_regkey_close(name_key);
+ }
+ }
+
+ if (!strict && (rv != APR_SUCCESS)) {
+ if ((rv = ap_regkey_open(&key, type_key, execopen_path, APR_READ, p))
+ == APR_SUCCESS) {
+ rv = ap_regkey_value_get(&buffer, key, "", p);
+ ap_regkey_close(type_key);
+ }
+ }
+
+ if (name_key) {
+ ap_regkey_close(name_key);
+ }
+
+ ap_regkey_close(type_key);
+
+ if (rv != APR_SUCCESS || !buffer[0]) {
+ return NULL;
+ }
+
+ return buffer;
+}
+
+
+static apr_array_header_t *split_argv(apr_pool_t *p, const char *interp,
+ const char *cgiprg, const char *cgiargs)
+{
+ apr_array_header_t *args = apr_array_make(p, 8, sizeof(char*));
+ char *d = apr_palloc(p, strlen(interp)+1);
+ const char *ch = interp;
+ const char **arg;
+ int prgtaken = 0;
+ int argtaken = 0;
+ int inquo;
+ int sl;
+
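+ /* Illustration (the registry value is hypothetical): an interpreter
+  * template such as
+  *     C:\Perl\bin\perl.exe "%1" %*
+  * expands to argv[0] = C:\Perl\bin\perl.exe, argv[1] = the script path
+  * (with '/' flipped to '\' for the %1 form), followed by one argv entry
+  * per '+'-separated, unescaped query argument.
+  */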
+ while (*ch) {
+ /* Skip on through Deep Space */
+ if (apr_isspace(*ch)) {
+ ++ch; continue;
+ }
+ /* One Arg */
+ if (((*ch == '$') || (*ch == '%')) && (*(ch + 1) == '*')) {
+ const char *cgiarg = cgiargs;
+ argtaken = 1;
+ for (;;) {
+ char *w = ap_getword_nulls(p, &cgiarg, '+');
+ if (!*w) {
+ break;
+ }
+ ap_unescape_url(w);
+ if (win_nt) {
+ prep_string(&w, p);
+ }
+ arg = (const char**)apr_array_push(args);
+ *arg = ap_escape_shell_cmd(p, w);
+ }
+ ch += 2;
+ continue;
+ }
+ if (((*ch == '$') || (*ch == '%')) && (*(ch + 1) == '1')) {
+ /* Todo: Make short name!!! */
+ prgtaken = 1;
+ arg = (const char**)apr_array_push(args);
+ if (*ch == '%') {
+ char *repl = apr_pstrdup(p, cgiprg);
+ *arg = repl;
+ while ((repl = strchr(repl, '/'))) {
+ *repl++ = '\\';
+ }
+ }
+ else {
+ *arg = cgiprg;
+ }
+ ch += 2;
+ continue;
+ }
+ if ((*ch == '\"') && ((*(ch + 1) == '$')
+ || (*(ch + 1) == '%')) && (*(ch + 2) == '1')
+ && (*(ch + 3) == '\"')) {
+ prgtaken = 1;
+ arg = (const char**)apr_array_push(args);
+ if (*(ch + 1) == '%') {
+ char *repl = apr_pstrdup(p, cgiprg);
+ *arg = repl;
+ while ((repl = strchr(repl, '/'))) {
+ *repl++ = '\\';
+ }
+ }
+ else {
+ *arg = cgiprg;
+ }
+ ch += 4;
+ continue;
+ }
+ arg = (const char**)apr_array_push(args);
+ *arg = d;
+ inquo = 0;
+ while (*ch) {
+ if (apr_isspace(*ch) && !inquo) {
+ ++ch; break;
+ }
+ /* Get 'em backslashes */
+ for (sl = 0; *ch == '\\'; ++sl) {
+ *d++ = *ch++;
+ }
+ if (sl & 1) {
+ /* last unmatched '\' + '"' sequence is a '"' */
+ if (*ch == '\"') {
+ *(d - 1) = *ch++;
+ }
+ continue;
+ }
+ if (*ch == '\"') {
+ /* '""' sequence within quotes is a '"' */
+ if (*++ch == '\"' && inquo) {
+ *d++ = *ch++; continue;
+ }
+ /* Flip quote state */
+ inquo = !inquo;
+ if (apr_isspace(*ch) && !inquo) {
+ ++ch; break;
+ }
+ /* All other '"'s are Munched */
+ continue;
+ }
+ /* Anything else is, well, something else */
+ *d++ = *ch++;
+ }
+ /* Term that arg, already pushed on args */
+ *d++ = '\0';
+ }
+
+ if (!prgtaken) {
+ arg = (const char**)apr_array_push(args);
+ *arg = cgiprg;
+ }
+
+ if (!argtaken) {
+ const char *cgiarg = cgiargs;
+ for (;;) {
+ char *w = ap_getword_nulls(p, &cgiarg, '+');
+ if (!*w) {
+ break;
+ }
+ ap_unescape_url(w);
+ if (win_nt) {
+ prep_string(&w, p);
+ }
+ arg = (const char**)apr_array_push(args);
+ *arg = ap_escape_shell_cmd(p, w);
+ }
+ }
+
+ arg = (const char**)apr_array_push(args);
+ *arg = NULL;
+
+ return args;
+}
+
+
+static apr_status_t ap_cgi_build_command(const char **cmd, const char ***argv,
+ request_rec *r, apr_pool_t *p,
+ cgi_exec_info_t *e_info)
+{
+ const apr_array_header_t *elts_arr = apr_table_elts(r->subprocess_env);
+ const apr_table_entry_t *elts = (apr_table_entry_t *) elts_arr->elts;
+ const char *ext = NULL;
+ const char *interpreter = NULL;
+ win32_dir_conf *d;
+ apr_file_t *fh;
+ const char *args = "";
+ int i;
+
+ d = (win32_dir_conf *)ap_get_module_config(r->per_dir_config,
+ &win32_module);
+
+ if (e_info->cmd_type) {
+ /* We have to consider that the client gets any QUERY_ARGS
+ * without any charset interpretation, use prep_string to
+ * create a string of the literal QUERY_ARGS bytes.
+ */
+ *cmd = r->filename;
+ if (r->args && r->args[0] && !ap_strchr_c(r->args, '=')) {
+ args = r->args;
+ }
+ }
+ /* Handle the complete file name, we DON'T want to follow suexec, since
+ * an unrooted command is as predictable as shooting craps in Win32.
+ * Notice that unlike most mime extension parsing, we have to use the
+ * win32 parsing here, therefore the final extension is the only one
+ * we will consider.
+ */
+ ext = strrchr(apr_filename_of_pathname(*cmd), '.');
+
+ /* If the file has an extension and it is not .com and not .exe and
+ * we've been instructed to search the registry, then do so.
+ * Let apr_proc_create do all of the .bat/.cmd dirty work.
+ */
+ if (ext && (!strcasecmp(ext,".exe") || !strcasecmp(ext,".com")
+ || !strcasecmp(ext,".bat") || !strcasecmp(ext,".cmd"))) {
+ interpreter = "";
+ }
+ if (!interpreter && ext
+ && (d->script_interpreter_source
+ == INTERPRETER_SOURCE_REGISTRY
+ || d->script_interpreter_source
+ == INTERPRETER_SOURCE_REGISTRY_STRICT)) {
+ /* Check the registry */
+ int strict = (d->script_interpreter_source
+ == INTERPRETER_SOURCE_REGISTRY_STRICT);
+ interpreter = get_interpreter_from_win32_registry(r->pool, ext,
+ strict);
+ if (interpreter && e_info->cmd_type != APR_SHELLCMD) {
+ e_info->cmd_type = APR_PROGRAM_PATH;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ strict ? "No ExecCGI verb found for files of type '%s'."
+ : "No ExecCGI or Open verb found for files of type '%s'.",
+ ext);
+ }
+ }
+ if (!interpreter) {
+ apr_status_t rv;
+ char buffer[1024];
+ apr_size_t bytes = sizeof(buffer);
+ int i;
+
+ /* Need to peek into the file to figure out what it really is...
+ * ### ought to go back and build a cache for this one of these days.
+ */
+ if (((rv = apr_file_open(&fh, *cmd, APR_READ | APR_BUFFERED,
+ APR_OS_DEFAULT, r->pool)) != APR_SUCCESS)
+ || ((rv = apr_file_read(fh, buffer, &bytes)) != APR_SUCCESS)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "Failed to read cgi file %s for testing", *cmd);
+ return rv;
+ }
+ apr_file_close(fh);
+
+ /* Script or executable, that is the question... */
+ if ((buffer[0] == '#') && (buffer[1] == '!')) {
+ /* Assuming file is a script since it starts with a shebang */
+ for (i = 2; i < sizeof(buffer); i++) {
+ if ((buffer[i] == '\r') || (buffer[i] == '\n')) {
+ buffer[i] = '\0';
+ break;
+ }
+ }
+ if (i < sizeof(buffer)) {
+ interpreter = buffer + 2;
+ while (apr_isspace(*interpreter)) {
+ ++interpreter;
+ }
+ if (e_info->cmd_type != APR_SHELLCMD) {
+ e_info->cmd_type = APR_PROGRAM_PATH;
+ }
+ }
+ }
+ else {
+ /* Not a script, is it an executable? */
+ IMAGE_DOS_HEADER *hdr = (IMAGE_DOS_HEADER*)buffer;
+ if ((bytes >= sizeof(IMAGE_DOS_HEADER))
+ && (hdr->e_magic == IMAGE_DOS_SIGNATURE)) {
+ if (hdr->e_lfarlc < 0x40) {
+ /* Ought to invoke this 16 bit exe by a stub, (cmd /c?) */
+ interpreter = "";
+ }
+ else {
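+ /* e_lfarlc >= 0x40 implies a new-style (NE/PE) header follows;
+  * treat it as a directly executable Windows binary.
+  */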
+ interpreter = "";
+ }
+ }
+ }
+ }
+ if (!interpreter) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "%s is not executable; ensure interpreted scripts have "
+ "\"#!\" first line", *cmd);
+ return APR_EBADF;
+ }
+
+ *argv = (const char **)(split_argv(p, interpreter, *cmd,
+ args)->elts);
+ *cmd = (*argv)[0];
+
+ e_info->detached = 1;
+
+ /* XXX: Must fix r->subprocess_env to follow utf-8 conventions from
+ * the client's octets so that win32 apr_proc_create is happy.
+ * The -best- way is to determine if the .exe is unicode aware
+ * (using 0x0080-0x00ff) or is linked as a command or windows
+ * application (following the OEM or Ansi code page in effect.)
+ */
+ for (i = 0; i < elts_arr->nelts; ++i) {
+ if (win_nt && elts[i].key && *elts[i].key
+ && (strncmp(elts[i].key, "HTTP_", 5) == 0
+ || strncmp(elts[i].key, "SERVER_", 7) == 0
+ || strncmp(elts[i].key, "REQUEST_", 8) == 0
+ || strcmp(elts[i].key, "QUERY_STRING") == 0
+ || strcmp(elts[i].key, "PATH_INFO") == 0
+ || strcmp(elts[i].key, "PATH_TRANSLATED") == 0)) {
+ prep_string((const char**) &elts[i].val, r->pool);
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static int win32_pre_config(apr_pool_t *pconf_, apr_pool_t *plog, apr_pool_t *ptemp)
+{
+ win_nt = (osver.dwPlatformId != VER_PLATFORM_WIN32_WINDOWS);
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ APR_REGISTER_OPTIONAL_FN(ap_cgi_build_command);
+ ap_hook_pre_config(win32_pre_config, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+static const command_rec win32_cmds[] = {
+AP_INIT_TAKE1("ScriptInterpreterSource", set_interpreter_source, NULL,
+ OR_FILEINFO,
+ "Where to find interpreter to run Win32 scripts "
+ "(Registry or script shebang line)"),
+{ NULL }
+};
+
+module AP_MODULE_DECLARE_DATA win32_module = {
+ STANDARD20_MODULE_STUFF,
+ create_win32_dir_config, /* create per-dir config */
+ merge_win32_dir_configs, /* merge per-dir config */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ win32_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
+
+#endif /* defined WIN32 */
diff --git a/rubbos/app/httpd-2.0.64/modules/arch/win32/modules.mk b/rubbos/app/httpd-2.0.64/modules/arch/win32/modules.mk
new file mode 100644
index 00000000..ceb52a1b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/arch/win32/modules.mk
@@ -0,0 +1,3 @@
+DISTCLEAN_TARGETS = modules.mk
+static =
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/cache/.deps b/rubbos/app/httpd-2.0.64/modules/cache/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/cache/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/cache/.indent.pro b/rubbos/app/httpd-2.0.64/modules/cache/.indent.pro
new file mode 100644
index 00000000..a9fbe9f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/cache/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/rubbos/app/httpd-2.0.64/modules/cache/Makefile b/rubbos/app/httpd-2.0.64/modules/cache/Makefile
new file mode 100644
index 00000000..8baae947
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/cache/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/cache
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/cache
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/cache
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/cache/Makefile.in b/rubbos/app/httpd-2.0.64/modules/cache/Makefile.in
new file mode 100644
index 00000000..167b343d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/cache/Makefile.in
@@ -0,0 +1,3 @@
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/cache/config.m4 b/rubbos/app/httpd-2.0.64/modules/cache/config.m4
new file mode 100644
index 00000000..9eabf541
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/cache/config.m4
@@ -0,0 +1,11 @@
+dnl modules enabled in this directory by default
+
+dnl APACHE_MODULE(name, helptext[, objects[, structname[, default[, config]]]])
+
+APACHE_MODPATH_INIT(cache)
+
+APACHE_MODULE(file_cache, File cache, , , no)
+
+APR_ADDTO(LT_LDFLAGS,-export-dynamic)
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/cache/mod_file_cache.c b/rubbos/app/httpd-2.0.64/modules/cache/mod_file_cache.c
new file mode 100644
index 00000000..947b8f2c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/cache/mod_file_cache.c
@@ -0,0 +1,416 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Author: mod_file_cache by Bill Stoddard <stoddard apache.org>
+ * Based on mod_mmap_static by Dean Gaudet <dgaudet arctic.org>
+ *
+ * v0.01: initial implementation
+ */
+
+/*
+ Documentation:
+
+ Some sites have a set of static files that are really busy, and
+ change infrequently (or even on a regular schedule). Save time
+ by caching open handles to these files. This module, unlike
+ mod_mmap_static, caches open file handles, not file content.
+ On systems (like Windows) with heavy system call overhead and
+ that have an efficient sendfile implementation, caching file handles
+ offers several advantages over caching content. First, the file system
+ can manage the memory, allowing infrequently hit cached files to
+ be paged out. Second, since caching open handles does not consume
+ significant resources, it will be possible to enable an AutoLoadCache
+ feature where static files are dynamically loaded in the cache
+ as the server runs. On systems that have file change notification,
+ this module can be enhanced to automatically garbage collect
+ cached files that change on disk.
+
+ This module should work on Unix systems that have sendfile. Place
+ cachefile directives into your configuration to direct files to
+ be cached.
+
+ cachefile /path/to/file1
+ cachefile /path/to/file2
+ ...
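+
+  The companion mmapfile directive (registered in the command table at
+  the end of this file) caches the file *content* via mmap instead of
+  the open handle:
+
+  mmapfile /path/to/file3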
+
+ These files are only cached when the server is restarted, so if you
+ change the list, or if the files are changed, then you'll need to
+ restart the server.
+
+ To reiterate that point: if the files are modified *in place*
+ without restarting the server you may end up serving requests that
+ are completely bogus. You should update files by unlinking the old
+ copy and putting a new copy in place.
+
+ There's no such thing as inheriting these files across vhosts or
+ whatever... place the directives in the main server only.
+
+ Known problems:
+
+ Don't use Alias or RewriteRule to move these files around... unless
+ you feel like paying for an extra stat() on each request. This is
+ a deficiency in the Apache API that will hopefully be solved some day.
+ The file will be served out of the file handle cache, but there will be
+ an extra stat() that's a waste.
+*/
+
+#include "apr.h"
+
+#if !(APR_HAS_SENDFILE || APR_HAS_MMAP)
+#error mod_file_cache only works on systems with APR_HAS_SENDFILE or APR_HAS_MMAP
+#endif
+
+#include "apr_mmap.h"
+#include "apr_strings.h"
+#include "apr_hash.h"
+#include "apr_buckets.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#if APR_HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#define CORE_PRIVATE
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "http_core.h"
+
+module AP_MODULE_DECLARE_DATA file_cache_module;
+
+typedef struct {
+#if APR_HAS_SENDFILE
+ apr_file_t *file;
+#endif
+ const char *filename;
+ apr_finfo_t finfo;
+ int is_mmapped;
+#if APR_HAS_MMAP
+ apr_mmap_t *mm;
+#endif
+ char mtimestr[APR_RFC822_DATE_LEN];
+ char sizestr[21]; /* big enough to hold any 64-bit file size + null */
+} a_file;
+
+typedef struct {
+ apr_hash_t *fileht;
+} a_server_config;
+
+
+static void *create_server_config(apr_pool_t *p, server_rec *s)
+{
+ a_server_config *sconf = apr_palloc(p, sizeof(*sconf));
+
+ sconf->fileht = apr_hash_make(p);
+ return sconf;
+}
+
+static void cache_the_file(cmd_parms *cmd, const char *filename, int mmap)
+{
+ a_server_config *sconf;
+ a_file *new_file;
+ a_file tmp;
+ apr_file_t *fd = NULL;
+ apr_status_t rc;
+ const char *fspec;
+
+ fspec = ap_server_root_relative(cmd->pool, filename);
+ if (!fspec) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, APR_EBADPATH, cmd->server,
+ "mod_file_cache: invalid file path "
+ "%s, skipping", filename);
+ return;
+ }
+ if ((rc = apr_stat(&tmp.finfo, fspec, APR_FINFO_MIN,
+ cmd->temp_pool)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, rc, cmd->server,
+ "mod_file_cache: unable to stat(%s), skipping", fspec);
+ return;
+ }
+ if (tmp.finfo.filetype != APR_REG) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server,
+ "mod_file_cache: %s isn't a regular file, skipping", fspec);
+ return;
+ }
+ if (tmp.finfo.size > AP_MAX_SENDFILE) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server,
+ "mod_file_cache: %s is too large to cache, skipping", fspec);
+ return;
+ }
+
+ rc = apr_file_open(&fd, fspec, APR_READ | APR_BINARY | APR_XTHREAD,
+ APR_OS_DEFAULT, cmd->pool);
+ if (rc != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, rc, cmd->server,
+ "mod_file_cache: unable to open(%s, O_RDONLY), skipping", fspec);
+ return;
+ }
+ apr_file_inherit_set(fd);
+
+ /* WooHoo, we have a file to put in the cache */
+ new_file = apr_pcalloc(cmd->pool, sizeof(a_file));
+ new_file->finfo = tmp.finfo;
+
+#if APR_HAS_MMAP
+ if (mmap) {
+ /* MMAPFile directive. MMAP'ing the file
+ * XXX: APR_HAS_LARGE_FILES issue; need to reject this request if
+ * size is greater than MAX(apr_size_t) (perhaps greater than 1M?).
+ */
+ if ((rc = apr_mmap_create(&new_file->mm, fd, 0,
+ (apr_size_t)new_file->finfo.size,
+ APR_MMAP_READ, cmd->pool)) != APR_SUCCESS) {
+ apr_file_close(fd);
+ ap_log_error(APLOG_MARK, APLOG_WARNING, rc, cmd->server,
+ "mod_file_cache: unable to mmap %s, skipping", filename);
+ return;
+ }
+ apr_file_close(fd);
+ new_file->is_mmapped = TRUE;
+ }
+#endif
+#if APR_HAS_SENDFILE
+ if (!mmap) {
+ /* CacheFile directive. Caching the file handle */
+ new_file->is_mmapped = FALSE;
+ new_file->file = fd;
+ }
+#endif
+
+ new_file->filename = fspec;
+ apr_rfc822_date(new_file->mtimestr, new_file->finfo.mtime);
+ apr_snprintf(new_file->sizestr, sizeof new_file->sizestr, "%" APR_OFF_T_FMT, new_file->finfo.size);
+
+ sconf = ap_get_module_config(cmd->server->module_config, &file_cache_module);
+ apr_hash_set(sconf->fileht, new_file->filename, strlen(new_file->filename), new_file);
+}
+
+static const char *cachefilehandle(cmd_parms *cmd, void *dummy, const char *filename)
+{
+#if APR_HAS_SENDFILE
+ cache_the_file(cmd, filename, 0);
+#else
+ /* Sendfile not supported by this OS */
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server,
+ "mod_file_cache: unable to cache file: %s. Sendfile is not supported on this OS", filename);
+#endif
+ return NULL;
+}
+static const char *cachefilemmap(cmd_parms *cmd, void *dummy, const char *filename)
+{
+#if APR_HAS_MMAP
+ cache_the_file(cmd, filename, 1);
+#else
+ /* MMAP not supported by this OS */
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server,
+ "mod_file_cache: unable to cache file: %s. MMAP is not supported by this OS", filename);
+#endif
+ return NULL;
+}
+
+static int file_cache_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ /* Hummm, anything to do here? */
+ return OK;
+}
+
+/* If it's one of ours, fill in r->finfo now to avoid extra stat()... this is a
+ * bit of a kludge, because we really want to run after core_translate runs.
+ */
+static int file_cache_xlat(request_rec *r)
+{
+ a_server_config *sconf;
+ a_file *match;
+ int res;
+
+ sconf = ap_get_module_config(r->server->module_config, &file_cache_module);
+
+ /* we only operate when at least one cachefile directive was used */
+ if (!apr_hash_count(sconf->fileht)) {
+ return DECLINED;
+ }
+
+ res = ap_core_translate(r);
+ if (res != OK || !r->filename) {
+ return res;
+ }
+
+ /* search the cache */
+ match = (a_file *) apr_hash_get(sconf->fileht, r->filename, APR_HASH_KEY_STRING);
+ if (match == NULL)
+ return DECLINED;
+
+ /* pass search results to handler */
+ ap_set_module_config(r->request_config, &file_cache_module, match);
+
+ /* shortcircuit the get_path_info() stat() calls and stuff */
+ r->finfo = match->finfo;
+ return OK;
+}
+
+static int mmap_handler(request_rec *r, a_file *file)
+{
+#if APR_HAS_MMAP
+ conn_rec *c = r->connection;
+ apr_bucket *b;
+ apr_mmap_t *mm;
+ apr_bucket_brigade *bb = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ apr_mmap_dup(&mm, file->mm, r->pool, 0);
+ b = apr_bucket_mmap_create(mm, 0, (apr_size_t)file->finfo.size,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+
+ if (ap_pass_brigade(r->output_filters, bb) != APR_SUCCESS)
+ return HTTP_INTERNAL_SERVER_ERROR;
+#endif
+ return OK;
+}
+
+static int sendfile_handler(request_rec *r, a_file *file)
+{
+#if APR_HAS_SENDFILE
+ conn_rec *c = r->connection;
+ apr_bucket *b;
+ apr_bucket_brigade *bb = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ b = apr_bucket_file_create(file->file, 0, (apr_size_t)file->finfo.size,
+ r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+
+ if (ap_pass_brigade(r->output_filters, bb) != APR_SUCCESS)
+ return HTTP_INTERNAL_SERVER_ERROR;
+#endif
+ return OK;
+}
+
+static int file_cache_handler(request_rec *r)
+{
+ a_file *match;
+ int errstatus;
+ int rc = OK;
+
+ /* XXX: not sure if this is right yet
+ * see comment in http_core.c:default_handler
+ */
+ if (ap_strcmp_match(r->handler, "*/*")) {
+ return DECLINED;
+ }
+
+ /* we don't handle anything but GET */
+ if (r->method_number != M_GET) return DECLINED;
+
+ /* did xlat phase find the file? */
+ match = ap_get_module_config(r->request_config, &file_cache_module);
+
+ if (match == NULL) {
+ return DECLINED;
+ }
+
+ /* note that we would handle GET on this resource */
+ r->allowed |= (AP_METHOD_BIT << M_GET);
+
+ /* This handler has no use for a request body (yet), but we still
+ * need to read and discard it if the client sent one.
+ */
+ if ((errstatus = ap_discard_request_body(r)) != OK)
+ return errstatus;
+
+ ap_update_mtime(r, match->finfo.mtime);
+
+ /* ap_set_last_modified() always converts the file mtime to a string
+ * which is slow. Accelerate the common case.
+ * ap_set_last_modified(r);
+ */
+ {
+ apr_time_t mod_time;
+ char *datestr;
+
+ mod_time = ap_rationalize_mtime(r, r->mtime);
+ if (mod_time == match->finfo.mtime)
+ datestr = match->mtimestr;
+ else {
+ datestr = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ apr_rfc822_date(datestr, mod_time);
+ }
+ apr_table_setn(r->headers_out, "Last-Modified", datestr);
+ }
+
+ ap_set_etag(r);
+ if ((errstatus = ap_meets_conditions(r)) != OK) {
+ return errstatus;
+ }
+
+ /* ap_set_content_length() always converts the same number and never
+ * returns an error. Accelerate it.
+ */
+ r->clength = match->finfo.size;
+ apr_table_setn(r->headers_out, "Content-Length", match->sizestr);
+
+ /* Call appropriate handler */
+ if (!r->header_only) {
+ if (match->is_mmapped == TRUE)
+ rc = mmap_handler(r, match);
+ else
+ rc = sendfile_handler(r, match);
+ }
+
+ return rc;
+}
+
+static command_rec file_cache_cmds[] =
+{
+AP_INIT_ITERATE("cachefile", cachefilehandle, NULL, RSRC_CONF,
+ "A space separated list of files to add to the file handle cache at config time"),
+AP_INIT_ITERATE("mmapfile", cachefilemmap, NULL, RSRC_CONF,
+ "A space separated list of files to mmap at config time"),
+ {NULL}
+};
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_handler(file_cache_handler, NULL, NULL, APR_HOOK_LAST);
+ ap_hook_post_config(file_cache_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_translate_name(file_cache_xlat, NULL, NULL, APR_HOOK_MIDDLE);
+ /* This trick doesn't work apparently because the translate hooks
+ are single shot. If the core_hook returns OK, then our hook is
+ not called.
+ ap_hook_translate_name(file_cache_xlat, aszPre, NULL, APR_HOOK_MIDDLE);
+ */
+
+}
+
+module AP_MODULE_DECLARE_DATA file_cache_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ create_server_config, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ file_cache_cmds, /* command handlers */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/cache/mod_file_cache.dsp b/rubbos/app/httpd-2.0.64/modules/cache/mod_file_cache.dsp
new file mode 100644
index 00000000..83d2a7b4
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/cache/mod_file_cache.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_file_cache" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_file_cache - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_file_cache.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_file_cache.mak" CFG="mod_file_cache - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_file_cache - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_file_cache - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_file_cache - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_file_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o /win32 "NUL"
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o /win32 "NUL"
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 /nologo /subsystem:windows /dll /out:"Release/mod_file_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_file_cache.so
+# ADD LINK32 /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_file_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_file_cache.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_file_cache - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_file_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o /win32 "NUL"
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o /win32 "NUL"
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_file_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_file_cache.so
+# ADD LINK32 /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_file_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_file_cache.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_file_cache - Win32 Release"
+# Name "mod_file_cache - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_file_cache.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_file_cache.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_file_cache - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_file_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_file_cache.so "file_cache_module for Apache" ../../include/ap_release.h > .\mod_file_cache.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_file_cache - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_file_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_file_cache.so "file_cache_module for Apache" ../../include/ap_release.h > .\mod_file_cache.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/cache/mod_file_cache.exp b/rubbos/app/httpd-2.0.64/modules/cache/mod_file_cache.exp
new file mode 100644
index 00000000..23b092a6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/cache/mod_file_cache.exp
@@ -0,0 +1 @@
+file_cache_module
diff --git a/rubbos/app/httpd-2.0.64/modules/cache/modules.mk b/rubbos/app/httpd-2.0.64/modules/cache/modules.mk
new file mode 100644
index 00000000..ceb52a1b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/cache/modules.mk
@@ -0,0 +1,3 @@
+DISTCLEAN_TARGETS = modules.mk
+static =
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/config5.m4 b/rubbos/app/httpd-2.0.64/modules/config5.m4
new file mode 100644
index 00000000..5e38c951
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/config5.m4
@@ -0,0 +1,56 @@
+AC_MSG_CHECKING(for extra modules)
+AC_ARG_WITH(module,
+ APACHE_HELP_STRING(--with-module=module-type:module-file,
+ Enable module-file in the modules/<module-type> directory.),
+ [
+ as_save_IFS="$IFS"; IFS=","
+ for mod in $withval
+ do
+ modtype=`echo $mod | sed -e's/\(.*\):.*/\1/'`
+ pkg=`echo $mod | sed -e's/.*:\(.*\)/\1/'`
+ modfilec=`echo $pkg | sed -e 's;^.*/;;'`
+ modfileo=`echo $pkg | sed -e 's;^.*/;;' -e 's;\.c$;.o;'`
+ modpath_current="modules/$modtype"
+ if test "x$mod" != "x$modpath_current/$modfilec"; then
+ if test ! -d "$modpath_current"; then
+ mkdir $modpath_current
+ echo 'include $(top_srcdir)/build/special.mk' > $modpath_current/Makefile.in
+ fi
+ cp $pkg $modpath_current/$modfilec
+ fi
+ module=`echo $pkg | sed -e 's;\(.*/\)*mod_\(.*\).c;\2;'`
+ objects="mod_$module.lo"
+ libname="mod_$module.la"
+ BUILTIN_LIBS="$BUILTIN_LIBS $modpath_current/$libname"
+ if test ! -s "$modpath_current/modules.mk"; then
+ cat >>$modpath_current/modules.mk<<EOF
+$libname: $objects
+ \$(MOD_LINK) $objects
+DISTCLEAN_TARGETS = modules.mk
+static = $libname
+shared =
+EOF
+ else
+ cat >>$modpath_current/modules.mk.tmp<<EOF
+$libname: $objects
+ \$(MOD_LINK) $objects
+EOF
+ cat $modpath_current/modules.mk >> $modpath_current/modules.mk.tmp
+ rm $modpath_current/modules.mk
+ mv $modpath_current/modules.mk.tmp $modpath_current/modules.mk
+ sed -e "s/\(static =.*\)/\1 $libname/" $modpath_current/modules.mk > $modpath_current/modules.mk.tmp
+ rm $modpath_current/modules.mk
+ mv $modpath_current/modules.mk.tmp $modpath_current/modules.mk
+ fi
+ MODLIST="$MODLIST $module"
+ EXTRA_MODLIST="$EXTRA_MODLIST $modtype:$modfilec"
+ MODULE_DIRS="$MODULE_DIRS $modtype"
+ APACHE_FAST_OUTPUT($modpath_current/Makefile)
+ done
+ if test ! -z "$EXTRA_MODLIST"; then
+ AC_MSG_RESULT(added:$EXTRA_MODLIST)
+ fi
+ IFS="$as_save_IFS"
+ ],
+ [ AC_MSG_RESULT(none)
+ ])
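+dnl Added usage sketch (not part of the original file): the value accepted
+dnl above is a comma-separated list of module-type:module-file pairs, e.g.
+dnl   ./configure --with-module=generators:/path/to/mod_example.c
+dnl (mod_example.c is a hypothetical source file). The file is copied into
+dnl modules/generators and a modules.mk entry is generated so the module is
+dnl linked into the server as a static (built-in) module.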
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/fs/.deps b/rubbos/app/httpd-2.0.64/modules/dav/fs/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/fs/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/fs/Makefile b/rubbos/app/httpd-2.0.64/modules/dav/fs/Makefile
new file mode 100644
index 00000000..11144e34
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/fs/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/dav/fs
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/dav/fs
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/dav/fs
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/fs/Makefile.in b/rubbos/app/httpd-2.0.64/modules/dav/fs/Makefile.in
new file mode 100644
index 00000000..7c5c149d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/fs/Makefile.in
@@ -0,0 +1,3 @@
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/fs/NWGNUmakefile b/rubbos/app/httpd-2.0.64/modules/dav/fs/NWGNUmakefile
new file mode 100644
index 00000000..1569be42
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/fs/NWGNUmakefile
@@ -0,0 +1,266 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(AP_WORK)/modules/dav/main \
+ $(AP_WORK)/server/mpm/NetWare \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = modDAVFS
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) DAV FileSystem Sub-Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = modDAVFS Thread
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/moddavfs.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_dav_fs.o \
+ $(OBJDIR)/dbm.o \
+ $(OBJDIR)/lock.o \
+ $(OBJDIR)/repos.o \
+ $(OBJDIR)/libprews.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Apache2 \
+ Libc \
+ mod_dav \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @$(APR)/aprlib.imp \
+ @httpd.imp \
+ @ws2nlm.imp \
+ @../main/dav.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ dav_fs_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\moddavfs.nlm $(INSTALL)\Apache2\modules
+#
+# Any specialized rules here
+#
+
+$(OBJDIR)/%.o: ../../arch/netware/%.c $(OBJDIR)\$(NLM_NAME)_cc.opt
+ @echo compiling $<
+ $(CC) $< -o=$(OBJDIR)\$(@F) @$(OBJDIR)\$(NLM_NAME)_cc.opt
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/fs/config6.m4 b/rubbos/app/httpd-2.0.64/modules/dav/fs/config6.m4
new file mode 100644
index 00000000..515111cd
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/fs/config6.m4
@@ -0,0 +1,23 @@
+dnl modules enabled in this directory by default
+
+APACHE_MODPATH_INIT(dav/fs)
+
+dav_fs_objects="mod_dav_fs.lo dbm.lo lock.lo repos.lo"
+
+if test "x$enable_dav" != "x"; then
+ dav_fs_enable=$enable_dav
+else
+ dav_fs_enable=$dav_enable
+fi
+
+case "$host" in
+ *os2*)
+ # OS/2 DLLs must resolve all symbols at build time
+ # and we need some from main DAV module
+ dav_fs_objects="$dav_fs_objects ../main/mod_dav.la"
+ ;;
+esac
+
+APACHE_MODULE(dav_fs, DAV provider for the filesystem, $dav_fs_objects, , $dav_fs_enable)
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/fs/dbm.c b/rubbos/app/httpd-2.0.64/modules/dav/fs/dbm.c
new file mode 100644
index 00000000..a772a75d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/fs/dbm.c
@@ -0,0 +1,753 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+** DAV extension module for Apache 2.0.*
+** - Database support using DBM-style databases,
+** part of the filesystem repository implementation
+*/
+
+/*
+** This implementation uses a SDBM database per file and directory to
+** record the properties. These databases are kept in a subdirectory (of
+** the directory in question or the directory that holds the file in
+** question) named by the macro DAV_FS_STATE_DIR (.DAV). The filename of the
+** database is equivalent to the target filename, and is
+** DAV_FS_STATE_FILE_FOR_DIR (.state_for_dir) for the directory itself.
+*/
+
+#include "apr_strings.h"
+#include "apr_file_io.h"
+
+#include "apr_dbm.h"
+
+#define APR_WANT_BYTEFUNC
+#include "apr_want.h" /* for ntohs and htons */
+
+#include "mod_dav.h"
+#include "repos.h"
+
+
+struct dav_db {
+ apr_pool_t *pool;
+ apr_dbm_t *file;
+
+ /* when used as a property database: */
+
+ int version; /* *minor* version of this db */
+
+ dav_buffer ns_table; /* table of namespace URIs */
+ short ns_count; /* number of entries in table */
+ int ns_table_dirty; /* ns_table was modified */
+ apr_hash_t *uri_index; /* map URIs to (1-based) table indices */
+
+ dav_buffer wb_key; /* work buffer for dav_gdbm_key */
+
+ apr_datum_t iter; /* iteration key */
+};
+
+/* -------------------------------------------------------------------------
+ *
+ * GENERIC DBM ACCESS
+ *
+ * For the most part, this just uses the APR DBM functions. They are wrapped
+ * a bit with some error handling (using the mod_dav error functions).
+ */
+
+void dav_dbm_get_statefiles(apr_pool_t *p, const char *fname,
+ const char **state1, const char **state2)
+{
+ if (fname == NULL)
+ fname = DAV_FS_STATE_FILE_FOR_DIR;
+
+ apr_dbm_get_usednames(p, fname, state1, state2);
+}
+
+static dav_error * dav_fs_dbm_error(dav_db *db, apr_pool_t *p,
+ apr_status_t status)
+{
+ int save_errno = errno;
+ int errcode;
+ const char *errstr;
+ dav_error *err;
+ char errbuf[200];
+
+ if (status == APR_SUCCESS)
+ return NULL;
+
+ p = db ? db->pool : p;
+
+ /* There might not be a <db> if we had problems creating it. */
+ if (db == NULL) {
+ errcode = 1;
+ errstr = "Could not open property database.";
+ }
+ else {
+ (void) apr_dbm_geterror(db->file, &errcode, errbuf, sizeof(errbuf));
+ errstr = apr_pstrdup(p, errbuf);
+ }
+
+ err = dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, errcode, errstr);
+ err->save_errno = save_errno;
+ return err;
+}
+
+/* ensure that our state subdirectory is present */
+/* ### does this belong here or in dav_fs_repos.c ?? */
+void dav_fs_ensure_state_dir(apr_pool_t * p, const char *dirname)
+{
+ const char *pathname = apr_pstrcat(p, dirname, "/" DAV_FS_STATE_DIR, NULL);
+
+ /* ### do we need to deal with the umask? */
+
+ /* just try to make it, ignoring any resulting errors */
+ (void) apr_dir_make(pathname, APR_OS_DEFAULT, p);
+}
+
+/* dav_dbm_open_direct: Opens a *dbm database specified by path.
+ * ro = boolean read-only flag.
+ */
+dav_error * dav_dbm_open_direct(apr_pool_t *p, const char *pathname, int ro,
+ dav_db **pdb)
+{
+ apr_status_t status;
+ apr_dbm_t *file;
+
+ *pdb = NULL;
+
+ if ((status = apr_dbm_open(&file, pathname,
+ ro ? APR_DBM_READONLY : APR_DBM_RWCREATE,
+ APR_OS_DEFAULT, p))
+ != APR_SUCCESS
+ && !ro) {
+ /* ### do something with 'status' */
+
+ /* we can't continue if we couldn't open the file
+ and we need to write */
+ return dav_fs_dbm_error(NULL, p, status);
+ }
+
+ /* may be NULL if we tried to open a non-existent db as read-only */
+ if (file != NULL) {
+ /* we have an open database... return it */
+ *pdb = apr_pcalloc(p, sizeof(**pdb));
+ (*pdb)->pool = p;
+ (*pdb)->file = file;
+ }
+
+ return NULL;
+}
+
+static dav_error * dav_dbm_open(apr_pool_t * p, const dav_resource *resource,
+ int ro, dav_db **pdb)
+{
+ const char *dirpath;
+ const char *fname;
+ const char *pathname;
+
+ /* Get directory and filename for resource */
+ /* ### should test this result value... */
+ (void) dav_fs_dir_file_name(resource, &dirpath, &fname);
+
+ /* If not opening read-only, ensure the state dir exists */
+ if (!ro) {
+ /* ### what are the perf implications of always checking this? */
+ dav_fs_ensure_state_dir(p, dirpath);
+ }
+
+ pathname = apr_pstrcat(p, dirpath, "/" DAV_FS_STATE_DIR "/",
+ fname ? fname : DAV_FS_STATE_FILE_FOR_DIR,
+ NULL);
+
+ /* ### readers cannot open while a writer has this open; we should
+ ### perform a few retries with random pauses. */
+
+ /* ### do we need to deal with the umask? */
+
+ return dav_dbm_open_direct(p, pathname, ro, pdb);
+}
+
+void dav_dbm_close(dav_db *db)
+{
+ apr_dbm_close(db->file);
+}
+
+dav_error * dav_dbm_fetch(dav_db *db, apr_datum_t key, apr_datum_t *pvalue)
+{
+ apr_status_t status = apr_dbm_fetch(db->file, key, pvalue);
+
+ return dav_fs_dbm_error(db, NULL, status);
+}
+
+dav_error * dav_dbm_store(dav_db *db, apr_datum_t key, apr_datum_t value)
+{
+ apr_status_t status = apr_dbm_store(db->file, key, value);
+
+ return dav_fs_dbm_error(db, NULL, status);
+}
+
+dav_error * dav_dbm_delete(dav_db *db, apr_datum_t key)
+{
+ apr_status_t status = apr_dbm_delete(db->file, key);
+
+ return dav_fs_dbm_error(db, NULL, status);
+}
+
+int dav_dbm_exists(dav_db *db, apr_datum_t key)
+{
+ return apr_dbm_exists(db->file, key);
+}
+
+static dav_error * dav_dbm_firstkey(dav_db *db, apr_datum_t *pkey)
+{
+ apr_status_t status = apr_dbm_firstkey(db->file, pkey);
+
+ return dav_fs_dbm_error(db, NULL, status);
+}
+
+static dav_error * dav_dbm_nextkey(dav_db *db, apr_datum_t *pkey)
+{
+ apr_status_t status = apr_dbm_nextkey(db->file, pkey);
+
+ return dav_fs_dbm_error(db, NULL, status);
+}
+
+void dav_dbm_freedatum(dav_db *db, apr_datum_t data)
+{
+ apr_dbm_freedatum(db->file, data);
+}
+
+/* -------------------------------------------------------------------------
+ *
+ * PROPERTY DATABASE FUNCTIONS
+ */
+
+
+#define DAV_GDBM_NS_KEY "METADATA"
+#define DAV_GDBM_NS_KEY_LEN 8
+
+typedef struct {
+ unsigned char major;
+#define DAV_DBVSN_MAJOR 4
+ /*
+ ** V4 -- 0.9.9 ..
+ ** Prior versions could have keys or values with invalid
+ ** namespace prefixes as a result of the xmlns="" form not
+ ** resetting the default namespace to be "no namespace". The
+ ** namespace would be set to "" which is invalid; it should
+ ** be set to "no namespace".
+ **
+ ** V3 -- 0.9.8
+ ** Prior versions could have values with invalid namespace
+ ** prefixes due to an incorrect mapping of input to propdb
+ ** namespace indices. Version bumped to obsolete the old
+ ** values.
+ **
+ ** V2 -- 0.9.7
+ ** This introduced the xml:lang value into the property value's
+ ** record in the propdb.
+ **
+ ** V1 -- .. 0.9.6
+ ** Initial version.
+ */
+
+
+ unsigned char minor;
+#define DAV_DBVSN_MINOR 0
+
+ short ns_count;
+
+} dav_propdb_metadata;
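+
+/*
+** Added note (not part of the original source): this fixed-size header sits
+** at the front of the METADATA record (and of db->ns_table.buf). ns_count is
+** stored in network byte order on disk (see the htons()/ntohs() calls below),
+** and the header is followed by ns_count NUL-terminated namespace URIs.
+*/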
+
+struct dav_deadprop_rollback {
+ apr_datum_t key;
+ apr_datum_t value;
+};
+
+struct dav_namespace_map {
+ int *ns_map;
+};
+
+/*
+** Internal function to build a key
+**
+** WARNING: returns a pointer to a "static" buffer holding the key. The
+** value must be copied or no longer used if this function is
+** called again.
+*/
+static apr_datum_t dav_build_key(dav_db *db, const dav_prop_name *name)
+{
+ char nsbuf[20];
+ apr_size_t l_ns, l_name = strlen(name->name);
+ apr_datum_t key = { 0 };
+
+ /*
+ * Convert namespace ID to a string. "no namespace" is an empty string,
+ * so the keys will have the form ":name". Otherwise, the keys will
+ * have the form "#:name".
+ */
+ if (*name->ns == '\0') {
+ nsbuf[0] = '\0';
+ l_ns = 0;
+ }
+ else {
+ int ns_id = (int)apr_hash_get(db->uri_index, name->ns,
+ APR_HASH_KEY_STRING);
+
+
+ if (ns_id == 0) {
+ /* the namespace was not found(!) */
+ return key; /* zeroed */
+ }
+
+ l_ns = sprintf(nsbuf, "%d", ns_id - 1);
+ }
+
+ /* assemble: #:name */
+ dav_set_bufsize(db->pool, &db->wb_key, l_ns + 1 + l_name + 1);
+ memcpy(db->wb_key.buf, nsbuf, l_ns);
+ db->wb_key.buf[l_ns] = ':';
+ memcpy(&db->wb_key.buf[l_ns + 1], name->name, l_name + 1);
+
+ /* build the database key */
+ key.dsize = l_ns + 1 + l_name + 1;
+ key.dptr = db->wb_key.buf;
+
+ return key;
+}
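+
+/*
+** Added illustration (not part of the original source): with a uri_index of
+** { "DAV:" => 1 }, a property whose ns is "DAV:" and whose name is
+** "getcontenttype" yields the key "0:getcontenttype" (the stored index is
+** ns_id - 1) with dsize == 17, since the trailing '\0' is counted; a
+** property in "no namespace" yields ":getcontenttype" instead.
+*/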
+
+static void dav_append_prop(apr_pool_t *pool,
+ const char *name, const char *value,
+ apr_text_header *phdr)
+{
+ const char *s;
+ const char *lang = value;
+
+ /* skip past the xml:lang value */
+ value += strlen(lang) + 1;
+
+ if (*value == '\0') {
+ /* the property is an empty value */
+ if (*name == ':') {
+ /* "no namespace" case */
+ s = apr_psprintf(pool, "<%s/>" DEBUG_CR, name+1);
+ }
+ else {
+ s = apr_psprintf(pool, "<ns%s/>" DEBUG_CR, name);
+ }
+ }
+ else if (*lang != '\0') {
+ if (*name == ':') {
+ /* "no namespace" case */
+ s = apr_psprintf(pool, "<%s xml:lang=\"%s\">%s</%s>" DEBUG_CR,
+ name+1, lang, value, name+1);
+ }
+ else {
+ s = apr_psprintf(pool, "<ns%s xml:lang=\"%s\">%s</ns%s>" DEBUG_CR,
+ name, lang, value, name);
+ }
+ }
+ else if (*name == ':') {
+ /* "no namespace" case */
+ s = apr_psprintf(pool, "<%s>%s</%s>" DEBUG_CR, name+1, value, name+1);
+ }
+ else {
+ s = apr_psprintf(pool, "<ns%s>%s</ns%s>" DEBUG_CR, name, value, name);
+ }
+
+ apr_text_append(pool, phdr, s);
+}
+
+static dav_error * dav_propdb_open(apr_pool_t *pool,
+ const dav_resource *resource, int ro,
+ dav_db **pdb)
+{
+ dav_db *db;
+ dav_error *err;
+ apr_datum_t key;
+ apr_datum_t value = { 0 };
+
+ *pdb = NULL;
+
+ /*
+ ** Return if an error occurred, or there is no database.
+ **
+ ** NOTE: db could be NULL if we attempted to open a readonly
+ ** database that doesn't exist. If we require read/write
+ ** access, then a database was created and opened.
+ */
+ if ((err = dav_dbm_open(pool, resource, ro, &db)) != NULL
+ || db == NULL)
+ return err;
+
+ db->uri_index = apr_hash_make(pool);
+
+ key.dptr = DAV_GDBM_NS_KEY;
+ key.dsize = DAV_GDBM_NS_KEY_LEN;
+ if ((err = dav_dbm_fetch(db, key, &value)) != NULL) {
+ /* ### push a higher-level description? */
+ return err;
+ }
+
+ if (value.dptr == NULL) {
+ dav_propdb_metadata m = {
+ DAV_DBVSN_MAJOR, DAV_DBVSN_MINOR, 0
+ };
+
+ /*
+ ** If there is no METADATA key, then the database may be
+ ** from versions 0.9.0 .. 0.9.4 (which would be incompatible).
+ ** These can be identified by the presence of an NS_TABLE entry.
+ */
+ key.dptr = "NS_TABLE";
+ key.dsize = 8;
+ if (dav_dbm_exists(db, key)) {
+ dav_dbm_close(db);
+
+ /* call it a major version error */
+ return dav_new_error(pool, HTTP_INTERNAL_SERVER_ERROR,
+ DAV_ERR_PROP_BAD_MAJOR,
+ "Prop database has the wrong major "
+ "version number and cannot be used.");
+ }
+
+ /* initialize a new metadata structure */
+ dav_set_bufsize(pool, &db->ns_table, sizeof(m));
+ memcpy(db->ns_table.buf, &m, sizeof(m));
+ }
+ else {
+ dav_propdb_metadata m;
+ int ns;
+ const char *uri;
+
+ dav_set_bufsize(pool, &db->ns_table, value.dsize);
+ memcpy(db->ns_table.buf, value.dptr, value.dsize);
+
+ memcpy(&m, value.dptr, sizeof(m));
+ if (m.major != DAV_DBVSN_MAJOR) {
+ dav_dbm_close(db);
+
+ return dav_new_error(pool, HTTP_INTERNAL_SERVER_ERROR,
+ DAV_ERR_PROP_BAD_MAJOR,
+ "Prop database has the wrong major "
+ "version number and cannot be used.");
+ }
+ db->version = m.minor;
+ db->ns_count = ntohs(m.ns_count);
+
+ dav_dbm_freedatum(db, value);
+
+ /* create db->uri_index */
+ for (ns = 0, uri = db->ns_table.buf + sizeof(dav_propdb_metadata);
+ ns++ < db->ns_count;
+ uri += strlen(uri) + 1) {
+
+ /* we must copy the key, in case ns_table.buf moves */
+ apr_hash_set(db->uri_index,
+ apr_pstrdup(pool, uri), APR_HASH_KEY_STRING,
+ (void *)ns);
+ }
+ }
+
+ *pdb = db;
+ return NULL;
+}
+
+static void dav_propdb_close(dav_db *db)
+{
+
+ if (db->ns_table_dirty) {
+ dav_propdb_metadata m;
+ apr_datum_t key;
+ apr_datum_t value;
+ dav_error *err;
+
+ key.dptr = DAV_GDBM_NS_KEY;
+ key.dsize = DAV_GDBM_NS_KEY_LEN;
+
+ value.dptr = db->ns_table.buf;
+ value.dsize = db->ns_table.cur_len;
+
+ /* fill in the metadata that we store into the prop db. */
+ m.major = DAV_DBVSN_MAJOR;
+ m.minor = db->version; /* ### keep current minor version? */
+ m.ns_count = htons(db->ns_count);
+
+ memcpy(db->ns_table.buf, &m, sizeof(m));
+
+ err = dav_dbm_store(db, key, value);
+ /* ### what to do with the error? */
+ }
+
+ dav_dbm_close(db);
+}
+
+static dav_error * dav_propdb_define_namespaces(dav_db *db, dav_xmlns_info *xi)
+{
+ int ns;
+ const char *uri = db->ns_table.buf + sizeof(dav_propdb_metadata);
+
+ /* within the prop values, we use "ns%d" for prefixes... register them */
+ for (ns = 0; ns < db->ns_count; ++ns, uri += strlen(uri) + 1) {
+
+ /* Empty URIs signify the empty namespace. These do not get a
+ namespace prefix. when we generate the value, we will simply
+ leave off the prefix, which is defined by mod_dav to be the
+ empty namespace. */
+ if (*uri == '\0')
+ continue;
+
+ /* ns_table.buf can move, so copy its value (we want the values to
+ last as long as the provided dav_xmlns_info). */
+ dav_xmlns_add(xi,
+ apr_psprintf(xi->pool, "ns%d", ns),
+ apr_pstrdup(xi->pool, uri));
+ }
+
+ return NULL;
+}
+
+static dav_error * dav_propdb_output_value(dav_db *db,
+ const dav_prop_name *name,
+ dav_xmlns_info *xi,
+ apr_text_header *phdr,
+ int *found)
+{
+ apr_datum_t key = dav_build_key(db, name);
+ apr_datum_t value;
+ dav_error *err;
+
+ if ((err = dav_dbm_fetch(db, key, &value)) != NULL)
+ return err;
+ if (value.dptr == NULL) {
+ *found = 0;
+ return NULL;
+ }
+ *found = 1;
+
+ dav_append_prop(db->pool, key.dptr, value.dptr, phdr);
+
+ dav_dbm_freedatum(db, value);
+
+ return NULL;
+}
+
+static dav_error * dav_propdb_map_namespaces(
+ dav_db *db,
+ const apr_array_header_t *namespaces,
+ dav_namespace_map **mapping)
+{
+ dav_namespace_map *m = apr_palloc(db->pool, sizeof(*m));
+ int i;
+ int *pmap;
+ const char **puri;
+
+ /*
+ ** Iterate over the provided namespaces. If a namespace already appears
+ ** in our internal map of URI -> ns_id, then store that in the map. If
+ ** we don't know the namespace yet, then add it to the map and to our
+ ** table of known namespaces.
+ */
+ m->ns_map = pmap = apr_palloc(db->pool, namespaces->nelts * sizeof(*pmap));
+ for (i = namespaces->nelts, puri = (const char **)namespaces->elts;
+ i-- > 0;
+ ++puri, ++pmap) {
+
+ const char *uri = *puri;
+ apr_size_t uri_len = strlen(uri);
+ int ns_id = (int)apr_hash_get(db->uri_index, uri, uri_len);
+
+ if (ns_id == 0) {
+ dav_check_bufsize(db->pool, &db->ns_table, uri_len + 1);
+ memcpy(db->ns_table.buf + db->ns_table.cur_len, uri, uri_len + 1);
+ db->ns_table.cur_len += uri_len + 1;
+
+ /* copy the uri in case the passed-in namespaces changes in
+ some way. */
+ apr_hash_set(db->uri_index, apr_pstrdup(db->pool, uri), uri_len,
+ (void *)(db->ns_count + 1));
+
+ db->ns_table_dirty = 1;
+
+ *pmap = db->ns_count++;
+ }
+ else {
+ *pmap = ns_id - 1;
+ }
+ }
+
+ *mapping = m;
+ return NULL;
+}
+
+static dav_error * dav_propdb_store(dav_db *db, const dav_prop_name *name,
+ const apr_xml_elem *elem,
+ dav_namespace_map *mapping)
+{
+ apr_datum_t key = dav_build_key(db, name);
+ apr_datum_t value;
+
+ /* Note: mapping->ns_map was set up in dav_propdb_map_namespaces() */
+
+ /* ### use a db- subpool for these values? clear on exit? */
+
+ /* quote all the values in the element */
+ /* ### be nice to do this without affecting the element itself */
+ /* ### of course, the cast indicates Badness is occurring here */
+ apr_xml_quote_elem(db->pool, (apr_xml_elem *)elem);
+
+ /* generate a text blob for the xml:lang plus the contents */
+ apr_xml_to_text(db->pool, elem, APR_XML_X2T_LANG_INNER, NULL,
+ mapping->ns_map,
+ (const char **)&value.dptr, &value.dsize);
+
+ return dav_dbm_store(db, key, value);
+}
+
+static dav_error * dav_propdb_remove(dav_db *db, const dav_prop_name *name)
+{
+ apr_datum_t key = dav_build_key(db, name);
+ return dav_dbm_delete(db, key);
+}
+
+static int dav_propdb_exists(dav_db *db, const dav_prop_name *name)
+{
+ apr_datum_t key = dav_build_key(db, name);
+ return dav_dbm_exists(db, key);
+}
+
+static const char *dav_get_ns_table_uri(dav_db *db, int ns_id)
+{
+ const char *p = db->ns_table.buf + sizeof(dav_propdb_metadata);
+
+ while (ns_id--)
+ p += strlen(p) + 1;
+
+ return p;
+}
+
+static void dav_set_name(dav_db *db, dav_prop_name *pname)
+{
+ const char *s = db->iter.dptr;
+
+ if (s == NULL) {
+ pname->ns = pname->name = NULL;
+ }
+ else if (*s == ':') {
+ pname->ns = "";
+ pname->name = s + 1;
+ }
+ else {
+ int id = atoi(s);
+
+ pname->ns = dav_get_ns_table_uri(db, id);
+ if (s[1] == ':') {
+ pname->name = s + 2;
+ }
+ else {
+ pname->name = ap_strchr_c(s + 2, ':') + 1;
+ }
+ }
+}
+
+static dav_error * dav_propdb_next_name(dav_db *db, dav_prop_name *pname)
+{
+ dav_error *err;
+
+ /* free the previous key. note: if the loop is aborted, then the DBM
+ will toss the key (via pool cleanup) */
+ if (db->iter.dptr != NULL)
+ dav_dbm_freedatum(db, db->iter);
+
+ if ((err = dav_dbm_nextkey(db, &db->iter)) != NULL)
+ return err;
+
+ /* skip past the METADATA key */
+ if (db->iter.dptr != NULL && *db->iter.dptr == 'M')
+ return dav_propdb_next_name(db, pname);
+
+ dav_set_name(db, pname);
+ return NULL;
+}
+
+static dav_error * dav_propdb_first_name(dav_db *db, dav_prop_name *pname)
+{
+ dav_error *err;
+
+ if ((err = dav_dbm_firstkey(db, &db->iter)) != NULL)
+ return err;
+
+ /* skip past the METADATA key */
+ if (db->iter.dptr != NULL && *db->iter.dptr == 'M')
+ return dav_propdb_next_name(db, pname);
+
+ dav_set_name(db, pname);
+ return NULL;
+}
+
+static dav_error * dav_propdb_get_rollback(dav_db *db,
+ const dav_prop_name *name,
+ dav_deadprop_rollback **prollback)
+{
+ dav_deadprop_rollback *rb = apr_pcalloc(db->pool, sizeof(*rb));
+ apr_datum_t key;
+ apr_datum_t value;
+ dav_error *err;
+
+ key = dav_build_key(db, name);
+ rb->key.dptr = apr_pstrdup(db->pool, key.dptr);
+ rb->key.dsize = key.dsize;
+
+ if ((err = dav_dbm_fetch(db, key, &value)) != NULL)
+ return err;
+ if (value.dptr != NULL) {
+ rb->value.dptr = apr_pmemdup(db->pool, value.dptr, value.dsize);
+ rb->value.dsize = value.dsize;
+ }
+
+ *prollback = rb;
+ return NULL;
+}
+
+static dav_error * dav_propdb_apply_rollback(dav_db *db,
+ dav_deadprop_rollback *rollback)
+{
+ if (rollback->value.dptr == NULL) {
+ /* don't fail if the thing isn't really there. */
+ (void) dav_dbm_delete(db, rollback->key);
+ return NULL;
+ }
+
+ return dav_dbm_store(db, rollback->key, rollback->value);
+}
+
+const dav_hooks_db dav_hooks_db_dbm =
+{
+ dav_propdb_open,
+ dav_propdb_close,
+ dav_propdb_define_namespaces,
+ dav_propdb_output_value,
+ dav_propdb_map_namespaces,
+ dav_propdb_store,
+ dav_propdb_remove,
+ dav_propdb_exists,
+ dav_propdb_first_name,
+ dav_propdb_next_name,
+ dav_propdb_get_rollback,
+ dav_propdb_apply_rollback,
+
+ NULL /* ctx */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/fs/lock.c b/rubbos/app/httpd-2.0.64/modules/dav/fs/lock.c
new file mode 100644
index 00000000..20780e15
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/fs/lock.c
@@ -0,0 +1,1517 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+** DAV filesystem lock implementation
+*/
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_file_io.h"
+#include "apr_uuid.h"
+
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_log.h"
+
+#include "mod_dav.h"
+#include "repos.h"
+
+
+/* ---------------------------------------------------------------
+**
+** Lock database primitives
+**
+*/
+
+/*
+** LOCK DATABASES
+**
+** Lockdiscovery information is stored in the single lock database specified
+** by the DAVLockDB directive. Information about this db is stored in the
+** global server configuration.
+**
+** KEY
+**
+** The database is keyed by a key_type unsigned char (DAV_TYPE_INODE or
+** DAV_TYPE_FNAME) followed by inode and device number if possible,
+** otherwise full path (in the case of Win32 or lock-null resources).
+**
+** VALUE
+**
+** The value consists of a list of elements.
+** DIRECT LOCK: [char (DAV_LOCK_DIRECT),
+** char (dav_lock_scope),
+** char (dav_lock_type),
+** int depth,
+** time_t expires,
+** apr_uuid_t locktoken,
+** char[] owner,
+** char[] auth_user]
+**
+** INDIRECT LOCK: [char (DAV_LOCK_INDIRECT),
+** apr_uuid_t locktoken,
+** time_t expires,
+** apr_size_t key_size,
+** char[] key]
+** The key is to the collection lock that resulted in this indirect lock
+*/
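+
+/*
+** Added note (not part of the original source): a single stored value may
+** hold any mix of direct and indirect records concatenated back to back;
+** they are serialized by dav_fs_save_lock_record() and walked one element
+** at a time (switching on the one-byte prefix) by dav_fs_load_lock_record().
+*/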
+
+#define DAV_TRUE 1
+#define DAV_FALSE 0
+
+#define DAV_CREATE_LIST 23
+#define DAV_APPEND_LIST 24
+
+/* Stored lock_discovery prefix */
+#define DAV_LOCK_DIRECT 1
+#define DAV_LOCK_INDIRECT 2
+
+#define DAV_TYPE_INODE 10
+#define DAV_TYPE_FNAME 11
+
+
+/* ack. forward declare. */
+static dav_error * dav_fs_remove_locknull_member(apr_pool_t *p,
+ const char *filename,
+ dav_buffer *pbuf);
+
+/*
+** Use the opaquelock scheme for locktokens
+*/
+struct dav_locktoken {
+ apr_uuid_t uuid;
+};
+#define dav_compare_locktoken(plt1, plt2) \
+ memcmp(&(plt1)->uuid, &(plt2)->uuid, sizeof((plt1)->uuid))
+
+
+/* #################################################################
+** ### keep these structures (internal) or move fully to dav_lock?
+*/
+
+/*
+** We need to reliably size the fixed-length portion of
+** dav_lock_discovery; best to separate it into another
+** struct for a convenient sizeof, unless we pack lock_discovery.
+*/
+typedef struct dav_lock_discovery_fixed
+{
+ char scope;
+ char type;
+ int depth;
+ time_t timeout;
+} dav_lock_discovery_fixed;
+
+typedef struct dav_lock_discovery
+{
+ struct dav_lock_discovery_fixed f;
+
+ dav_locktoken *locktoken;
+ const char *owner; /* owner field from activelock */
+ const char *auth_user; /* authenticated user who created the lock */
+ struct dav_lock_discovery *next;
+} dav_lock_discovery;
+
+/* Indirect locks represent locks inherited from containing collections.
+ * They reference the lock token for the collection the lock is
+ * inherited from. A lock provider may also define a key to the
+ * inherited lock, for fast database lookup. The key is opaque outside
+ * the lock provider.
+ */
+typedef struct dav_lock_indirect
+{
+ dav_locktoken *locktoken;
+ apr_datum_t key;
+ struct dav_lock_indirect *next;
+ time_t timeout;
+} dav_lock_indirect;
+
+/* ################################################################# */
+
+
+/*
+** Stored direct lock info - full lock_discovery length:
+** prefix + Fixed length + lock token + 2 strings + 2 nulls (one for each string)
+*/
+#define dav_size_direct(a) (1 + sizeof(dav_lock_discovery_fixed) \
+ + sizeof(apr_uuid_t) \
+ + ((a)->owner ? strlen((a)->owner) : 0) \
+ + ((a)->auth_user ? strlen((a)->auth_user) : 0) \
+ + 2)
+
+/* Stored indirect lock info - lock token and apr_datum_t */
+#define dav_size_indirect(a) (1 + sizeof(apr_uuid_t) \
+ + sizeof(time_t) \
+ + sizeof((a)->key.dsize) + (a)->key.dsize)
+
+/*
+** The lockdb structure.
+**
+** The <db> field may be NULL, meaning one of two things:
+** 1) That we have not actually opened the underlying database (yet). The
+** <opened> field should be false.
+** 2) We opened it readonly and it wasn't present.
+**
+** The delayed opening (determined by <opened>) makes creating a lockdb
+** quick, while deferring the underlying I/O until it is actually required.
+**
+** We export the notion of a lockdb, but hide the details of it. Most
+** implementations will use a database of some kind, but it is certainly
+** possible that alternatives could be used.
+*/
+struct dav_lockdb_private
+{
+ request_rec *r; /* for accessing the uuid state */
+ apr_pool_t *pool; /* a pool to use */
+ const char *lockdb_path; /* where is the lock database? */
+
+ int opened; /* we opened the database */
+ dav_db *db; /* if non-NULL, the lock database */
+};
+typedef struct
+{
+ dav_lockdb pub;
+ dav_lockdb_private priv;
+} dav_lockdb_combined;
+
+/*
+** The private part of the lock structure.
+*/
+struct dav_lock_private
+{
+ apr_datum_t key; /* key into the lock database */
+};
+typedef struct
+{
+ dav_lock pub;
+ dav_lock_private priv;
+ dav_locktoken token;
+} dav_lock_combined;
+
+/*
+** This must be forward-declared so the open_lockdb function can use it.
+*/
+extern const dav_hooks_locks dav_hooks_locks_fs;
+
+
+/* internal function for creating locks */
+static dav_lock *dav_fs_alloc_lock(dav_lockdb *lockdb, apr_datum_t key,
+ const dav_locktoken *locktoken)
+{
+ dav_lock_combined *comb;
+
+ comb = apr_pcalloc(lockdb->info->pool, sizeof(*comb));
+ comb->pub.rectype = DAV_LOCKREC_DIRECT;
+ comb->pub.info = &comb->priv;
+ comb->priv.key = key;
+
+ if (locktoken == NULL) {
+ comb->pub.locktoken = &comb->token;
+ apr_uuid_get(&comb->token.uuid);
+ }
+ else {
+ comb->pub.locktoken = locktoken;
+ }
+
+ return &comb->pub;
+}
+
+/*
+** dav_fs_parse_locktoken
+**
+** Parse an opaquelocktoken URI into a locktoken.
+*/
+static dav_error * dav_fs_parse_locktoken(
+ apr_pool_t *p,
+ const char *char_token,
+ dav_locktoken **locktoken_p)
+{
+ dav_locktoken *locktoken;
+
+ if (ap_strstr_c(char_token, "opaquelocktoken:") != char_token) {
+ return dav_new_error(p,
+ HTTP_BAD_REQUEST, DAV_ERR_LOCK_UNK_STATE_TOKEN,
+ "The lock token uses an unknown State-token "
+ "format and could not be parsed.");
+ }
+ char_token += 16;
+
+ locktoken = apr_pcalloc(p, sizeof(*locktoken));
+ if (apr_uuid_parse(&locktoken->uuid, char_token)) {
+ return dav_new_error(p, HTTP_BAD_REQUEST, DAV_ERR_LOCK_PARSE_TOKEN,
+ "The opaquelocktoken has an incorrect format "
+ "and could not be parsed.");
+ }
+
+ *locktoken_p = locktoken;
+ return NULL;
+}
+
+/*
+** dav_fs_format_locktoken
+**
+** Generate the URI for a locktoken
+*/
+static const char *dav_fs_format_locktoken(
+ apr_pool_t *p,
+ const dav_locktoken *locktoken)
+{
+ char buf[APR_UUID_FORMATTED_LENGTH + 1];
+
+ apr_uuid_format(buf, &locktoken->uuid);
+ return apr_pstrcat(p, "opaquelocktoken:", buf, NULL);
+}
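+
+/*
+** Added illustration (not part of the original source): the formatted token
+** is the 16-character scheme prefix plus an APR-formatted UUID, e.g.
+** "opaquelocktoken:f81d4fae-7dec-11d0-a765-00a0c91e6bf6" (hypothetical
+** value); dav_fs_parse_locktoken() above accepts exactly this form.
+*/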
+
+/*
+** dav_fs_compare_locktoken
+**
+** Determine whether two locktokens are the same
+*/
+static int dav_fs_compare_locktoken(
+ const dav_locktoken *lt1,
+ const dav_locktoken *lt2)
+{
+ return dav_compare_locktoken(lt1, lt2);
+}
+
+/*
+** dav_fs_really_open_lockdb:
+**
+** If the database hasn't been opened yet, then open the thing.
+*/
+static dav_error * dav_fs_really_open_lockdb(dav_lockdb *lockdb)
+{
+ dav_error *err;
+
+ if (lockdb->info->opened)
+ return NULL;
+
+ err = dav_dbm_open_direct(lockdb->info->pool,
+ lockdb->info->lockdb_path,
+ lockdb->ro,
+ &lockdb->info->db);
+ if (err != NULL) {
+ return dav_push_error(lockdb->info->pool,
+ HTTP_INTERNAL_SERVER_ERROR,
+ DAV_ERR_LOCK_OPENDB,
+ "Could not open the lock database.",
+ err);
+ }
+
+ /* all right. it is opened now. */
+ lockdb->info->opened = 1;
+
+ return NULL;
+}
+
+/*
+** dav_fs_open_lockdb:
+**
+** "open" the lock database, as specified in the global server configuration.
+** If force is TRUE, then the database is opened now, rather than lazily.
+**
+** Note that only one can be open read/write.
+*/
+static dav_error * dav_fs_open_lockdb(request_rec *r, int ro, int force,
+ dav_lockdb **lockdb)
+{
+ dav_lockdb_combined *comb;
+
+ comb = apr_pcalloc(r->pool, sizeof(*comb));
+ comb->pub.hooks = &dav_hooks_locks_fs;
+ comb->pub.ro = ro;
+ comb->pub.info = &comb->priv;
+ comb->priv.r = r;
+ comb->priv.pool = r->pool;
+
+ comb->priv.lockdb_path = dav_get_lockdb_path(r);
+ if (comb->priv.lockdb_path == NULL) {
+ return dav_new_error(r->pool, HTTP_INTERNAL_SERVER_ERROR,
+ DAV_ERR_LOCK_NO_DB,
+ "A lock database was not specified with the "
+ "DAVLockDB directive. One must be specified "
+ "to use the locking functionality.");
+ }
+
+ /* done initializing. return it. */
+ *lockdb = &comb->pub;
+
+ if (force) {
+ /* ### add a higher-level comment? */
+ return dav_fs_really_open_lockdb(*lockdb);
+ }
+
+ return NULL;
+}
+
+/*
+** dav_fs_close_lockdb:
+**
+** Close it. Duh.
+*/
+static void dav_fs_close_lockdb(dav_lockdb *lockdb)
+{
+ if (lockdb->info->db != NULL)
+ dav_dbm_close(lockdb->info->db);
+}
+
+/*
+** dav_fs_build_fname_key
+**
+** Given a pathname, build a DAV_TYPE_FNAME lock database key.
+*/
+static apr_datum_t dav_fs_build_fname_key(apr_pool_t *p, const char *pathname)
+{
+ apr_datum_t key;
+
+ /* ### does this allocation have a proper lifetime? need to check */
+ /* ### can we use a buffer for this? */
+
+ /* size is TYPE + pathname + null */
+ key.dsize = strlen(pathname) + 2;
+ key.dptr = apr_palloc(p, key.dsize);
+ *key.dptr = DAV_TYPE_FNAME;
+ memcpy(key.dptr + 1, pathname, key.dsize - 1);
+ if (key.dptr[key.dsize - 2] == '/')
+ key.dptr[--key.dsize - 1] = '\0';
+ return key;
+}
+
+/*
+** dav_fs_build_key: Given a resource, return a apr_datum_t key
+** to look up lock information for this file.
+**
+** (inode/dev not supported or file is lock-null):
+** apr_datum_t->dvalue = full path
+**
+** (inode/dev supported and file exists ):
+** apr_datum_t->dvalue = inode, dev
+*/
+static apr_datum_t dav_fs_build_key(apr_pool_t *p,
+ const dav_resource *resource)
+{
+ const char *file = dav_fs_pathname(resource);
+ apr_datum_t key;
+ apr_finfo_t finfo;
+ apr_status_t rv;
+
+ /* ### use lstat() ?? */
+ /*
+ * XXX: What for platforms with no IDENT (dev/inode)?
+ */
+ rv = apr_stat(&finfo, file, APR_FINFO_IDENT, p);
+ if ((rv == APR_SUCCESS || rv == APR_INCOMPLETE)
+ && ((finfo.valid & APR_FINFO_IDENT) == APR_FINFO_IDENT))
+ {
+ /* ### can we use a buffer for this? */
+ key.dsize = 1 + sizeof(finfo.inode) + sizeof(finfo.device);
+ key.dptr = apr_palloc(p, key.dsize);
+ *key.dptr = DAV_TYPE_INODE;
+ memcpy(key.dptr + 1, &finfo.inode, sizeof(finfo.inode));
+ memcpy(key.dptr + 1 + sizeof(finfo.inode), &finfo.device,
+ sizeof(finfo.device));
+
+ return key;
+ }
+
+ return dav_fs_build_fname_key(p, file);
+}
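+
+/*
+** Added illustration (not part of the original source): where APR can supply
+** dev/inode, the key is the single byte DAV_TYPE_INODE followed by the raw
+** inode and device values; otherwise (e.g. for lock-null resources) it is
+** DAV_TYPE_FNAME followed by the NUL-terminated pathname with any trailing
+** '/' stripped, as built by dav_fs_build_fname_key() above.
+*/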
+
+/*
+** dav_fs_lock_expired: return 1 (true) if the given timeout is in the past
+** or present (the lock has expired), or 0 (false) if in the future
+** (the lock has not yet expired).
+*/
+static int dav_fs_lock_expired(time_t expires)
+{
+ return expires != DAV_TIMEOUT_INFINITE && time(NULL) >= expires;
+}
+
+/*
+** dav_fs_save_lock_record: Saves the lock information specified in the
+** direct and indirect lock lists about path into the lock database.
+** If direct and indirect == NULL, the key is removed.
+*/
+static dav_error * dav_fs_save_lock_record(dav_lockdb *lockdb, apr_datum_t key,
+ dav_lock_discovery *direct,
+ dav_lock_indirect *indirect)
+{
+ dav_error *err;
+ apr_datum_t val = { 0 };
+ char *ptr;
+ dav_lock_discovery *dp = direct;
+ dav_lock_indirect *ip = indirect;
+
+#if DAV_DEBUG
+ if (lockdb->ro) {
+ return dav_new_error(lockdb->info->pool,
+ HTTP_INTERNAL_SERVER_ERROR, 0,
+ "INTERNAL DESIGN ERROR: the lockdb was opened "
+ "readonly, but an attempt to save locks was "
+ "performed.");
+ }
+#endif
+
+ if ((err = dav_fs_really_open_lockdb(lockdb)) != NULL) {
+ /* ### add a higher-level error? */
+ return err;
+ }
+
+ /* If nothing to save, delete key */
+ if (dp == NULL && ip == NULL) {
+ /* don't fail if the key is not present */
+ /* ### but what about other errors? */
+ (void) dav_dbm_delete(lockdb->info->db, key);
+ return NULL;
+ }
+
+ while(dp) {
+ val.dsize += dav_size_direct(dp);
+ dp = dp->next;
+ }
+ while(ip) {
+ val.dsize += dav_size_indirect(ip);
+ ip = ip->next;
+ }
+
+ /* ### can this be apr_palloc() ? */
+ /* ### hmmm.... investigate the use of a buffer here */
+ ptr = val.dptr = apr_pcalloc(lockdb->info->pool, val.dsize);
+ dp = direct;
+ ip = indirect;
+
+ while(dp) {
+ *ptr++ = DAV_LOCK_DIRECT; /* Direct lock - lock_discovery struct follows */
+ memcpy(ptr, dp, sizeof(dp->f)); /* Fixed portion of struct */
+ ptr += sizeof(dp->f);
+ memcpy(ptr, dp->locktoken, sizeof(*dp->locktoken));
+ ptr += sizeof(*dp->locktoken);
+ if (dp->owner == NULL) {
+ *ptr++ = '\0';
+ }
+ else {
+ memcpy(ptr, dp->owner, strlen(dp->owner) + 1);
+ ptr += strlen(dp->owner) + 1;
+ }
+ if (dp->auth_user == NULL) {
+ *ptr++ = '\0';
+ }
+ else {
+ memcpy(ptr, dp->auth_user, strlen(dp->auth_user) + 1);
+ ptr += strlen(dp->auth_user) + 1;
+ }
+
+ dp = dp->next;
+ }
+
+ while(ip) {
+ *ptr++ = DAV_LOCK_INDIRECT; /* Indirect lock prefix */
+ memcpy(ptr, ip->locktoken, sizeof(*ip->locktoken)); /* Locktoken */
+ ptr += sizeof(*ip->locktoken);
+ memcpy(ptr, &ip->timeout, sizeof(ip->timeout)); /* Expire time */
+ ptr += sizeof(ip->timeout);
+ memcpy(ptr, &ip->key.dsize, sizeof(ip->key.dsize)); /* Size of key */
+ ptr += sizeof(ip->key.dsize);
+ memcpy(ptr, ip->key.dptr, ip->key.dsize); /* Key data */
+ ptr += ip->key.dsize;
+ ip = ip->next;
+ }
+
+ if ((err = dav_dbm_store(lockdb->info->db, key, val)) != NULL) {
+ /* ### more details? add an error_id? */
+ return dav_push_error(lockdb->info->pool,
+ HTTP_INTERNAL_SERVER_ERROR,
+ DAV_ERR_LOCK_SAVE_LOCK,
+ "Could not save lock information.",
+ err);
+ }
+
+ return NULL;
+}
+
+/*
+** dav_load_lock_record: Reads lock information about key from lock db;
+** creates linked lists of the direct and indirect locks.
+**
+** If add_method = DAV_APPEND_LIST, the result will be appended to the
+** head of the direct and indirect lists supplied.
+**
+** Passive lock removal: If lock has timed out, it will not be returned.
+** ### How much "logging" does RFC 2518 require?
+*/
+static dav_error * dav_fs_load_lock_record(dav_lockdb *lockdb, apr_datum_t key,
+ int add_method,
+ dav_lock_discovery **direct,
+ dav_lock_indirect **indirect)
+{
+ apr_pool_t *p = lockdb->info->pool;
+ dav_error *err;
+ apr_size_t offset = 0;
+ int need_save = DAV_FALSE;
+ apr_datum_t val = { 0 };
+ dav_lock_discovery *dp;
+ dav_lock_indirect *ip;
+ dav_buffer buf = { 0 };
+
+ if (add_method != DAV_APPEND_LIST) {
+ *direct = NULL;
+ *indirect = NULL;
+ }
+
+ if ((err = dav_fs_really_open_lockdb(lockdb)) != NULL) {
+ /* ### add a higher-level error? */
+ return err;
+ }
+
+ /*
+ ** If we opened readonly and the db wasn't there, then there are no
+ ** locks for this resource. Just exit.
+ */
+ if (lockdb->info->db == NULL)
+ return NULL;
+
+ if ((err = dav_dbm_fetch(lockdb->info->db, key, &val)) != NULL)
+ return err;
+
+ if (!val.dsize)
+ return NULL;
+
+ while (offset < val.dsize) {
+ switch (*(val.dptr + offset++)) {
+ case DAV_LOCK_DIRECT:
+ /* Create and fill a dav_lock_discovery structure */
+
+ dp = apr_pcalloc(p, sizeof(*dp));
+ memcpy(dp, val.dptr + offset, sizeof(dp->f));
+ offset += sizeof(dp->f);
+ dp->locktoken = apr_palloc(p, sizeof(*dp->locktoken));
+ memcpy(dp->locktoken, val.dptr + offset, sizeof(*dp->locktoken));
+ offset += sizeof(*dp->locktoken);
+ if (*(val.dptr + offset) == '\0') {
+ ++offset;
+ }
+ else {
+ dp->owner = apr_pstrdup(p, val.dptr + offset);
+ offset += strlen(dp->owner) + 1;
+ }
+
+ if (*(val.dptr + offset) == '\0') {
+ ++offset;
+ }
+ else {
+ dp->auth_user = apr_pstrdup(p, val.dptr + offset);
+ offset += strlen(dp->auth_user) + 1;
+ }
+
+ if (!dav_fs_lock_expired(dp->f.timeout)) {
+ dp->next = *direct;
+ *direct = dp;
+ }
+ else {
+ need_save = DAV_TRUE;
+
+ /* Remove timed-out locknull fm .locknull list */
+ if (*key.dptr == DAV_TYPE_FNAME) {
+ const char *fname = key.dptr + 1;
+ apr_finfo_t finfo;
+ apr_status_t rv;
+
+ /* if we don't see the file, then it's a locknull */
+ rv = apr_lstat(&finfo, fname, APR_FINFO_MIN, p);
+ if (rv != APR_SUCCESS && rv != APR_INCOMPLETE) {
+ if ((err = dav_fs_remove_locknull_member(p, fname, &buf)) != NULL) {
+ /* ### push a higher-level description? */
+ return err;
+ }
+ }
+ }
+ }
+ break;
+
+ case DAV_LOCK_INDIRECT:
+ /* Create and fill a dav_lock_indirect structure */
+
+ ip = apr_pcalloc(p, sizeof(*ip));
+ ip->locktoken = apr_palloc(p, sizeof(*ip->locktoken));
+ memcpy(ip->locktoken, val.dptr + offset, sizeof(*ip->locktoken));
+ offset += sizeof(*ip->locktoken);
+ memcpy(&ip->timeout, val.dptr + offset, sizeof(ip->timeout));
+ offset += sizeof(ip->timeout);
+ memcpy(&ip->key.dsize, val.dptr + offset, sizeof(ip->key.dsize)); /* length of datum */
+ offset += sizeof(ip->key.dsize);
+ ip->key.dptr = apr_palloc(p, ip->key.dsize);
+ memcpy(ip->key.dptr, val.dptr + offset, ip->key.dsize);
+ offset += ip->key.dsize;
+
+ if (!dav_fs_lock_expired(ip->timeout)) {
+ ip->next = *indirect;
+ *indirect = ip;
+ }
+ else {
+ need_save = DAV_TRUE;
+ /* A locknull resource will never be locked indirectly */
+ }
+
+ break;
+
+ default:
+ dav_dbm_freedatum(lockdb->info->db, val);
+
+ /* ### should use a computed_desc and insert corrupt token data */
+ --offset;
+ return dav_new_error(p,
+ HTTP_INTERNAL_SERVER_ERROR,
+ DAV_ERR_LOCK_CORRUPT_DB,
+ apr_psprintf(p,
+ "The lock database was found to "
+ "be corrupt. offset %"
+ APR_SIZE_T_FMT ", c=%02x",
+ offset, val.dptr[offset]));
+ }
+ }
+
+ dav_dbm_freedatum(lockdb->info->db, val);
+
+ /* Clean up this record if we found expired locks */
+ /*
+ ** ### shouldn't do this if we've been opened READONLY. elide the
+ ** ### timed-out locks from the response, but don't save that info back
+ */
+ if (need_save == DAV_TRUE) {
+ return dav_fs_save_lock_record(lockdb, key, *direct, *indirect);
+ }
+
+ return NULL;
+}
+
+/* resolve <indirect>, returning <*direct> */
+static dav_error * dav_fs_resolve(dav_lockdb *lockdb,
+ dav_lock_indirect *indirect,
+ dav_lock_discovery **direct,
+ dav_lock_discovery **ref_dp,
+ dav_lock_indirect **ref_ip)
+{
+ dav_error *err;
+ dav_lock_discovery *dir;
+ dav_lock_indirect *ind;
+
+ if ((err = dav_fs_load_lock_record(lockdb, indirect->key,
+ DAV_CREATE_LIST,
+ &dir, &ind)) != NULL) {
+ /* ### insert a higher-level description? */
+ return err;
+ }
+ if (ref_dp != NULL) {
+ *ref_dp = dir;
+ *ref_ip = ind;
+ }
+
+ for (; dir != NULL; dir = dir->next) {
+ if (!dav_compare_locktoken(indirect->locktoken, dir->locktoken)) {
+ *direct = dir;
+ return NULL;
+ }
+ }
+
+ /* No match found (but we should have found one!) */
+
+ /* ### use a different description and/or error ID? */
+ return dav_new_error(lockdb->info->pool,
+ HTTP_INTERNAL_SERVER_ERROR,
+ DAV_ERR_LOCK_CORRUPT_DB,
+ "The lock database was found to be corrupt. "
+ "An indirect lock's direct lock could not "
+ "be found.");
+}
+
+/* ---------------------------------------------------------------
+**
+** Property-related lock functions
+**
+*/
+
+/*
+** dav_fs_get_supportedlock: Returns a static string for all supportedlock
+** properties. I think we save more returning a static string than
+** constructing it every time, though it might look cleaner.
+*/
+static const char *dav_fs_get_supportedlock(const dav_resource *resource)
+{
+ static const char supported[] = DEBUG_CR
+ "<D:lockentry>" DEBUG_CR
+ "<D:lockscope><D:exclusive/></D:lockscope>" DEBUG_CR
+ "<D:locktype><D:write/></D:locktype>" DEBUG_CR
+ "</D:lockentry>" DEBUG_CR
+ "<D:lockentry>" DEBUG_CR
+ "<D:lockscope><D:shared/></D:lockscope>" DEBUG_CR
+ "<D:locktype><D:write/></D:locktype>" DEBUG_CR
+ "</D:lockentry>" DEBUG_CR;
+
+ return supported;
+}
+
+/* ---------------------------------------------------------------
+**
+** General lock functions
+**
+*/
+
+/* ---------------------------------------------------------------
+**
+** Functions dealing with lock-null resources
+**
+*/
+
+/*
+** dav_fs_load_locknull_list: Returns a dav_buffer dump of the locknull file
+** for the given directory.
+*/
+static dav_error * dav_fs_load_locknull_list(apr_pool_t *p, const char *dirpath,
+ dav_buffer *pbuf)
+{
+ apr_finfo_t finfo;
+ apr_file_t *file = NULL;
+ dav_error *err = NULL;
+ apr_size_t amt;
+ apr_status_t rv;
+
+ dav_buffer_init(p, pbuf, dirpath);
+
+ if (pbuf->buf[pbuf->cur_len - 1] == '/')
+ pbuf->buf[--pbuf->cur_len] = '\0';
+
+ dav_buffer_place(p, pbuf, "/" DAV_FS_STATE_DIR "/" DAV_FS_LOCK_NULL_FILE);
+
+ /* reset this in case we leave w/o reading into the buffer */
+ pbuf->cur_len = 0;
+
+ if (apr_file_open(&file, pbuf->buf, APR_READ | APR_BINARY, APR_OS_DEFAULT,
+ p) != APR_SUCCESS) {
+ return NULL;
+ }
+
+ rv = apr_file_info_get(&finfo, APR_FINFO_SIZE, file);
+ if (rv != APR_SUCCESS) {
+ err = dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ apr_psprintf(p,
+ "Opened but could not stat file %s",
+ pbuf->buf));
+ goto loaderror;
+ }
+
+ if (finfo.size != (apr_size_t)finfo.size) {
+ err = dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ apr_psprintf(p,
+ "Opened but rejected huge file %s",
+ pbuf->buf));
+ goto loaderror;
+ }
+
+ amt = (apr_size_t)finfo.size;
+ dav_set_bufsize(p, pbuf, amt);
+ if (apr_file_read(file, pbuf->buf, &amt) != APR_SUCCESS
+ || amt != finfo.size) {
+ err = dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ apr_psprintf(p,
+ "Failure reading locknull file "
+ "for %s", dirpath));
+
+ /* just in case the caller disregards the returned error */
+ pbuf->cur_len = 0;
+ goto loaderror;
+ }
+
+ loaderror:
+ apr_file_close(file);
+ return err;
+}
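+
+/*
+** Added note (not part of the original source): the .locknull file is a flat
+** sequence of NUL-terminated member names; dav_fs_add_locknull_state()
+** appends "name\0" to this buffer and dav_fs_remove_locknull_member()
+** drops a matching entry by memmove()-ing the rest of the buffer over it.
+*/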
+
+/*
+** dav_fs_save_locknull_list: Saves contents of pbuf into the
+** locknull file for dirpath.
+*/
+static dav_error * dav_fs_save_locknull_list(apr_pool_t *p, const char *dirpath,
+ dav_buffer *pbuf)
+{
+ const char *pathname;
+ apr_file_t *file = NULL;
+ dav_error *err = NULL;
+ apr_size_t amt;
+
+ if (pbuf->buf == NULL)
+ return NULL;
+
+ dav_fs_ensure_state_dir(p, dirpath);
+ pathname = apr_pstrcat(p,
+ dirpath,
+ dirpath[strlen(dirpath) - 1] == '/' ? "" : "/",
+ DAV_FS_STATE_DIR "/" DAV_FS_LOCK_NULL_FILE,
+ NULL);
+
+ if (pbuf->cur_len == 0) {
+ /* delete the file if cur_len == 0 */
+ if (apr_file_remove(pathname, p) != 0) {
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ apr_psprintf(p,
+ "Error removing %s", pathname));
+ }
+ return NULL;
+ }
+
+ if (apr_file_open(&file, pathname,
+ APR_WRITE | APR_CREATE | APR_TRUNCATE | APR_BINARY,
+ APR_OS_DEFAULT, p) != APR_SUCCESS) {
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ apr_psprintf(p,
+ "Error opening %s for writing",
+ pathname));
+ }
+
+ amt = pbuf->cur_len;
+ if (apr_file_write(file, pbuf->buf, &amt) != APR_SUCCESS
+ || amt != pbuf->cur_len) {
+ err = dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ apr_psprintf(p,
+ "Error writing %" APR_SIZE_T_FMT
+ " bytes to %s",
+ pbuf->cur_len, pathname));
+ }
+
+ apr_file_close(file);
+ return err;
+}
+
+/*
+** dav_fs_remove_locknull_member: Removes filename from the locknull list
+** for directory path.
+*/
+static dav_error * dav_fs_remove_locknull_member(apr_pool_t *p,
+ const char *filename,
+ dav_buffer *pbuf)
+{
+ dav_error *err;
+ apr_size_t len;
+ apr_size_t scanlen;
+ char *scan;
+ const char *scanend;
+ char *dirpath = apr_pstrdup(p, filename);
+ char *fname = strrchr(dirpath, '/');
+ int dirty = 0;
+
+ if (fname != NULL)
+ *fname++ = '\0';
+ else
+ fname = dirpath;
+ len = strlen(fname) + 1;
+
+ if ((err = dav_fs_load_locknull_list(p, dirpath, pbuf)) != NULL) {
+ /* ### add a higher level description? */
+ return err;
+ }
+
+ for (scan = pbuf->buf, scanend = scan + pbuf->cur_len;
+ scan < scanend;
+ scan += scanlen) {
+ scanlen = strlen(scan) + 1;
+ if (len == scanlen && memcmp(fname, scan, scanlen) == 0) {
+ pbuf->cur_len -= scanlen;
+ memmove(scan, scan + scanlen, scanend - (scan + scanlen));
+ dirty = 1;
+ break;
+ }
+ }
+
+ if (dirty) {
+ if ((err = dav_fs_save_locknull_list(p, dirpath, pbuf)) != NULL) {
+ /* ### add a higher level description? */
+ return err;
+ }
+ }
+
+ return NULL;
+}
+
+/* Note: used by dav_fs_repos.c */
+dav_error * dav_fs_get_locknull_members(
+ const dav_resource *resource,
+ dav_buffer *pbuf)
+{
+ const char *dirpath;
+
+ /* ### should test this result value... */
+ (void) dav_fs_dir_file_name(resource, &dirpath, NULL);
+ return dav_fs_load_locknull_list(dav_fs_pool(resource), dirpath, pbuf);
+}
+
+/* ### fold into append_lock? */
+/* ### take an optional buf parameter? */
+static dav_error * dav_fs_add_locknull_state(
+ dav_lockdb *lockdb,
+ const dav_resource *resource)
+{
+ dav_buffer buf = { 0 };
+ apr_pool_t *p = lockdb->info->pool;
+ const char *dirpath;
+ const char *fname;
+ dav_error *err;
+
+ /* ### should test this result value... */
+ (void) dav_fs_dir_file_name(resource, &dirpath, &fname);
+
+ if ((err = dav_fs_load_locknull_list(p, dirpath, &buf)) != NULL) {
+ return dav_push_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not load .locknull file.", err);
+ }
+
+ dav_buffer_append(p, &buf, fname);
+ buf.cur_len++; /* we want the null-term here */
+
+ if ((err = dav_fs_save_locknull_list(p, dirpath, &buf)) != NULL) {
+ return dav_push_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not save .locknull file.", err);
+ }
+
+ return NULL;
+}
+
+/*
+** dav_fs_remove_locknull_state: Given a request, check to see if r->filename
+** is/was a lock-null resource. If so, return it to an existent state.
+**
+** ### this function is broken... it doesn't check!
+**
+** In this implementation, this involves two things:
+** (a) remove it from the list in the appropriate .DAV/locknull file
+** (b) on *nix, convert the key from a filename to an inode.
+*/
+static dav_error * dav_fs_remove_locknull_state(
+ dav_lockdb *lockdb,
+ const dav_resource *resource)
+{
+ dav_buffer buf = { 0 };
+ dav_error *err;
+ apr_pool_t *p = lockdb->info->pool;
+ const char *pathname = dav_fs_pathname(resource);
+
+ if ((err = dav_fs_remove_locknull_member(p, pathname, &buf)) != NULL) {
+ /* ### add a higher-level description? */
+ return err;
+ }
+
+ {
+ dav_lock_discovery *ld;
+ dav_lock_indirect *id;
+ apr_datum_t key;
+
+ /*
+ ** Fetch the lock(s) that made the resource lock-null. Remove
+ ** them under the filename key. Obtain the new inode key, and
+ ** save the same lock information under it.
+ */
+ key = dav_fs_build_fname_key(p, pathname);
+ if ((err = dav_fs_load_lock_record(lockdb, key, DAV_CREATE_LIST,
+ &ld, &id)) != NULL) {
+ /* ### insert a higher-level error description */
+ return err;
+ }
+
+ if ((err = dav_fs_save_lock_record(lockdb, key, NULL, NULL)) != NULL) {
+ /* ### insert a higher-level error description */
+ return err;
+ }
+
+ key = dav_fs_build_key(p, resource);
+ if ((err = dav_fs_save_lock_record(lockdb, key, ld, id)) != NULL) {
+ /* ### insert a higher-level error description */
+ return err;
+ }
+ }
+
+ return NULL;
+}
+
+static dav_error * dav_fs_create_lock(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ dav_lock **lock)
+{
+ apr_datum_t key;
+
+ key = dav_fs_build_key(lockdb->info->pool, resource);
+
+ *lock = dav_fs_alloc_lock(lockdb,
+ key,
+ NULL);
+
+ (*lock)->is_locknull = !resource->exists;
+
+ return NULL;
+}
+
+static dav_error * dav_fs_get_locks(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ int calltype,
+ dav_lock **locks)
+{
+ apr_pool_t *p = lockdb->info->pool;
+ apr_datum_t key;
+ dav_error *err;
+ dav_lock *lock = NULL;
+ dav_lock *newlock;
+ dav_lock_discovery *dp;
+ dav_lock_indirect *ip;
+
+#if DAV_DEBUG
+ if (calltype == DAV_GETLOCKS_COMPLETE) {
+ return dav_new_error(lockdb->info->pool,
+ HTTP_INTERNAL_SERVER_ERROR, 0,
+ "INTERNAL DESIGN ERROR: DAV_GETLOCKS_COMPLETE "
+ "is not yet supported");
+ }
+#endif
+
+ key = dav_fs_build_key(p, resource);
+ if ((err = dav_fs_load_lock_record(lockdb, key, DAV_CREATE_LIST,
+ &dp, &ip)) != NULL) {
+ /* ### push a higher-level desc? */
+ return err;
+ }
+
+ /* copy all direct locks to the result list */
+ for (; dp != NULL; dp = dp->next) {
+ newlock = dav_fs_alloc_lock(lockdb, key, dp->locktoken);
+ newlock->is_locknull = !resource->exists;
+ newlock->scope = dp->f.scope;
+ newlock->type = dp->f.type;
+ newlock->depth = dp->f.depth;
+ newlock->timeout = dp->f.timeout;
+ newlock->owner = dp->owner;
+ newlock->auth_user = dp->auth_user;
+
+ /* hook into the result list */
+ newlock->next = lock;
+ lock = newlock;
+ }
+
+ /* copy all the indirect locks to the result list. resolve as needed. */
+ for (; ip != NULL; ip = ip->next) {
+ newlock = dav_fs_alloc_lock(lockdb, ip->key, ip->locktoken);
+ newlock->is_locknull = !resource->exists;
+
+ if (calltype == DAV_GETLOCKS_RESOLVED) {
+ if ((err = dav_fs_resolve(lockdb, ip, &dp, NULL, NULL)) != NULL) {
+ /* ### push a higher-level desc? */
+ return err;
+ }
+
+ newlock->scope = dp->f.scope;
+ newlock->type = dp->f.type;
+ newlock->depth = dp->f.depth;
+ newlock->timeout = dp->f.timeout;
+ newlock->owner = dp->owner;
+ newlock->auth_user = dp->auth_user;
+ }
+ else {
+ /* DAV_GETLOCKS_PARTIAL */
+ newlock->rectype = DAV_LOCKREC_INDIRECT_PARTIAL;
+ }
+
+ /* hook into the result list */
+ newlock->next = lock;
+ lock = newlock;
+ }
+
+ *locks = lock;
+ return NULL;
+}
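+
+/*
+** Hypothetical caller sketch (not taken from mod_dav itself): the locks are
+** returned as a singly linked list threaded through lock->next, so a
+** resolved list would typically be consumed as:
+**
+**   dav_lock *list;
+**   if (dav_fs_get_locks(lockdb, resource, DAV_GETLOCKS_RESOLVED,
+**                        &list) == NULL) {
+**       for (; list != NULL; list = list->next) {
+**           ... examine list->locktoken, list->scope, list->timeout ...
+**       }
+**   }
+*/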
+
+static dav_error * dav_fs_find_lock(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ const dav_locktoken *locktoken,
+ int partial_ok,
+ dav_lock **lock)
+{
+ dav_error *err;
+ apr_datum_t key;
+ dav_lock_discovery *dp;
+ dav_lock_indirect *ip;
+
+ *lock = NULL;
+
+ key = dav_fs_build_key(lockdb->info->pool, resource);
+ if ((err = dav_fs_load_lock_record(lockdb, key, DAV_CREATE_LIST,
+ &dp, &ip)) != NULL) {
+ /* ### push a higher-level desc? */
+ return err;
+ }
+
+ for (; dp != NULL; dp = dp->next) {
+ if (!dav_compare_locktoken(locktoken, dp->locktoken)) {
+ *lock = dav_fs_alloc_lock(lockdb, key, locktoken);
+ (*lock)->is_locknull = !resource->exists;
+ (*lock)->scope = dp->f.scope;
+ (*lock)->type = dp->f.type;
+ (*lock)->depth = dp->f.depth;
+ (*lock)->timeout = dp->f.timeout;
+ (*lock)->owner = dp->owner;
+ (*lock)->auth_user = dp->auth_user;
+ return NULL;
+ }
+ }
+
+ for (; ip != NULL; ip = ip->next) {
+ if (!dav_compare_locktoken(locktoken, ip->locktoken)) {
+ *lock = dav_fs_alloc_lock(lockdb, ip->key, locktoken);
+ (*lock)->is_locknull = !resource->exists;
+
+ /* ### nobody uses the resolving right now! */
+ if (partial_ok) {
+ (*lock)->rectype = DAV_LOCKREC_INDIRECT_PARTIAL;
+ }
+ else {
+ (*lock)->rectype = DAV_LOCKREC_INDIRECT;
+ if ((err = dav_fs_resolve(lockdb, ip, &dp,
+ NULL, NULL)) != NULL) {
+ /* ### push a higher-level desc? */
+ return err;
+ }
+ (*lock)->scope = dp->f.scope;
+ (*lock)->type = dp->f.type;
+ (*lock)->depth = dp->f.depth;
+ (*lock)->timeout = dp->f.timeout;
+ (*lock)->owner = dp->owner;
+ (*lock)->auth_user = dp->auth_user;
+ }
+ return NULL;
+ }
+ }
+
+ return NULL;
+}
+
+static dav_error * dav_fs_has_locks(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ int *locks_present)
+{
+ dav_error *err;
+ apr_datum_t key;
+
+ *locks_present = 0;
+
+ if ((err = dav_fs_really_open_lockdb(lockdb)) != NULL) {
+ /* ### insert a higher-level error description */
+ return err;
+ }
+
+ /*
+ ** If we opened readonly and the db wasn't there, then there are no
+ ** locks for this resource. Just exit.
+ */
+ if (lockdb->info->db == NULL)
+ return NULL;
+
+ key = dav_fs_build_key(lockdb->info->pool, resource);
+
+ *locks_present = dav_dbm_exists(lockdb->info->db, key);
+
+ return NULL;
+}
+
+static dav_error * dav_fs_append_locks(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ int make_indirect,
+ const dav_lock *lock)
+{
+ apr_pool_t *p = lockdb->info->pool;
+ dav_error *err;
+ dav_lock_indirect *ip;
+ dav_lock_discovery *dp;
+ apr_datum_t key;
+
+ key = dav_fs_build_key(lockdb->info->pool, resource);
+ if ((err = dav_fs_load_lock_record(lockdb, key, 0, &dp, &ip)) != NULL) {
+ /* ### maybe add in a higher-level description */
+ return err;
+ }
+
+ /*
+ ** ### when we store the lock more directly, we need to update
+ ** ### lock->rectype and lock->is_locknull
+ */
+
+ if (make_indirect) {
+ for (; lock != NULL; lock = lock->next) {
+
+ /* ### this works for any <lock> rectype */
+ dav_lock_indirect *newi = apr_pcalloc(p, sizeof(*newi));
+
+ /* ### shut off the const warning for now */
+ newi->locktoken = (dav_locktoken *)lock->locktoken;
+ newi->timeout = lock->timeout;
+ newi->key = lock->info->key;
+ newi->next = ip;
+ ip = newi;
+ }
+ }
+ else {
+ for (; lock != NULL; lock = lock->next) {
+ /* create and link in the right kind of lock */
+
+ if (lock->rectype == DAV_LOCKREC_DIRECT) {
+ dav_lock_discovery *newd = apr_pcalloc(p, sizeof(*newd));
+
+ newd->f.scope = lock->scope;
+ newd->f.type = lock->type;
+ newd->f.depth = lock->depth;
+ newd->f.timeout = lock->timeout;
+ /* ### shut off the const warning for now */
+ newd->locktoken = (dav_locktoken *)lock->locktoken;
+ newd->owner = lock->owner;
+ newd->auth_user = lock->auth_user;
+ newd->next = dp;
+ dp = newd;
+ }
+ else {
+ /* DAV_LOCKREC_INDIRECT(_PARTIAL) */
+
+ dav_lock_indirect *newi = apr_pcalloc(p, sizeof(*newi));
+
+ /* ### shut off the const warning for now */
+ newi->locktoken = (dav_locktoken *)lock->locktoken;
+ newi->key = lock->info->key;
+ newi->next = ip;
+ ip = newi;
+ }
+ }
+ }
+
+ if ((err = dav_fs_save_lock_record(lockdb, key, dp, ip)) != NULL) {
+ /* ### maybe add a higher-level description */
+ return err;
+ }
+
+ /* we have a special list for recording locknull resources */
+ /* ### ack! this can add two copies to the locknull list */
+ if (!resource->exists
+ && (err = dav_fs_add_locknull_state(lockdb, resource)) != NULL) {
+ /* ### maybe add a higher-level description */
+ return err;
+ }
+
+ return NULL;
+}
+
+static dav_error * dav_fs_remove_lock(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ const dav_locktoken *locktoken)
+{
+ dav_error *err;
+ dav_buffer buf = { 0 };
+ dav_lock_discovery *dh = NULL;
+ dav_lock_indirect *ih = NULL;
+ apr_datum_t key;
+
+ key = dav_fs_build_key(lockdb->info->pool, resource);
+
+ if (locktoken != NULL) {
+ dav_lock_discovery *dp;
+ dav_lock_discovery *dprev = NULL;
+ dav_lock_indirect *ip;
+ dav_lock_indirect *iprev = NULL;
+
+ if ((err = dav_fs_load_lock_record(lockdb, key, DAV_CREATE_LIST,
+ &dh, &ih)) != NULL) {
+ /* ### maybe add a higher-level description */
+ return err;
+ }
+
+ for (dp = dh; dp != NULL; dp = dp->next) {
+ if (dav_compare_locktoken(locktoken, dp->locktoken) == 0) {
+ if (dprev)
+ dprev->next = dp->next;
+ else
+ dh = dh->next;
+ }
+ dprev = dp;
+ }
+
+ for (ip = ih; ip != NULL; ip = ip->next) {
+ if (dav_compare_locktoken(locktoken, ip->locktoken) == 0) {
+ if (iprev)
+ iprev->next = ip->next;
+ else
+ ih = ih->next;
+ }
+ iprev = ip;
+ }
+
+ }
+
+ /* save the modified locks, or remove all locks (dh=ih=NULL). */
+ if ((err = dav_fs_save_lock_record(lockdb, key, dh, ih)) != NULL) {
+ /* ### maybe add a higher-level description */
+ return err;
+ }
+
+ /*
+ ** If this resource is a locknull resource AND no more locks exist,
+ ** then remove the locknull member.
+ **
+ ** Note: remove_locknull_state() attempts to convert a locknull member
+ ** to a real member. In this case, all locks are gone, so the
+    ** locknull resource returns to the null state (i.e. doesn't exist),
+ ** so there is no need to update the lockdb (and it won't find
+ ** any because a precondition is that none exist).
+ */
+ if (!resource->exists && dh == NULL && ih == NULL
+ && (err = dav_fs_remove_locknull_member(lockdb->info->pool,
+ dav_fs_pathname(resource),
+ &buf)) != NULL) {
+ /* ### maybe add a higher-level description */
+ return err;
+ }
+
+ return NULL;
+}
+
+static int dav_fs_do_refresh(dav_lock_discovery *dp,
+ const dav_locktoken_list *ltl,
+ time_t new_time)
+{
+ int dirty = 0;
+
+ for (; ltl != NULL; ltl = ltl->next) {
+ if (dav_compare_locktoken(dp->locktoken, ltl->locktoken) == 0)
+ {
+ dp->f.timeout = new_time;
+ dirty = 1;
+ }
+ }
+
+ return dirty;
+}
+
+static dav_error * dav_fs_refresh_locks(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ const dav_locktoken_list *ltl,
+ time_t new_time,
+ dav_lock **locks)
+{
+ dav_error *err;
+ apr_datum_t key;
+ dav_lock_discovery *dp;
+ dav_lock_discovery *dp_scan;
+ dav_lock_indirect *ip;
+ int dirty = 0;
+ dav_lock *newlock;
+
+ *locks = NULL;
+
+ key = dav_fs_build_key(lockdb->info->pool, resource);
+ if ((err = dav_fs_load_lock_record(lockdb, key, DAV_CREATE_LIST,
+ &dp, &ip)) != NULL) {
+ /* ### maybe add in a higher-level description */
+ return err;
+ }
+
+ /* ### we should be refreshing direct AND (resolved) indirect locks! */
+
+ /* refresh all of the direct locks on this resource */
+ for (dp_scan = dp; dp_scan != NULL; dp_scan = dp_scan->next) {
+ if (dav_fs_do_refresh(dp_scan, ltl, new_time)) {
+ /* the lock was refreshed. return the lock. */
+ newlock = dav_fs_alloc_lock(lockdb, key, dp_scan->locktoken);
+ newlock->is_locknull = !resource->exists;
+ newlock->scope = dp_scan->f.scope;
+ newlock->type = dp_scan->f.type;
+ newlock->depth = dp_scan->f.depth;
+ newlock->timeout = dp_scan->f.timeout;
+ newlock->owner = dp_scan->owner;
+ newlock->auth_user = dp_scan->auth_user;
+
+ newlock->next = *locks;
+ *locks = newlock;
+
+ dirty = 1;
+ }
+ }
+
+ /* if we refreshed any locks, then save them back. */
+ if (dirty
+ && (err = dav_fs_save_lock_record(lockdb, key, dp, ip)) != NULL) {
+ /* ### maybe add in a higher-level description */
+ return err;
+ }
+
+ /* for each indirect lock, find its direct lock and refresh it. */
+ for (; ip != NULL; ip = ip->next) {
+ dav_lock_discovery *ref_dp;
+ dav_lock_indirect *ref_ip;
+
+ if ((err = dav_fs_resolve(lockdb, ip, &dp_scan,
+ &ref_dp, &ref_ip)) != NULL) {
+ /* ### push a higher-level desc? */
+ return err;
+ }
+ if (dav_fs_do_refresh(dp_scan, ltl, new_time)) {
+ /* the lock was refreshed. return the lock. */
+ newlock = dav_fs_alloc_lock(lockdb, ip->key, dp_scan->locktoken);
+ newlock->is_locknull = !resource->exists;
+ newlock->scope = dp_scan->f.scope;
+ newlock->type = dp_scan->f.type;
+ newlock->depth = dp_scan->f.depth;
+ newlock->timeout = dp_scan->f.timeout;
+ newlock->owner = dp_scan->owner;
+ newlock->auth_user = dp_scan->auth_user;
+
+ newlock->next = *locks;
+ *locks = newlock;
+
+ /* save the (resolved) direct lock back */
+ if ((err = dav_fs_save_lock_record(lockdb, ip->key, ref_dp,
+ ref_ip)) != NULL) {
+ /* ### push a higher-level desc? */
+ return err;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+
+const dav_hooks_locks dav_hooks_locks_fs =
+{
+ dav_fs_get_supportedlock,
+ dav_fs_parse_locktoken,
+ dav_fs_format_locktoken,
+ dav_fs_compare_locktoken,
+ dav_fs_open_lockdb,
+ dav_fs_close_lockdb,
+ dav_fs_remove_locknull_state,
+ dav_fs_create_lock,
+ dav_fs_get_locks,
+ dav_fs_find_lock,
+ dav_fs_has_locks,
+ dav_fs_append_locks,
+ dav_fs_remove_lock,
+ dav_fs_refresh_locks,
+ NULL, /* lookup_resource */
+
+ NULL /* ctx */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/fs/mod_dav_fs.c b/rubbos/app/httpd-2.0.64/modules/dav/fs/mod_dav_fs.c
new file mode 100644
index 00000000..dfd190b3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/fs/mod_dav_fs.c
@@ -0,0 +1,108 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "apr_strings.h"
+
+#include "mod_dav.h"
+#include "repos.h"
+
+/* per-server configuration */
+typedef struct {
+ const char *lockdb_path;
+
+} dav_fs_server_conf;
+
+extern module AP_MODULE_DECLARE_DATA dav_fs_module;
+
+const char *dav_get_lockdb_path(const request_rec *r)
+{
+ dav_fs_server_conf *conf;
+
+ conf = ap_get_module_config(r->server->module_config, &dav_fs_module);
+ return conf->lockdb_path;
+}
+
+static void *dav_fs_create_server_config(apr_pool_t *p, server_rec *s)
+{
+ return apr_pcalloc(p, sizeof(dav_fs_server_conf));
+}
+
+static void *dav_fs_merge_server_config(apr_pool_t *p,
+ void *base, void *overrides)
+{
+ dav_fs_server_conf *parent = base;
+ dav_fs_server_conf *child = overrides;
+ dav_fs_server_conf *newconf;
+
+ newconf = apr_pcalloc(p, sizeof(*newconf));
+
+ newconf->lockdb_path =
+ child->lockdb_path ? child->lockdb_path : parent->lockdb_path;
+
+ return newconf;
+}
+
+/*
+ * Command handler for the DAVLockDB directive, which is TAKE1
+ */
+static const char *dav_fs_cmd_davlockdb(cmd_parms *cmd, void *config,
+ const char *arg1)
+{
+ dav_fs_server_conf *conf;
+ conf = ap_get_module_config(cmd->server->module_config,
+ &dav_fs_module);
+ conf->lockdb_path = ap_server_root_relative(cmd->pool, arg1);
+
+ if (!conf->lockdb_path) {
+ return apr_pstrcat(cmd->pool, "Invalid DAVLockDB path ",
+ arg1, NULL);
+ }
+
+ return NULL;
+}
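+
+/*
+ * Illustrative httpd.conf usage (the path shown is only an example):
+ *
+ *   DAVLockDB /usr/local/apache2/var/DAVLock
+ *
+ * Because the argument is run through ap_server_root_relative(), a relative
+ * path such as "var/DAVLock" is resolved against ServerRoot.
+ */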
+
+static const command_rec dav_fs_cmds[] =
+{
+ /* per server */
+ AP_INIT_TAKE1("DAVLockDB", dav_fs_cmd_davlockdb, NULL, RSRC_CONF,
+ "specify a lock database"),
+
+ { NULL }
+};
+
+static void register_hooks(apr_pool_t *p)
+{
+ dav_hook_gather_propsets(dav_fs_gather_propsets, NULL, NULL,
+ APR_HOOK_MIDDLE);
+ dav_hook_find_liveprop(dav_fs_find_liveprop, NULL, NULL, APR_HOOK_MIDDLE);
+ dav_hook_insert_all_liveprops(dav_fs_insert_all_liveprops, NULL, NULL,
+ APR_HOOK_MIDDLE);
+
+ dav_fs_register(p);
+}
+
+module AP_MODULE_DECLARE_DATA dav_fs_module =
+{
+ STANDARD20_MODULE_STUFF,
+    NULL, /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ dav_fs_create_server_config, /* server config */
+ dav_fs_merge_server_config, /* merge server config */
+ dav_fs_cmds, /* command table */
+ register_hooks, /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/fs/mod_dav_fs.dsp b/rubbos/app/httpd-2.0.64/modules/dav/fs/mod_dav_fs.dsp
new file mode 100644
index 00000000..5c2239c9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/fs/mod_dav_fs.dsp
@@ -0,0 +1,152 @@
+# Microsoft Developer Studio Project File - Name="mod_dav_fs" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_dav_fs - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_dav_fs.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_dav_fs.mak" CFG="mod_dav_fs - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_dav_fs - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_dav_fs - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_dav_fs - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../../include" /I "../../../srclib/apr/include" /I "../../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_dav_fs_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /out:"Release/mod_dav_fs.so" /base:@..\..\..\os\win32\BaseAddr.ref,mod_dav_fs.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_dav_fs.so" /base:@..\..\..\os\win32\BaseAddr.ref,mod_dav_fs.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_dav_fs - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../../include" /I "../../../srclib/apr/include" /I "../../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_dav_fs_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_dav_fs.so" /base:@..\..\..\os\win32\BaseAddr.ref,mod_dav_fs.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_dav_fs.so" /base:@..\..\..\os\win32\BaseAddr.ref,mod_dav_fs.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_dav_fs - Win32 Release"
+# Name "mod_dav_fs - Win32 Debug"
+# Begin Group "Source Files"
+
+# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;hpj;bat;for;f90"
+# Begin Source File
+
+SOURCE=.\dbm.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\lock.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_dav_fs.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\repos.c
+# End Source File
+# End Group
+# Begin Group "Header Files"
+
+# PROP Default_Filter "h;hpp;hxx;hm;inl;fi;fd"
+# Begin Source File
+
+SOURCE=.\repos.h
+# End Source File
+# End Group
+# Begin Source File
+
+SOURCE=.\mod_dav_fs.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_dav_fs - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\..\build\win32\win32ver.awk
+
+".\mod_dav_fs.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../../build/win32/win32ver.awk mod_dav_fs.so "dav_fs_module for Apache" ../../../include/ap_release.h > .\mod_dav_fs.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_dav_fs - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\..\build\win32\win32ver.awk
+
+".\mod_dav_fs.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../../build/win32/win32ver.awk mod_dav_fs.so "dav_fs_module for Apache" ../../../include/ap_release.h > .\mod_dav_fs.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/fs/modules.mk b/rubbos/app/httpd-2.0.64/modules/dav/fs/modules.mk
new file mode 100644
index 00000000..ceb52a1b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/fs/modules.mk
@@ -0,0 +1,3 @@
+DISTCLEAN_TARGETS = modules.mk
+static =
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/fs/repos.c b/rubbos/app/httpd-2.0.64/modules/dav/fs/repos.c
new file mode 100644
index 00000000..bf8da8d0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/fs/repos.c
@@ -0,0 +1,2130 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+** DAV filesystem-based repository provider
+*/
+
+#include "apr.h"
+#include "apr_file_io.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+
+#if APR_HAVE_STDIO_H
+#include <stdio.h> /* for sprintf() */
+#endif
+
+#include "httpd.h"
+#include "http_log.h"
+#include "http_protocol.h" /* for ap_set_* (in dav_fs_set_headers) */
+#include "http_request.h" /* for ap_update_mtime() */
+
+#include "mod_dav.h"
+#include "repos.h"
+
+
+/* to assist in debugging mod_dav's GET handling */
+#define DEBUG_GET_HANDLER 0
+
+#define DAV_FS_COPY_BLOCKSIZE 16384 /* copy 16k at a time */
+
+/* context needed to identify a resource */
+struct dav_resource_private {
+ apr_pool_t *pool; /* memory storage pool associated with request */
+ const char *pathname; /* full pathname to resource */
+ apr_finfo_t finfo; /* filesystem info */
+};
+
+/* private context for doing a filesystem walk */
+typedef struct {
+ /* the input walk parameters */
+ const dav_walk_params *params;
+
+ /* reused as we walk */
+ dav_walk_resource wres;
+
+ dav_resource res1;
+ dav_resource_private info1;
+ dav_buffer path1;
+ dav_buffer uri_buf;
+
+ /* MOVE/COPY need a secondary path */
+ dav_resource res2;
+ dav_resource_private info2;
+ dav_buffer path2;
+
+ dav_buffer locknull_buf;
+
+} dav_fs_walker_context;
+
+typedef struct {
+ int is_move; /* is this a MOVE? */
+ dav_buffer work_buf; /* handy buffer for copymove_file() */
+
+ /* CALLBACK: this is a secondary resource managed specially for us */
+ const dav_resource *res_dst;
+
+ /* copied from dav_walk_params (they are invariant across the walk) */
+ const dav_resource *root;
+ apr_pool_t *pool;
+
+} dav_fs_copymove_walk_ctx;
+
+/* an internal WALKTYPE to walk hidden files (the .DAV directory) */
+#define DAV_WALKTYPE_HIDDEN 0x4000
+
+/* an internal WALKTYPE to call collections (again) after their contents */
+#define DAV_WALKTYPE_POSTFIX 0x8000
+
+#define DAV_CALLTYPE_POSTFIX 1000 /* a private call type */
+
+
+/* pull this in from the other source file */
+extern const dav_hooks_locks dav_hooks_locks_fs;
+
+/* forward-declare the hook structures */
+static const dav_hooks_repository dav_hooks_repository_fs;
+static const dav_hooks_liveprop dav_hooks_liveprop_fs;
+
+/*
+** The namespace URIs that we use. This list and the enumeration must
+** stay in sync.
+*/
+static const char * const dav_fs_namespace_uris[] =
+{
+ "DAV:",
+ "http://apache.org/dav/props/",
+
+ NULL /* sentinel */
+};
+enum {
+ DAV_FS_URI_DAV, /* the DAV: namespace URI */
+ DAV_FS_URI_MYPROPS /* the namespace URI for our custom props */
+};
+
+/*
+** Does this platform support an executable flag?
+**
+** ### need a way to portably abstract this query
+*/
+#ifndef WIN32
+#define DAV_FS_HAS_EXECUTABLE
+#endif
+
+/*
+** The single property that we define (in the DAV_FS_URI_MYPROPS namespace)
+*/
+#define DAV_PROPID_FS_executable 1
+
+static const dav_liveprop_spec dav_fs_props[] =
+{
+ /* standard DAV properties */
+ {
+ DAV_FS_URI_DAV,
+ "creationdate",
+ DAV_PROPID_creationdate,
+ 0
+ },
+ {
+ DAV_FS_URI_DAV,
+ "getcontentlength",
+ DAV_PROPID_getcontentlength,
+ 0
+ },
+ {
+ DAV_FS_URI_DAV,
+ "getetag",
+ DAV_PROPID_getetag,
+ 0
+ },
+ {
+ DAV_FS_URI_DAV,
+ "getlastmodified",
+ DAV_PROPID_getlastmodified,
+ 0
+ },
+
+ /* our custom properties */
+ {
+ DAV_FS_URI_MYPROPS,
+ "executable",
+ DAV_PROPID_FS_executable,
+        0 /* handled specially in dav_fs_is_writable */
+ },
+
+ { 0 } /* sentinel */
+};
+
+static const dav_liveprop_group dav_fs_liveprop_group =
+{
+ dav_fs_props,
+ dav_fs_namespace_uris,
+ &dav_hooks_liveprop_fs
+};
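+
+/*
+** Illustrative only: the custom "executable" property above surfaces in a
+** PROPFIND response roughly as
+**
+**   <D:prop xmlns:ns1="http://apache.org/dav/props/">
+**     <ns1:executable>T</ns1:executable>
+**   </D:prop>
+**
+** with a value of "T" or "F" reflecting the file's execute permission on
+** platforms where DAV_FS_HAS_EXECUTABLE is defined.
+*/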
+
+
+/* define the dav_stream structure for our use */
+struct dav_stream {
+ apr_pool_t *p;
+ apr_file_t *f;
+ const char *pathname; /* we may need to remove it at close time */
+};
+
+/* returns an appropriate HTTP status code given an APR status code for a
+ * failed I/O operation. ### use something besides 500? */
+#define MAP_IO2HTTP(e) (APR_STATUS_IS_ENOSPC(e) ? HTTP_INSUFFICIENT_STORAGE : \
+ HTTP_INTERNAL_SERVER_ERROR)
+
+/* forward declaration for internal treewalkers */
+static dav_error * dav_fs_walk(const dav_walk_params *params, int depth,
+ dav_response **response);
+static dav_error * dav_fs_internal_walk(const dav_walk_params *params,
+ int depth, int is_move,
+ const dav_resource *root_dst,
+ dav_response **response);
+
+/* --------------------------------------------------------------------
+**
+** PRIVATE REPOSITORY FUNCTIONS
+*/
+apr_pool_t *dav_fs_pool(const dav_resource *resource)
+{
+ return resource->info->pool;
+}
+
+const char *dav_fs_pathname(const dav_resource *resource)
+{
+ return resource->info->pathname;
+}
+
+dav_error * dav_fs_dir_file_name(
+ const dav_resource *resource,
+ const char **dirpath_p,
+ const char **fname_p)
+{
+ dav_resource_private *ctx = resource->info;
+
+ if (resource->collection) {
+ *dirpath_p = ctx->pathname;
+ if (fname_p != NULL)
+ *fname_p = NULL;
+ }
+ else {
+ const char *testpath, *rootpath;
+ char *dirpath = ap_make_dirstr_parent(ctx->pool, ctx->pathname);
+ apr_size_t dirlen = strlen(dirpath);
+ apr_status_t rv = APR_SUCCESS;
+
+ testpath = dirpath;
+ if (dirlen > 0) {
+ rv = apr_filepath_root(&rootpath, &testpath, 0, ctx->pool);
+ }
+
+ /* remove trailing slash from dirpath, unless it's a root path
+ */
+ if ((rv == APR_SUCCESS && testpath && *testpath)
+ || rv == APR_ERELATIVE) {
+ if (dirpath[dirlen - 1] == '/') {
+ dirpath[dirlen - 1] = '\0';
+ }
+ }
+
+ /* ###: Looks like a response could be appropriate
+ *
+ * APR_SUCCESS here tells us the dir is a root
+ * APR_ERELATIVE told us we had no root (ok)
+ * APR_EINCOMPLETE an incomplete testpath told us
+ * there was no -file- name here!
+ * APR_EBADPATH or other errors tell us this file
+ * path is undecipherable
+ */
+
+ if (rv == APR_SUCCESS || rv == APR_ERELATIVE) {
+ *dirpath_p = dirpath;
+ if (fname_p != NULL)
+ *fname_p = ctx->pathname + dirlen;
+ }
+ else {
+ return dav_new_error(ctx->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "An incomplete/bad path was found in "
+ "dav_fs_dir_file_name.");
+ }
+ }
+
+ return NULL;
+}
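+
+/*
+** For illustration (hypothetical paths): given a non-collection resource
+** whose pathname is "/var/www/dav/dir/file.txt", the split above yields
+** *dirpath_p == "/var/www/dav/dir" (trailing slash stripped) and
+** *fname_p == "file.txt". For a collection, *dirpath_p is the pathname
+** itself and *fname_p (if requested) is set to NULL.
+*/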
+
+/* Note: picked up from ap_gm_timestr_822() */
+/* NOTE: buf must be at least DAV_TIMEBUF_SIZE chars in size */
+static void dav_format_time(int style, apr_time_t sec, char *buf)
+{
+ apr_time_exp_t tms;
+
+ /* ### what to do if fails? */
+ (void) apr_time_exp_gmt(&tms, sec);
+
+ if (style == DAV_STYLE_ISO8601) {
+ /* ### should we use "-00:00" instead of "Z" ?? */
+
+ /* 20 chars plus null term */
+ sprintf(buf, "%.4d-%.2d-%.2dT%.2d:%.2d:%.2dZ",
+ tms.tm_year + 1900, tms.tm_mon + 1, tms.tm_mday,
+ tms.tm_hour, tms.tm_min, tms.tm_sec);
+ return;
+ }
+
+ /* RFC 822 date format; as strftime '%a, %d %b %Y %T GMT' */
+
+ /* 29 chars plus null term */
+ sprintf(buf,
+ "%s, %.2d %s %d %.2d:%.2d:%.2d GMT",
+ apr_day_snames[tms.tm_wday],
+ tms.tm_mday, apr_month_snames[tms.tm_mon],
+ tms.tm_year + 1900,
+ tms.tm_hour, tms.tm_min, tms.tm_sec);
+}
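+
+/*
+** Example output (illustrative): for one and the same apr_time_t the two
+** styles produce
+**
+**   DAV_STYLE_ISO8601:    "2005-02-28T15:30:00Z"
+**   RFC 822 (otherwise):  "Mon, 28 Feb 2005 15:30:00 GMT"
+**
+** Both are written with sprintf() into the caller-provided buffer, which
+** must be at least DAV_TIMEBUF_SIZE bytes.
+*/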
+
+static dav_error * dav_fs_copymove_file(
+ int is_move,
+ apr_pool_t * p,
+ const char *src,
+ const char *dst,
+ dav_buffer *pbuf)
+{
+ dav_buffer work_buf = { 0 };
+ apr_file_t *inf = NULL;
+ apr_file_t *outf = NULL;
+ apr_status_t status;
+
+ if (pbuf == NULL)
+ pbuf = &work_buf;
+
+ dav_set_bufsize(p, pbuf, DAV_FS_COPY_BLOCKSIZE);
+
+ if ((apr_file_open(&inf, src, APR_READ | APR_BINARY, APR_OS_DEFAULT, p))
+ != APR_SUCCESS) {
+ /* ### use something besides 500? */
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not open file for reading");
+ }
+
+ /* ### do we need to deal with the umask? */
+ status = apr_file_open(&outf, dst, APR_WRITE | APR_CREATE | APR_TRUNCATE
+ | APR_BINARY, APR_OS_DEFAULT, p);
+ if (status != APR_SUCCESS) {
+ apr_file_close(inf);
+
+ return dav_new_error(p, MAP_IO2HTTP(status), 0,
+ "Could not open file for writing");
+ }
+
+ while (1) {
+ apr_size_t len = DAV_FS_COPY_BLOCKSIZE;
+
+ status = apr_file_read(inf, pbuf->buf, &len);
+ if (status != APR_SUCCESS && status != APR_EOF) {
+ apr_file_close(inf);
+ apr_file_close(outf);
+
+ if (apr_file_remove(dst, p) != APR_SUCCESS) {
+ /* ### ACK! Inconsistent state... */
+
+ /* ### use something besides 500? */
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not delete output after read "
+ "failure. Server is now in an "
+ "inconsistent state.");
+ }
+
+ /* ### use something besides 500? */
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not read input file");
+ }
+
+ if (status == APR_EOF)
+ break;
+
+ /* write any bytes that were read */
+ status = apr_file_write_full(outf, pbuf->buf, len, NULL);
+ if (status != APR_SUCCESS) {
+ apr_file_close(inf);
+ apr_file_close(outf);
+
+ if (apr_file_remove(dst, p) != APR_SUCCESS) {
+ /* ### ACK! Inconsistent state... */
+
+ /* ### use something besides 500? */
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not delete output after write "
+ "failure. Server is now in an "
+ "inconsistent state.");
+ }
+
+ return dav_new_error(p, MAP_IO2HTTP(status), 0,
+ "Could not write output file");
+ }
+ }
+
+ apr_file_close(inf);
+ apr_file_close(outf);
+
+ if (is_move && apr_file_remove(src, p) != APR_SUCCESS) {
+ dav_error *err;
+ int save_errno = errno; /* save the errno that got us here */
+
+ if (apr_file_remove(dst, p) != APR_SUCCESS) {
+ /* ### ACK. this creates an inconsistency. do more!? */
+
+ /* ### use something besides 500? */
+ /* Note that we use the latest errno */
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not remove source or destination "
+ "file. Server is now in an inconsistent "
+ "state.");
+ }
+
+ /* ### use something besides 500? */
+ err = dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not remove source file after move. "
+ "Destination was removed to ensure consistency.");
+ err->save_errno = save_errno;
+ return err;
+ }
+
+ return NULL;
+}
+
+/* copy/move a file from within a state dir to another state dir */
+/* ### need more buffers to replace the pool argument */
+static dav_error * dav_fs_copymove_state(
+ int is_move,
+ apr_pool_t * p,
+ const char *src_dir, const char *src_file,
+ const char *dst_dir, const char *dst_file,
+ dav_buffer *pbuf)
+{
+ apr_finfo_t src_finfo; /* finfo for source file */
+ apr_finfo_t dst_state_finfo; /* finfo for STATE directory */
+ apr_status_t rv;
+ const char *src;
+ const char *dst;
+
+ /* build the propset pathname for the source file */
+ src = apr_pstrcat(p, src_dir, "/" DAV_FS_STATE_DIR "/", src_file, NULL);
+
+    /* if the source file doesn't exist, there is nothing to copy/move */
+ rv = apr_stat(&src_finfo, src, APR_FINFO_NORM, p);
+ if (rv != APR_SUCCESS && rv != APR_INCOMPLETE) {
+ return NULL;
+ }
+
+ /* build the pathname for the destination state dir */
+ dst = apr_pstrcat(p, dst_dir, "/" DAV_FS_STATE_DIR, NULL);
+
+ /* ### do we need to deal with the umask? */
+
+ /* ensure that it exists */
+ rv = apr_dir_make(dst, APR_OS_DEFAULT, p);
+ if (rv != APR_SUCCESS) {
+ if (!APR_STATUS_IS_EEXIST(rv)) {
+ /* ### use something besides 500? */
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not create internal state directory");
+ }
+ }
+
+ /* get info about the state directory */
+ rv = apr_stat(&dst_state_finfo, dst, APR_FINFO_NORM, p);
+ if (rv != APR_SUCCESS && rv != APR_INCOMPLETE) {
+ /* Ack! Where'd it go? */
+ /* ### use something besides 500? */
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "State directory disappeared");
+ }
+
+ /* The mkdir() may have failed because a *file* exists there already */
+ if (dst_state_finfo.filetype != APR_DIR) {
+ /* ### try to recover by deleting this file? (and mkdir again) */
+ /* ### use something besides 500? */
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "State directory is actually a file");
+ }
+
+ /* append the target file to the state directory pathname */
+ dst = apr_pstrcat(p, dst, "/", dst_file, NULL);
+
+ /* copy/move the file now */
+ if (is_move && src_finfo.device == dst_state_finfo.device) {
+ /* simple rename is possible since it is on the same device */
+ if (apr_file_rename(src, dst, p) != APR_SUCCESS) {
+ /* ### use something besides 500? */
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not move state file.");
+ }
+ }
+ else
+ {
+ /* gotta copy (and delete) */
+ return dav_fs_copymove_file(is_move, p, src, dst, pbuf);
+ }
+
+ return NULL;
+}
+
+static dav_error *dav_fs_copymoveset(int is_move, apr_pool_t *p,
+ const dav_resource *src,
+ const dav_resource *dst,
+ dav_buffer *pbuf)
+{
+ const char *src_dir;
+ const char *src_file;
+ const char *src_state1;
+ const char *src_state2;
+ const char *dst_dir;
+ const char *dst_file;
+ const char *dst_state1;
+ const char *dst_state2;
+ dav_error *err;
+
+ /* Get directory and filename for resources */
+ /* ### should test these result values... */
+ (void) dav_fs_dir_file_name(src, &src_dir, &src_file);
+ (void) dav_fs_dir_file_name(dst, &dst_dir, &dst_file);
+
+ /* Get the corresponding state files for each resource */
+ dav_dbm_get_statefiles(p, src_file, &src_state1, &src_state2);
+ dav_dbm_get_statefiles(p, dst_file, &dst_state1, &dst_state2);
+#if DAV_DEBUG
+ if ((src_state2 != NULL && dst_state2 == NULL) ||
+ (src_state2 == NULL && dst_state2 != NULL)) {
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "DESIGN ERROR: dav_dbm_get_statefiles() "
+ "returned inconsistent results.");
+ }
+#endif
+
+ err = dav_fs_copymove_state(is_move, p,
+ src_dir, src_state1,
+ dst_dir, dst_state1,
+ pbuf);
+
+ if (err == NULL && src_state2 != NULL) {
+ err = dav_fs_copymove_state(is_move, p,
+ src_dir, src_state2,
+ dst_dir, dst_state2,
+ pbuf);
+
+ if (err != NULL) {
+ /* ### CRAP. inconsistency. */
+ /* ### should perform some cleanup at the target if we still
+ ### have the original files */
+
+ /* Change the error to reflect the bad server state. */
+ err->status = HTTP_INTERNAL_SERVER_ERROR;
+ err->desc =
+ "Could not fully copy/move the properties. "
+ "The server is now in an inconsistent state.";
+ }
+ }
+
+ return err;
+}
+
+static dav_error *dav_fs_deleteset(apr_pool_t *p, const dav_resource *resource)
+{
+ const char *dirpath;
+ const char *fname;
+ const char *state1;
+ const char *state2;
+ const char *pathname;
+ apr_status_t status;
+
+ /* Get directory, filename, and state-file names for the resource */
+ /* ### should test this result value... */
+ (void) dav_fs_dir_file_name(resource, &dirpath, &fname);
+ dav_dbm_get_statefiles(p, fname, &state1, &state2);
+
+ /* build the propset pathname for the file */
+ pathname = apr_pstrcat(p,
+ dirpath,
+ "/" DAV_FS_STATE_DIR "/",
+ state1,
+ NULL);
+
+ /* note: we may get ENOENT if the state dir is not present */
+ if ((status = apr_file_remove(pathname, p)) != APR_SUCCESS
+ && !APR_STATUS_IS_ENOENT(status)) {
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not remove properties.");
+ }
+
+ if (state2 != NULL) {
+ /* build the propset pathname for the file */
+ pathname = apr_pstrcat(p,
+ dirpath,
+ "/" DAV_FS_STATE_DIR "/",
+ state2,
+ NULL);
+
+ if ((status = apr_file_remove(pathname, p)) != APR_SUCCESS
+ && !APR_STATUS_IS_ENOENT(status)) {
+ /* ### CRAP. only removed half. */
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not fully remove properties. "
+ "The server is now in an inconsistent "
+ "state.");
+ }
+ }
+
+ return NULL;
+}
+
+/* --------------------------------------------------------------------
+**
+** REPOSITORY HOOK FUNCTIONS
+*/
+
+static dav_error * dav_fs_get_resource(
+ request_rec *r,
+ const char *root_dir,
+ const char *label,
+ int use_checked_in,
+ dav_resource **result_resource)
+{
+ dav_resource_private *ctx;
+ dav_resource *resource;
+ char *s;
+ char *filename;
+ apr_size_t len;
+
+ /* ### optimize this into a single allocation! */
+
+ /* Create private resource context descriptor */
+ ctx = apr_pcalloc(r->pool, sizeof(*ctx));
+ ctx->finfo = r->finfo;
+
+ /* ### this should go away */
+ ctx->pool = r->pool;
+
+ /* Preserve case on OSes which fold canonical filenames */
+#if 0
+ /* ### not available in Apache 2.0 yet */
+ filename = r->case_preserved_filename;
+#else
+ filename = r->filename;
+#endif
+
+ /*
+ ** If there is anything in the path_info, then this indicates that the
+ ** entire path was not used to specify the file/dir. We want to append
+ ** it onto the filename so that we get a "valid" pathname for null
+ ** resources.
+ */
+ s = apr_pstrcat(r->pool, filename, r->path_info, NULL);
+
+ /* make sure the pathname does not have a trailing "/" */
+ len = strlen(s);
+ if (len > 1 && s[len - 1] == '/') {
+ s[len - 1] = '\0';
+ }
+ ctx->pathname = s;
+
+ /* Create resource descriptor */
+ resource = apr_pcalloc(r->pool, sizeof(*resource));
+ resource->type = DAV_RESOURCE_TYPE_REGULAR;
+ resource->info = ctx;
+ resource->hooks = &dav_hooks_repository_fs;
+ resource->pool = r->pool;
+
+ /* make sure the URI does not have a trailing "/" */
+ len = strlen(r->uri);
+ if (len > 1 && r->uri[len - 1] == '/') {
+ s = apr_pstrdup(r->pool, r->uri);
+ s[len - 1] = '\0';
+ resource->uri = s;
+ }
+ else {
+ resource->uri = r->uri;
+ }
+
+ if (r->finfo.filetype != 0) {
+ resource->exists = 1;
+ resource->collection = r->finfo.filetype == APR_DIR;
+
+ /* unused info in the URL will indicate a null resource */
+
+ if (r->path_info != NULL && *r->path_info != '\0') {
+ if (resource->collection) {
+ /* only a trailing "/" is allowed */
+ if (*r->path_info != '/' || r->path_info[1] != '\0') {
+
+ /*
+ ** This URL/filename represents a locknull resource or
+ ** possibly a destination of a MOVE/COPY
+ */
+ resource->exists = 0;
+ resource->collection = 0;
+ }
+ }
+ else
+ {
+ /*
+ ** The base of the path refers to a file -- nothing should
+ ** be in path_info. The resource is simply an error: it
+ ** can't be a null or a locknull resource.
+ */
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST, 0,
+ "The URL contains extraneous path "
+ "components. The resource could not "
+ "be identified.");
+ }
+
+ /* retain proper integrity across the structures */
+ if (!resource->exists) {
+ ctx->finfo.filetype = 0;
+ }
+ }
+ }
+
+ *result_resource = resource;
+ return NULL;
+}
+
+static dav_error * dav_fs_get_parent_resource(const dav_resource *resource,
+ dav_resource **result_parent)
+{
+ dav_resource_private *ctx = resource->info;
+ dav_resource_private *parent_ctx;
+ dav_resource *parent_resource;
+ apr_status_t rv;
+ char *dirpath;
+ const char *testroot;
+ const char *testpath;
+
+ /* If we're at the root of the URL space, then there is no parent. */
+ if (strcmp(resource->uri, "/") == 0) {
+ *result_parent = NULL;
+ return NULL;
+ }
+
+ /* If given resource is root, then there is no parent.
+ * Unless we can retrieve the filepath root, this is
+     * intended to fail. If we split the root and
+ * no path info remains, then we also fail.
+ */
+ testpath = ctx->pathname;
+ rv = apr_filepath_root(&testroot, &testpath, 0, ctx->pool);
+ if ((rv != APR_SUCCESS && rv != APR_ERELATIVE)
+ || !testpath || !*testpath) {
+ *result_parent = NULL;
+ return NULL;
+ }
+
+ /* ### optimize this into a single allocation! */
+
+ /* Create private resource context descriptor */
+ parent_ctx = apr_pcalloc(ctx->pool, sizeof(*parent_ctx));
+
+ /* ### this should go away */
+ parent_ctx->pool = ctx->pool;
+
+ dirpath = ap_make_dirstr_parent(ctx->pool, ctx->pathname);
+ if (strlen(dirpath) > 1 && dirpath[strlen(dirpath) - 1] == '/')
+ dirpath[strlen(dirpath) - 1] = '\0';
+ parent_ctx->pathname = dirpath;
+
+ parent_resource = apr_pcalloc(ctx->pool, sizeof(*parent_resource));
+ parent_resource->info = parent_ctx;
+ parent_resource->collection = 1;
+ parent_resource->hooks = &dav_hooks_repository_fs;
+ parent_resource->pool = resource->pool;
+
+ if (resource->uri != NULL) {
+ char *uri = ap_make_dirstr_parent(ctx->pool, resource->uri);
+ if (strlen(uri) > 1 && uri[strlen(uri) - 1] == '/')
+ uri[strlen(uri) - 1] = '\0';
+ parent_resource->uri = uri;
+ }
+
+ rv = apr_stat(&parent_ctx->finfo, parent_ctx->pathname,
+ APR_FINFO_NORM, ctx->pool);
+ if (rv == APR_SUCCESS || rv == APR_INCOMPLETE) {
+ parent_resource->exists = 1;
+ }
+
+ *result_parent = parent_resource;
+ return NULL;
+}
+
+static int dav_fs_is_same_resource(
+ const dav_resource *res1,
+ const dav_resource *res2)
+{
+ dav_resource_private *ctx1 = res1->info;
+ dav_resource_private *ctx2 = res2->info;
+
+ if (res1->hooks != res2->hooks)
+ return 0;
+
+ if ((ctx1->finfo.filetype != 0) && (ctx2->finfo.filetype != 0)
+ && (ctx1->finfo.valid & ctx2->finfo.valid & APR_FINFO_INODE)) {
+ return ctx1->finfo.inode == ctx2->finfo.inode;
+ }
+ else {
+ return strcmp(ctx1->pathname, ctx2->pathname) == 0;
+ }
+}
+
+static int dav_fs_is_parent_resource(
+ const dav_resource *res1,
+ const dav_resource *res2)
+{
+ dav_resource_private *ctx1 = res1->info;
+ dav_resource_private *ctx2 = res2->info;
+ apr_size_t len1 = strlen(ctx1->pathname);
+ apr_size_t len2;
+
+ if (res1->hooks != res2->hooks)
+ return 0;
+
+ /* it is safe to use ctx2 now */
+ len2 = strlen(ctx2->pathname);
+
+ return (len2 > len1
+ && memcmp(ctx1->pathname, ctx2->pathname, len1) == 0
+ && ctx2->pathname[len1] == '/');
+}
+
+static dav_error * dav_fs_open_stream(const dav_resource *resource,
+ dav_stream_mode mode,
+ dav_stream **stream)
+{
+ apr_pool_t *p = resource->info->pool;
+ dav_stream *ds = apr_pcalloc(p, sizeof(*ds));
+ apr_int32_t flags;
+ apr_status_t rv;
+
+ switch (mode) {
+ default:
+ flags = APR_READ | APR_BINARY;
+ break;
+
+ case DAV_MODE_WRITE_TRUNC:
+ flags = APR_WRITE | APR_CREATE | APR_TRUNCATE | APR_BINARY;
+ break;
+ case DAV_MODE_WRITE_SEEKABLE:
+ flags = APR_WRITE | APR_CREATE | APR_BINARY;
+ break;
+ }
+
+ ds->p = p;
+ ds->pathname = resource->info->pathname;
+ rv = apr_file_open(&ds->f, ds->pathname, flags, APR_OS_DEFAULT, ds->p);
+ if (rv != APR_SUCCESS) {
+ return dav_new_error(p, MAP_IO2HTTP(rv), 0,
+ "An error occurred while opening a resource.");
+ }
+
+ /* (APR registers cleanups for the fd with the pool) */
+
+ *stream = ds;
+ return NULL;
+}
+
+static dav_error * dav_fs_close_stream(dav_stream *stream, int commit)
+{
+ apr_file_close(stream->f);
+
+ if (!commit) {
+ if (apr_file_remove(stream->pathname, stream->p) != APR_SUCCESS) {
+ /* ### use a better description? */
+ return dav_new_error(stream->p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "There was a problem removing (rolling "
+ "back) the resource "
+ "when it was being closed.");
+ }
+ }
+
+ return NULL;
+}
+
+static dav_error * dav_fs_write_stream(dav_stream *stream,
+ const void *buf, apr_size_t bufsize)
+{
+ apr_status_t status;
+
+ status = apr_file_write_full(stream->f, buf, bufsize, NULL);
+ if (APR_STATUS_IS_ENOSPC(status)) {
+ return dav_new_error(stream->p, HTTP_INSUFFICIENT_STORAGE, 0,
+ "There is not enough storage to write to "
+ "this resource.");
+ }
+ else if (status != APR_SUCCESS) {
+ /* ### use something besides 500? */
+ return dav_new_error(stream->p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "An error occurred while writing to a "
+ "resource.");
+ }
+ return NULL;
+}
+
+static dav_error * dav_fs_seek_stream(dav_stream *stream, apr_off_t abs_pos)
+{
+ if (apr_file_seek(stream->f, APR_SET, &abs_pos) != APR_SUCCESS) {
+        /* ### should check whether apr_file_seek set abs_pos to the
+         * correct position? */
+ /* ### use something besides 500? */
+ return dav_new_error(stream->p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not seek to specified position in the "
+ "resource.");
+ }
+ return NULL;
+}
+
+
+#if DEBUG_GET_HANDLER
+
+/* only define set_headers() and deliver() for debug purposes */
+
+
+static dav_error * dav_fs_set_headers(request_rec *r,
+ const dav_resource *resource)
+{
+ /* ### this function isn't really used since we have a get_pathname */
+ if (!resource->exists)
+ return NULL;
+
+ /* make sure the proper mtime is in the request record */
+ ap_update_mtime(r, resource->info->finfo.mtime);
+
+ /* ### note that these use r->filename rather than <resource> */
+ ap_set_last_modified(r);
+ ap_set_etag(r);
+
+ /* we accept byte-ranges */
+ apr_table_setn(r->headers_out, "Accept-Ranges", "bytes");
+
+ /* set up the Content-Length header */
+ ap_set_content_length(r, resource->info->finfo.size);
+
+ /* ### how to set the content type? */
+ /* ### until this is resolved, the Content-Type header is busted */
+
+ return NULL;
+}
+
+static dav_error * dav_fs_deliver(const dav_resource *resource,
+ ap_filter_t *output)
+{
+ apr_pool_t *pool = resource->pool;
+ apr_bucket_brigade *bb;
+ apr_file_t *fd;
+ apr_status_t status;
+ apr_bucket *bkt;
+
+ /* Check resource type */
+ if (resource->type != DAV_RESOURCE_TYPE_REGULAR
+ && resource->type != DAV_RESOURCE_TYPE_VERSION
+ && resource->type != DAV_RESOURCE_TYPE_WORKING) {
+ return dav_new_error(pool, HTTP_CONFLICT, 0,
+ "Cannot GET this type of resource.");
+ }
+ if (resource->collection) {
+ return dav_new_error(pool, HTTP_CONFLICT, 0,
+ "There is no default response to GET for a "
+ "collection.");
+ }
+
+ if ((status = apr_file_open(&fd, resource->info->pathname,
+ APR_READ | APR_BINARY, 0,
+ pool)) != APR_SUCCESS) {
+ return dav_new_error(pool, HTTP_FORBIDDEN, 0,
+ "File permissions deny server access.");
+ }
+
+ bb = apr_brigade_create(pool, output->c->bucket_alloc);
+
+ /* ### this does not handle large files. but this is test code anyway */
+ bkt = apr_bucket_file_create(fd, 0,
+ (apr_size_t)resource->info->finfo.size,
+ pool, output->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, bkt);
+
+ bkt = apr_bucket_eos_create(output->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, bkt);
+
+ if ((status = ap_pass_brigade(output, bb)) != APR_SUCCESS) {
+ return dav_new_error(pool, HTTP_FORBIDDEN, 0,
+ "Could not write contents to filter.");
+ }
+
+ return NULL;
+}
+
+#endif /* DEBUG_GET_HANDLER */
+
+
+static dav_error * dav_fs_create_collection(dav_resource *resource)
+{
+ dav_resource_private *ctx = resource->info;
+ apr_status_t status;
+
+ status = apr_dir_make(ctx->pathname, APR_OS_DEFAULT, ctx->pool);
+ if (APR_STATUS_IS_ENOSPC(status)) {
+ return dav_new_error(ctx->pool, HTTP_INSUFFICIENT_STORAGE, 0,
+ "There is not enough storage to create "
+ "this collection.");
+ }
+ else if (APR_STATUS_IS_ENOENT(status)) {
+ return dav_new_error(ctx->pool, HTTP_CONFLICT, 0,
+ "Cannot create collection; intermediate "
+ "collection does not exist.");
+ }
+ else if (status != APR_SUCCESS) {
+ /* ### refine this error message? */
+ return dav_new_error(ctx->pool, HTTP_FORBIDDEN, 0,
+ "Unable to create collection.");
+ }
+
+ /* update resource state to show it exists as a collection */
+ resource->exists = 1;
+ resource->collection = 1;
+
+ return NULL;
+}
+
+static dav_error * dav_fs_copymove_walker(dav_walk_resource *wres,
+ int calltype)
+{
+ dav_fs_copymove_walk_ctx *ctx = wres->walk_ctx;
+ dav_resource_private *srcinfo = wres->resource->info;
+ dav_resource_private *dstinfo = ctx->res_dst->info;
+ dav_error *err = NULL;
+
+ if (wres->resource->collection) {
+ if (calltype == DAV_CALLTYPE_POSTFIX) {
+ /* Postfix call for MOVE. delete the source dir.
+ * Note: when copying, we do not enable the postfix-traversal.
+ */
+ /* ### we are ignoring any error here; what should we do? */
+ (void) apr_dir_remove(srcinfo->pathname, ctx->pool);
+ }
+ else {
+ /* copy/move of a collection. Create the new, target collection */
+ if (apr_dir_make(dstinfo->pathname, APR_OS_DEFAULT,
+ ctx->pool) != APR_SUCCESS) {
+ /* ### assume it was a permissions problem */
+ /* ### need a description here */
+ err = dav_new_error(ctx->pool, HTTP_FORBIDDEN, 0, NULL);
+ }
+ }
+ }
+ else {
+ err = dav_fs_copymove_file(ctx->is_move, ctx->pool,
+ srcinfo->pathname, dstinfo->pathname,
+ &ctx->work_buf);
+ /* ### push a higher-level description? */
+ }
+
+ /*
+ ** If we have a "not so bad" error, then it might need to go into a
+ ** multistatus response.
+ **
+ ** For a MOVE, it will always go into the multistatus. It could be
+ ** that everything has been moved *except* for the root. Using a
+ ** multistatus (with no errors for the other resources) will signify
+ ** this condition.
+ **
+ ** For a COPY, we are traversing in a prefix fashion. If the root fails,
+ ** then we can just bail out now.
+ */
+ if (err != NULL
+ && !ap_is_HTTP_SERVER_ERROR(err->status)
+ && (ctx->is_move
+ || !dav_fs_is_same_resource(wres->resource, ctx->root))) {
+ /* ### use errno to generate DAV:responsedescription? */
+ dav_add_response(wres, err->status, NULL);
+
+ /* the error is in the multistatus now. do not stop the traversal. */
+ return NULL;
+ }
+
+ return err;
+}
+
+static dav_error *dav_fs_copymove_resource(
+ int is_move,
+ const dav_resource *src,
+ const dav_resource *dst,
+ int depth,
+ dav_response **response)
+{
+ dav_error *err = NULL;
+ dav_buffer work_buf = { 0 };
+
+ *response = NULL;
+
+ /* if a collection, recursively copy/move it and its children,
+ * including the state dirs
+ */
+ if (src->collection) {
+ dav_walk_params params = { 0 };
+ dav_response *multi_status;
+
+ params.walk_type = DAV_WALKTYPE_NORMAL | DAV_WALKTYPE_HIDDEN;
+ params.func = dav_fs_copymove_walker;
+ params.pool = src->info->pool;
+ params.root = src;
+
+ /* params.walk_ctx is managed by dav_fs_internal_walk() */
+
+ /* postfix is needed for MOVE to delete source dirs */
+ if (is_move)
+ params.walk_type |= DAV_WALKTYPE_POSTFIX;
+
+ /* note that we return the error OR the multistatus. never both */
+
+ if ((err = dav_fs_internal_walk(&params, depth, is_move, dst,
+ &multi_status)) != NULL) {
+ /* on a "real" error, then just punt. nothing else to do. */
+ return err;
+ }
+
+ if ((*response = multi_status) != NULL) {
+ /* some multistatus responses exist. wrap them in a 207 */
+ return dav_new_error(src->info->pool, HTTP_MULTI_STATUS, 0,
+ "Error(s) occurred on some resources during "
+ "the COPY/MOVE process.");
+ }
+
+ return NULL;
+ }
+
+ /* not a collection */
+ if ((err = dav_fs_copymove_file(is_move, src->info->pool,
+ src->info->pathname, dst->info->pathname,
+ &work_buf)) != NULL) {
+ /* ### push a higher-level description? */
+ return err;
+ }
+
+ /* copy/move properties as well */
+ return dav_fs_copymoveset(is_move, src->info->pool, src, dst, &work_buf);
+}
+
+static dav_error * dav_fs_copy_resource(
+ const dav_resource *src,
+ dav_resource *dst,
+ int depth,
+ dav_response **response)
+{
+ dav_error *err;
+
+#if DAV_DEBUG
+ if (src->hooks != dst->hooks) {
+ /*
+ ** ### strictly speaking, this is a design error; we should not
+ ** ### have reached this point.
+ */
+ return dav_new_error(src->info->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "DESIGN ERROR: a mix of repositories "
+ "was passed to copy_resource.");
+ }
+#endif
+
+ if ((err = dav_fs_copymove_resource(0, src, dst, depth,
+ response)) == NULL) {
+
+ /* update state of destination resource to show it exists */
+ dst->exists = 1;
+ dst->collection = src->collection;
+ }
+
+ return err;
+}
+
+static dav_error * dav_fs_move_resource(
+ dav_resource *src,
+ dav_resource *dst,
+ dav_response **response)
+{
+ dav_resource_private *srcinfo = src->info;
+ dav_resource_private *dstinfo = dst->info;
+ dav_error *err;
+ int can_rename = 0;
+
+#if DAV_DEBUG
+ if (src->hooks != dst->hooks) {
+ /*
+ ** ### strictly speaking, this is a design error; we should not
+ ** ### have reached this point.
+ */
+ return dav_new_error(src->info->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "DESIGN ERROR: a mix of repositories "
+ "was passed to move_resource.");
+ }
+#endif
+
+ /* determine whether a simple rename will work.
+ * Assume source exists, else we wouldn't get called.
+ */
+ if (dstinfo->finfo.filetype != 0) {
+ if (dstinfo->finfo.device == srcinfo->finfo.device) {
+ /* target exists and is on the same device. */
+ can_rename = 1;
+ }
+ }
+ else {
+ const char *dirpath;
+ apr_finfo_t finfo;
+ apr_status_t rv;
+
+ /* destination does not exist, but the parent directory should,
+ * so try it
+ */
+ dirpath = ap_make_dirstr_parent(dstinfo->pool, dstinfo->pathname);
+ /*
+ * XXX: If missing dev ... then what test?
+ * Really need a try and failover for those platforms.
+ *
+ */
+ rv = apr_stat(&finfo, dirpath, APR_FINFO_DEV, dstinfo->pool);
+ if ((rv == APR_SUCCESS || rv == APR_INCOMPLETE)
+ && (finfo.valid & srcinfo->finfo.valid & APR_FINFO_DEV)
+ && (finfo.device == srcinfo->finfo.device)) {
+ can_rename = 1;
+ }
+ }
+
+ /* if we can't simply rename, then do it the hard way... */
+ if (!can_rename) {
+ if ((err = dav_fs_copymove_resource(1, src, dst, DAV_INFINITY,
+ response)) == NULL) {
+ /* update resource states */
+ dst->exists = 1;
+ dst->collection = src->collection;
+ src->exists = 0;
+ src->collection = 0;
+ }
+
+ return err;
+ }
+
+ /* a rename should work. do it, and move properties as well */
+
+ /* no multistatus response */
+ *response = NULL;
+
+ /* ### APR has no rename? */
+ if (apr_file_rename(srcinfo->pathname, dstinfo->pathname,
+ srcinfo->pool) != APR_SUCCESS) {
+ /* ### should have a better error than this. */
+ return dav_new_error(srcinfo->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not rename resource.");
+ }
+
+ /* update resource states */
+ dst->exists = 1;
+ dst->collection = src->collection;
+ src->exists = 0;
+ src->collection = 0;
+
+ if ((err = dav_fs_copymoveset(1, src->info->pool,
+ src, dst, NULL)) == NULL) {
+ /* no error. we're done. go ahead and return now. */
+ return NULL;
+ }
+
+ /* error occurred during properties move; try to put resource back */
+ if (apr_file_rename(dstinfo->pathname, srcinfo->pathname,
+ srcinfo->pool) != APR_SUCCESS) {
+ /* couldn't put it back! */
+ return dav_push_error(srcinfo->pool,
+ HTTP_INTERNAL_SERVER_ERROR, 0,
+ "The resource was moved, but a failure "
+ "occurred during the move of its "
+ "properties. The resource could not be "
+ "restored to its original location. The "
+ "server is now in an inconsistent state.",
+ err);
+ }
+
+ /* update resource states again */
+ src->exists = 1;
+ src->collection = dst->collection;
+ dst->exists = 0;
+ dst->collection = 0;
+
+ /* resource moved back, but properties may be inconsistent */
+ return dav_push_error(srcinfo->pool,
+ HTTP_INTERNAL_SERVER_ERROR, 0,
+ "The resource was moved, but a failure "
+ "occurred during the move of its properties. "
+ "The resource was moved back to its original "
+ "location, but its properties may have been "
+ "partially moved. The server may be in an "
+ "inconsistent state.",
+ err);
+}
+
+static dav_error * dav_fs_delete_walker(dav_walk_resource *wres, int calltype)
+{
+ dav_resource_private *info = wres->resource->info;
+
+ /* do not attempt to remove a null resource,
+ * or a collection with children
+ */
+ if (wres->resource->exists &&
+ (!wres->resource->collection || calltype == DAV_CALLTYPE_POSTFIX)) {
+ /* try to remove the resource */
+ apr_status_t result;
+
+ result = wres->resource->collection
+ ? apr_dir_remove(info->pathname, wres->pool)
+ : apr_file_remove(info->pathname, wres->pool);
+
+ /*
+ ** If an error occurred, then add it to multistatus response.
+ ** Note that we add it for the root resource, too. It is quite
+ ** possible to delete the whole darn tree, yet fail on the root.
+ **
+ ** (also: remember we are deleting via a postfix traversal)
+ */
+ if (result != APR_SUCCESS) {
+ /* ### assume there is a permissions problem */
+
+ /* ### use errno to generate DAV:responsedescription? */
+ dav_add_response(wres, HTTP_FORBIDDEN, NULL);
+ }
+ }
+
+ return NULL;
+}
+
+static dav_error * dav_fs_remove_resource(dav_resource *resource,
+ dav_response **response)
+{
+ dav_resource_private *info = resource->info;
+
+ *response = NULL;
+
+ /* if a collection, recursively remove it and its children,
+ * including the state dirs
+ */
+ if (resource->collection) {
+ dav_walk_params params = { 0 };
+ dav_error *err = NULL;
+ dav_response *multi_status;
+
+ params.walk_type = (DAV_WALKTYPE_NORMAL
+ | DAV_WALKTYPE_HIDDEN
+ | DAV_WALKTYPE_POSTFIX);
+ params.func = dav_fs_delete_walker;
+ params.pool = info->pool;
+ params.root = resource;
+
+ if ((err = dav_fs_walk(&params, DAV_INFINITY,
+ &multi_status)) != NULL) {
+ /* on a "real" error, then just punt. nothing else to do. */
+ return err;
+ }
+
+ if ((*response = multi_status) != NULL) {
+ /* some multistatus responses exist. wrap them in a 207 */
+ return dav_new_error(info->pool, HTTP_MULTI_STATUS, 0,
+ "Error(s) occurred on some resources during "
+ "the deletion process.");
+ }
+
+ /* no errors... update resource state */
+ resource->exists = 0;
+ resource->collection = 0;
+
+ return NULL;
+ }
+
+ /* not a collection; remove the file and its properties */
+ if (apr_file_remove(info->pathname, info->pool) != APR_SUCCESS) {
+ /* ### put a description in here */
+ return dav_new_error(info->pool, HTTP_FORBIDDEN, 0, NULL);
+ }
+
+ /* update resource state */
+ resource->exists = 0;
+ resource->collection = 0;
+
+ /* remove properties and return its result */
+ return dav_fs_deleteset(info->pool, resource);
+}
+
+/* ### move this to dav_util? */
+/* Walk recursively down through directories,
+ * including lock-null resources as we go. */
+static dav_error * dav_fs_walker(dav_fs_walker_context *fsctx, int depth)
+{
+ const dav_walk_params *params = fsctx->params;
+ apr_pool_t *pool = params->pool;
+ dav_error *err = NULL;
+ int isdir = fsctx->res1.collection;
+ apr_finfo_t dirent;
+ apr_dir_t *dirp;
+
+ /* ensure the context is prepared properly, then call the func */
+ err = (*params->func)(&fsctx->wres,
+ isdir
+ ? DAV_CALLTYPE_COLLECTION
+ : DAV_CALLTYPE_MEMBER);
+ if (err != NULL) {
+ return err;
+ }
+
+ if (depth == 0 || !isdir) {
+ return NULL;
+ }
+
+    /* put a trailing slash onto the directory, in preparation for appending
+     * files to it as we discover them within the directory */
+ dav_check_bufsize(pool, &fsctx->path1, DAV_BUFFER_PAD);
+ fsctx->path1.buf[fsctx->path1.cur_len++] = '/';
+ fsctx->path1.buf[fsctx->path1.cur_len] = '\0'; /* in pad area */
+
+ /* if a secondary path is present, then do that, too */
+ if (fsctx->path2.buf != NULL) {
+ dav_check_bufsize(pool, &fsctx->path2, DAV_BUFFER_PAD);
+ fsctx->path2.buf[fsctx->path2.cur_len++] = '/';
+ fsctx->path2.buf[fsctx->path2.cur_len] = '\0'; /* in pad area */
+ }
+
+ /* Note: the URI should ALREADY have a trailing "/" */
+
+ /* for this first pass of files, all resources exist */
+ fsctx->res1.exists = 1;
+
+ /* a file is the default; we'll adjust if we hit a directory */
+ fsctx->res1.collection = 0;
+ fsctx->res2.collection = 0;
+
+ /* open and scan the directory */
+ if ((apr_dir_open(&dirp, fsctx->path1.buf, pool)) != APR_SUCCESS) {
+ /* ### need a better error */
+ return dav_new_error(pool, HTTP_NOT_FOUND, 0, NULL);
+ }
+ while ((apr_dir_read(&dirent, APR_FINFO_DIRENT, dirp)) == APR_SUCCESS) {
+ apr_size_t len;
+ apr_status_t status;
+
+ len = strlen(dirent.name);
+
+ /* avoid recursing into our current, parent, or state directories */
+ if (dirent.name[0] == '.'
+ && (len == 1 || (dirent.name[1] == '.' && len == 2))) {
+ continue;
+ }
+
+ if (params->walk_type & DAV_WALKTYPE_AUTH) {
+ /* ### need to authorize each file */
+ /* ### example: .htaccess is normally configured to fail auth */
+
+ /* stuff in the state directory is never authorized! */
+ if (!strcmp(dirent.name, DAV_FS_STATE_DIR)) {
+ continue;
+ }
+ }
+        /* skip the state dir unless a HIDDEN walk was requested */
+ if (!(params->walk_type & DAV_WALKTYPE_HIDDEN)
+ && !strcmp(dirent.name, DAV_FS_STATE_DIR)) {
+ continue;
+ }
+
+ /* append this file onto the path buffer (copy null term) */
+ dav_buffer_place_mem(pool, &fsctx->path1, dirent.name, len + 1, 0);
+
+
+ /* ### Optimize me, dirent can give us what we need! */
+ status = apr_lstat(&fsctx->info1.finfo, fsctx->path1.buf,
+ APR_FINFO_NORM, pool);
+ if (status != APR_SUCCESS && status != APR_INCOMPLETE) {
+ /* woah! where'd it go? */
+ /* ### should have a better error here */
+ err = dav_new_error(pool, HTTP_NOT_FOUND, 0, NULL);
+ break;
+ }
+
+        /* copy the file name into the URI buffer, too. NOTE: we will pad an
+           extra byte for the trailing slash later. */
+ dav_buffer_place_mem(pool, &fsctx->uri_buf, dirent.name, len + 1, 1);
+
+ /* if there is a secondary path, then do that, too */
+ if (fsctx->path2.buf != NULL) {
+ dav_buffer_place_mem(pool, &fsctx->path2, dirent.name, len + 1, 0);
+ }
+
+ /* set up the (internal) pathnames for the two resources */
+ fsctx->info1.pathname = fsctx->path1.buf;
+ fsctx->info2.pathname = fsctx->path2.buf;
+
+ /* set up the URI for the current resource */
+ fsctx->res1.uri = fsctx->uri_buf.buf;
+
+ /* ### for now, only process regular files (e.g. skip symlinks) */
+ if (fsctx->info1.finfo.filetype == APR_REG) {
+ /* call the function for the specified dir + file */
+ if ((err = (*params->func)(&fsctx->wres,
+ DAV_CALLTYPE_MEMBER)) != NULL) {
+ /* ### maybe add a higher-level description? */
+ break;
+ }
+ }
+ else if (fsctx->info1.finfo.filetype == APR_DIR) {
+ apr_size_t save_path_len = fsctx->path1.cur_len;
+ apr_size_t save_uri_len = fsctx->uri_buf.cur_len;
+ apr_size_t save_path2_len = fsctx->path2.cur_len;
+
+ /* adjust length to incorporate the subdir name */
+ fsctx->path1.cur_len += len;
+ fsctx->path2.cur_len += len;
+
+ /* adjust URI length to incorporate subdir and a slash */
+ fsctx->uri_buf.cur_len += len + 1;
+ fsctx->uri_buf.buf[fsctx->uri_buf.cur_len - 1] = '/';
+ fsctx->uri_buf.buf[fsctx->uri_buf.cur_len] = '\0';
+
+ /* switch over to a collection */
+ fsctx->res1.collection = 1;
+ fsctx->res2.collection = 1;
+
+ /* recurse on the subdir */
+ /* ### don't always want to quit on error from single child */
+ if ((err = dav_fs_walker(fsctx, depth - 1)) != NULL) {
+ /* ### maybe add a higher-level description? */
+ break;
+ }
+
+ /* put the various information back */
+ fsctx->path1.cur_len = save_path_len;
+ fsctx->path2.cur_len = save_path2_len;
+ fsctx->uri_buf.cur_len = save_uri_len;
+
+ fsctx->res1.collection = 0;
+ fsctx->res2.collection = 0;
+
+ /* assert: res1.exists == 1 */
+ }
+ }
+
+ /* ### check the return value of this? */
+ apr_dir_close(dirp);
+
+ if (err != NULL)
+ return err;
+
+ if (params->walk_type & DAV_WALKTYPE_LOCKNULL) {
+ apr_size_t offset = 0;
+
+ /* null terminate the directory name */
+ fsctx->path1.buf[fsctx->path1.cur_len - 1] = '\0';
+
+ /* Include any lock null resources found in this collection */
+ fsctx->res1.collection = 1;
+ if ((err = dav_fs_get_locknull_members(&fsctx->res1,
+ &fsctx->locknull_buf)) != NULL) {
+ /* ### maybe add a higher-level description? */
+ return err;
+ }
+
+ /* put a slash back on the end of the directory */
+ fsctx->path1.buf[fsctx->path1.cur_len - 1] = '/';
+
+        /* these are all non-existent (files) */
+ fsctx->res1.exists = 0;
+ fsctx->res1.collection = 0;
+ memset(&fsctx->info1.finfo, 0, sizeof(fsctx->info1.finfo));
+
+ while (offset < fsctx->locknull_buf.cur_len) {
+ apr_size_t len = strlen(fsctx->locknull_buf.buf + offset);
+ dav_lock *locks = NULL;
+
+ /*
+ ** Append the locknull file to the paths and the URI. Note that
+ ** we don't have to pad the URI for a slash since a locknull
+ ** resource is not a collection.
+ */
+ dav_buffer_place_mem(pool, &fsctx->path1,
+ fsctx->locknull_buf.buf + offset, len + 1, 0);
+ dav_buffer_place_mem(pool, &fsctx->uri_buf,
+ fsctx->locknull_buf.buf + offset, len + 1, 0);
+ if (fsctx->path2.buf != NULL) {
+ dav_buffer_place_mem(pool, &fsctx->path2,
+ fsctx->locknull_buf.buf + offset,
+ len + 1, 0);
+ }
+
+ /* set up the (internal) pathnames for the two resources */
+ fsctx->info1.pathname = fsctx->path1.buf;
+ fsctx->info2.pathname = fsctx->path2.buf;
+
+ /* set up the URI for the current resource */
+ fsctx->res1.uri = fsctx->uri_buf.buf;
+
+ /*
+ ** To prevent a PROPFIND showing an expired locknull
+ ** resource, query the lock database to force removal
+ ** of both the lock entry and .locknull, if necessary..
+ ** Sure, the query in PROPFIND would do this.. after
+ ** the locknull resource was already included in the
+ ** return.
+ **
+ ** NOTE: we assume the caller has opened the lock database
+ ** if they have provided DAV_WALKTYPE_LOCKNULL.
+ */
+ /* ### we should also look into opening it read-only and
+ ### eliding timed-out items from the walk, yet leaving
+ ### them in the locknull database until somebody opens
+ ### the thing writable.
+ */
+ /* ### probably ought to use has_locks. note the problem
+ ### mentioned above, though... we would traverse this as
+ ### a locknull, but then a PROPFIND would load the lock
+ ### info, causing a timeout and the locks would not be
+ ### reported. Therefore, a null resource would be returned
+ ### in the PROPFIND.
+ ###
+ ### alternative: just load unresolved locks. any direct
+ ### locks will be timed out (correct). any indirect will
+ ### not (correct; consider if a parent timed out -- the
+ ### timeout routines do not walk and remove indirects;
+ ### even the resolve func would probably fail when it
+ ### tried to find a timed-out direct lock).
+ */
+ if ((err = dav_lock_query(params->lockdb, &fsctx->res1,
+ &locks)) != NULL) {
+ /* ### maybe add a higher-level description? */
+ return err;
+ }
+
+ /* call the function for the specified dir + file */
+ if (locks != NULL &&
+ (err = (*params->func)(&fsctx->wres,
+ DAV_CALLTYPE_LOCKNULL)) != NULL) {
+ /* ### maybe add a higher-level description? */
+ return err;
+ }
+
+ offset += len + 1;
+ }
+
+ /* reset the exists flag */
+ fsctx->res1.exists = 1;
+ }
+
+ if (params->walk_type & DAV_WALKTYPE_POSTFIX) {
+ /* replace the dirs' trailing slashes with null terms */
+ fsctx->path1.buf[--fsctx->path1.cur_len] = '\0';
+ fsctx->uri_buf.buf[--fsctx->uri_buf.cur_len] = '\0';
+ if (fsctx->path2.buf != NULL) {
+ fsctx->path2.buf[--fsctx->path2.cur_len] = '\0';
+ }
+
+ /* this is a collection which exists */
+ fsctx->res1.collection = 1;
+
+ return (*params->func)(&fsctx->wres, DAV_CALLTYPE_POSTFIX);
+ }
+
+ return NULL;
+}
+
+static dav_error * dav_fs_internal_walk(const dav_walk_params *params,
+ int depth, int is_move,
+ const dav_resource *root_dst,
+ dav_response **response)
+{
+ dav_fs_walker_context fsctx = { 0 };
+ dav_error *err;
+ dav_fs_copymove_walk_ctx cm_ctx = { 0 };
+
+#if DAV_DEBUG
+ if ((params->walk_type & DAV_WALKTYPE_LOCKNULL) != 0
+ && params->lockdb == NULL) {
+ return dav_new_error(params->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "DESIGN ERROR: walker called to walk locknull "
+ "resources, but a lockdb was not provided.");
+ }
+#endif
+
+ fsctx.params = params;
+ fsctx.wres.walk_ctx = params->walk_ctx;
+ fsctx.wres.pool = params->pool;
+
+ /* ### zero out versioned, working, baselined? */
+
+ fsctx.res1 = *params->root;
+ fsctx.res1.pool = params->pool;
+
+ fsctx.res1.info = &fsctx.info1;
+ fsctx.info1 = *params->root->info;
+
+ /* the pathname is stored in the path1 buffer */
+ dav_buffer_init(params->pool, &fsctx.path1, fsctx.info1.pathname);
+ fsctx.info1.pathname = fsctx.path1.buf;
+
+ if (root_dst != NULL) {
+ /* internal call from the COPY/MOVE code. set it up. */
+
+ fsctx.wres.walk_ctx = &cm_ctx;
+ cm_ctx.is_move = is_move;
+ cm_ctx.res_dst = &fsctx.res2;
+ cm_ctx.root = params->root;
+ cm_ctx.pool = params->pool;
+
+ fsctx.res2 = *root_dst;
+ fsctx.res2.exists = 0;
+ fsctx.res2.collection = 0;
+ fsctx.res2.uri = NULL; /* we don't track this */
+ fsctx.res2.pool = params->pool;
+
+ fsctx.res2.info = &fsctx.info2;
+ fsctx.info2 = *root_dst->info;
+
+ /* res2 does not exist -- clear its finfo structure */
+ memset(&fsctx.info2.finfo, 0, sizeof(fsctx.info2.finfo));
+
+ /* the pathname is stored in the path2 buffer */
+ dav_buffer_init(params->pool, &fsctx.path2, fsctx.info2.pathname);
+ fsctx.info2.pathname = fsctx.path2.buf;
+ }
+
+ /* prep the URI buffer */
+ dav_buffer_init(params->pool, &fsctx.uri_buf, params->root->uri);
+
+ /* if we have a directory, then ensure the URI has a trailing "/" */
+ if (fsctx.res1.collection
+ && fsctx.uri_buf.buf[fsctx.uri_buf.cur_len - 1] != '/') {
+
+ /* this will fall into the pad area */
+ fsctx.uri_buf.buf[fsctx.uri_buf.cur_len++] = '/';
+ fsctx.uri_buf.buf[fsctx.uri_buf.cur_len] = '\0';
+ }
+
+ /* the current resource's URI is stored in the uri_buf buffer */
+ fsctx.res1.uri = fsctx.uri_buf.buf;
+
+ /* point the callback's resource at our structure */
+ fsctx.wres.resource = &fsctx.res1;
+
+ /* always return the error, and any/all multistatus responses */
+ err = dav_fs_walker(&fsctx, depth);
+ *response = fsctx.wres.response;
+ return err;
+}
+
+static dav_error * dav_fs_walk(const dav_walk_params *params, int depth,
+ dav_response **response)
+{
+ /* always return the error, and any/all multistatus responses */
+ return dav_fs_internal_walk(params, depth, 0, NULL, response);
+}
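+/*
+** Minimal usage sketch (illustrative, not part of this module): a
+** hypothetical caller of dav_fs_walk(), modeled on dav_fs_remove_resource()
+** above. "my_walker" and "resource" are assumed names. The callback just
+** inspects wres->resource and returns NULL to keep walking:
+**
+**     static dav_error *my_walker(dav_walk_resource *wres, int calltype)
+**     {
+**         return NULL;
+**     }
+**
+**     dav_walk_params params = { 0 };
+**     dav_response *multi_status;
+**     dav_error *err;
+**
+**     params.walk_type = DAV_WALKTYPE_NORMAL | DAV_WALKTYPE_POSTFIX;
+**     params.func = my_walker;
+**     params.pool = resource->info->pool;
+**     params.root = resource;
+**
+**     err = dav_fs_walk(&params, DAV_INFINITY, &multi_status);
+*/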
+
+/* dav_fs_getetag: Stolen from ap_make_etag. Creates a strong etag
+ * for the file path.
+ * ### do we need to return weak tags sometimes?
+ */
+static const char *dav_fs_getetag(const dav_resource *resource)
+{
+ dav_resource_private *ctx = resource->info;
+
+ if (!resource->exists)
+ return apr_pstrdup(ctx->pool, "");
+
+ if (ctx->finfo.filetype != 0) {
+ return apr_psprintf(ctx->pool, "\"%lx-%lx-%lx\"",
+ (unsigned long) ctx->finfo.inode,
+ (unsigned long) ctx->finfo.size,
+ (unsigned long) ctx->finfo.mtime);
+ }
+
+ return apr_psprintf(ctx->pool, "\"%lx\"", (unsigned long) ctx->finfo.mtime);
+}
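+/*
+** For an existing plain file the function above produces a quoted, strong
+** ETag built from the inode, size, and mtime in hex, e.g.
+** "3d5a1c-1000-41e64d2a" (digits made up for illustration). Resources with
+** no filetype fall back to the mtime alone, and non-existent resources get
+** an empty string.
+*/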
+
+static const dav_hooks_repository dav_hooks_repository_fs =
+{
+ DEBUG_GET_HANDLER, /* normally: special GET handling not required */
+ dav_fs_get_resource,
+ dav_fs_get_parent_resource,
+ dav_fs_is_same_resource,
+ dav_fs_is_parent_resource,
+ dav_fs_open_stream,
+ dav_fs_close_stream,
+ dav_fs_write_stream,
+ dav_fs_seek_stream,
+#if DEBUG_GET_HANDLER
+ dav_fs_set_headers,
+ dav_fs_deliver,
+#else
+ NULL,
+ NULL,
+#endif
+ dav_fs_create_collection,
+ dav_fs_copy_resource,
+ dav_fs_move_resource,
+ dav_fs_remove_resource,
+ dav_fs_walk,
+ dav_fs_getetag,
+};
+
+static dav_prop_insert dav_fs_insert_prop(const dav_resource *resource,
+ int propid, dav_prop_insert what,
+ apr_text_header *phdr)
+{
+ const char *value;
+ const char *s;
+ apr_pool_t *p = resource->info->pool;
+ const dav_liveprop_spec *info;
+ int global_ns;
+
+ /* an HTTP-date can be 29 chars plus a null term */
+ /* a 64-bit size can be 20 chars plus a null term */
+ char buf[DAV_TIMEBUF_SIZE];
+
+ /*
+    ** None of the FS provider's properties are defined if the resource does not
+ ** exist. Just bail for this case.
+ **
+ ** Even though we state that the FS properties are not defined, the
+    ** client cannot store dead values -- we deny that through the is_writable
+ ** hook function.
+ */
+ if (!resource->exists)
+ return DAV_PROP_INSERT_NOTDEF;
+
+ switch (propid) {
+ case DAV_PROPID_creationdate:
+ /*
+        ** Closest thing to a creation date. Since we don't actually
+        ** perform the operations that would modify ctime (after we
+        ** create the file), we should be pretty safe here.
+ */
+ dav_format_time(DAV_STYLE_ISO8601,
+ resource->info->finfo.ctime,
+ buf);
+ value = buf;
+ break;
+
+ case DAV_PROPID_getcontentlength:
+ /* our property, but not defined on collection resources */
+ if (resource->collection)
+ return DAV_PROP_INSERT_NOTDEF;
+
+ (void) sprintf(buf, "%" APR_OFF_T_FMT, resource->info->finfo.size);
+ value = buf;
+ break;
+
+ case DAV_PROPID_getetag:
+ value = dav_fs_getetag(resource);
+ break;
+
+ case DAV_PROPID_getlastmodified:
+ dav_format_time(DAV_STYLE_RFC822,
+ resource->info->finfo.mtime,
+ buf);
+ value = buf;
+ break;
+
+ case DAV_PROPID_FS_executable:
+ /* our property, but not defined on collection resources */
+ if (resource->collection)
+ return DAV_PROP_INSERT_NOTDEF;
+
+ /* our property, but not defined on this platform */
+ if (!(resource->info->finfo.valid & APR_FINFO_UPROT))
+ return DAV_PROP_INSERT_NOTDEF;
+
+ /* the files are "ours" so we only need to check owner exec privs */
+ if (resource->info->finfo.protection & APR_UEXECUTE)
+ value = "T";
+ else
+ value = "F";
+ break;
+
+ default:
+ /* ### what the heck was this property? */
+ return DAV_PROP_INSERT_NOTDEF;
+ }
+
+ /* assert: value != NULL */
+
+ /* get the information and global NS index for the property */
+ global_ns = dav_get_liveprop_info(propid, &dav_fs_liveprop_group, &info);
+
+ /* assert: info != NULL && info->name != NULL */
+
+ /* DBG3("FS: inserting lp%d:%s (local %d)", ns, scan->name, scan->ns); */
+
+ if (what == DAV_PROP_INSERT_VALUE) {
+ s = apr_psprintf(p, "<lp%d:%s>%s</lp%d:%s>" DEBUG_CR,
+ global_ns, info->name, value, global_ns, info->name);
+ }
+ else if (what == DAV_PROP_INSERT_NAME) {
+ s = apr_psprintf(p, "<lp%d:%s/>" DEBUG_CR, global_ns, info->name);
+ }
+ else {
+ /* assert: what == DAV_PROP_INSERT_SUPPORTED */
+ s = apr_psprintf(p,
+ "<D:supported-live-property D:name=\"%s\" "
+ "D:namespace=\"%s\"/>" DEBUG_CR,
+ info->name, dav_fs_namespace_uris[info->ns]);
+ }
+ apr_text_append(p, phdr, s);
+
+ /* we inserted what was asked for */
+ return what;
+}
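+/*
+** For example (namespace indexes and values are illustrative), the
+** DAV_PROP_INSERT_VALUE case above emits fragments such as
+**
+**     <lp1:getcontentlength>4096</lp1:getcontentlength>
+**     <lp1:getlastmodified>Mon, 12 Jan 2004 10:20:30 GMT</lp1:getlastmodified>
+**
+** while DAV_PROP_INSERT_NAME emits the empty form <lp1:getcontentlength/>.
+*/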
+
+static int dav_fs_is_writable(const dav_resource *resource, int propid)
+{
+ const dav_liveprop_spec *info;
+
+#ifdef DAV_FS_HAS_EXECUTABLE
+ /* if we have the executable property, and this isn't a collection,
+ then the property is writable. */
+ if (propid == DAV_PROPID_FS_executable && !resource->collection)
+ return 1;
+#endif
+
+ (void) dav_get_liveprop_info(propid, &dav_fs_liveprop_group, &info);
+ return info->is_writable;
+}
+
+static dav_error *dav_fs_patch_validate(const dav_resource *resource,
+ const apr_xml_elem *elem,
+ int operation,
+ void **context,
+ int *defer_to_dead)
+{
+ const apr_text *cdata;
+ const apr_text *f_cdata;
+ char value;
+ dav_elem_private *priv = elem->priv;
+
+ if (priv->propid != DAV_PROPID_FS_executable) {
+ *defer_to_dead = 1;
+ return NULL;
+ }
+
+ if (operation == DAV_PROP_OP_DELETE) {
+ return dav_new_error(resource->info->pool, HTTP_CONFLICT, 0,
+ "The 'executable' property cannot be removed.");
+ }
+
+ cdata = elem->first_cdata.first;
+
+ /* ### hmm. this isn't actually looking at all the possible text items */
+ f_cdata = elem->first_child == NULL
+ ? NULL
+ : elem->first_child->following_cdata.first;
+
+ /* DBG3("name=%s cdata=%s f_cdata=%s",elem->name,cdata ? cdata->text : "[null]",f_cdata ? f_cdata->text : "[null]"); */
+
+ if (cdata == NULL) {
+ if (f_cdata == NULL) {
+ return dav_new_error(resource->info->pool, HTTP_CONFLICT, 0,
+ "The 'executable' property expects a single "
+ "character, valued 'T' or 'F'. There was no "
+ "value submitted.");
+ }
+ cdata = f_cdata;
+ }
+ else if (f_cdata != NULL)
+ goto too_long;
+
+ if (cdata->next != NULL || strlen(cdata->text) != 1)
+ goto too_long;
+
+ value = cdata->text[0];
+ if (value != 'T' && value != 'F') {
+ return dav_new_error(resource->info->pool, HTTP_CONFLICT, 0,
+ "The 'executable' property expects a single "
+ "character, valued 'T' or 'F'. The value "
+ "submitted is invalid.");
+ }
+
+ *context = (void *)(value == 'T');
+
+ return NULL;
+
+ too_long:
+ return dav_new_error(resource->info->pool, HTTP_CONFLICT, 0,
+ "The 'executable' property expects a single "
+ "character, valued 'T' or 'F'. The value submitted "
+ "has too many characters.");
+
+}
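+/*
+** Sketch of a PROPPATCH body that this validator accepts (assuming the
+** module's non-DAV liveprop namespace is http://apache.org/dav/props/;
+** the "A" prefix is arbitrary):
+**
+**     <D:propertyupdate xmlns:D="DAV:"
+**                       xmlns:A="http://apache.org/dav/props/">
+**       <D:set>
+**         <D:prop><A:executable>T</A:executable></D:prop>
+**       </D:set>
+**     </D:propertyupdate>
+**
+** Anything other than a single 'T' or 'F' character is rejected above with
+** a 409 (Conflict).
+*/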
+
+static dav_error *dav_fs_patch_exec(const dav_resource *resource,
+ const apr_xml_elem *elem,
+ int operation,
+ void *context,
+ dav_liveprop_rollback **rollback_ctx)
+{
+ int value = context != NULL;
+ apr_fileperms_t perms = resource->info->finfo.protection;
+ int old_value = (perms & APR_UEXECUTE) != 0;
+
+ /* assert: prop == executable. operation == SET. */
+
+ /* don't do anything if there is no change. no rollback info either. */
+ /* DBG2("new value=%d (old=%d)", value, old_value); */
+ if (value == old_value)
+ return NULL;
+
+ perms &= ~APR_UEXECUTE;
+ if (value)
+ perms |= APR_UEXECUTE;
+
+ if (apr_file_perms_set(resource->info->pathname, perms) != APR_SUCCESS) {
+ return dav_new_error(resource->info->pool,
+ HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not set the executable flag of the "
+ "target resource.");
+ }
+
+ /* update the resource and set up the rollback context */
+ resource->info->finfo.protection = perms;
+ *rollback_ctx = (dav_liveprop_rollback *)old_value;
+
+ return NULL;
+}
+
+static void dav_fs_patch_commit(const dav_resource *resource,
+ int operation,
+ void *context,
+ dav_liveprop_rollback *rollback_ctx)
+{
+ /* nothing to do */
+}
+
+static dav_error *dav_fs_patch_rollback(const dav_resource *resource,
+ int operation,
+ void *context,
+ dav_liveprop_rollback *rollback_ctx)
+{
+ apr_fileperms_t perms = resource->info->finfo.protection & ~APR_UEXECUTE;
+ int value = rollback_ctx != NULL;
+
+ /* assert: prop == executable. operation == SET. */
+
+ /* restore the executable bit */
+ if (value)
+ perms |= APR_UEXECUTE;
+
+ if (apr_file_perms_set(resource->info->pathname, perms) != APR_SUCCESS) {
+ return dav_new_error(resource->info->pool,
+ HTTP_INTERNAL_SERVER_ERROR, 0,
+ "After a failure occurred, the resource's "
+ "executable flag could not be restored.");
+ }
+
+ /* restore the resource's state */
+ resource->info->finfo.protection = perms;
+
+ return NULL;
+}
+
+
+static const dav_hooks_liveprop dav_hooks_liveprop_fs =
+{
+ dav_fs_insert_prop,
+ dav_fs_is_writable,
+ dav_fs_namespace_uris,
+ dav_fs_patch_validate,
+ dav_fs_patch_exec,
+ dav_fs_patch_commit,
+ dav_fs_patch_rollback
+};
+
+static const dav_provider dav_fs_provider =
+{
+ &dav_hooks_repository_fs,
+ &dav_hooks_db_dbm,
+ &dav_hooks_locks_fs,
+ NULL, /* vsn */
+ NULL, /* binding */
+ NULL, /* search */
+
+ NULL /* ctx */
+};
+
+void dav_fs_gather_propsets(apr_array_header_t *uris)
+{
+#ifdef DAV_FS_HAS_EXECUTABLE
+ *(const char **)apr_array_push(uris) =
+ "<http://apache.org/dav/propset/fs/1>";
+#endif
+}
+
+int dav_fs_find_liveprop(const dav_resource *resource,
+ const char *ns_uri, const char *name,
+ const dav_hooks_liveprop **hooks)
+{
+ /* don't try to find any liveprops if this isn't "our" resource */
+ if (resource->hooks != &dav_hooks_repository_fs)
+ return 0;
+ return dav_do_find_liveprop(ns_uri, name, &dav_fs_liveprop_group, hooks);
+}
+
+void dav_fs_insert_all_liveprops(request_rec *r, const dav_resource *resource,
+ dav_prop_insert what, apr_text_header *phdr)
+{
+ /* don't insert any liveprops if this isn't "our" resource */
+ if (resource->hooks != &dav_hooks_repository_fs)
+ return;
+
+ if (!resource->exists) {
+ /* a lock-null resource */
+ /*
+ ** ### technically, we should insert empty properties. dunno offhand
+ ** ### what part of the spec said this, but it was essentially thus:
+ ** ### "the properties should be defined, but may have no value".
+ */
+ return;
+ }
+
+ (void) dav_fs_insert_prop(resource, DAV_PROPID_creationdate,
+ what, phdr);
+ (void) dav_fs_insert_prop(resource, DAV_PROPID_getcontentlength,
+ what, phdr);
+ (void) dav_fs_insert_prop(resource, DAV_PROPID_getlastmodified,
+ what, phdr);
+ (void) dav_fs_insert_prop(resource, DAV_PROPID_getetag,
+ what, phdr);
+
+#ifdef DAV_FS_HAS_EXECUTABLE
+ /* Only insert this property if it is defined for this platform. */
+ (void) dav_fs_insert_prop(resource, DAV_PROPID_FS_executable,
+ what, phdr);
+#endif
+
+ /* ### we know the others aren't defined as liveprops */
+}
+
+void dav_fs_register(apr_pool_t *p)
+{
+ /* register the namespace URIs */
+ dav_register_liveprop_group(p, &dav_fs_liveprop_group);
+
+ /* register the repository provider */
+ dav_register_provider(p, "filesystem", &dav_fs_provider);
+}
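+/*
+** Usage note (sketch): a filesystem-backed DAV module is expected to call
+** dav_fs_register() at configuration time; mod_dav can then resolve this
+** provider by name via dav_lookup_provider("filesystem"), which is what the
+** DAV directive handler in mod_dav.c does with its default provider name.
+*/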
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/fs/repos.h b/rubbos/app/httpd-2.0.64/modules/dav/fs/repos.h
new file mode 100644
index 00000000..d7962d56
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/fs/repos.h
@@ -0,0 +1,78 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+** Declarations for the filesystem repository implementation
+*/
+
+#ifndef _DAV_FS_REPOS_H_
+#define _DAV_FS_REPOS_H_
+
+/* the subdirectory to hold all DAV-related information for a directory */
+#define DAV_FS_STATE_DIR ".DAV"
+#define DAV_FS_STATE_FILE_FOR_DIR ".state_for_dir"
+#define DAV_FS_LOCK_NULL_FILE ".locknull"
+
+
+/* ensure that our state subdirectory is present */
+void dav_fs_ensure_state_dir(apr_pool_t *p, const char *dirname);
+
+/* return the storage pool associated with a resource */
+apr_pool_t *dav_fs_pool(const dav_resource *resource);
+
+/* return the full pathname for a resource */
+const char *dav_fs_pathname(const dav_resource *resource);
+
+/* return the directory and filename for a resource */
+dav_error * dav_fs_dir_file_name(const dav_resource *resource,
+ const char **dirpath,
+ const char **fname);
+
+/* return the list of locknull members in this resource's directory */
+dav_error * dav_fs_get_locknull_members(const dav_resource *resource,
+ dav_buffer *pbuf);
+
+
+/* DBM functions used by the repository and locking providers */
+extern const dav_hooks_db dav_hooks_db_dbm;
+
+dav_error * dav_dbm_open_direct(apr_pool_t *p, const char *pathname, int ro,
+ dav_db **pdb);
+void dav_dbm_get_statefiles(apr_pool_t *p, const char *fname,
+ const char **state1, const char **state2);
+dav_error * dav_dbm_delete(dav_db *db, apr_datum_t key);
+dav_error * dav_dbm_store(dav_db *db, apr_datum_t key, apr_datum_t value);
+dav_error * dav_dbm_fetch(dav_db *db, apr_datum_t key, apr_datum_t *pvalue);
+void dav_dbm_freedatum(dav_db *db, apr_datum_t data);
+int dav_dbm_exists(dav_db *db, apr_datum_t key);
+void dav_dbm_close(dav_db *db);
+
+/* where is the lock database located? */
+const char *dav_get_lockdb_path(const request_rec *r);
+
+const dav_hooks_locks *dav_fs_get_lock_hooks(request_rec *r);
+const dav_hooks_propdb *dav_fs_get_propdb_hooks(request_rec *r);
+
+void dav_fs_gather_propsets(apr_array_header_t *uris);
+int dav_fs_find_liveprop(const dav_resource *resource,
+ const char *ns_uri, const char *name,
+ const dav_hooks_liveprop **hooks);
+void dav_fs_insert_all_liveprops(request_rec *r, const dav_resource *resource,
+ dav_prop_insert what, apr_text_header *phdr);
+
+void dav_fs_register(apr_pool_t *p);
+
+#endif /* _DAV_FS_REPOS_H_ */
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/.deps b/rubbos/app/httpd-2.0.64/modules/dav/main/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/Makefile b/rubbos/app/httpd-2.0.64/modules/dav/main/Makefile
new file mode 100644
index 00000000..509e3634
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/dav/main
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/dav/main
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/dav/main
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/Makefile.in b/rubbos/app/httpd-2.0.64/modules/dav/main/Makefile.in
new file mode 100644
index 00000000..7c5c149d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/Makefile.in
@@ -0,0 +1,3 @@
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/NWGNUmakefile b/rubbos/app/httpd-2.0.64/modules/dav/main/NWGNUmakefile
new file mode 100644
index 00000000..8546d6a3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/NWGNUmakefile
@@ -0,0 +1,268 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(AP_WORK)/server/mpm/NetWare \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = mod_DAV
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) DAV module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = mod_DAV
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/mod_dav.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_dav.o \
+ $(OBJDIR)/props.o \
+ $(OBJDIR)/util.o \
+ $(OBJDIR)/util_lock.o \
+ $(OBJDIR)/liveprop.o \
+ $(OBJDIR)/providers.o \
+ $(OBJDIR)/std_liveprop.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Apache2 \
+ Libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @$(APR)/aprlib.imp \
+ @httpd.imp \
+ $(EOLIST)
+
+#
+# Any exported symbols go here
+#
+FILES_nlm_exports = \
+ dav_module \
+ @dav.imp \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\mod_dav.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/config5.m4 b/rubbos/app/httpd-2.0.64/modules/dav/main/config5.m4
new file mode 100644
index 00000000..fa2eee47
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/config5.m4
@@ -0,0 +1,22 @@
+dnl modules enabled in this directory by default
+
+APACHE_MODPATH_INIT(dav/main)
+
+dav_objects="mod_dav.lo props.lo util.lo util_lock.lo liveprop.lo providers.lo std_liveprop.lo"
+
+if test "$enable_http" = "no"; then
+ dav_enable=no
+else
+ dav_enable=most
+fi
+
+APACHE_MODULE(dav, WebDAV protocol handling, $dav_objects, , $dav_enable)
+
+if test "$dav_enable" != "no" -o "$enable_dav" != "no"; then
+ apache_need_expat=yes
+
+ APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current])
+fi
+
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/dav.imp b/rubbos/app/httpd-2.0.64/modules/dav/main/dav.imp
new file mode 100644
index 00000000..88b306da
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/dav.imp
@@ -0,0 +1,64 @@
+
+ (mod_dav)
+ dav_add_all_liveprop_xmlns,
+ dav_add_lock,
+ dav_add_response,
+ dav_add_vary_header,
+ dav_auto_checkin,
+ dav_auto_checkout,
+ dav_buffer_append,
+ dav_buffer_init,
+ dav_buffer_place,
+ dav_buffer_place_mem,
+ dav_check_bufsize,
+ dav_close_propdb,
+ dav_core_find_liveprop,
+ dav_core_insert_all_liveprops,
+ dav_core_register_uris,
+ dav_do_find_liveprop,
+ dav_find_child,
+ dav_get_allprops,
+ dav_get_binding_hooks,
+ dav_get_depth,
+ dav_get_liveprop_info,
+ dav_get_liveprop_ns_count,
+ dav_get_liveprop_ns_index,
+ dav_get_liveprop_supported,
+ dav_get_lock_hooks,
+ dav_get_locktoken_list,
+ dav_get_propdb_hooks,
+ dav_get_props,
+ dav_get_resource_state,
+ dav_get_search_hooks,
+ dav_get_timeout,
+ dav_get_vsn_hooks,
+ dav_hook_find_liveprop,
+ dav_hook_gather_propsets,
+ dav_hook_insert_all_liveprops,
+ dav_lock_get_activelock,
+ dav_lock_parse_lockinfo,
+ dav_lock_query,
+ dav_lookup_provider,
+ dav_lookup_uri,
+ dav_new_error,
+ dav_new_error_tag,
+ dav_notify_created,
+ dav_open_propdb,
+ dav_prop_commit,
+ dav_prop_exec,
+ dav_prop_rollback,
+ dav_prop_validate,
+ dav_push_error,
+ dav_register_liveprop_group,
+ dav_register_provider,
+ dav_set_bufsize,
+ dav_unlock,
+ dav_validate_request,
+ dav_validate_root,
+ dav_xml_get_cdata,
+ dav_xmlns_add,
+ dav_xmlns_add_uri,
+ dav_xmlns_create,
+ dav_xmlns_generate,
+ dav_xmlns_get_prefix,
+ dav_xmlns_get_uri
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/liveprop.c b/rubbos/app/httpd-2.0.64/modules/dav/main/liveprop.c
new file mode 100644
index 00000000..88461a80
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/liveprop.c
@@ -0,0 +1,140 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_pools.h"
+#include "apr_hash.h"
+#include "apr_errno.h"
+#include "apr_strings.h"
+#include "util_xml.h" /* for apr_text_header */
+#include "mod_dav.h"
+
+
+static apr_hash_t *dav_liveprop_uris = NULL;
+static int dav_liveprop_count = 0;
+
+
+static apr_status_t dav_cleanup_liveprops(void *ctx)
+{
+ dav_liveprop_uris = NULL;
+ dav_liveprop_count = 0;
+ return APR_SUCCESS;
+}
+
+static void dav_register_liveprop_namespace(apr_pool_t *p, const char *uri)
+{
+ int value;
+
+ if (dav_liveprop_uris == NULL) {
+ dav_liveprop_uris = apr_hash_make(p);
+ apr_pool_cleanup_register(p, NULL, dav_cleanup_liveprops, apr_pool_cleanup_null);
+ }
+
+ value = (int)apr_hash_get(dav_liveprop_uris, uri, APR_HASH_KEY_STRING);
+ if (value != 0) {
+ /* already registered */
+ return;
+ }
+
+ /* start at 1, and count up */
+ apr_hash_set(dav_liveprop_uris, uri, APR_HASH_KEY_STRING,
+ (void *)++dav_liveprop_count);
+}
+
+DAV_DECLARE(int) dav_get_liveprop_ns_index(const char *uri)
+{
+ return (int)apr_hash_get(dav_liveprop_uris, uri, APR_HASH_KEY_STRING);
+}
+
+DAV_DECLARE(int) dav_get_liveprop_ns_count(void)
+{
+ return dav_liveprop_count;
+}
+
+DAV_DECLARE(void) dav_add_all_liveprop_xmlns(apr_pool_t *p,
+ apr_text_header *phdr)
+{
+ apr_hash_index_t *idx = apr_hash_first(p, dav_liveprop_uris);
+
+ for ( ; idx != NULL; idx = apr_hash_next(idx) ) {
+ const void *key;
+ void *val;
+ const char *s;
+
+ apr_hash_this(idx, &key, NULL, &val);
+
+ s = apr_psprintf(p, " xmlns:lp%d=\"%s\"", (int)val, (const char *)key);
+ apr_text_append(p, phdr, s);
+ }
+}
+
+DAV_DECLARE(int) dav_do_find_liveprop(const char *ns_uri, const char *name,
+ const dav_liveprop_group *group,
+ const dav_hooks_liveprop **hooks)
+{
+ const char * const *uris = group->namespace_uris;
+ const dav_liveprop_spec *scan;
+ int ns;
+
+ /* first: locate the namespace in the namespace table */
+ for (ns = 0; uris[ns] != NULL; ++ns)
+ if (strcmp(ns_uri, uris[ns]) == 0)
+ break;
+ if (uris[ns] == NULL) {
+ /* not our property (the namespace matched none of ours) */
+ return 0;
+ }
+
+ /* second: look for the property in the liveprop specs */
+ for (scan = group->specs; scan->name != NULL; ++scan)
+ if (ns == scan->ns && strcmp(name, scan->name) == 0) {
+ *hooks = group->hooks;
+ return scan->propid;
+ }
+
+ /* not our property (same namespace, but no matching prop name) */
+ return 0;
+}
+
+DAV_DECLARE(int) dav_get_liveprop_info(int propid,
+ const dav_liveprop_group *group,
+ const dav_liveprop_spec **info)
+{
+ const dav_liveprop_spec *scan;
+
+ for (scan = group->specs; scan->name != NULL; ++scan) {
+ if (scan->propid == propid) {
+ *info = scan;
+
+ /* map the provider-local NS into a global NS index */
+ return dav_get_liveprop_ns_index(group->namespace_uris[scan->ns]);
+ }
+ }
+
+ /* assert: should not reach this point */
+ *info = NULL;
+ return 0;
+}
+
+DAV_DECLARE(void) dav_register_liveprop_group(apr_pool_t *p,
+ const dav_liveprop_group *group)
+{
+ /* register the namespace URIs */
+ const char * const * uris = group->namespace_uris;
+
+ for ( ; *uris != NULL; ++uris) {
+ dav_register_liveprop_namespace(p, *uris);
+ }
+}
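+/*
+** Sketch of how a provider typically builds the group passed to
+** dav_register_liveprop_group() above (hypothetical names; see mod_dav.h
+** for the exact structure layouts):
+**
+**     static const char * const my_namespace_uris[] = {
+**         "DAV:",
+**         "http://example.com/dav/props/",        (illustrative URI)
+**         NULL
+**     };
+**
+**     static const dav_liveprop_spec my_props[] = {
+**         { 1, "my-prop", MY_PROPID, 0 },         (ns, name, propid, writable)
+**         { 0 }
+**     };
+**
+**     static const dav_liveprop_group my_liveprop_group = {
+**         my_props, my_namespace_uris, &my_liveprop_hooks
+**     };
+**
+**     dav_register_liveprop_group(pool, &my_liveprop_group);
+*/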
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/mod_dav.c b/rubbos/app/httpd-2.0.64/modules/dav/main/mod_dav.c
new file mode 100644
index 00000000..3d3b47bb
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/mod_dav.c
@@ -0,0 +1,4834 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * DAV extension module for Apache 2.0.*
+ *
+ * This module is repository-independent. It depends on hooks provided by a
+ * repository implementation.
+ *
+ * APACHE ISSUES:
+ * - within a DAV hierarchy, if an unknown method is used and we default
+ * to Apache's implementation, it sends back an OPTIONS with the wrong
+ * set of methods -- there is NO HOOK for us.
+ * therefore: we need to manually handle the HTTP_METHOD_NOT_ALLOWED
+ * and HTTP_NOT_IMPLEMENTED responses (not ap_send_error_response).
+ * - process_mkcol_body() had to dup code from ap_setup_client_block().
+ * - it would be nice to get status lines from Apache for arbitrary
+ * status codes
+ * - it would be nice to be able to extend Apache's set of response
+ * codes so that it doesn't return 500 when an unknown code is placed
+ * into r->status.
+ * - http_vhost functions should apply "const" to their params
+ *
+ * DESIGN NOTES:
+ * - For PROPFIND, we batch up the entire response in memory before
+ * sending it. We may want to reorganize around sending the information
+ * as we suck it in from the propdb. Alternatively, we should at least
+ * generate a total Content-Length if we're going to buffer in memory
+ * so that we can keep the connection open.
+ */
+
+#include "apr_strings.h"
+#include "apr_lib.h" /* for apr_is* */
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "util_script.h"
+
+#include "mod_dav.h"
+
+
+/* ### what is the best way to set this? */
+#define DAV_DEFAULT_PROVIDER "filesystem"
+
+/* used to denote that mod_dav will be handling this request */
+#define DAV_HANDLER_NAME "dav-handler"
+
+enum {
+ DAV_ENABLED_UNSET = 0,
+ DAV_ENABLED_OFF,
+ DAV_ENABLED_ON
+};
+
+/* per-dir configuration */
+typedef struct {
+ const char *provider_name;
+ const dav_provider *provider;
+ const char *dir;
+ int locktimeout;
+ int allow_depthinfinity;
+
+} dav_dir_conf;
+
+/* per-server configuration */
+typedef struct {
+ int unused;
+
+} dav_server_conf;
+
+#define DAV_INHERIT_VALUE(parent, child, field) \
+ ((child)->field ? (child)->field : (parent)->field)
+
+
+/* forward-declare for use in configuration lookup */
+extern module DAV_DECLARE_DATA dav_module;
+
+/* DAV methods */
+enum {
+ DAV_M_BIND = 0,
+ DAV_M_SEARCH,
+ DAV_M_LAST
+};
+static int dav_methods[DAV_M_LAST];
+
+
+static int dav_init_handler(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp,
+ server_rec *s)
+{
+ /* DBG0("dav_init_handler"); */
+
+ /* Register DAV methods */
+ dav_methods[DAV_M_BIND] = ap_method_register(p, "BIND");
+ dav_methods[DAV_M_SEARCH] = ap_method_register(p, "SEARCH");
+
+ ap_add_version_component(p, "DAV/2");
+
+ return OK;
+}
+
+static void *dav_create_server_config(apr_pool_t *p, server_rec *s)
+{
+ dav_server_conf *newconf;
+
+ newconf = (dav_server_conf *)apr_pcalloc(p, sizeof(*newconf));
+
+ /* ### this isn't used at the moment... */
+
+ return newconf;
+}
+
+static void *dav_merge_server_config(apr_pool_t *p, void *base, void *overrides)
+{
+#if 0
+ dav_server_conf *child = overrides;
+#endif
+ dav_server_conf *newconf;
+
+ newconf = (dav_server_conf *)apr_pcalloc(p, sizeof(*newconf));
+
+ /* ### nothing to merge right now... */
+
+ return newconf;
+}
+
+static void *dav_create_dir_config(apr_pool_t *p, char *dir)
+{
+ /* NOTE: dir==NULL creates the default per-dir config */
+
+ dav_dir_conf *conf;
+
+ conf = (dav_dir_conf *)apr_pcalloc(p, sizeof(*conf));
+
+ /* clean up the directory to remove any trailing slash */
+ if (dir != NULL) {
+ char *d;
+ apr_size_t l;
+
+ d = apr_pstrdup(p, dir);
+ l = strlen(d);
+ if (l > 1 && d[l - 1] == '/')
+ d[l - 1] = '\0';
+ conf->dir = d;
+ }
+
+ return conf;
+}
+
+static void *dav_merge_dir_config(apr_pool_t *p, void *base, void *overrides)
+{
+ dav_dir_conf *parent = base;
+ dav_dir_conf *child = overrides;
+ dav_dir_conf *newconf = (dav_dir_conf *)apr_pcalloc(p, sizeof(*newconf));
+
+ /* DBG3("dav_merge_dir_config: new=%08lx base=%08lx overrides=%08lx",
+ (long)newconf, (long)base, (long)overrides); */
+
+ newconf->provider_name = DAV_INHERIT_VALUE(parent, child, provider_name);
+ newconf->provider = DAV_INHERIT_VALUE(parent, child, provider);
+ if (parent->provider_name != NULL) {
+ if (child->provider_name == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL,
+ "\"DAV Off\" cannot be used to turn off a subtree "
+ "of a DAV-enabled location.");
+ }
+ else if (strcasecmp(child->provider_name,
+ parent->provider_name) != 0) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL,
+ "A subtree cannot specify a different DAV provider "
+ "than its parent.");
+ }
+ }
+
+ newconf->locktimeout = DAV_INHERIT_VALUE(parent, child, locktimeout);
+ newconf->dir = DAV_INHERIT_VALUE(parent, child, dir);
+ newconf->allow_depthinfinity = DAV_INHERIT_VALUE(parent, child,
+ allow_depthinfinity);
+
+ return newconf;
+}
+
+static const dav_provider *dav_get_provider(request_rec *r)
+{
+ dav_dir_conf *conf;
+
+ conf = ap_get_module_config(r->per_dir_config, &dav_module);
+ /* assert: conf->provider_name != NULL
+ (otherwise, DAV is disabled, and we wouldn't be here) */
+
+ /* assert: conf->provider != NULL
+ (checked when conf->provider_name is set) */
+ return conf->provider;
+}
+
+DAV_DECLARE(const dav_hooks_locks *) dav_get_lock_hooks(request_rec *r)
+{
+ return dav_get_provider(r)->locks;
+}
+
+DAV_DECLARE(const dav_hooks_propdb *) dav_get_propdb_hooks(request_rec *r)
+{
+ return dav_get_provider(r)->propdb;
+}
+
+DAV_DECLARE(const dav_hooks_vsn *) dav_get_vsn_hooks(request_rec *r)
+{
+ return dav_get_provider(r)->vsn;
+}
+
+DAV_DECLARE(const dav_hooks_binding *) dav_get_binding_hooks(request_rec *r)
+{
+ return dav_get_provider(r)->binding;
+}
+
+DAV_DECLARE(const dav_hooks_search *) dav_get_search_hooks(request_rec *r)
+{
+ return dav_get_provider(r)->search;
+}
+
+/*
+ * Command handler for the DAV directive, which is TAKE1.
+ */
+static const char *dav_cmd_dav(cmd_parms *cmd, void *config, const char *arg1)
+{
+ dav_dir_conf *conf = (dav_dir_conf *)config;
+
+ if (strcasecmp(arg1, "on") == 0) {
+ conf->provider_name = DAV_DEFAULT_PROVIDER;
+ }
+ else if (strcasecmp(arg1, "off") == 0) {
+ conf->provider_name = NULL;
+ conf->provider = NULL;
+ }
+ else {
+ conf->provider_name = apr_pstrdup(cmd->pool, arg1);
+ }
+
+ if (conf->provider_name != NULL) {
+ /* lookup and cache the actual provider now */
+ conf->provider = dav_lookup_provider(conf->provider_name);
+
+ if (conf->provider == NULL) {
+ /* by the time they use it, the provider should be loaded and
+ registered with us. */
+ return apr_psprintf(cmd->pool,
+ "Unknown DAV provider: %s",
+ conf->provider_name);
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Command handler for the DAVDepthInfinity directive, which is FLAG.
+ */
+static const char *dav_cmd_davdepthinfinity(cmd_parms *cmd, void *config,
+ int arg)
+{
+ dav_dir_conf *conf = (dav_dir_conf *)config;
+
+ if (arg)
+ conf->allow_depthinfinity = DAV_ENABLED_ON;
+ else
+ conf->allow_depthinfinity = DAV_ENABLED_OFF;
+ return NULL;
+}
+
+/*
+ * Command handler for DAVMinTimeout directive, which is TAKE1
+ */
+static const char *dav_cmd_davmintimeout(cmd_parms *cmd, void *config,
+ const char *arg1)
+{
+ dav_dir_conf *conf = (dav_dir_conf *)config;
+
+ conf->locktimeout = atoi(arg1);
+ if (conf->locktimeout < 0)
+ return "DAVMinTimeout requires a non-negative integer.";
+
+ return NULL;
+}
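+/*
+** A configuration sketch that exercises the three handlers above (directive
+** names as registered in this module's command table):
+**
+**     <Location /webdav>
+**       DAV On
+**       DAVDepthInfinity off
+**       DAVMinTimeout 600
+**     </Location>
+*/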
+
+/*
+** dav_error_response()
+**
+** Send a nice response back to the user. In most cases, Apache doesn't
+** allow us to provide details in the body about what happened. This
+** function allows us to completely specify the response body.
+**
+** ### this function is not logging any errors! (e.g. the body)
+*/
+static int dav_error_response(request_rec *r, int status, const char *body)
+{
+ r->status = status;
+
+ /* ### I really don't think this is needed; gotta test */
+ r->status_line = ap_get_status_line(status);
+
+ ap_set_content_type(r, "text/html; charset=ISO-8859-1");
+
+ /* begin the response now... */
+ ap_rvputs(r,
+ DAV_RESPONSE_BODY_1,
+ r->status_line,
+ DAV_RESPONSE_BODY_2,
+ &r->status_line[4],
+ DAV_RESPONSE_BODY_3,
+ body,
+ DAV_RESPONSE_BODY_4,
+ ap_psignature("<hr />\n", r),
+ DAV_RESPONSE_BODY_5,
+ NULL);
+
+ /* the response has been sent. */
+ /*
+ * ### Use of DONE obviates logging..!
+ */
+ return DONE;
+}
+
+
+/*
+ * Send a "standardized" error response based on the error's namespace & tag
+ */
+static int dav_error_response_tag(request_rec *r,
+ dav_error *err)
+{
+ r->status = err->status;
+
+ /* ### I really don't think this is needed; gotta test */
+ r->status_line = ap_get_status_line(err->status);
+
+ ap_set_content_type(r, DAV_XML_CONTENT_TYPE);
+
+ ap_rputs(DAV_XML_HEADER DEBUG_CR
+ "<D:error xmlns:D=\"DAV:\"", r);
+
+ if (err->desc != NULL) {
+ /* ### should move this namespace somewhere (with the others!) */
+ ap_rputs(" xmlns:m=\"http://apache.org/dav/xmlns\"", r);
+ }
+
+ if (err->namespace != NULL) {
+ ap_rprintf(r,
+ " xmlns:C=\"%s\">" DEBUG_CR
+ "<C:%s/>" DEBUG_CR,
+ err->namespace, err->tagname);
+ }
+ else {
+ ap_rprintf(r,
+ ">" DEBUG_CR
+ "<D:%s/>" DEBUG_CR, err->tagname);
+ }
+
+ /* here's our mod_dav specific tag: */
+ if (err->desc != NULL) {
+ ap_rprintf(r,
+ "<m:human-readable errcode=\"%d\">" DEBUG_CR
+ "%s" DEBUG_CR
+ "</m:human-readable>" DEBUG_CR,
+ err->error_id,
+ apr_xml_quote_string(r->pool, err->desc, 0));
+ }
+
+ ap_rputs("</D:error>" DEBUG_CR, r);
+
+ /* the response has been sent. */
+ /*
+ * ### Use of DONE obviates logging..!
+ */
+ return DONE;
+}
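+/*
+** The generated body looks roughly like this (error tag and text are
+** illustrative):
+**
+**     <?xml version="1.0" encoding="utf-8"?>
+**     <D:error xmlns:D="DAV:" xmlns:m="http://apache.org/dav/xmlns">
+**     <D:lock-token-submitted/>
+**     <m:human-readable errcode="0">
+**     A locked resource was not submitted in the If: header.
+**     </m:human-readable>
+**     </D:error>
+*/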
+
+
+/*
+ * Apache's URI escaping does not replace '&' since that is a valid character
+ * in a URI (to form a query section). We must explicitly handle it so that
+ * we can embed the URI into an XML document.
+ */
+static const char *dav_xml_escape_uri(apr_pool_t *p, const char *uri)
+{
+ const char *e_uri = ap_escape_uri(p, uri);
+
+ /* check the easy case... */
+ if (ap_strchr_c(e_uri, '&') == NULL)
+ return e_uri;
+
+ /* there was a '&', so more work is needed... sigh. */
+
+ /*
+ * Note: this is a teeny bit of overkill since we know there are no
+ * '<' or '>' characters, but who cares.
+ */
+ return apr_xml_quote_string(p, e_uri, 0);
+}
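+/*
+** For example, a URI such as "/docs/a&b c" is first escaped by
+** ap_escape_uri() (the space becomes %20, '&' is left alone) and then
+** XML-quoted, yielding something like "/docs/a&amp;b%20c" suitable for
+** embedding in the multistatus body.
+*/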
+
+
+/* Write a complete RESPONSE object out as a <DAV:response> xml
+ element. Data is sent into brigade BB, which is auto-flushed into
+ OUTPUT filter stack. Use POOL for any temporary allocations.
+
+ [Presumably the <multistatus> tag has already been written; this
+ routine is shared by dav_send_multistatus and dav_stream_response.]
+*/
+static void dav_send_one_response(dav_response *response,
+ apr_bucket_brigade *bb,
+ ap_filter_t *output,
+ apr_pool_t *pool)
+{
+ apr_text *t = NULL;
+
+ if (response->propresult.xmlns == NULL) {
+ ap_fputs(output, bb, "<D:response>");
+ }
+ else {
+ ap_fputs(output, bb, "<D:response");
+ for (t = response->propresult.xmlns; t; t = t->next) {
+ ap_fputs(output, bb, t->text);
+ }
+ ap_fputc(output, bb, '>');
+ }
+
+ ap_fputstrs(output, bb,
+ DEBUG_CR "<D:href>",
+ dav_xml_escape_uri(pool, response->href),
+ "</D:href>" DEBUG_CR,
+ NULL);
+
+ if (response->propresult.propstats == NULL) {
+ /* use the Status-Line text from Apache. Note, this will
+ * default to 500 Internal Server Error if first->status
+ * is not a known (or valid) status code.
+ */
+ ap_fputstrs(output, bb,
+ "<D:status>HTTP/1.1 ",
+ ap_get_status_line(response->status),
+ "</D:status>" DEBUG_CR,
+ NULL);
+ }
+ else {
+ /* assume this includes <propstat> and is quoted properly */
+ for (t = response->propresult.propstats; t; t = t->next) {
+ ap_fputs(output, bb, t->text);
+ }
+ }
+
+ if (response->desc != NULL) {
+ /*
+ * We supply the description, so we know it doesn't have to
+ * have any escaping/encoding applied to it.
+ */
+ ap_fputstrs(output, bb,
+ "<D:responsedescription>",
+ response->desc,
+ "</D:responsedescription>" DEBUG_CR,
+ NULL);
+ }
+
+ ap_fputs(output, bb, "</D:response>" DEBUG_CR);
+}
+
+
+/* Factorized helper function: prep request_rec R for a multistatus
+ response and write <multistatus> tag into BB, destined for
+ R->output_filters. Use xml NAMESPACES in initial tag, if
+ non-NULL. */
+static void dav_begin_multistatus(apr_bucket_brigade *bb,
+ request_rec *r, int status,
+ apr_array_header_t *namespaces)
+{
+ /* Set the correct status and Content-Type */
+ r->status = status;
+ ap_set_content_type(r, DAV_XML_CONTENT_TYPE);
+
+ /* Send the headers and actual multistatus response now... */
+ ap_fputs(r->output_filters, bb, DAV_XML_HEADER DEBUG_CR
+ "<D:multistatus xmlns:D=\"DAV:\"");
+
+ if (namespaces != NULL) {
+ int i;
+
+ for (i = namespaces->nelts; i--; ) {
+ ap_fprintf(r->output_filters, bb, " xmlns:ns%d=\"%s\"", i,
+ APR_XML_GET_URI_ITEM(namespaces, i));
+ }
+ }
+
+ ap_fputs(r->output_filters, bb, ">" DEBUG_CR);
+}
+
+/* Finish a multistatus response started by dav_begin_multistatus: */
+static apr_status_t dav_finish_multistatus(request_rec *r,
+ apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+
+ ap_fputs(r->output_filters, bb, "</D:multistatus>" DEBUG_CR);
+
+ /* indicate the end of the response body */
+ b = apr_bucket_eos_create(r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+
+ /* deliver whatever might be remaining in the brigade */
+ return ap_pass_brigade(r->output_filters, bb);
+}
+
+static void dav_send_multistatus(request_rec *r, int status,
+ dav_response *first,
+ apr_array_header_t *namespaces)
+{
+ apr_pool_t *subpool;
+ apr_bucket_brigade *bb = apr_brigade_create(r->pool,
+ r->connection->bucket_alloc);
+
+ dav_begin_multistatus(bb, r, status, namespaces);
+
+ apr_pool_create(&subpool, r->pool);
+
+ for (; first != NULL; first = first->next) {
+ apr_pool_clear(subpool);
+ dav_send_one_response(first, bb, r->output_filters, subpool);
+ }
+ apr_pool_destroy(subpool);
+
+ dav_finish_multistatus(r, bb);
+}
+
+/*
+ * dav_log_err()
+ *
+ * Write error information to the log.
+ */
+static void dav_log_err(request_rec *r, dav_error *err, int level)
+{
+ dav_error *errscan;
+
+ /* Log the errors */
+ /* ### should have a directive to log the first or all */
+ for (errscan = err; errscan != NULL; errscan = errscan->prev) {
+ if (errscan->desc == NULL)
+ continue;
+
+ if (errscan->save_errno != 0) {
+ errno = errscan->save_errno;
+ ap_log_rerror(APLOG_MARK, level, errno, r, "%s [%d, #%d]",
+ errscan->desc, errscan->status, errscan->error_id);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, level, 0, r,
+ "%s [%d, #%d]",
+ errscan->desc, errscan->status, errscan->error_id);
+ }
+ }
+}
+
+/*
+ * dav_handle_err()
+ *
+ * Handle the standard error processing. <err> must be non-NULL.
+ *
+ * <response> is set by the following:
+ * - dav_validate_request()
+ * - dav_add_lock()
+ * - repos_hooks->remove_resource
+ * - repos_hooks->move_resource
+ * - repos_hooks->copy_resource
+ * - vsn_hooks->update
+ */
+static int dav_handle_err(request_rec *r, dav_error *err,
+ dav_response *response)
+{
+ /* log the errors */
+ dav_log_err(r, err, APLOG_ERR);
+
+ if (response == NULL) {
+ dav_error *stackerr = err;
+
+ /* our error messages are safe; tell Apache this */
+ apr_table_setn(r->notes, "verbose-error-to", "*");
+
+ /* Didn't get a multistatus response passed in, but we still
+ might be able to generate a standard <D:error> response.
+ Search the error stack for an errortag. */
+ while (stackerr != NULL && stackerr->tagname == NULL)
+ stackerr = stackerr->prev;
+
+ if (stackerr != NULL && stackerr->tagname != NULL)
+ return dav_error_response_tag(r, stackerr);
+
+ return err->status;
+ }
+
+ /* send the multistatus and tell Apache the request/response is DONE. */
+ dav_send_multistatus(r, err->status, response, NULL);
+ return DONE;
+}
+
+/* handy function for return values of methods that (may) create things */
+static int dav_created(request_rec *r, const char *locn, const char *what,
+ int replaced)
+{
+ const char *body;
+
+ if (locn == NULL) {
+ locn = r->uri;
+ }
+
+ /* did the target resource already exist? */
+ if (replaced) {
+ /* Apache will supply a default message */
+ return HTTP_NO_CONTENT;
+ }
+
+ /* Per HTTP/1.1, S10.2.2: add a Location header to contain the
+ * URI that was created. */
+
+ /* Convert locn to an absolute URI, and return in Location header */
+ apr_table_setn(r->headers_out, "Location", ap_construct_url(r->pool, locn, r));
+
+ /* ### insert an ETag header? see HTTP/1.1 S10.2.2 */
+
+ /* Apache doesn't allow us to set a variable body for HTTP_CREATED, so
+ * we must manufacture the entire response. */
+ body = apr_psprintf(r->pool, "%s %s has been created.",
+ what, ap_escape_html(r->pool, locn));
+ return dav_error_response(r, HTTP_CREATED, body);
+}
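+
+/* For example, a PUT that creates /dav/new.txt responds with 201, a
+   Location header carrying the absolute URI (via ap_construct_url), and
+   a small body along the lines of "Resource /dav/new.txt has been
+   created."; replacing an existing target yields a bare 204 instead. */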
+
+/* ### move to dav_util? */
+DAV_DECLARE(int) dav_get_depth(request_rec *r, int def_depth)
+{
+ const char *depth = apr_table_get(r->headers_in, "Depth");
+
+ if (depth == NULL) {
+ return def_depth;
+ }
+
+ if (strcasecmp(depth, "infinity") == 0) {
+ return DAV_INFINITY;
+ }
+ else if (strcmp(depth, "0") == 0) {
+ return 0;
+ }
+ else if (strcmp(depth, "1") == 0) {
+ return 1;
+ }
+
+ /* The caller will return an HTTP_BAD_REQUEST. This will augment the
+ * default message that Apache provides. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "An invalid Depth header was specified.");
+ return -1;
+}
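+
+/* Examples of the mapping performed above:
+
+     (no Depth header)  -> def_depth
+     Depth: 0           -> 0
+     Depth: 1           -> 1
+     Depth: infinity    -> DAV_INFINITY
+     Depth: 2           -> -1 (the caller maps this to 400 Bad Request)
+*/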
+
+static int dav_get_overwrite(request_rec *r)
+{
+ const char *overwrite = apr_table_get(r->headers_in, "Overwrite");
+
+ if (overwrite == NULL) {
+ return 1; /* default is "T" */
+ }
+
+ if ((*overwrite == 'F' || *overwrite == 'f') && overwrite[1] == '\0') {
+ return 0;
+ }
+
+ if ((*overwrite == 'T' || *overwrite == 't') && overwrite[1] == '\0') {
+ return 1;
+ }
+
+ /* The caller will return an HTTP_BAD_REQUEST. This will augment the
+ * default message that Apache provides. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "An invalid Overwrite header was specified.");
+ return -1;
+}
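+
+/* The Overwrite header maps similarly: a missing header or "T"/"t"
+   yields 1, "F"/"f" yields 0, and anything else yields -1 (which the
+   caller maps to 400 Bad Request). */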
+
+/* resolve a request URI to a resource descriptor.
+ *
+ * If label_allowed != 0, then allow the request target to be altered by
+ * a Label: header.
+ *
+ * If use_checked_in is true, then the repository provider should return
+ * the resource identified by the DAV:checked-in property of the resource
+ * identified by the Request-URI.
+ */
+static dav_error *dav_get_resource(request_rec *r, int label_allowed,
+ int use_checked_in, dav_resource **res_p)
+{
+ dav_dir_conf *conf;
+ const char *label = NULL;
+ dav_error *err;
+
+ /* if the request target can be overridden, get any target selector */
+ if (label_allowed) {
+ label = apr_table_get(r->headers_in, "label");
+ }
+
+ conf = ap_get_module_config(r->per_dir_config, &dav_module);
+ /* assert: conf->provider != NULL */
+
+ /* resolve the resource */
+ err = (*conf->provider->repos->get_resource)(r, conf->dir,
+ label, use_checked_in,
+ res_p);
+ if (err != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ "Could not fetch resource information.", err);
+ return err;
+ }
+
+ /* Note: this shouldn't happen, but just be sure... */
+ if (*res_p == NULL) {
+ /* ### maybe use HTTP_INTERNAL_SERVER_ERROR */
+ return dav_new_error(r->pool, HTTP_NOT_FOUND, 0,
+ apr_psprintf(r->pool,
+ "The provider did not define a "
+ "resource for %s.",
+ ap_escape_html(r->pool, r->uri)));
+ }
+
+ /* ### hmm. this doesn't feel like the right place or thing to do */
+ /* if there were any input headers requiring a Vary header in the response,
+ * add it now */
+ dav_add_vary_header(r, r, *res_p);
+
+ return NULL;
+}
+
+static dav_error * dav_open_lockdb(request_rec *r, int ro, dav_lockdb **lockdb)
+{
+ const dav_hooks_locks *hooks = DAV_GET_HOOKS_LOCKS(r);
+
+ if (hooks == NULL) {
+ *lockdb = NULL;
+ return NULL;
+ }
+
+    /* open the lock database lazily */
+ return (*hooks->open_lockdb)(r, ro, 0, lockdb);
+}
+
+static int dav_parse_range(request_rec *r,
+ apr_off_t *range_start, apr_off_t *range_end)
+{
+ const char *range_c;
+ char *range;
+ char *dash;
+ char *slash;
+
+ range_c = apr_table_get(r->headers_in, "content-range");
+ if (range_c == NULL)
+ return 0;
+
+ range = apr_pstrdup(r->pool, range_c);
+ if (strncasecmp(range, "bytes ", 6) != 0
+ || (dash = ap_strchr(range, '-')) == NULL
+ || (slash = ap_strchr(range, '/')) == NULL) {
+ /* malformed header. ignore it (per S14.16 of RFC2616) */
+ return 0;
+ }
+
+ *dash = *slash = '\0';
+
+ *range_start = apr_atoi64(range + 6);
+ *range_end = apr_atoi64(dash + 1);
+
+ if (*range_end < *range_start
+ || (slash[1] != '*' && apr_atoi64(slash + 1) <= *range_end)) {
+ /* invalid range. ignore it (per S14.16 of RFC2616) */
+ return 0;
+ }
+
+ /* we now have a valid range */
+ return 1;
+}
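+
+/* Example: "Content-Range: bytes 500-999/2000" is accepted and yields
+   range_start == 500 and range_end == 999, while "bytes 500-999/800"
+   is ignored (the end lies beyond the instance length), as is any
+   header that does not begin with "bytes ". */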
+
+/* handle the GET method */
+static int dav_method_get(request_rec *r)
+{
+ dav_resource *resource;
+ dav_error *err;
+
+    /* This method should only be called when the resource is not
+     * visible to Apache. We fetch the resource from the repository
+     * and deliver its contents via the provider's deliver hook.
+     */
+ err = dav_get_resource(r, 1 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ if (!resource->exists) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+ /* set up the HTTP headers for the response */
+ if ((err = (*resource->hooks->set_headers)(r, resource)) != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ "Unable to set up HTTP headers.",
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ if (r->header_only) {
+ return DONE;
+ }
+
+ /* okay... time to deliver the content */
+ if ((err = (*resource->hooks->deliver)(resource,
+ r->output_filters)) != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ "Unable to deliver content.",
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ return DONE;
+}
+
+/* validate resource/locks on POST, then pass to the default handler */
+static int dav_method_post(request_rec *r)
+{
+ dav_resource *resource;
+ dav_error *err;
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ /* Note: depth == 0. Implies no need for a multistatus response. */
+ if ((err = dav_validate_request(r, resource, 0, NULL, NULL,
+ DAV_VALIDATE_RESOURCE, NULL)) != NULL) {
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, NULL);
+ }
+
+ return DECLINED;
+}
+
+/* handle the PUT method */
+static int dav_method_put(request_rec *r)
+{
+ dav_resource *resource;
+ int resource_state;
+ dav_auto_version_info av_info;
+ const dav_hooks_locks *locks_hooks = DAV_GET_HOOKS_LOCKS(r);
+ const char *body;
+ dav_error *err;
+ dav_error *err2;
+ dav_stream_mode mode;
+ dav_stream *stream;
+ dav_response *multi_response;
+ int has_range;
+ apr_off_t range_start;
+ apr_off_t range_end;
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ /* If not a file or collection resource, PUT not allowed */
+ if (resource->type != DAV_RESOURCE_TYPE_REGULAR
+ && resource->type != DAV_RESOURCE_TYPE_WORKING) {
+ body = apr_psprintf(r->pool,
+ "Cannot create resource %s with PUT.",
+ ap_escape_html(r->pool, r->uri));
+ return dav_error_response(r, HTTP_CONFLICT, body);
+ }
+
+ /* Cannot PUT a collection */
+ if (resource->collection) {
+ return dav_error_response(r, HTTP_CONFLICT,
+ "Cannot PUT to a collection.");
+
+ }
+
+ resource_state = dav_get_resource_state(r, resource);
+
+ /*
+ * Note: depth == 0 normally requires no multistatus response. However,
+ * if we pass DAV_VALIDATE_PARENT, then we could get an error on a URI
+ * other than the Request-URI, thereby requiring a multistatus.
+ *
+ * If the resource does not exist (DAV_RESOURCE_NULL), then we must
+ * check the resource *and* its parent. If the resource exists or is
+ * a locknull resource, then we check only the resource.
+ */
+ if ((err = dav_validate_request(r, resource, 0, NULL, &multi_response,
+ resource_state == DAV_RESOURCE_NULL ?
+ DAV_VALIDATE_PARENT :
+ DAV_VALIDATE_RESOURCE, NULL)) != NULL) {
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, multi_response);
+ }
+
+ /* make sure the resource can be modified (if versioning repository) */
+ if ((err = dav_auto_checkout(r, resource,
+ 0 /* not parent_only */,
+ &av_info)) != NULL) {
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* truncate and rewrite the file unless we see a Content-Range */
+ mode = DAV_MODE_WRITE_TRUNC;
+
+ has_range = dav_parse_range(r, &range_start, &range_end);
+ if (has_range) {
+ mode = DAV_MODE_WRITE_SEEKABLE;
+ }
+
+ /* Create the new file in the repository */
+ if ((err = (*resource->hooks->open_stream)(resource, mode,
+ &stream)) != NULL) {
+ /* ### assuming FORBIDDEN is probably not quite right... */
+ err = dav_push_error(r->pool, HTTP_FORBIDDEN, 0,
+ apr_psprintf(r->pool,
+ "Unable to PUT new contents for %s.",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ }
+
+ if (err == NULL && has_range) {
+ /* a range was provided. seek to the start */
+ err = (*resource->hooks->seek_stream)(stream, range_start);
+ }
+
+ if (err == NULL) {
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ int seen_eos = 0;
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+
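+        /* Pull the request body through the input filters one brigade
+         * at a time, writing each data bucket to the provider's stream;
+         * metadata buckets are skipped and EOS terminates the loop. */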
+ do {
+ apr_status_t rc;
+
+ rc = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, DAV_READ_BLOCKSIZE);
+
+ if (rc != APR_SUCCESS) {
+ err = dav_new_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not get next bucket brigade");
+ break;
+ }
+
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ const char *data;
+ apr_size_t len;
+
+ if (APR_BUCKET_IS_EOS(b)) {
+ seen_eos = 1;
+ break;
+ }
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ continue;
+ }
+
+ rc = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (rc != APR_SUCCESS) {
+ err = dav_new_error(r->pool, HTTP_BAD_REQUEST, 0,
+ "An error occurred while reading "
+ "the request body.");
+ break;
+ }
+
+ if (err == NULL) {
+ /* write whatever we read, until we see an error */
+ err = (*resource->hooks->write_stream)(stream, data, len);
+ }
+ }
+
+ apr_brigade_cleanup(bb);
+ } while (!seen_eos);
+
+ apr_brigade_destroy(bb);
+
+ err2 = (*resource->hooks->close_stream)(stream,
+ err == NULL /* commit */);
+ if (err2 != NULL && err == NULL) {
+ /* no error during the write, but we hit one at close. use it. */
+ err = err2;
+ }
+ }
+
+ /*
+ * Ensure that we think the resource exists now.
+ * ### eek. if an error occurred during the write and we did not commit,
+ * ### then the resource might NOT exist (e.g. dav_fs_repos.c)
+ */
+ if (err == NULL) {
+ resource->exists = 1;
+ }
+
+ /* restore modifiability of resources back to what they were */
+ err2 = dav_auto_checkin(r, resource, err != NULL /* undo if error */,
+ 0 /*unlock*/, &av_info);
+
+ /* check for errors now */
+ if (err != NULL) {
+ return dav_handle_err(r, err, NULL);
+ }
+
+ if (err2 != NULL) {
+ /* just log a warning */
+ err2 = dav_push_error(r->pool, err2->status, 0,
+ "The PUT was successful, but there "
+ "was a problem automatically checking in "
+ "the resource or its parent collection.",
+ err2);
+ dav_log_err(r, err2, APLOG_WARNING);
+ }
+
+ /* ### place the Content-Type and Content-Language into the propdb */
+
+ if (locks_hooks != NULL) {
+ dav_lockdb *lockdb;
+
+ if ((err = (*locks_hooks->open_lockdb)(r, 0, 0, &lockdb)) != NULL) {
+ /* The file creation was successful, but the locking failed. */
+ err = dav_push_error(r->pool, err->status, 0,
+ "The file was PUT successfully, but there "
+ "was a problem opening the lock database "
+ "which prevents inheriting locks from the "
+ "parent resources.",
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* notify lock system that we have created/replaced a resource */
+ err = dav_notify_created(r, lockdb, resource, resource_state, 0);
+
+ (*locks_hooks->close_lockdb)(lockdb);
+
+ if (err != NULL) {
+ /* The file creation was successful, but the locking failed. */
+ err = dav_push_error(r->pool, err->status, 0,
+ "The file was PUT successfully, but there "
+ "was a problem updating its lock "
+ "information.",
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+ }
+
+ /* NOTE: WebDAV spec, S8.7.1 states properties should be unaffected */
+
+ /* return an appropriate response (HTTP_CREATED or HTTP_NO_CONTENT) */
+ return dav_created(r, NULL, "Resource", resource_state == DAV_RESOURCE_EXISTS);
+}
+
+
+/* Use POOL to temporarily construct a dav_response object (from WRES,
+   STATUS, and PROPSTATS) and stream it via WRES's ctx->brigade. */
+static void dav_stream_response(dav_walk_resource *wres,
+ int status,
+ dav_get_props_result *propstats,
+ apr_pool_t *pool)
+{
+ dav_response resp = { 0 };
+ dav_walker_ctx *ctx = wres->walk_ctx;
+
+ resp.href = wres->resource->uri;
+ resp.status = status;
+ if (propstats) {
+ resp.propresult = *propstats;
+ }
+
+ dav_send_one_response(&resp, ctx->bb, ctx->r->output_filters, pool);
+}
+
+
+/* ### move this to dav_util? */
+DAV_DECLARE(void) dav_add_response(dav_walk_resource *wres,
+ int status, dav_get_props_result *propstats)
+{
+ dav_response *resp;
+
+    /* just drop some data into a dav_response */
+ resp = apr_pcalloc(wres->pool, sizeof(*resp));
+ resp->href = apr_pstrdup(wres->pool, wres->resource->uri);
+ resp->status = status;
+ if (propstats) {
+ resp->propresult = *propstats;
+ }
+
+ resp->next = wres->response;
+ wres->response = resp;
+}
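+
+/* A repository provider's walker callback might collect results with
+   this function roughly as follows (illustrative sketch only; propdb and
+   "what" stand in for the provider's own locals):
+
+     dav_get_props_result props = dav_get_allprops(propdb, what);
+     dav_add_response(wres, 0, &props);
+
+   dav_send_multistatus() later turns the accumulated wres->response
+   chain into <D:response> elements. */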
+
+
+/* handle the DELETE method */
+static int dav_method_delete(request_rec *r)
+{
+ dav_resource *resource;
+ dav_auto_version_info av_info;
+ dav_error *err;
+ dav_error *err2;
+ dav_response *multi_response;
+ int result;
+ int depth;
+
+ /* We don't use the request body right now, so torch it. */
+ if ((result = ap_discard_request_body(r)) != OK) {
+ return result;
+ }
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+ if (!resource->exists) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+    /* RFC 2518 says that Depth must be "infinity" for a DELETE of a
+     * collection. For non-collections, Depth is ignored, except for the
+     * one illegal value (1).
+     */
+ depth = dav_get_depth(r, DAV_INFINITY);
+
+ if (resource->collection && depth != DAV_INFINITY) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Depth must be \"infinity\" for DELETE of a collection.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ if (!resource->collection && depth == 1) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Depth of \"1\" is not allowed for DELETE.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /*
+ ** If any resources fail the lock/If: conditions, then we must fail
+ ** the delete. Each of the failing resources will be listed within
+ ** a DAV:multistatus body, wrapped into a 424 response.
+ **
+ ** Note that a failure on the resource itself does not generate a
+ ** multistatus response -- only internal members/collections.
+ */
+ if ((err = dav_validate_request(r, resource, depth, NULL,
+ &multi_response,
+ DAV_VALIDATE_PARENT
+ | DAV_VALIDATE_USE_424, NULL)) != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ apr_psprintf(r->pool,
+ "Could not DELETE %s due to a failed "
+ "precondition (e.g. locks).",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, multi_response);
+ }
+
+ /* ### RFC 2518 s. 8.10.5 says to remove _all_ locks, not just those
+ * locked by the token(s) in the if_header.
+ */
+ if ((result = dav_unlock(r, resource, NULL)) != OK) {
+ return result;
+ }
+
+ /* if versioned resource, make sure parent is checked out */
+ if ((err = dav_auto_checkout(r, resource, 1 /* parent_only */,
+ &av_info)) != NULL) {
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* try to remove the resource */
+ err = (*resource->hooks->remove_resource)(resource, &multi_response);
+
+ /* restore writability of parent back to what it was */
+ err2 = dav_auto_checkin(r, NULL, err != NULL /* undo if error */,
+ 0 /*unlock*/, &av_info);
+
+ /* check for errors now */
+ if (err != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ apr_psprintf(r->pool,
+ "Could not DELETE %s.",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, multi_response);
+ }
+ if (err2 != NULL) {
+ /* just log a warning */
+ err = dav_push_error(r->pool, err2->status, 0,
+ "The DELETE was successful, but there "
+ "was a problem automatically checking in "
+ "the parent collection.",
+ err2);
+ dav_log_err(r, err, APLOG_WARNING);
+ }
+
+ /* ### HTTP_NO_CONTENT if no body, HTTP_OK if there is a body (some day) */
+
+    /* No response body is needed; 204 No Content is returned. */
+ return HTTP_NO_CONTENT;
+}
+
+/* generate DAV:supported-method-set OPTIONS response */
+static dav_error *dav_gen_supported_methods(request_rec *r,
+ const apr_xml_elem *elem,
+ const apr_table_t *methods,
+ apr_text_header *body)
+{
+ const apr_array_header_t *arr;
+ const apr_table_entry_t *elts;
+ apr_xml_elem *child;
+ apr_xml_attr *attr;
+ char *s;
+ int i;
+
+ apr_text_append(r->pool, body, "<D:supported-method-set>" DEBUG_CR);
+
+ if (elem->first_child == NULL) {
+ /* show all supported methods */
+ arr = apr_table_elts(methods);
+ elts = (const apr_table_entry_t *)arr->elts;
+
+ for (i = 0; i < arr->nelts; ++i) {
+ if (elts[i].key == NULL)
+ continue;
+
+ s = apr_psprintf(r->pool,
+ "<D:supported-method D:name=\"%s\"/>"
+ DEBUG_CR,
+ elts[i].key);
+ apr_text_append(r->pool, body, s);
+ }
+ }
+ else {
+ /* check for support of specific methods */
+ for (child = elem->first_child; child != NULL; child = child->next) {
+ if (child->ns == APR_XML_NS_DAV_ID
+ && strcmp(child->name, "supported-method") == 0) {
+ const char *name = NULL;
+
+ /* go through attributes to find method name */
+ for (attr = child->attr; attr != NULL; attr = attr->next) {
+ if (attr->ns == APR_XML_NS_DAV_ID
+ && strcmp(attr->name, "name") == 0)
+ name = attr->value;
+ }
+
+ if (name == NULL) {
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST, 0,
+ "A DAV:supported-method element "
+ "does not have a \"name\" attribute");
+ }
+
+ /* see if method is supported */
+ if (apr_table_get(methods, name) != NULL) {
+ s = apr_psprintf(r->pool,
+ "<D:supported-method D:name=\"%s\"/>"
+ DEBUG_CR,
+ name);
+ apr_text_append(r->pool, body, s);
+ }
+ }
+ }
+ }
+
+ apr_text_append(r->pool, body, "</D:supported-method-set>" DEBUG_CR);
+ return NULL;
+}
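+
+/* Each supported method is reported as an empty element of the form
+   <D:supported-method D:name="PROPFIND"/> inside the
+   <D:supported-method-set> wrapper generated above. */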
+
+/* generate DAV:supported-live-property-set OPTIONS response */
+static dav_error *dav_gen_supported_live_props(request_rec *r,
+ const dav_resource *resource,
+ const apr_xml_elem *elem,
+ apr_text_header *body)
+{
+ dav_lockdb *lockdb;
+ dav_propdb *propdb;
+ apr_xml_elem *child;
+ apr_xml_attr *attr;
+ dav_error *err;
+
+ /* open lock database, to report on supported lock properties */
+ /* ### should open read-only */
+ if ((err = dav_open_lockdb(r, 0, &lockdb)) != NULL) {
+ return dav_push_error(r->pool, err->status, 0,
+ "The lock database could not be opened, "
+ "preventing the reporting of supported lock "
+ "properties.",
+ err);
+ }
+
+ /* open the property database (readonly) for the resource */
+ if ((err = dav_open_propdb(r, lockdb, resource, 1, NULL,
+ &propdb)) != NULL) {
+ if (lockdb != NULL)
+ (*lockdb->hooks->close_lockdb)(lockdb);
+
+ return dav_push_error(r->pool, err->status, 0,
+ "The property database could not be opened, "
+ "preventing report of supported properties.",
+ err);
+ }
+
+ apr_text_append(r->pool, body, "<D:supported-live-property-set>" DEBUG_CR);
+
+ if (elem->first_child == NULL) {
+ /* show all supported live properties */
+ dav_get_props_result props = dav_get_allprops(propdb, DAV_PROP_INSERT_SUPPORTED);
+ body->last->next = props.propstats;
+ while (body->last->next != NULL)
+ body->last = body->last->next;
+ }
+ else {
+ /* check for support of specific live property */
+ for (child = elem->first_child; child != NULL; child = child->next) {
+ if (child->ns == APR_XML_NS_DAV_ID
+ && strcmp(child->name, "supported-live-property") == 0) {
+ const char *name = NULL;
+ const char *nmspace = NULL;
+
+ /* go through attributes to find name and namespace */
+ for (attr = child->attr; attr != NULL; attr = attr->next) {
+ if (attr->ns == APR_XML_NS_DAV_ID) {
+ if (strcmp(attr->name, "name") == 0)
+ name = attr->value;
+ else if (strcmp(attr->name, "namespace") == 0)
+ nmspace = attr->value;
+ }
+ }
+
+ if (name == NULL) {
+ err = dav_new_error(r->pool, HTTP_BAD_REQUEST, 0,
+ "A DAV:supported-live-property "
+ "element does not have a \"name\" "
+ "attribute");
+ break;
+ }
+
+ /* default namespace to DAV: */
+ if (nmspace == NULL)
+ nmspace = "DAV:";
+
+ /* check for support of property */
+ dav_get_liveprop_supported(propdb, nmspace, name, body);
+ }
+ }
+ }
+
+ apr_text_append(r->pool, body, "</D:supported-live-property-set>" DEBUG_CR);
+
+ dav_close_propdb(propdb);
+
+ if (lockdb != NULL)
+ (*lockdb->hooks->close_lockdb)(lockdb);
+
+ return err;
+}
+
+/* generate DAV:supported-report-set OPTIONS response */
+static dav_error *dav_gen_supported_reports(request_rec *r,
+ const dav_resource *resource,
+ const apr_xml_elem *elem,
+ const dav_hooks_vsn *vsn_hooks,
+ apr_text_header *body)
+{
+ apr_xml_elem *child;
+ apr_xml_attr *attr;
+ dav_error *err;
+ char *s;
+
+ apr_text_append(r->pool, body, "<D:supported-report-set>" DEBUG_CR);
+
+ if (vsn_hooks != NULL) {
+ const dav_report_elem *reports;
+ const dav_report_elem *rp;
+
+ if ((err = (*vsn_hooks->avail_reports)(resource, &reports)) != NULL) {
+ return dav_push_error(r->pool, err->status, 0,
+ "DAV:supported-report-set could not be "
+ "determined due to a problem fetching the "
+ "available reports for this resource.",
+ err);
+ }
+
+ if (reports != NULL) {
+ if (elem->first_child == NULL) {
+ /* show all supported reports */
+ for (rp = reports; rp->nmspace != NULL; ++rp) {
+                    /* Note: we presume rp->nmspace is
+                     * properly XML/URL quoted */
+ s = apr_psprintf(r->pool,
+ "<D:supported-report D:name=\"%s\" "
+ "D:namespace=\"%s\"/>" DEBUG_CR,
+ rp->name, rp->nmspace);
+ apr_text_append(r->pool, body, s);
+ }
+ }
+ else {
+ /* check for support of specific report */
+ for (child = elem->first_child; child != NULL; child = child->next) {
+ if (child->ns == APR_XML_NS_DAV_ID
+ && strcmp(child->name, "supported-report") == 0) {
+ const char *name = NULL;
+ const char *nmspace = NULL;
+
+ /* go through attributes to find name and namespace */
+ for (attr = child->attr; attr != NULL; attr = attr->next) {
+ if (attr->ns == APR_XML_NS_DAV_ID) {
+ if (strcmp(attr->name, "name") == 0)
+ name = attr->value;
+ else if (strcmp(attr->name, "namespace") == 0)
+ nmspace = attr->value;
+ }
+ }
+
+ if (name == NULL) {
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST, 0,
+ "A DAV:supported-report element "
+ "does not have a \"name\" attribute");
+ }
+
+ /* default namespace to DAV: */
+ if (nmspace == NULL)
+ nmspace = "DAV:";
+
+ for (rp = reports; rp->nmspace != NULL; ++rp) {
+ if (strcmp(name, rp->name) == 0
+ && strcmp(nmspace, rp->nmspace) == 0) {
+ /* Note: we presume reports->nmspace is
+ * properly XML/URL quoted
+ */
+ s = apr_psprintf(r->pool,
+ "<D:supported-report "
+ "D:name=\"%s\" "
+ "D:namespace=\"%s\"/>"
+ DEBUG_CR,
+ rp->name, rp->nmspace);
+ apr_text_append(r->pool, body, s);
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ apr_text_append(r->pool, body, "</D:supported-report-set>" DEBUG_CR);
+ return NULL;
+}
+
+
+/* handle the SEARCH method */
+static int dav_method_search(request_rec *r)
+{
+ const dav_hooks_search *search_hooks = DAV_GET_HOOKS_SEARCH(r);
+ dav_resource *resource;
+ dav_error *err;
+ dav_response *multi_status;
+
+ /* If no search provider, decline the request */
+ if (search_hooks == NULL)
+ return DECLINED;
+
+    /* This method should only be called when the resource is not
+     * visible to Apache. We fetch the resource from the repository
+     * and hand the request over to the search provider.
+     */
+ err = dav_get_resource(r, 1 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ if (!resource->exists) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+ /* set up the HTTP headers for the response */
+ if ((err = (*resource->hooks->set_headers)(r, resource)) != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ "Unable to set up HTTP headers.",
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ if (r->header_only) {
+ return DONE;
+ }
+
+ /* okay... time to search the content */
+    /* The search provider's search_resource hook is responsible for
+     * validating the XML request body and for performing whatever
+     * resource walk it needs.
+     */
+ if ((err = (*search_hooks->search_resource)(r, &multi_status)) != NULL) {
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* We have results in multi_status */
+    /* ### should the provider's namespaces be passed along here? */
+ dav_send_multistatus(r, HTTP_MULTI_STATUS, multi_status, NULL);
+
+ return DONE;
+}
+
+
+/* handle the OPTIONS method */
+static int dav_method_options(request_rec *r)
+{
+ const dav_hooks_locks *locks_hooks = DAV_GET_HOOKS_LOCKS(r);
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r);
+ const dav_hooks_binding *binding_hooks = DAV_GET_HOOKS_BINDING(r);
+ const dav_hooks_search *search_hooks = DAV_GET_HOOKS_SEARCH(r);
+ dav_resource *resource;
+ const char *dav_level;
+ char *allow;
+ char *s;
+ const apr_array_header_t *arr;
+ const apr_table_entry_t *elts;
+ apr_table_t *methods = apr_table_make(r->pool, 12);
+ apr_text_header vsn_options = { 0 };
+ apr_text_header body = { 0 };
+ apr_text *t;
+ int text_size;
+ int result;
+ int i;
+ apr_array_header_t *uri_ary;
+ apr_xml_doc *doc;
+ const apr_xml_elem *elem;
+ dav_error *err;
+
+ /* resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ /* parse any request body */
+ if ((result = ap_xml_parse_input(r, &doc)) != OK) {
+ return result;
+ }
+ /* note: doc == NULL if no request body */
+
+ if (doc && !dav_validate_root(doc, "options")) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The \"options\" element was not found.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* determine which providers are available */
+ dav_level = "1";
+
+ if (locks_hooks != NULL) {
+ dav_level = "1,2";
+ }
+
+ if (binding_hooks != NULL)
+ dav_level = apr_pstrcat(r->pool, dav_level, ",bindings", NULL);
+
+ /* ###
+ * MSFT Web Folders chokes if length of DAV header value > 63 characters!
+     * To work around that, we use separate DAV headers for versioning and
+ * live prop provider namespace URIs.
+ * ###
+ */
+ apr_table_setn(r->headers_out, "DAV", dav_level);
+
+ /*
+ * If there is a versioning provider, generate DAV headers
+ * for versioning options.
+ */
+ if (vsn_hooks != NULL) {
+ (*vsn_hooks->get_vsn_options)(r->pool, &vsn_options);
+
+ for (t = vsn_options.first; t != NULL; t = t->next)
+ apr_table_addn(r->headers_out, "DAV", t->text);
+ }
+
+ /*
+ * Gather property set URIs from all the liveprop providers,
+ * and generate a separate DAV header for each URI, to avoid
+ * problems with long header lengths.
+ */
+ uri_ary = apr_array_make(r->pool, 5, sizeof(const char *));
+ dav_run_gather_propsets(uri_ary);
+ for (i = 0; i < uri_ary->nelts; ++i) {
+ if (((char **)uri_ary->elts)[i] != NULL)
+ apr_table_addn(r->headers_out, "DAV", ((char **)uri_ary->elts)[i]);
+ }
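+
+    /* The net effect is several short DAV headers rather than one long
+     * one: "DAV: 1" or "DAV: 1,2" (plus ",bindings" with a binding
+     * provider), then one DAV header per versioning option, then one
+     * per live-property set URI gathered above. */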
+
+ /* this tells MSFT products to skip looking for FrontPage extensions */
+ apr_table_setn(r->headers_out, "MS-Author-Via", "DAV");
+
+ /*
+ * Determine which methods are allowed on the resource.
+ * Three cases: resource is null (3), is lock-null (7.4), or exists.
+ *
+ * All cases support OPTIONS, and if there is a lock provider, LOCK.
+ * (Lock-) null resources also support MKCOL and PUT.
+ * Lock-null supports PROPFIND and UNLOCK.
+ * Existing resources support lots of stuff.
+ */
+
+ apr_table_addn(methods, "OPTIONS", "");
+
+ /* ### take into account resource type */
+ switch (dav_get_resource_state(r, resource))
+ {
+ case DAV_RESOURCE_EXISTS:
+ /* resource exists */
+ apr_table_addn(methods, "GET", "");
+ apr_table_addn(methods, "HEAD", "");
+ apr_table_addn(methods, "POST", "");
+ apr_table_addn(methods, "DELETE", "");
+ apr_table_addn(methods, "TRACE", "");
+ apr_table_addn(methods, "PROPFIND", "");
+ apr_table_addn(methods, "PROPPATCH", "");
+ apr_table_addn(methods, "COPY", "");
+ apr_table_addn(methods, "MOVE", "");
+
+ if (!resource->collection)
+ apr_table_addn(methods, "PUT", "");
+
+ if (locks_hooks != NULL) {
+ apr_table_addn(methods, "LOCK", "");
+ apr_table_addn(methods, "UNLOCK", "");
+ }
+
+ break;
+
+ case DAV_RESOURCE_LOCK_NULL:
+ /* resource is lock-null. */
+ apr_table_addn(methods, "MKCOL", "");
+ apr_table_addn(methods, "PROPFIND", "");
+ apr_table_addn(methods, "PUT", "");
+
+ if (locks_hooks != NULL) {
+ apr_table_addn(methods, "LOCK", "");
+ apr_table_addn(methods, "UNLOCK", "");
+ }
+
+ break;
+
+ case DAV_RESOURCE_NULL:
+ /* resource is null. */
+ apr_table_addn(methods, "MKCOL", "");
+ apr_table_addn(methods, "PUT", "");
+
+ if (locks_hooks != NULL)
+ apr_table_addn(methods, "LOCK", "");
+
+ break;
+
+ default:
+ /* ### internal error! */
+ break;
+ }
+
+ /* If there is a versioning provider, add versioning methods */
+ if (vsn_hooks != NULL) {
+ if (!resource->exists) {
+ if ((*vsn_hooks->versionable)(resource))
+ apr_table_addn(methods, "VERSION-CONTROL", "");
+
+ if (vsn_hooks->can_be_workspace != NULL
+ && (*vsn_hooks->can_be_workspace)(resource))
+ apr_table_addn(methods, "MKWORKSPACE", "");
+
+ if (vsn_hooks->can_be_activity != NULL
+ && (*vsn_hooks->can_be_activity)(resource))
+ apr_table_addn(methods, "MKACTIVITY", "");
+ }
+ else if (!resource->versioned) {
+ if ((*vsn_hooks->versionable)(resource))
+ apr_table_addn(methods, "VERSION-CONTROL", "");
+ }
+ else if (resource->working) {
+ apr_table_addn(methods, "CHECKIN", "");
+
+ /* ### we might not support this DeltaV option */
+ apr_table_addn(methods, "UNCHECKOUT", "");
+ }
+ else if (vsn_hooks->add_label != NULL) {
+ apr_table_addn(methods, "CHECKOUT", "");
+ apr_table_addn(methods, "LABEL", "");
+ }
+ else {
+ apr_table_addn(methods, "CHECKOUT", "");
+ }
+ }
+
+ /* If there is a bindings provider, see if resource is bindable */
+ if (binding_hooks != NULL
+ && (*binding_hooks->is_bindable)(resource)) {
+ apr_table_addn(methods, "BIND", "");
+ }
+
+    /* If there is a search provider, advertise SEARCH as well */
+ if (search_hooks != NULL) {
+ apr_table_addn(methods, "SEARCH", "");
+ }
+
+ /* Generate the Allow header */
+ arr = apr_table_elts(methods);
+ elts = (const apr_table_entry_t *)arr->elts;
+ text_size = 0;
+
+ /* first, compute total length */
+ for (i = 0; i < arr->nelts; ++i) {
+ if (elts[i].key == NULL)
+ continue;
+
+ /* add 1 for comma or null */
+ text_size += strlen(elts[i].key) + 1;
+ }
+
+ s = allow = apr_palloc(r->pool, text_size);
+
+ for (i = 0; i < arr->nelts; ++i) {
+ if (elts[i].key == NULL)
+ continue;
+
+ if (s != allow)
+ *s++ = ',';
+
+ strcpy(s, elts[i].key);
+ s += strlen(s);
+ }
+
+ apr_table_setn(r->headers_out, "Allow", allow);
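+    /* e.g. "Allow: OPTIONS,GET,HEAD,POST,DELETE,TRACE,PROPFIND,PROPPATCH,
+     * COPY,MOVE,PUT,LOCK,UNLOCK" for an existing, lockable,
+     * non-collection resource. */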
+
+
+    /* If the search provider supplies a set_option_head function, let it
+     * emit its DASL response headers, for example: */
+ /* DASL: <DAV:basicsearch>
+ * DASL: <http://foo.bar.com/syntax1>
+ * DASL: <http://akuma.com/syntax2>
+ */
+ if (search_hooks != NULL
+ && *search_hooks->set_option_head != NULL) {
+ if ((err = (*search_hooks->set_option_head)(r)) != NULL) {
+ return dav_handle_err(r, err, NULL);
+ }
+ }
+
+ /* if there was no request body, then there is no response body */
+ if (doc == NULL) {
+ ap_set_content_length(r, 0);
+
+ /* ### this sends a Content-Type. the default OPTIONS does not. */
+
+ /* ### the default (ap_send_http_options) returns OK, but I believe
+ * ### that is because it is the default handler and nothing else
+ * ### will run after the thing. */
+ return DONE;
+ }
+
+ /* handle each options request */
+ for (elem = doc->root->first_child; elem != NULL; elem = elem->next) {
+ /* check for something we recognize first */
+ int core_option = 0;
+ dav_error *err = NULL;
+
+ if (elem->ns == APR_XML_NS_DAV_ID) {
+ if (strcmp(elem->name, "supported-method-set") == 0) {
+ err = dav_gen_supported_methods(r, elem, methods, &body);
+ core_option = 1;
+ }
+ else if (strcmp(elem->name, "supported-live-property-set") == 0) {
+ err = dav_gen_supported_live_props(r, resource, elem, &body);
+ core_option = 1;
+ }
+ else if (strcmp(elem->name, "supported-report-set") == 0) {
+ err = dav_gen_supported_reports(r, resource, elem, vsn_hooks, &body);
+ core_option = 1;
+ }
+ }
+
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ /* if unrecognized option, pass to versioning provider */
+ if (!core_option && vsn_hooks != NULL) {
+ if ((err = (*vsn_hooks->get_option)(resource, elem, &body))
+ != NULL) {
+ return dav_handle_err(r, err, NULL);
+ }
+ }
+ }
+
+ /* send the options response */
+ r->status = HTTP_OK;
+ ap_set_content_type(r, DAV_XML_CONTENT_TYPE);
+
+ /* send the headers and response body */
+ ap_rputs(DAV_XML_HEADER DEBUG_CR
+ "<D:options-response xmlns:D=\"DAV:\">" DEBUG_CR, r);
+
+ for (t = body.first; t != NULL; t = t->next)
+ ap_rputs(t->text, r);
+
+ ap_rputs("</D:options-response>" DEBUG_CR, r);
+
+ /* we've sent everything necessary to the client. */
+ return DONE;
+}
+
+static void dav_cache_badprops(dav_walker_ctx *ctx)
+{
+ const apr_xml_elem *elem;
+ apr_text_header hdr = { 0 };
+
+ /* just return if we built the thing already */
+ if (ctx->propstat_404 != NULL) {
+ return;
+ }
+
+ apr_text_append(ctx->w.pool, &hdr,
+ "<D:propstat>" DEBUG_CR
+ "<D:prop>" DEBUG_CR);
+
+ elem = dav_find_child(ctx->doc->root, "prop");
+ for (elem = elem->first_child; elem; elem = elem->next) {
+ apr_text_append(ctx->w.pool, &hdr,
+ apr_xml_empty_elem(ctx->w.pool, elem));
+ }
+
+ apr_text_append(ctx->w.pool, &hdr,
+ "</D:prop>" DEBUG_CR
+ "<D:status>HTTP/1.1 404 Not Found</D:status>" DEBUG_CR
+ "</D:propstat>" DEBUG_CR);
+
+ ctx->propstat_404 = hdr.first;
+}
+
+static dav_error * dav_propfind_walker(dav_walk_resource *wres, int calltype)
+{
+ dav_walker_ctx *ctx = wres->walk_ctx;
+ dav_error *err;
+ dav_propdb *propdb;
+ dav_get_props_result propstats = { 0 };
+
+ /*
+ ** Note: ctx->doc can only be NULL for DAV_PROPFIND_IS_ALLPROP. Since
+ ** dav_get_allprops() does not need to do namespace translation,
+ ** we're okay.
+ **
+ ** Note: we cast to lose the "const". The propdb won't try to change
+ ** the resource, however, since we are opening readonly.
+ */
+ err = dav_open_propdb(ctx->r, ctx->w.lockdb, wres->resource, 1,
+ ctx->doc ? ctx->doc->namespaces : NULL, &propdb);
+ if (err != NULL) {
+ /* ### do something with err! */
+
+ if (ctx->propfind_type == DAV_PROPFIND_IS_PROP) {
+ dav_get_props_result badprops = { 0 };
+
+ /* some props were expected on this collection/resource */
+ dav_cache_badprops(ctx);
+ badprops.propstats = ctx->propstat_404;
+ dav_stream_response(wres, 0, &badprops, ctx->scratchpool);
+ }
+ else {
+ /* no props on this collection/resource */
+ dav_stream_response(wres, HTTP_OK, NULL, ctx->scratchpool);
+ }
+
+ apr_pool_clear(ctx->scratchpool);
+ return NULL;
+ }
+ /* ### what to do about closing the propdb on server failure? */
+
+ if (ctx->propfind_type == DAV_PROPFIND_IS_PROP) {
+ propstats = dav_get_props(propdb, ctx->doc);
+ }
+ else {
+ dav_prop_insert what = ctx->propfind_type == DAV_PROPFIND_IS_ALLPROP
+ ? DAV_PROP_INSERT_VALUE
+ : DAV_PROP_INSERT_NAME;
+ propstats = dav_get_allprops(propdb, what);
+ }
+ dav_close_propdb(propdb);
+
+ dav_stream_response(wres, 0, &propstats, ctx->scratchpool);
+
+ /* at this point, ctx->scratchpool has been used to stream a
+ single response. this function fully controls the pool, and
+ thus has the right to clear it for the next iteration of this
+ callback. */
+ apr_pool_clear(ctx->scratchpool);
+
+ return NULL;
+}
+
+/* handle the PROPFIND method */
+static int dav_method_propfind(request_rec *r)
+{
+ dav_resource *resource;
+ int depth;
+ dav_error *err;
+ int result;
+ apr_xml_doc *doc;
+ const apr_xml_elem *child;
+ dav_walker_ctx ctx = { { 0 } };
+ dav_response *multi_status;
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 1 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ if (dav_get_resource_state(r, resource) == DAV_RESOURCE_NULL) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+ if ((depth = dav_get_depth(r, DAV_INFINITY)) < 0) {
+ /* dav_get_depth() supplies additional information for the
+ * default message. */
+ return HTTP_BAD_REQUEST;
+ }
+
+ if (depth == DAV_INFINITY && resource->collection) {
+ dav_dir_conf *conf;
+ conf = (dav_dir_conf *)ap_get_module_config(r->per_dir_config,
+ &dav_module);
+ /* default is to DISALLOW these requests */
+ if (conf->allow_depthinfinity != DAV_ENABLED_ON) {
+ return dav_error_response(r, HTTP_FORBIDDEN,
+ apr_psprintf(r->pool,
+ "PROPFIND requests with a "
+ "Depth of \"infinity\" are "
+ "not allowed for %s.",
+ ap_escape_html(r->pool,
+ r->uri)));
+ }
+ }
+
+ if ((result = ap_xml_parse_input(r, &doc)) != OK) {
+ return result;
+ }
+ /* note: doc == NULL if no request body */
+
+ if (doc && !dav_validate_root(doc, "propfind")) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The \"propfind\" element was not found.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* ### validate that only one of these three elements is present */
+
+ if (doc == NULL
+ || (child = dav_find_child(doc->root, "allprop")) != NULL) {
+ /* note: no request body implies allprop */
+ ctx.propfind_type = DAV_PROPFIND_IS_ALLPROP;
+ }
+ else if ((child = dav_find_child(doc->root, "propname")) != NULL) {
+ ctx.propfind_type = DAV_PROPFIND_IS_PROPNAME;
+ }
+ else if ((child = dav_find_child(doc->root, "prop")) != NULL) {
+ ctx.propfind_type = DAV_PROPFIND_IS_PROP;
+ }
+ else {
+ /* "propfind" element must have one of the above three children */
+
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The \"propfind\" element does not contain one of "
+ "the required child elements (the specific command).");
+ return HTTP_BAD_REQUEST;
+ }
+
+ ctx.w.walk_type = DAV_WALKTYPE_NORMAL | DAV_WALKTYPE_AUTH;
+ ctx.w.func = dav_propfind_walker;
+ ctx.w.walk_ctx = &ctx;
+ ctx.w.pool = r->pool;
+ ctx.w.root = resource;
+
+ ctx.doc = doc;
+ ctx.r = r;
+ ctx.bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ apr_pool_create(&ctx.scratchpool, r->pool);
+
+ /* ### should open read-only */
+ if ((err = dav_open_lockdb(r, 0, &ctx.w.lockdb)) != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ "The lock database could not be opened, "
+ "preventing access to the various lock "
+ "properties for the PROPFIND.",
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+ if (ctx.w.lockdb != NULL) {
+ /* if we have a lock database, then we can walk locknull resources */
+ ctx.w.walk_type |= DAV_WALKTYPE_LOCKNULL;
+ }
+
+ /* send <multistatus> tag, with all doc->namespaces attached. */
+
+ /* NOTE: we *cannot* leave out the doc's namespaces from the
+ initial <multistatus> tag. if a 404 was generated for an HREF,
+ then we need to spit out the doc's namespaces for use by the
+ 404. Note that <response> elements will override these ns0,
+ ns1, etc, but NOT within the <response> scope for the
+ badprops. */
+ dav_begin_multistatus(ctx.bb, r, HTTP_MULTI_STATUS,
+ doc ? doc->namespaces : NULL);
+
+ /* Have the provider walk the resource. */
+ err = (*resource->hooks->walk)(&ctx.w, depth, &multi_status);
+
+ if (ctx.w.lockdb != NULL) {
+ (*ctx.w.lockdb->hooks->close_lockdb)(ctx.w.lockdb);
+ }
+
+ if (err != NULL) {
+ /* If an error occurred during the resource walk, there's
+ basically nothing we can do but abort the connection and
+ log an error. This is one of the limitations of HTTP; it
+ needs to "know" the entire status of the response before
+ generating it, which is just impossible in these streamy
+ response situations. */
+ err = dav_push_error(r->pool, err->status, 0,
+ "Provider encountered an error while streaming"
+ " a multistatus PROPFIND response.", err);
+ dav_log_err(r, err, APLOG_ERR);
+ r->connection->aborted = 1;
+ return DONE;
+ }
+
+ dav_finish_multistatus(r, ctx.bb);
+ ap_filter_flush(ctx.bb, r->output_filters);
+
+ /* the response has been sent. */
+ return DONE;
+}
+
+static apr_text * dav_failed_proppatch(apr_pool_t *p,
+ apr_array_header_t *prop_ctx)
+{
+ apr_text_header hdr = { 0 };
+ int i = prop_ctx->nelts;
+ dav_prop_ctx *ctx = (dav_prop_ctx *)prop_ctx->elts;
+ dav_error *err424_set = NULL;
+ dav_error *err424_delete = NULL;
+ const char *s;
+
+ /* ### might be nice to sort by status code and description */
+
+ for ( ; i-- > 0; ++ctx ) {
+ apr_text_append(p, &hdr,
+ "<D:propstat>" DEBUG_CR
+ "<D:prop>");
+ apr_text_append(p, &hdr, apr_xml_empty_elem(p, ctx->prop));
+ apr_text_append(p, &hdr, "</D:prop>" DEBUG_CR);
+
+ if (ctx->err == NULL) {
+ /* nothing was assigned here yet, so make it a 424 */
+
+ if (ctx->operation == DAV_PROP_OP_SET) {
+ if (err424_set == NULL)
+ err424_set = dav_new_error(p, HTTP_FAILED_DEPENDENCY, 0,
+ "Attempted DAV:set operation "
+ "could not be completed due "
+ "to other errors.");
+ ctx->err = err424_set;
+ }
+ else if (ctx->operation == DAV_PROP_OP_DELETE) {
+ if (err424_delete == NULL)
+ err424_delete = dav_new_error(p, HTTP_FAILED_DEPENDENCY, 0,
+ "Attempted DAV:remove "
+ "operation could not be "
+ "completed due to other "
+ "errors.");
+ ctx->err = err424_delete;
+ }
+ }
+
+ s = apr_psprintf(p,
+ "<D:status>"
+ "HTTP/1.1 %d (status)"
+ "</D:status>" DEBUG_CR,
+ ctx->err->status);
+ apr_text_append(p, &hdr, s);
+
+ /* ### we should use compute_desc if necessary... */
+ if (ctx->err->desc != NULL) {
+ apr_text_append(p, &hdr, "<D:responsedescription>" DEBUG_CR);
+ apr_text_append(p, &hdr, ctx->err->desc);
+ apr_text_append(p, &hdr, "</D:responsedescription>" DEBUG_CR);
+ }
+
+ apr_text_append(p, &hdr, "</D:propstat>" DEBUG_CR);
+ }
+
+ return hdr.first;
+}
+
+static apr_text * dav_success_proppatch(apr_pool_t *p, apr_array_header_t *prop_ctx)
+{
+ apr_text_header hdr = { 0 };
+ int i = prop_ctx->nelts;
+ dav_prop_ctx *ctx = (dav_prop_ctx *)prop_ctx->elts;
+
+ /*
+ * ### we probably need to revise the way we assemble the response...
+ * ### this code assumes everything will return status==200.
+ */
+
+ apr_text_append(p, &hdr,
+ "<D:propstat>" DEBUG_CR
+ "<D:prop>" DEBUG_CR);
+
+ for ( ; i-- > 0; ++ctx ) {
+ apr_text_append(p, &hdr, apr_xml_empty_elem(p, ctx->prop));
+ }
+
+ apr_text_append(p, &hdr,
+ "</D:prop>" DEBUG_CR
+ "<D:status>HTTP/1.1 200 OK</D:status>" DEBUG_CR
+ "</D:propstat>" DEBUG_CR);
+
+ return hdr.first;
+}
+
+static void dav_prop_log_errors(dav_prop_ctx *ctx)
+{
+ dav_log_err(ctx->r, ctx->err, APLOG_ERR);
+}
+
+/*
+ * Call <func> for each context in <ctx_list>. If <stop_on_error> is set,
+ * the iteration stops at the first context whose error field is set;
+ * otherwise the whole list is processed.
+ *
+ * Returns 1 if the iteration was aborted by an error. Returns 0 if all
+ * elements were processed.
+ *
+ * If <reverse> is true (non-zero), then the list is traversed in
+ * reverse order.
+ */
+static int dav_process_ctx_list(void (*func)(dav_prop_ctx *ctx),
+ apr_array_header_t *ctx_list, int stop_on_error,
+ int reverse)
+{
+ int i = ctx_list->nelts;
+ dav_prop_ctx *ctx = (dav_prop_ctx *)ctx_list->elts;
+
+ if (reverse)
+ ctx += i;
+
+ while (i--) {
+ if (reverse)
+ --ctx;
+
+ (*func)(ctx);
+ if (stop_on_error && DAV_PROP_CTX_HAS_ERR(*ctx)) {
+ return 1;
+ }
+
+ if (!reverse)
+ ++ctx;
+ }
+
+ return 0;
+}
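+
+/* PROPPATCH below drives this helper in several passes: dav_prop_exec
+   over each context (stopping at the first error), then either
+   dav_prop_rollback (in reverse order) or dav_prop_commit, and finally
+   dav_prop_log_errors over the whole list. */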
+
+/* handle the PROPPATCH method */
+static int dav_method_proppatch(request_rec *r)
+{
+ dav_error *err;
+ dav_resource *resource;
+ int result;
+ apr_xml_doc *doc;
+ apr_xml_elem *child;
+ dav_propdb *propdb;
+ int failure = 0;
+ dav_response resp = { 0 };
+ apr_text *propstat_text;
+ apr_array_header_t *ctx_list;
+ dav_prop_ctx *ctx;
+ dav_auto_version_info av_info;
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+ if (!resource->exists) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+ if ((result = ap_xml_parse_input(r, &doc)) != OK) {
+ return result;
+ }
+ /* note: doc == NULL if no request body */
+
+ if (doc == NULL || !dav_validate_root(doc, "propertyupdate")) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The request body does not contain "
+ "a \"propertyupdate\" element.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* Check If-Headers and existing locks */
+ /* Note: depth == 0. Implies no need for a multistatus response. */
+ if ((err = dav_validate_request(r, resource, 0, NULL, NULL,
+ DAV_VALIDATE_RESOURCE, NULL)) != NULL) {
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* make sure the resource can be modified (if versioning repository) */
+ if ((err = dav_auto_checkout(r, resource,
+ 0 /* not parent_only */,
+ &av_info)) != NULL) {
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, NULL);
+ }
+
+ if ((err = dav_open_propdb(r, NULL, resource, 0, doc->namespaces,
+ &propdb)) != NULL) {
+ /* undo any auto-checkout */
+ dav_auto_checkin(r, resource, 1 /*undo*/, 0 /*unlock*/, &av_info);
+
+ err = dav_push_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ apr_psprintf(r->pool,
+ "Could not open the property "
+ "database for %s.",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+ /* ### what to do about closing the propdb on server failure? */
+
+ /* ### validate "live" properties */
+
+ /* set up an array to hold property operation contexts */
+ ctx_list = apr_array_make(r->pool, 10, sizeof(dav_prop_ctx));
+
+    /* do a first pass to validate all of the set/remove operations */
+ for (child = doc->root->first_child; child; child = child->next) {
+ int is_remove;
+ apr_xml_elem *prop_group;
+ apr_xml_elem *one_prop;
+
+ /* Ignore children that are not set/remove */
+ if (child->ns != APR_XML_NS_DAV_ID
+ || (!(is_remove = strcmp(child->name, "remove") == 0)
+ && strcmp(child->name, "set") != 0)) {
+ continue;
+ }
+
+ /* make sure that a "prop" child exists for set/remove */
+ if ((prop_group = dav_find_child(child, "prop")) == NULL) {
+ dav_close_propdb(propdb);
+
+ /* undo any auto-checkout */
+ dav_auto_checkin(r, resource, 1 /*undo*/, 0 /*unlock*/, &av_info);
+
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "A \"prop\" element is missing inside "
+ "the propertyupdate command.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ for (one_prop = prop_group->first_child; one_prop;
+ one_prop = one_prop->next) {
+
+ ctx = (dav_prop_ctx *)apr_array_push(ctx_list);
+ ctx->propdb = propdb;
+ ctx->operation = is_remove ? DAV_PROP_OP_DELETE : DAV_PROP_OP_SET;
+ ctx->prop = one_prop;
+
+ ctx->r = r; /* for later use by dav_prop_log_errors() */
+
+ dav_prop_validate(ctx);
+
+ if ( DAV_PROP_CTX_HAS_ERR(*ctx) ) {
+ failure = 1;
+ }
+ }
+ }
+
+ /* ### should test that we found at least one set/remove */
+
+ /* execute all of the operations */
+ if (!failure && dav_process_ctx_list(dav_prop_exec, ctx_list, 1, 0)) {
+ failure = 1;
+ }
+
+ /* generate a failure/success response */
+ if (failure) {
+ (void)dav_process_ctx_list(dav_prop_rollback, ctx_list, 0, 1);
+ propstat_text = dav_failed_proppatch(r->pool, ctx_list);
+ }
+ else {
+ (void)dav_process_ctx_list(dav_prop_commit, ctx_list, 0, 0);
+ propstat_text = dav_success_proppatch(r->pool, ctx_list);
+ }
+
+ /* make sure this gets closed! */
+ dav_close_propdb(propdb);
+
+ /* complete any auto-versioning */
+ dav_auto_checkin(r, resource, failure, 0 /*unlock*/, &av_info);
+
+ /* log any errors that occurred */
+ (void)dav_process_ctx_list(dav_prop_log_errors, ctx_list, 0, 0);
+
+ resp.href = resource->uri;
+
+ /* ### should probably use something new to pass along this text... */
+ resp.propresult.propstats = propstat_text;
+
+ dav_send_multistatus(r, HTTP_MULTI_STATUS, &resp, doc->namespaces);
+
+ /* the response has been sent. */
+ return DONE;
+}
+
+static int process_mkcol_body(request_rec *r)
+{
+ /* This is snarfed from ap_setup_client_block(). We could get pretty
+ * close to this behavior by passing REQUEST_NO_BODY, but we need to
+ * return HTTP_UNSUPPORTED_MEDIA_TYPE (while ap_setup_client_block
+ * returns HTTP_REQUEST_ENTITY_TOO_LARGE). */
+
+ const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
+ const char *lenp = apr_table_get(r->headers_in, "Content-Length");
+
+ /* make sure to set the Apache request fields properly. */
+ r->read_body = REQUEST_NO_BODY;
+ r->read_chunked = 0;
+ r->remaining = 0;
+
+ if (tenc) {
+ if (strcasecmp(tenc, "chunked")) {
+ /* Use this instead of Apache's default error string */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Unknown Transfer-Encoding %s", tenc);
+ return HTTP_NOT_IMPLEMENTED;
+ }
+
+ r->read_chunked = 1;
+ }
+ else if (lenp) {
+ const char *pos = lenp;
+
+ while (apr_isdigit(*pos) || apr_isspace(*pos)) {
+ ++pos;
+ }
+
+ if (*pos != '\0') {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid Content-Length %s", lenp);
+ return HTTP_BAD_REQUEST;
+ }
+
+ r->remaining = apr_atoi64(lenp);
+ }
+
+ if (r->read_chunked || r->remaining > 0) {
+ /* ### log something? */
+
+ /* Apache will supply a default error for this. */
+ return HTTP_UNSUPPORTED_MEDIA_TYPE;
+ }
+
+ /*
+     * Get rid of the body. This will call ap_setup_client_block(), but
+ * our copy above has already verified its work.
+ */
+ return ap_discard_request_body(r);
+}
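+
+/* In practice this accepts only an empty request body: a chunked body or
+   a non-zero Content-Length is answered with 415 Unsupported Media Type,
+   an unknown Transfer-Encoding with 501, and a malformed Content-Length
+   with 400. */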
+
+/* handle the MKCOL method */
+static int dav_method_mkcol(request_rec *r)
+{
+ dav_resource *resource;
+ int resource_state;
+ dav_auto_version_info av_info;
+ const dav_hooks_locks *locks_hooks = DAV_GET_HOOKS_LOCKS(r);
+ dav_error *err;
+ dav_error *err2;
+ int result;
+ dav_dir_conf *conf;
+ dav_response *multi_status;
+
+ /* handle the request body */
+ /* ### this may move lower once we start processing bodies */
+ if ((result = process_mkcol_body(r)) != OK) {
+ return result;
+ }
+
+ conf = (dav_dir_conf *)ap_get_module_config(r->per_dir_config,
+ &dav_module);
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ if (resource->exists) {
+ /* oops. something was already there! */
+
+ /* Apache will supply a default error for this. */
+ /* ### we should provide a specific error message! */
+ return HTTP_METHOD_NOT_ALLOWED;
+ }
+
+ resource_state = dav_get_resource_state(r, resource);
+
+ /*
+ * Check If-Headers and existing locks.
+ *
+ * Note: depth == 0 normally requires no multistatus response. However,
+ * if we pass DAV_VALIDATE_PARENT, then we could get an error on a URI
+ * other than the Request-URI, thereby requiring a multistatus.
+ *
+ * If the resource does not exist (DAV_RESOURCE_NULL), then we must
+ * check the resource *and* its parent. If the resource exists or is
+ * a locknull resource, then we check only the resource.
+ */
+ if ((err = dav_validate_request(r, resource, 0, NULL, &multi_status,
+ resource_state == DAV_RESOURCE_NULL ?
+ DAV_VALIDATE_PARENT :
+ DAV_VALIDATE_RESOURCE, NULL)) != NULL) {
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, multi_status);
+ }
+
+ /* if versioned resource, make sure parent is checked out */
+ if ((err = dav_auto_checkout(r, resource, 1 /* parent_only */,
+ &av_info)) != NULL) {
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* try to create the collection */
+ resource->collection = 1;
+ err = (*resource->hooks->create_collection)(resource);
+
+ /* restore modifiability of parent back to what it was */
+ err2 = dav_auto_checkin(r, NULL, err != NULL /* undo if error */,
+ 0 /*unlock*/, &av_info);
+
+ /* check for errors now */
+ if (err != NULL) {
+ return dav_handle_err(r, err, NULL);
+ }
+ if (err2 != NULL) {
+ /* just log a warning */
+ err = dav_push_error(r->pool, err2->status, 0,
+ "The MKCOL was successful, but there "
+ "was a problem automatically checking in "
+ "the parent collection.",
+ err2);
+ dav_log_err(r, err, APLOG_WARNING);
+ }
+
+ if (locks_hooks != NULL) {
+ dav_lockdb *lockdb;
+
+ if ((err = (*locks_hooks->open_lockdb)(r, 0, 0, &lockdb)) != NULL) {
+ /* The directory creation was successful, but the locking failed. */
+ err = dav_push_error(r->pool, err->status, 0,
+ "The MKCOL was successful, but there "
+ "was a problem opening the lock database "
+ "which prevents inheriting locks from the "
+ "parent resources.",
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* notify lock system that we have created/replaced a resource */
+ err = dav_notify_created(r, lockdb, resource, resource_state, 0);
+
+ (*locks_hooks->close_lockdb)(lockdb);
+
+ if (err != NULL) {
+ /* The dir creation was successful, but the locking failed. */
+ err = dav_push_error(r->pool, err->status, 0,
+ "The MKCOL was successful, but there "
+ "was a problem updating its lock "
+ "information.",
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+ }
+
+ /* return an appropriate response (HTTP_CREATED) */
+ return dav_created(r, NULL, "Collection", 0);
+}
+
+/* handle the COPY and MOVE methods */
+static int dav_method_copymove(request_rec *r, int is_move)
+{
+ dav_resource *resource;
+ dav_resource *resnew;
+ dav_auto_version_info src_av_info = { 0 };
+ dav_auto_version_info dst_av_info = { 0 };
+ const char *body;
+ const char *dest;
+ dav_error *err;
+ dav_error *err2;
+ dav_error *err3;
+ dav_response *multi_response;
+ dav_lookup_result lookup;
+ int is_dir;
+ int overwrite;
+ int depth;
+ int result;
+ dav_lockdb *lockdb;
+ int replace_dest;
+ int resnew_state;
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, !is_move /* label_allowed */,
+ 0 /* use_checked_in */, &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ if (!resource->exists) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+ /* If not a file or collection resource, COPY/MOVE not allowed */
+ /* ### allow COPY/MOVE of DeltaV resource types */
+ if (resource->type != DAV_RESOURCE_TYPE_REGULAR) {
+ body = apr_psprintf(r->pool,
+ "Cannot COPY/MOVE resource %s.",
+ ap_escape_html(r->pool, r->uri));
+ return dav_error_response(r, HTTP_METHOD_NOT_ALLOWED, body);
+ }
+
+ /* get the destination URI */
+ dest = apr_table_get(r->headers_in, "Destination");
+ if (dest == NULL) {
+ /* Look in headers provided by Netscape's Roaming Profiles */
+ const char *nscp_host = apr_table_get(r->headers_in, "Host");
+ const char *nscp_path = apr_table_get(r->headers_in, "New-uri");
+
+ if (nscp_host != NULL && nscp_path != NULL)
+ dest = apr_psprintf(r->pool, "http://%s%s", nscp_host, nscp_path);
+ }
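+    /* note: the synthesized fallback URI hard-codes the "http" scheme */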
+ if (dest == NULL) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The request is missing a Destination header.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ lookup = dav_lookup_uri(dest, r, 1 /* must_be_absolute */);
+ if (lookup.rnew == NULL) {
+ if (lookup.err.status == HTTP_BAD_REQUEST) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "%s", lookup.err.desc);
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* ### this assumes that dav_lookup_uri() only generates a status
+ * ### that Apache can provide a status line for!! */
+
+ return dav_error_response(r, lookup.err.status, lookup.err.desc);
+ }
+ if (lookup.rnew->status != HTTP_OK) {
+ const char *auth = apr_table_get(lookup.rnew->err_headers_out,
+ "WWW-Authenticate");
+ if (lookup.rnew->status == HTTP_UNAUTHORIZED && auth != NULL) {
+            /* propagate the WWW-Authenticate header up from the
+ * subreq so the client sees it. */
+ apr_table_set(r->err_headers_out, "WWW-Authenticate",
+ apr_pstrdup(r->pool, auth));
+ }
+
+ /* ### how best to report this... */
+ return dav_error_response(r, lookup.rnew->status,
+ "Destination URI had an error.");
+ }
+
+ /* Resolve destination resource */
+ err = dav_get_resource(lookup.rnew, 0 /* label_allowed */,
+ 0 /* use_checked_in */, &resnew);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ /* are the two resources handled by the same repository? */
+ if (resource->hooks != resnew->hooks) {
+ /* ### this message exposes some backend config, but screw it... */
+ return dav_error_response(r, HTTP_BAD_GATEWAY,
+ "Destination URI is handled by a "
+ "different repository than the source URI. "
+ "MOVE or COPY between repositories is "
+ "not possible.");
+ }
+
+ /* get and parse the overwrite header value */
+ if ((overwrite = dav_get_overwrite(r)) < 0) {
+ /* dav_get_overwrite() supplies additional information for the
+ * default message. */
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* quick failure test: if dest exists and overwrite is false. */
+ if (resnew->exists && !overwrite) {
+ /* Supply some text for the error response body. */
+ return dav_error_response(r, HTTP_PRECONDITION_FAILED,
+ "Destination is not empty and "
+ "Overwrite is not \"T\"");
+ }
+
+ /* are the source and destination the same? */
+ if ((*resource->hooks->is_same_resource)(resource, resnew)) {
+ /* Supply some text for the error response body. */
+ return dav_error_response(r, HTTP_FORBIDDEN,
+ "Source and Destination URIs are the same.");
+
+ }
+
+ is_dir = resource->collection;
+
+ /* get and parse the Depth header value. "0" and "infinity" are legal. */
+ if ((depth = dav_get_depth(r, DAV_INFINITY)) < 0) {
+ /* dav_get_depth() supplies additional information for the
+ * default message. */
+ return HTTP_BAD_REQUEST;
+ }
+ if (depth == 1) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Depth must be \"0\" or \"infinity\" for COPY or MOVE.");
+ return HTTP_BAD_REQUEST;
+ }
+ if (is_move && is_dir && depth != DAV_INFINITY) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Depth must be \"infinity\" when moving a collection.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /*
+ * Check If-Headers and existing locks for each resource in the source
+ * if we are performing a MOVE. We will return a 424 response with a
+ * DAV:multistatus body. The multistatus responses will contain the
+ * information about any resource that fails the validation.
+ *
+ * We check the parent resource, too, since this is a MOVE. Moving the
+ * resource effectively removes it from the parent collection, so we
+ * must ensure that we have met the appropriate conditions.
+ *
+ * If a problem occurs with the Request-URI itself, then a plain error
+ * (rather than a multistatus) will be returned.
+ */
+ if (is_move
+ && (err = dav_validate_request(r, resource, depth, NULL,
+ &multi_response,
+ DAV_VALIDATE_PARENT
+ | DAV_VALIDATE_USE_424,
+ NULL)) != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ apr_psprintf(r->pool,
+ "Could not MOVE %s due to a failed "
+ "precondition on the source "
+ "(e.g. locks).",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, multi_response);
+ }
+
+ /*
+ * Check If-Headers and existing locks for destination. Note that we
+ * use depth==infinity since the target (hierarchy) will be deleted
+ * before the move/copy is completed.
+ *
+ * Note that we are overwriting the target, which implies a DELETE, so
+     * we are subject to the same error/response rules as a DELETE. Namely, we
+ * will return a 424 error if any of the validations fail.
+ * (see dav_method_delete() for more information)
+ */
+ if ((err = dav_validate_request(lookup.rnew, resnew, DAV_INFINITY, NULL,
+ &multi_response,
+ DAV_VALIDATE_PARENT
+ | DAV_VALIDATE_USE_424, NULL)) != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ apr_psprintf(r->pool,
+ "Could not MOVE/COPY %s due to a "
+ "failed precondition on the "
+ "destination (e.g. locks).",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, multi_response);
+ }
+
+ if (is_dir
+ && depth == DAV_INFINITY
+ && (*resource->hooks->is_parent_resource)(resource, resnew)) {
+ /* Supply some text for the error response body. */
+ return dav_error_response(r, HTTP_FORBIDDEN,
+ "Source collection contains the "
+ "Destination.");
+
+ }
+ if (is_dir
+ && (*resnew->hooks->is_parent_resource)(resnew, resource)) {
+ /* The destination must exist (since it contains the source), and
+ * a condition above implies Overwrite==T. Obviously, we cannot
+ * delete the Destination before the MOVE/COPY, as that would
+ * delete the Source.
+ */
+
+ /* Supply some text for the error response body. */
+ return dav_error_response(r, HTTP_FORBIDDEN,
+ "Destination collection contains the Source "
+ "and Overwrite has been specified.");
+ }
+
+ /* ### for now, we don't need anything in the body */
+ if ((result = ap_discard_request_body(r)) != OK) {
+ return result;
+ }
+
+ if ((err = dav_open_lockdb(r, 0, &lockdb)) != NULL) {
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* remove any locks from the old resources */
+ /*
+ * ### this is Yet Another Traversal. if we do a rename(), then we
+ * ### really don't have to do this in some cases since the inode
+ * ### values will remain constant across the move. but we can't
+ * ### know that fact from outside the provider :-(
+ *
+     * ### note that we now have an atomicity problem in the move/copy
+ * ### since a failure after this would have removed locks (technically,
+ * ### this is okay to do, but really...)
+ */
+ if (is_move && lockdb != NULL) {
+ /* ### this is wrong! it blasts direct locks on parent resources */
+ /* ### pass lockdb! */
+ (void)dav_unlock(r, resource, NULL);
+ }
+
+ /* if this is a move, then the source parent collection will be modified */
+ if (is_move) {
+ if ((err = dav_auto_checkout(r, resource, 1 /* parent_only */,
+ &src_av_info)) != NULL) {
+ if (lockdb != NULL)
+ (*lockdb->hooks->close_lockdb)(lockdb);
+
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, NULL);
+ }
+ }
+
+ /*
+ * Remember the initial state of the destination, so the lock system
+ * can be notified as to how it changed.
+ */
+ resnew_state = dav_get_resource_state(lookup.rnew, resnew);
+
+ /* In a MOVE operation, the destination is replaced by the source.
+ * In a COPY operation, if the destination exists, is under version
+ * control, and is the same resource type as the source,
+ * then it should not be replaced, but modified to be a copy of
+ * the source.
+ */
+ if (!resnew->exists)
+ replace_dest = 0;
+ else if (is_move || !resource->versioned)
+ replace_dest = 1;
+ else if (resource->type != resnew->type)
+ replace_dest = 1;
+ else if ((resource->collection == 0) != (resnew->collection == 0))
+ replace_dest = 1;
+ else
+ replace_dest = 0;
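+
+    /* When replace_dest is set, the existing destination is removed below and
+     * recreated by the copy/move; otherwise an existing destination is handed
+     * to the provider to be modified in place. */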
+
+ /* If the destination must be created or replaced,
+ * make sure the parent collection is writable
+ */
+ if (!resnew->exists || replace_dest) {
+ if ((err = dav_auto_checkout(r, resnew, 1 /*parent_only*/,
+ &dst_av_info)) != NULL) {
+ /* could not make destination writable:
+ * if move, restore state of source parent
+ */
+ if (is_move) {
+ (void)dav_auto_checkin(r, NULL, 1 /* undo */,
+ 0 /*unlock*/, &src_av_info);
+ }
+
+ if (lockdb != NULL)
+ (*lockdb->hooks->close_lockdb)(lockdb);
+
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, NULL);
+ }
+ }
+
+ /* If source and destination parents are the same, then
+ * use the same resource object, so status updates to one are reflected
+ * in the other, when doing auto-versioning. Otherwise,
+ * we may try to checkin the parent twice.
+ */
+ if (src_av_info.parent_resource != NULL
+ && dst_av_info.parent_resource != NULL
+ && (*src_av_info.parent_resource->hooks->is_same_resource)
+ (src_av_info.parent_resource, dst_av_info.parent_resource)) {
+
+ dst_av_info.parent_resource = src_av_info.parent_resource;
+ }
+
+ /* If destination is being replaced, remove it first
+     * (we know Overwrite must be TRUE). Then try to copy/move the resource.
+ */
+ if (replace_dest)
+ err = (*resnew->hooks->remove_resource)(resnew, &multi_response);
+
+ if (err == NULL) {
+ if (is_move)
+ err = (*resource->hooks->move_resource)(resource, resnew,
+ &multi_response);
+ else
+ err = (*resource->hooks->copy_resource)(resource, resnew, depth,
+ &multi_response);
+ }
+
+ /* perform any auto-versioning cleanup */
+ err2 = dav_auto_checkin(r, NULL, err != NULL /* undo if error */,
+ 0 /*unlock*/, &dst_av_info);
+
+ if (is_move) {
+ err3 = dav_auto_checkin(r, NULL, err != NULL /* undo if error */,
+ 0 /*unlock*/, &src_av_info);
+ }
+ else
+ err3 = NULL;
+
+ /* check for error from remove/copy/move operations */
+ if (err != NULL) {
+ if (lockdb != NULL)
+ (*lockdb->hooks->close_lockdb)(lockdb);
+
+ err = dav_push_error(r->pool, err->status, 0,
+ apr_psprintf(r->pool,
+ "Could not MOVE/COPY %s.",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, multi_response);
+ }
+
+ /* check for errors from auto-versioning */
+ if (err2 != NULL) {
+ /* just log a warning */
+ err = dav_push_error(r->pool, err2->status, 0,
+ "The MOVE/COPY was successful, but there was a "
+ "problem automatically checking in the "
+ "source parent collection.",
+ err2);
+ dav_log_err(r, err, APLOG_WARNING);
+ }
+ if (err3 != NULL) {
+ /* just log a warning */
+ err = dav_push_error(r->pool, err3->status, 0,
+ "The MOVE/COPY was successful, but there was a "
+ "problem automatically checking in the "
+ "destination or its parent collection.",
+ err3);
+ dav_log_err(r, err, APLOG_WARNING);
+ }
+
+ /* propagate any indirect locks at the target */
+ if (lockdb != NULL) {
+
+ /* notify lock system that we have created/replaced a resource */
+ err = dav_notify_created(r, lockdb, resnew, resnew_state, depth);
+
+ (*lockdb->hooks->close_lockdb)(lockdb);
+
+ if (err != NULL) {
+ /* The move/copy was successful, but the locking failed. */
+ err = dav_push_error(r->pool, err->status, 0,
+ "The MOVE/COPY was successful, but there "
+ "was a problem updating the lock "
+ "information.",
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+ }
+
+ /* return an appropriate response (HTTP_CREATED or HTTP_NO_CONTENT) */
+ return dav_created(r, lookup.rnew->uri, "Destination",
+ resnew_state == DAV_RESOURCE_EXISTS);
+}
+
+/* dav_method_lock: Handler to implement the DAV LOCK method
+ * Returns appropriate HTTP_* response.
+ */
+static int dav_method_lock(request_rec *r)
+{
+ dav_error *err;
+ dav_resource *resource;
+ const dav_hooks_locks *locks_hooks;
+ int result;
+ int depth;
+ int new_lock_request = 0;
+ apr_xml_doc *doc;
+ dav_lock *lock;
+ dav_response *multi_response = NULL;
+ dav_lockdb *lockdb;
+ int resource_state;
+
+ /* If no locks provider, decline the request */
+ locks_hooks = DAV_GET_HOOKS_LOCKS(r);
+ if (locks_hooks == NULL)
+ return DECLINED;
+
+ if ((result = ap_xml_parse_input(r, &doc)) != OK)
+ return result;
+
+ depth = dav_get_depth(r, DAV_INFINITY);
+ if (depth != 0 && depth != DAV_INFINITY) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Depth must be 0 or \"infinity\" for LOCK.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ /*
+ * Open writable. Unless an error occurs, we'll be
+ * writing into the database.
+ */
+ if ((err = (*locks_hooks->open_lockdb)(r, 0, 0, &lockdb)) != NULL) {
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, NULL);
+ }
+
+ if (doc != NULL) {
+ if ((err = dav_lock_parse_lockinfo(r, resource, lockdb, doc,
+ &lock)) != NULL) {
+ /* ### add a higher-level description to err? */
+ goto error;
+ }
+ new_lock_request = 1;
+
+ lock->auth_user = apr_pstrdup(r->pool, r->user);
+ }
+
+ resource_state = dav_get_resource_state(r, resource);
+
+ /*
+ * Check If-Headers and existing locks.
+ *
+ * If this will create a locknull resource, then the LOCK will affect
+ * the parent collection (much like a PUT/MKCOL). For that case, we must
+ * validate the parent resource's conditions.
+ */
+    if ((err = dav_validate_request(r, resource, depth, NULL, &multi_response,
+                                    (resource_state == DAV_RESOURCE_NULL
+                                     ? DAV_VALIDATE_PARENT
+                                     : DAV_VALIDATE_RESOURCE)
+                                    | (new_lock_request ? lock->scope : 0)
+                                    | DAV_VALIDATE_ADD_LD,
+                                    lockdb)) != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ apr_psprintf(r->pool,
+ "Could not LOCK %s due to a failed "
+ "precondition (e.g. other locks).",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ goto error;
+ }
+
+ if (new_lock_request == 0) {
+ dav_locktoken_list *ltl;
+
+ /*
+ * Refresh request
+ * ### Assumption: We can renew multiple locks on the same resource
+ * ### at once. First harvest all the positive lock-tokens given in
+ * ### the If header. Then modify the lock entries for this resource
+ * ### with the new Timeout val.
+ */
+
+ if ((err = dav_get_locktoken_list(r, &ltl)) != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ apr_psprintf(r->pool,
+ "The lock refresh for %s failed "
+ "because no lock tokens were "
+ "specified in an \"If:\" "
+ "header.",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ goto error;
+ }
+
+ if ((err = (*locks_hooks->refresh_locks)(lockdb, resource, ltl,
+ dav_get_timeout(r),
+ &lock)) != NULL) {
+ /* ### add a higher-level description to err? */
+ goto error;
+ }
+ } else {
+ /* New lock request */
+ char *locktoken_txt;
+ dav_dir_conf *conf;
+
+ conf = (dav_dir_conf *)ap_get_module_config(r->per_dir_config,
+ &dav_module);
+
+ /* apply lower bound (if any) from DAVMinTimeout directive */
+ if (lock->timeout != DAV_TIMEOUT_INFINITE
+ && lock->timeout < time(NULL) + conf->locktimeout)
+ lock->timeout = time(NULL) + conf->locktimeout;
+
+ err = dav_add_lock(r, resource, lockdb, lock, &multi_response);
+ if (err != NULL) {
+ /* ### add a higher-level description to err? */
+ goto error;
+ }
+
+ locktoken_txt = apr_pstrcat(r->pool, "<",
+ (*locks_hooks->format_locktoken)(r->pool,
+ lock->locktoken),
+ ">", NULL);
+
+ apr_table_set(r->headers_out, "Lock-Token", locktoken_txt);
+ }
+
+ (*locks_hooks->close_lockdb)(lockdb);
+
+ r->status = HTTP_OK;
+ ap_set_content_type(r, DAV_XML_CONTENT_TYPE);
+
+ ap_rputs(DAV_XML_HEADER DEBUG_CR "<D:prop xmlns:D=\"DAV:\">" DEBUG_CR, r);
+ if (lock == NULL)
+ ap_rputs("<D:lockdiscovery/>" DEBUG_CR, r);
+ else {
+ ap_rprintf(r,
+ "<D:lockdiscovery>" DEBUG_CR
+ "%s" DEBUG_CR
+ "</D:lockdiscovery>" DEBUG_CR,
+ dav_lock_get_activelock(r, lock, NULL));
+ }
+ ap_rputs("</D:prop>", r);
+
+ /* the response has been sent. */
+ return DONE;
+
+ error:
+ (*locks_hooks->close_lockdb)(lockdb);
+ return dav_handle_err(r, err, multi_response);
+}
+
+/* dav_method_unlock: Handler to implement the DAV UNLOCK method
+ * Returns appropriate HTTP_* response.
+ */
+static int dav_method_unlock(request_rec *r)
+{
+ dav_error *err;
+ dav_resource *resource;
+ const dav_hooks_locks *locks_hooks;
+ int result;
+ const char *const_locktoken_txt;
+ char *locktoken_txt;
+ dav_locktoken *locktoken = NULL;
+ int resource_state;
+ dav_response *multi_response;
+
+ /* If no locks provider, decline the request */
+ locks_hooks = DAV_GET_HOOKS_LOCKS(r);
+ if (locks_hooks == NULL)
+ return DECLINED;
+
+ if ((const_locktoken_txt = apr_table_get(r->headers_in,
+ "Lock-Token")) == NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Unlock failed (%s): "
+ "No Lock-Token specified in header", r->filename);
+ return HTTP_BAD_REQUEST;
+ }
+
+ locktoken_txt = apr_pstrdup(r->pool, const_locktoken_txt);
+ if (locktoken_txt[0] != '<') {
+ /* ### should provide more specifics... */
+ return HTTP_BAD_REQUEST;
+ }
+ locktoken_txt++;
+
+ if (locktoken_txt[strlen(locktoken_txt) - 1] != '>') {
+ /* ### should provide more specifics... */
+ return HTTP_BAD_REQUEST;
+ }
+ locktoken_txt[strlen(locktoken_txt) - 1] = '\0';
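+    /* locktoken_txt now holds the bare token with the surrounding '<' and '>'
+     * stripped, ready for the provider's parse_locktoken() */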
+
+ if ((err = (*locks_hooks->parse_locktoken)(r->pool, locktoken_txt,
+ &locktoken)) != NULL) {
+        err = dav_push_error(r->pool, HTTP_BAD_REQUEST, 0,
+                             apr_psprintf(r->pool,
+                                          "The UNLOCK on %s failed -- an "
+                                          "invalid lock token was specified "
+                                          "in the \"Lock-Token\" header.",
+                                          ap_escape_html(r->pool, r->uri)),
+                             err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ resource_state = dav_get_resource_state(r, resource);
+
+ /*
+ * Check If-Headers and existing locks.
+ *
+ * Note: depth == 0 normally requires no multistatus response. However,
+ * if we pass DAV_VALIDATE_PARENT, then we could get an error on a URI
+ * other than the Request-URI, thereby requiring a multistatus.
+ *
+ * If the resource is a locknull resource, then the UNLOCK will affect
+ * the parent collection (much like a delete). For that case, we must
+ * validate the parent resource's conditions.
+ */
+ if ((err = dav_validate_request(r, resource, 0, locktoken,
+ &multi_response,
+ resource_state == DAV_RESOURCE_LOCK_NULL
+ ? DAV_VALIDATE_PARENT
+ : DAV_VALIDATE_RESOURCE, NULL)) != NULL) {
+ /* ### add a higher-level description? */
+ return dav_handle_err(r, err, multi_response);
+ }
+
+ /* ### RFC 2518 s. 8.11: If this resource is locked by locktoken,
+ * _all_ resources locked by locktoken are released. It does not say
+     * the resource has to be the root of an infinite lock. Thus, an UNLOCK
+     * on any part of an infinite lock will remove the lock on all resources.
+ *
+ * For us, if r->filename represents an indirect lock (part of an infinity lock),
+ * we must actually perform an UNLOCK on the direct lock for this resource.
+ */
+ if ((result = dav_unlock(r, resource, locktoken)) != OK) {
+ return result;
+ }
+
+ return HTTP_NO_CONTENT;
+}
+
+static int dav_method_vsn_control(request_rec *r)
+{
+ dav_resource *resource;
+ int resource_state;
+ dav_auto_version_info av_info;
+ const dav_hooks_locks *locks_hooks = DAV_GET_HOOKS_LOCKS(r);
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r);
+ dav_error *err;
+ apr_xml_doc *doc;
+ const char *target = NULL;
+ int result;
+
+ /* if no versioning provider, decline the request */
+ if (vsn_hooks == NULL)
+ return DECLINED;
+
+ /* ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ /* remember the pre-creation resource state */
+ resource_state = dav_get_resource_state(r, resource);
+
+ /* parse the request body (may be a version-control element) */
+ if ((result = ap_xml_parse_input(r, &doc)) != OK) {
+ return result;
+ }
+ /* note: doc == NULL if no request body */
+
+ if (doc != NULL) {
+ const apr_xml_elem *child;
+ apr_size_t tsize;
+
+ if (!dav_validate_root(doc, "version-control")) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The request body does not contain "
+ "a \"version-control\" element.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* get the version URI */
+ if ((child = dav_find_child(doc->root, "version")) == NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The \"version-control\" element does not contain "
+ "a \"version\" element.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ if ((child = dav_find_child(child, "href")) == NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The \"version\" element does not contain "
+ "an \"href\" element.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* get version URI */
+ apr_xml_to_text(r->pool, child, APR_XML_X2T_INNER, NULL, NULL,
+ &target, &tsize);
+ if (tsize == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "An \"href\" element does not contain a URI.");
+ return HTTP_BAD_REQUEST;
+ }
+ }
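+
+    /* target now holds the DAV:href of the version (if any) to pass to the
+     * provider's vsn_control() hook */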
+
+ /* Check request preconditions */
+
+ /* ### need a general mechanism for reporting precondition violations
+ * ### (should be returning XML document for 403/409 responses)
+ */
+
+ /* if not versioning existing resource, must specify version to select */
+ if (!resource->exists && target == NULL) {
+ err = dav_new_error(r->pool, HTTP_CONFLICT, 0,
+ "<DAV:initial-version-required/>");
+ return dav_handle_err(r, err, NULL);
+ }
+ else if (resource->exists) {
+ /* cannot add resource to existing version history */
+ if (target != NULL) {
+ err = dav_new_error(r->pool, HTTP_CONFLICT, 0,
+ "<DAV:cannot-add-to-existing-history/>");
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* resource must be unversioned and versionable, or version selector */
+ if (resource->type != DAV_RESOURCE_TYPE_REGULAR
+ || (!resource->versioned && !(vsn_hooks->versionable)(resource))) {
+ err = dav_new_error(r->pool, HTTP_CONFLICT, 0,
+ "<DAV:must-be-versionable/>");
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* the DeltaV spec says if resource is a version selector,
+ * then VERSION-CONTROL is a no-op
+ */
+ if (resource->versioned) {
+ /* set the Cache-Control header, per the spec */
+ apr_table_setn(r->headers_out, "Cache-Control", "no-cache");
+
+ /* no body */
+ ap_set_content_length(r, 0);
+
+ return DONE;
+ }
+ }
+
+ /* Check If-Headers and existing locks */
+ /* Note: depth == 0. Implies no need for a multistatus response. */
+ if ((err = dav_validate_request(r, resource, 0, NULL, NULL,
+ resource_state == DAV_RESOURCE_NULL ?
+ DAV_VALIDATE_PARENT :
+ DAV_VALIDATE_RESOURCE, NULL)) != NULL) {
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* if in versioned collection, make sure parent is checked out */
+ if ((err = dav_auto_checkout(r, resource, 1 /* parent_only */,
+ &av_info)) != NULL) {
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* attempt to version-control the resource */
+ if ((err = (*vsn_hooks->vsn_control)(resource, target)) != NULL) {
+ dav_auto_checkin(r, resource, 1 /*undo*/, 0 /*unlock*/, &av_info);
+ err = dav_push_error(r->pool, HTTP_CONFLICT, 0,
+ apr_psprintf(r->pool,
+ "Could not VERSION-CONTROL resource %s.",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* revert writability of parent directory */
+ err = dav_auto_checkin(r, resource, 0 /*undo*/, 0 /*unlock*/, &av_info);
+ if (err != NULL) {
+ /* just log a warning */
+ err = dav_push_error(r->pool, err->status, 0,
+ "The VERSION-CONTROL was successful, but there "
+ "was a problem automatically checking in "
+ "the parent collection.",
+ err);
+ dav_log_err(r, err, APLOG_WARNING);
+ }
+
+ /* if the resource is lockable, let lock system know of new resource */
+ if (locks_hooks != NULL
+ && (*locks_hooks->get_supportedlock)(resource) != NULL) {
+ dav_lockdb *lockdb;
+
+ if ((err = (*locks_hooks->open_lockdb)(r, 0, 0, &lockdb)) != NULL) {
+ /* The resource creation was successful, but the locking failed. */
+ err = dav_push_error(r->pool, err->status, 0,
+ "The VERSION-CONTROL was successful, but there "
+ "was a problem opening the lock database "
+ "which prevents inheriting locks from the "
+ "parent resources.",
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* notify lock system that we have created/replaced a resource */
+ err = dav_notify_created(r, lockdb, resource, resource_state, 0);
+
+ (*locks_hooks->close_lockdb)(lockdb);
+
+ if (err != NULL) {
+ /* The dir creation was successful, but the locking failed. */
+ err = dav_push_error(r->pool, err->status, 0,
+ "The VERSION-CONTROL was successful, but there "
+ "was a problem updating its lock "
+ "information.",
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+ }
+
+ /* set the Cache-Control header, per the spec */
+ apr_table_setn(r->headers_out, "Cache-Control", "no-cache");
+
+ /* return an appropriate response (HTTP_CREATED) */
+ return dav_created(r, resource->uri, "Version selector", 0 /*replaced*/);
+}
+
+/* handle the CHECKOUT method */
+static int dav_method_checkout(request_rec *r)
+{
+ dav_resource *resource;
+ dav_resource *working_resource;
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r);
+ dav_error *err;
+ int result;
+ apr_xml_doc *doc;
+ int apply_to_vsn = 0;
+ int is_unreserved = 0;
+ int is_fork_ok = 0;
+ int create_activity = 0;
+ apr_array_header_t *activities = NULL;
+
+ /* If no versioning provider, decline the request */
+ if (vsn_hooks == NULL)
+ return DECLINED;
+
+ if ((result = ap_xml_parse_input(r, &doc)) != OK)
+ return result;
+
+ if (doc != NULL) {
+ const apr_xml_elem *aset;
+
+ if (!dav_validate_root(doc, "checkout")) {
+ /* This supplies additional information for the default msg. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The request body, if present, must be a "
+ "DAV:checkout element.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ if (dav_find_child(doc->root, "apply-to-version") != NULL) {
+ if (apr_table_get(r->headers_in, "label") != NULL) {
+ /* ### we want generic 403/409 XML reporting here */
+ /* ### DAV:must-not-have-label-and-apply-to-version */
+ return dav_error_response(r, HTTP_CONFLICT,
+ "DAV:apply-to-version cannot be "
+ "used in conjunction with a "
+ "Label header.");
+ }
+ apply_to_vsn = 1;
+ }
+
+ is_unreserved = dav_find_child(doc->root, "unreserved") != NULL;
+ is_fork_ok = dav_find_child(doc->root, "fork-ok") != NULL;
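+        /* both flags are passed straight through to the provider's
+         * checkout() hook below */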
+
+ if ((aset = dav_find_child(doc->root, "activity-set")) != NULL) {
+ if (dav_find_child(aset, "new") != NULL) {
+ create_activity = 1;
+ }
+ else {
+ const apr_xml_elem *child = aset->first_child;
+
+ activities = apr_array_make(r->pool, 1, sizeof(const char *));
+
+ for (; child != NULL; child = child->next) {
+ if (child->ns == APR_XML_NS_DAV_ID
+ && strcmp(child->name, "href") == 0) {
+ const char *href;
+
+ href = dav_xml_get_cdata(child, r->pool,
+ 1 /* strip_white */);
+ *(const char **)apr_array_push(activities) = href;
+ }
+ }
+
+ if (activities->nelts == 0) {
+ /* no href's is a DTD violation:
+ <!ELEMENT activity-set (href+ | new)>
+ */
+
+ /* This supplies additional info for the default msg. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Within the DAV:activity-set element, the "
+ "DAV:new element must be used, or at least "
+ "one DAV:href must be specified.");
+ return HTTP_BAD_REQUEST;
+ }
+ }
+ }
+ }
+
+ /* Ask repository module to resolve the resource */
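+    /* (apply_to_vsn doubles as the use_checked_in flag, so a
+     * DAV:apply-to-version request resolves to the checked-in version) */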
+ err = dav_get_resource(r, 1 /*label_allowed*/, apply_to_vsn, &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ if (!resource->exists) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+ /* Check the state of the resource: must be a file or collection,
+ * must be versioned, and must not already be checked out.
+ */
+ if (resource->type != DAV_RESOURCE_TYPE_REGULAR
+ && resource->type != DAV_RESOURCE_TYPE_VERSION) {
+ return dav_error_response(r, HTTP_CONFLICT,
+ "Cannot checkout this type of resource.");
+ }
+
+ if (!resource->versioned) {
+ return dav_error_response(r, HTTP_CONFLICT,
+ "Cannot checkout unversioned resource.");
+ }
+
+ if (resource->working) {
+ return dav_error_response(r, HTTP_CONFLICT,
+ "The resource is already checked out to the workspace.");
+ }
+
+ /* ### do lock checks, once behavior is defined */
+
+ /* Do the checkout */
+ if ((err = (*vsn_hooks->checkout)(resource, 0 /*auto_checkout*/,
+ is_unreserved, is_fork_ok,
+ create_activity, activities,
+ &working_resource)) != NULL) {
+ err = dav_push_error(r->pool, HTTP_CONFLICT, 0,
+ apr_psprintf(r->pool,
+ "Could not CHECKOUT resource %s.",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* set the Cache-Control header, per the spec */
+ apr_table_setn(r->headers_out, "Cache-Control", "no-cache");
+
+ /* if no working resource created, return OK,
+ * else return CREATED with working resource URL in Location header
+ */
+ if (working_resource == NULL) {
+ /* no body */
+ ap_set_content_length(r, 0);
+ return DONE;
+ }
+
+ return dav_created(r, working_resource->uri, "Checked-out resource", 0);
+}
+
+/* handle the UNCHECKOUT method */
+static int dav_method_uncheckout(request_rec *r)
+{
+ dav_resource *resource;
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r);
+ dav_error *err;
+ int result;
+
+ /* If no versioning provider, decline the request */
+ if (vsn_hooks == NULL)
+ return DECLINED;
+
+ if ((result = ap_discard_request_body(r)) != OK) {
+ return result;
+ }
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ if (!resource->exists) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+ /* Check the state of the resource: must be a file or collection,
+ * must be versioned, and must be checked out.
+ */
+ if (resource->type != DAV_RESOURCE_TYPE_REGULAR) {
+ return dav_error_response(r, HTTP_CONFLICT,
+ "Cannot uncheckout this type of resource.");
+ }
+
+ if (!resource->versioned) {
+ return dav_error_response(r, HTTP_CONFLICT,
+ "Cannot uncheckout unversioned resource.");
+ }
+
+ if (!resource->working) {
+ return dav_error_response(r, HTTP_CONFLICT,
+ "The resource is not checked out to the workspace.");
+ }
+
+ /* ### do lock checks, once behavior is defined */
+
+ /* Do the uncheckout */
+ if ((err = (*vsn_hooks->uncheckout)(resource)) != NULL) {
+ err = dav_push_error(r->pool, HTTP_CONFLICT, 0,
+ apr_psprintf(r->pool,
+ "Could not UNCHECKOUT resource %s.",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* no body */
+ ap_set_content_length(r, 0);
+
+ return DONE;
+}
+
+/* handle the CHECKIN method */
+static int dav_method_checkin(request_rec *r)
+{
+ dav_resource *resource;
+ dav_resource *new_version;
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r);
+ dav_error *err;
+ int result;
+ apr_xml_doc *doc;
+ int keep_checked_out = 0;
+
+ /* If no versioning provider, decline the request */
+ if (vsn_hooks == NULL)
+ return DECLINED;
+
+ if ((result = ap_xml_parse_input(r, &doc)) != OK)
+ return result;
+
+ if (doc != NULL) {
+ if (!dav_validate_root(doc, "checkin")) {
+ /* This supplies additional information for the default msg. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The request body, if present, must be a "
+ "DAV:checkin element.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ keep_checked_out = dav_find_child(doc->root, "keep-checked-out") != NULL;
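+        /* passed to the provider's checkin() hook so the working resource can
+         * remain checked out after the new version is created */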
+ }
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ if (!resource->exists) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+ /* Check the state of the resource: must be a file or collection,
+ * must be versioned, and must be checked out.
+ */
+ if (resource->type != DAV_RESOURCE_TYPE_REGULAR) {
+ return dav_error_response(r, HTTP_CONFLICT,
+ "Cannot checkin this type of resource.");
+ }
+
+ if (!resource->versioned) {
+ return dav_error_response(r, HTTP_CONFLICT,
+ "Cannot checkin unversioned resource.");
+ }
+
+ if (!resource->working) {
+ return dav_error_response(r, HTTP_CONFLICT,
+ "The resource is not checked out.");
+ }
+
+ /* ### do lock checks, once behavior is defined */
+
+ /* Do the checkin */
+ if ((err = (*vsn_hooks->checkin)(resource, keep_checked_out, &new_version))
+ != NULL) {
+ err = dav_push_error(r->pool, HTTP_CONFLICT, 0,
+ apr_psprintf(r->pool,
+ "Could not CHECKIN resource %s.",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ return dav_created(r, new_version->uri, "Version", 0);
+}
+
+static int dav_method_update(request_rec *r)
+{
+ dav_resource *resource;
+ dav_resource *version = NULL;
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r);
+ apr_xml_doc *doc;
+ apr_xml_elem *child;
+ int is_label = 0;
+ int depth;
+ int result;
+ apr_size_t tsize;
+ const char *target;
+ dav_response *multi_response;
+ dav_error *err;
+ dav_lookup_result lookup;
+
+ /* If no versioning provider, or UPDATE not supported,
+ * decline the request */
+ if (vsn_hooks == NULL || vsn_hooks->update == NULL)
+ return DECLINED;
+
+ if ((depth = dav_get_depth(r, 0)) < 0) {
+ /* dav_get_depth() supplies additional information for the
+ * default message. */
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* parse the request body */
+ if ((result = ap_xml_parse_input(r, &doc)) != OK) {
+ return result;
+ }
+
+ if (doc == NULL || !dav_validate_root(doc, "update")) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The request body does not contain "
+ "an \"update\" element.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* check for label-name or version element, but not both */
+ if ((child = dav_find_child(doc->root, "label-name")) != NULL)
+ is_label = 1;
+ else if ((child = dav_find_child(doc->root, "version")) != NULL) {
+ /* get the href element */
+ if ((child = dav_find_child(child, "href")) == NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The version element does not contain "
+ "an \"href\" element.");
+ return HTTP_BAD_REQUEST;
+ }
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The \"update\" element does not contain "
+ "a \"label-name\" or \"version\" element.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* a depth greater than zero is only allowed for a label */
+ if (!is_label && depth != 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Depth must be zero for UPDATE with a version");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* get the target value (a label or a version URI) */
+ apr_xml_to_text(r->pool, child, APR_XML_X2T_INNER, NULL, NULL,
+ &target, &tsize);
+ if (tsize == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "A \"label-name\" or \"href\" element does not contain "
+ "any content.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ if (!resource->exists) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+ /* ### need a general mechanism for reporting precondition violations
+ * ### (should be returning XML document for 403/409 responses)
+ */
+ if (resource->type != DAV_RESOURCE_TYPE_REGULAR
+ || !resource->versioned || resource->working) {
+        return dav_error_response(r, HTTP_CONFLICT,
+                                  "<DAV:must-be-checked-in-version-controlled-resource/>");
+ }
+
+ /* if target is a version, resolve the version resource */
+ /* ### dav_lookup_uri only allows absolute URIs; is that OK? */
+ if (!is_label) {
+ lookup = dav_lookup_uri(target, r, 0 /* must_be_absolute */);
+ if (lookup.rnew == NULL) {
+ if (lookup.err.status == HTTP_BAD_REQUEST) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "%s", lookup.err.desc);
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* ### this assumes that dav_lookup_uri() only generates a status
+ * ### that Apache can provide a status line for!! */
+
+ return dav_error_response(r, lookup.err.status, lookup.err.desc);
+ }
+ if (lookup.rnew->status != HTTP_OK) {
+ /* ### how best to report this... */
+ return dav_error_response(r, lookup.rnew->status,
+ "Version URI had an error.");
+ }
+
+ /* resolve version resource */
+ err = dav_get_resource(lookup.rnew, 0 /* label_allowed */,
+ 0 /* use_checked_in */, &version);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ /* NULL out target, since we're using a version resource */
+ target = NULL;
+ }
+
+ /* do the UPDATE operation */
+ err = (*vsn_hooks->update)(resource, version, target, depth, &multi_response);
+
+ if (err != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ apr_psprintf(r->pool,
+ "Could not UPDATE %s.",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, multi_response);
+ }
+
+ /* set the Cache-Control header, per the spec */
+ apr_table_setn(r->headers_out, "Cache-Control", "no-cache");
+
+ /* no body */
+ ap_set_content_length(r, 0);
+
+ return DONE;
+}
+
+/* context maintained during LABEL treewalk */
+typedef struct dav_label_walker_ctx
+{
+ /* input: */
+ dav_walk_params w;
+
+ /* label being manipulated */
+ const char *label;
+
+ /* label operation */
+ int label_op;
+#define DAV_LABEL_ADD 1
+#define DAV_LABEL_SET 2
+#define DAV_LABEL_REMOVE 3
+
+ /* version provider hooks */
+ const dav_hooks_vsn *vsn_hooks;
+
+} dav_label_walker_ctx;
+
+static dav_error * dav_label_walker(dav_walk_resource *wres, int calltype)
+{
+ dav_label_walker_ctx *ctx = wres->walk_ctx;
+ dav_error *err = NULL;
+
+ /* Check the state of the resource: must be a version or
+ * non-checkedout version selector
+ */
+ /* ### need a general mechanism for reporting precondition violations
+ * ### (should be returning XML document for 403/409 responses)
+ */
+ if (wres->resource->type != DAV_RESOURCE_TYPE_VERSION &&
+ (wres->resource->type != DAV_RESOURCE_TYPE_REGULAR
+ || !wres->resource->versioned)) {
+ err = dav_new_error(ctx->w.pool, HTTP_CONFLICT, 0,
+ "<DAV:must-be-version-or-version-selector/>");
+ }
+ else if (wres->resource->working) {
+ err = dav_new_error(ctx->w.pool, HTTP_CONFLICT, 0,
+ "<DAV:must-not-be-checked-out/>");
+ }
+ else {
+ /* do the label operation */
+ if (ctx->label_op == DAV_LABEL_REMOVE)
+ err = (*ctx->vsn_hooks->remove_label)(wres->resource, ctx->label);
+ else
+ err = (*ctx->vsn_hooks->add_label)(wres->resource, ctx->label,
+ ctx->label_op == DAV_LABEL_SET);
+ }
+
+ if (err != NULL) {
+ /* ### need utility routine to add response with description? */
+ dav_add_response(wres, err->status, NULL);
+ wres->response->desc = err->desc;
+ }
+
+ return NULL;
+}
+
+static int dav_method_label(request_rec *r)
+{
+ dav_resource *resource;
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r);
+ apr_xml_doc *doc;
+ apr_xml_elem *child;
+ int depth;
+ int result;
+ apr_size_t tsize;
+ dav_error *err;
+ dav_label_walker_ctx ctx = { { 0 } };
+ dav_response *multi_status;
+
+ /* If no versioning provider, or the provider doesn't support
+ * labels, decline the request */
+ if (vsn_hooks == NULL || vsn_hooks->add_label == NULL)
+ return DECLINED;
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 1 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+ if (!resource->exists) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+ if ((depth = dav_get_depth(r, 0)) < 0) {
+ /* dav_get_depth() supplies additional information for the
+ * default message. */
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* parse the request body */
+ if ((result = ap_xml_parse_input(r, &doc)) != OK) {
+ return result;
+ }
+
+ if (doc == NULL || !dav_validate_root(doc, "label")) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The request body does not contain "
+ "a \"label\" element.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* check for add, set, or remove element */
+ if ((child = dav_find_child(doc->root, "add")) != NULL) {
+ ctx.label_op = DAV_LABEL_ADD;
+ }
+ else if ((child = dav_find_child(doc->root, "set")) != NULL) {
+ ctx.label_op = DAV_LABEL_SET;
+ }
+ else if ((child = dav_find_child(doc->root, "remove")) != NULL) {
+ ctx.label_op = DAV_LABEL_REMOVE;
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The \"label\" element does not contain "
+ "an \"add\", \"set\", or \"remove\" element.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* get the label string */
+ if ((child = dav_find_child(child, "label-name")) == NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The label command element does not contain "
+ "a \"label-name\" element.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ apr_xml_to_text(r->pool, child, APR_XML_X2T_INNER, NULL, NULL,
+ &ctx.label, &tsize);
+ if (tsize == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "A \"label-name\" element does not contain "
+ "a label name.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* do the label operation walk */
+ ctx.w.walk_type = DAV_WALKTYPE_NORMAL;
+ ctx.w.func = dav_label_walker;
+ ctx.w.walk_ctx = &ctx;
+ ctx.w.pool = r->pool;
+ ctx.w.root = resource;
+ ctx.vsn_hooks = vsn_hooks;
+
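+    /* dav_label_walker() is applied to the resource and, per the Depth header,
+     * to its children; per-resource failures are collected into multi_status
+     * rather than aborting the walk */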
+ err = (*resource->hooks->walk)(&ctx.w, depth, &multi_status);
+
+ if (err != NULL) {
+ /* some sort of error occurred which terminated the walk */
+ err = dav_push_error(r->pool, err->status, 0,
+ "The LABEL operation was terminated prematurely.",
+ err);
+ return dav_handle_err(r, err, multi_status);
+ }
+
+ if (multi_status != NULL) {
+ /* One or more resources had errors. If depth was zero, convert
+ * response to simple error, else make sure there is an
+ * overall error to pass to dav_handle_err()
+ */
+ if (depth == 0) {
+ err = dav_new_error(r->pool, multi_status->status, 0, multi_status->desc);
+ multi_status = NULL;
+ }
+ else {
+ err = dav_new_error(r->pool, HTTP_MULTI_STATUS, 0,
+ "Errors occurred during the LABEL operation.");
+ }
+
+ return dav_handle_err(r, err, multi_status);
+ }
+
+ /* set the Cache-Control header, per the spec */
+ apr_table_setn(r->headers_out, "Cache-Control", "no-cache");
+
+ /* no body */
+ ap_set_content_length(r, 0);
+
+ return DONE;
+}
+
+static int dav_method_report(request_rec *r)
+{
+ dav_resource *resource;
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r);
+ int result;
+ int label_allowed;
+ apr_xml_doc *doc;
+ dav_error *err;
+
+ /* If no versioning provider, decline the request */
+ if (vsn_hooks == NULL)
+ return DECLINED;
+
+ if ((result = ap_xml_parse_input(r, &doc)) != OK)
+ return result;
+ if (doc == NULL) {
+ /* This supplies additional information for the default msg. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The request body must specify a report.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* Ask repository module to resolve the resource.
+ * First determine whether a Target-Selector header is allowed
+ * for this report.
+ */
+ label_allowed = (*vsn_hooks->report_label_header_allowed)(doc);
+ err = dav_get_resource(r, label_allowed, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ if (!resource->exists) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+ /* set up defaults for the report response */
+ r->status = HTTP_OK;
+ ap_set_content_type(r, DAV_XML_CONTENT_TYPE);
+
+ /* run report hook */
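+    /* (the provider streams the report body directly to r->output_filters,
+     * so data may already be on the wire if an error occurs mid-stream) */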
+ if ((err = (*vsn_hooks->deliver_report)(r, resource, doc,
+ r->output_filters)) != NULL) {
+ if (! r->sent_bodyct)
+ /* No data has been sent to client yet; throw normal error. */
+ return dav_handle_err(r, err, NULL);
+
+ /* If an error occurred during the report delivery, there's
+ basically nothing we can do but abort the connection and
+ log an error. This is one of the limitations of HTTP; it
+ needs to "know" the entire status of the response before
+ generating it, which is just impossible in these streamy
+ response situations. */
+ err = dav_push_error(r->pool, err->status, 0,
+ "Provider encountered an error while streaming"
+ " a REPORT response.", err);
+ dav_log_err(r, err, APLOG_ERR);
+ r->connection->aborted = 1;
+ return DONE;
+ }
+
+ return DONE;
+}
+
+static int dav_method_make_workspace(request_rec *r)
+{
+ dav_resource *resource;
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r);
+ dav_error *err;
+ apr_xml_doc *doc;
+ int result;
+
+ /* if no versioning provider, or the provider does not support workspaces,
+ * decline the request
+ */
+ if (vsn_hooks == NULL || vsn_hooks->make_workspace == NULL)
+ return DECLINED;
+
+ /* ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ /* parse the request body (must be a mkworkspace element) */
+ if ((result = ap_xml_parse_input(r, &doc)) != OK) {
+ return result;
+ }
+
+ if (doc == NULL
+ || !dav_validate_root(doc, "mkworkspace")) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The request body does not contain "
+ "a \"mkworkspace\" element.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* Check request preconditions */
+
+ /* ### need a general mechanism for reporting precondition violations
+ * ### (should be returning XML document for 403/409 responses)
+ */
+
+ /* resource must not already exist */
+ if (resource->exists) {
+ err = dav_new_error(r->pool, HTTP_CONFLICT, 0,
+ "<DAV:resource-must-be-null/>");
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* ### what about locking? */
+
+ /* attempt to create the workspace */
+ if ((err = (*vsn_hooks->make_workspace)(resource, doc)) != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ apr_psprintf(r->pool,
+ "Could not create workspace %s.",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* set the Cache-Control header, per the spec */
+ apr_table_setn(r->headers_out, "Cache-Control", "no-cache");
+
+ /* return an appropriate response (HTTP_CREATED) */
+ return dav_created(r, resource->uri, "Workspace", 0 /*replaced*/);
+}
+
+static int dav_method_make_activity(request_rec *r)
+{
+ dav_resource *resource;
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r);
+ dav_error *err;
+ int result;
+
+ /* if no versioning provider, or the provider does not support activities,
+ * decline the request
+ */
+ if (vsn_hooks == NULL || vsn_hooks->make_activity == NULL)
+ return DECLINED;
+
+ /* ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ /* MKACTIVITY does not have a defined request body. */
+ if ((result = ap_discard_request_body(r)) != OK) {
+ return result;
+ }
+
+ /* Check request preconditions */
+
+ /* ### need a general mechanism for reporting precondition violations
+ * ### (should be returning XML document for 403/409 responses)
+ */
+
+ /* resource must not already exist */
+ if (resource->exists) {
+ err = dav_new_error(r->pool, HTTP_CONFLICT, 0,
+ "<DAV:resource-must-be-null/>");
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* the provider must say whether the resource can be created as
+ an activity, i.e. whether the location is ok. */
+ if (vsn_hooks->can_be_activity != NULL
+ && !(*vsn_hooks->can_be_activity)(resource)) {
+ err = dav_new_error(r->pool, HTTP_FORBIDDEN, 0,
+ "<DAV:activity-location-ok/>");
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* ### what about locking? */
+
+ /* attempt to create the activity */
+ if ((err = (*vsn_hooks->make_activity)(resource)) != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ apr_psprintf(r->pool,
+ "Could not create activity %s.",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* set the Cache-Control header, per the spec */
+ apr_table_setn(r->headers_out, "Cache-Control", "no-cache");
+
+ /* return an appropriate response (HTTP_CREATED) */
+ return dav_created(r, resource->uri, "Activity", 0 /*replaced*/);
+}
+
+static int dav_method_baseline_control(request_rec *r)
+{
+ /* ### */
+ return HTTP_METHOD_NOT_ALLOWED;
+}
+
+static int dav_method_merge(request_rec *r)
+{
+ dav_resource *resource;
+ dav_resource *source_resource;
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r);
+ dav_error *err;
+ int result;
+ apr_xml_doc *doc;
+ apr_xml_elem *source_elem;
+ apr_xml_elem *href_elem;
+ apr_xml_elem *prop_elem;
+ const char *source;
+ int no_auto_merge;
+ int no_checkout;
+ dav_lookup_result lookup;
+
+ /* If no versioning provider, decline the request */
+ if (vsn_hooks == NULL)
+ return DECLINED;
+
+ if ((result = ap_xml_parse_input(r, &doc)) != OK)
+ return result;
+
+ if (doc == NULL || !dav_validate_root(doc, "merge")) {
+ /* This supplies additional information for the default msg. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The request body must be present and must be a "
+ "DAV:merge element.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ if ((source_elem = dav_find_child(doc->root, "source")) == NULL) {
+ /* This supplies additional information for the default msg. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The DAV:merge element must contain a DAV:source "
+ "element.");
+ return HTTP_BAD_REQUEST;
+ }
+ if ((href_elem = dav_find_child(source_elem, "href")) == NULL) {
+ /* This supplies additional information for the default msg. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The DAV:source element must contain a DAV:href "
+ "element.");
+ return HTTP_BAD_REQUEST;
+ }
+ source = dav_xml_get_cdata(href_elem, r->pool, 1 /* strip_white */);
+
+ /* get a subrequest for the source, so that we can get a dav_resource
+ for that source. */
+ lookup = dav_lookup_uri(source, r, 0 /* must_be_absolute */);
+ if (lookup.rnew == NULL) {
+ if (lookup.err.status == HTTP_BAD_REQUEST) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "%s", lookup.err.desc);
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* ### this assumes that dav_lookup_uri() only generates a status
+ * ### that Apache can provide a status line for!! */
+
+ return dav_error_response(r, lookup.err.status, lookup.err.desc);
+ }
+ if (lookup.rnew->status != HTTP_OK) {
+ /* ### how best to report this... */
+ return dav_error_response(r, lookup.rnew->status,
+ "Merge source URI had an error.");
+ }
+ err = dav_get_resource(lookup.rnew, 0 /* label_allowed */,
+ 0 /* use_checked_in */, &source_resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ no_auto_merge = dav_find_child(doc->root, "no-auto-merge") != NULL;
+ no_checkout = dav_find_child(doc->root, "no-checkout") != NULL;
+
+ prop_elem = dav_find_child(doc->root, "prop");
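+    /* the optional DAV:prop element is passed through, untouched, to the
+     * provider's merge() hook */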
+
+ /* ### check RFC. I believe the DAV:merge element may contain any
+ ### element also allowed within DAV:checkout. need to extract them
+ ### here, and pass them along.
+ ### if so, then refactor the CHECKOUT method handling so we can reuse
+ ### the code. maybe create a structure to hold CHECKOUT parameters
+ ### which can be passed to the checkout() and merge() hooks. */
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+ if (!resource->exists) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+ /* ### check the source and target resources flags/types */
+
+ /* ### do lock checks, once behavior is defined */
+
+ /* set the Cache-Control header, per the spec */
+ /* ### correct? */
+ apr_table_setn(r->headers_out, "Cache-Control", "no-cache");
+
+ /* Initialize these values for a standard MERGE response. If the MERGE
+ is going to do something different (i.e. an error), then it must
+ return a dav_error, and we'll reset these values properly. */
+ r->status = HTTP_OK;
+ ap_set_content_type(r, "text/xml");
+
+ /* ### should we do any preliminary response generation? probably not,
+ ### because we may have an error, thus demanding something else in
+ ### the response body. */
+
+ /* Do the merge, including any response generation. */
+ if ((err = (*vsn_hooks->merge)(resource, source_resource,
+ no_auto_merge, no_checkout,
+ prop_elem,
+ r->output_filters)) != NULL) {
+ /* ### is err->status the right error here? */
+ err = dav_push_error(r->pool, err->status, 0,
+ apr_psprintf(r->pool,
+ "Could not MERGE resource \"%s\" "
+ "into \"%s\".",
+ ap_escape_html(r->pool, source),
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* the response was fully generated by the merge() hook. */
+ /* ### urk. does this prevent logging? need to check... */
+ return DONE;
+}
+
+static int dav_method_bind(request_rec *r)
+{
+ dav_resource *resource;
+ dav_resource *binding;
+ dav_auto_version_info av_info;
+ const dav_hooks_binding *binding_hooks = DAV_GET_HOOKS_BINDING(r);
+ const char *dest;
+ dav_error *err;
+ dav_error *err2;
+ dav_response *multi_response = NULL;
+ dav_lookup_result lookup;
+ int overwrite;
+
+ /* If no bindings provider, decline the request */
+ if (binding_hooks == NULL)
+ return DECLINED;
+
+ /* Ask repository module to resolve the resource */
+ err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
+ &resource);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ if (!resource->exists) {
+ /* Apache will supply a default error for this. */
+ return HTTP_NOT_FOUND;
+ }
+
+ /* get the destination URI */
+ dest = apr_table_get(r->headers_in, "Destination");
+ if (dest == NULL) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "The request is missing a Destination header.");
+ return HTTP_BAD_REQUEST;
+ }
+
+ lookup = dav_lookup_uri(dest, r, 0 /* must_be_absolute */);
+ if (lookup.rnew == NULL) {
+ if (lookup.err.status == HTTP_BAD_REQUEST) {
+ /* This supplies additional information for the default message. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "%s", lookup.err.desc);
+ return HTTP_BAD_REQUEST;
+ }
+ else if (lookup.err.status == HTTP_BAD_GATEWAY) {
+ /* ### Bindings protocol draft 02 says to return 507
+ * ### (Cross Server Binding Forbidden); Apache already defines 507
+ * ### as HTTP_INSUFFICIENT_STORAGE. So, for now, we'll return
+ * ### HTTP_FORBIDDEN
+ */
+ return dav_error_response(r, HTTP_FORBIDDEN,
+ "Cross server bindings are not "
+ "allowed by this server.");
+ }
+
+ /* ### this assumes that dav_lookup_uri() only generates a status
+ * ### that Apache can provide a status line for!! */
+
+ return dav_error_response(r, lookup.err.status, lookup.err.desc);
+ }
+ if (lookup.rnew->status != HTTP_OK) {
+ /* ### how best to report this... */
+ return dav_error_response(r, lookup.rnew->status,
+ "Destination URI had an error.");
+ }
+
+ /* resolve binding resource */
+ err = dav_get_resource(lookup.rnew, 0 /* label_allowed */,
+ 0 /* use_checked_in */, &binding);
+ if (err != NULL)
+ return dav_handle_err(r, err, NULL);
+
+ /* are the two resources handled by the same repository? */
+ if (resource->hooks != binding->hooks) {
+ /* ### this message exposes some backend config, but screw it... */
+ return dav_error_response(r, HTTP_BAD_GATEWAY,
+ "Destination URI is handled by a "
+ "different repository than the source URI. "
+ "BIND between repositories is not possible.");
+ }
+
+ /* get and parse the overwrite header value */
+ if ((overwrite = dav_get_overwrite(r)) < 0) {
+ /* dav_get_overwrite() supplies additional information for the
+ * default message. */
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* quick failure test: if dest exists and overwrite is false. */
+ if (binding->exists && !overwrite) {
+ return dav_error_response(r, HTTP_PRECONDITION_FAILED,
+ "Destination is not empty and "
+ "Overwrite is not \"T\"");
+ }
+
+ /* are the source and destination the same? */
+ if ((*resource->hooks->is_same_resource)(resource, binding)) {
+ return dav_error_response(r, HTTP_FORBIDDEN,
+ "Source and Destination URIs are the same.");
+ }
+
+ /*
+ * Check If-Headers and existing locks for destination. Note that we
+ * use depth==infinity since the target (hierarchy) will be deleted
+ * before the move/copy is completed.
+ *
+ * Note that we are overwriting the target, which implies a DELETE, so
+ * we are subject to the error/response rules as a DELETE. Namely, we
+ * will return a 424 error if any of the validations fail.
+ * (see dav_method_delete() for more information)
+ */
+ if ((err = dav_validate_request(lookup.rnew, binding, DAV_INFINITY, NULL,
+ &multi_response,
+ DAV_VALIDATE_PARENT
+ | DAV_VALIDATE_USE_424, NULL)) != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ apr_psprintf(r->pool,
+ "Could not BIND %s due to a "
+ "failed precondition on the "
+ "destination (e.g. locks).",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, multi_response);
+ }
+
+ /* guard against creating circular bindings */
+ if (resource->collection
+ && (*resource->hooks->is_parent_resource)(resource, binding)) {
+ return dav_error_response(r, HTTP_FORBIDDEN,
+ "Source collection contains the Destination.");
+ }
+ if (resource->collection
+ && (*resource->hooks->is_parent_resource)(binding, resource)) {
+ /* The destination must exist (since it contains the source), and
+ * a condition above implies Overwrite==T. Obviously, we cannot
+ * delete the Destination before the BIND, as that would
+ * delete the Source.
+ */
+
+ return dav_error_response(r, HTTP_FORBIDDEN,
+ "Destination collection contains the Source and "
+ "Overwrite has been specified.");
+ }
+
+ /* prepare the destination collection for modification */
+ if ((err = dav_auto_checkout(r, binding, 1 /* parent_only */,
+ &av_info)) != NULL) {
+ /* could not make destination writable */
+ return dav_handle_err(r, err, NULL);
+ }
+
+ /* If target exists, remove it first (we know Overwrite must be TRUE).
+ * Then try to bind to the resource.
+ */
+ if (binding->exists)
+ err = (*resource->hooks->remove_resource)(binding, &multi_response);
+
+ if (err == NULL) {
+ err = (*binding_hooks->bind_resource)(resource, binding);
+ }
+
+ /* restore parent collection states */
+ err2 = dav_auto_checkin(r, NULL,
+ err != NULL /* undo if error */,
+ 0 /* unlock */, &av_info);
+
+ /* check for error from remove/bind operations */
+ if (err != NULL) {
+ err = dav_push_error(r->pool, err->status, 0,
+ apr_psprintf(r->pool,
+ "Could not BIND %s.",
+ ap_escape_html(r->pool, r->uri)),
+ err);
+ return dav_handle_err(r, err, multi_response);
+ }
+
+ /* check for errors from reverting writability */
+ if (err2 != NULL) {
+ /* just log a warning */
+ err = dav_push_error(r->pool, err2->status, 0,
+ "The BIND was successful, but there was a "
+ "problem automatically checking in the "
+ "source parent collection.",
+ err2);
+ dav_log_err(r, err, APLOG_WARNING);
+ }
+
+ /* return an appropriate response (HTTP_CREATED) */
+ /* ### spec doesn't say what happens when destination was replaced */
+ return dav_created(r, lookup.rnew->uri, "Binding", 0);
+}
+
+
+/*
+ * Response handler for DAV resources
+ */
+static int dav_handler(request_rec *r)
+{
+ if (strcmp(r->handler, DAV_HANDLER_NAME) != 0)
+ return DECLINED;
+
+ /* Reject requests with an unescaped hash character, as these may
+ * be more destructive than the user intended. */
+ if (r->parsed_uri.fragment != NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "buggy client used un-escaped hash in Request-URI");
+ return dav_error_response(r, HTTP_BAD_REQUEST,
+ "The request was invalid: the URI included "
+ "an un-escaped hash character");
+ }
+
+ /* ### do we need to do anything with r->proxyreq ?? */
+
+ /*
+ * ### anything else to do here? could another module and/or
+ * ### config option "take over" the handler here? i.e. how do
+ * ### we lock down this hierarchy so that we are the ultimate
+ * ### arbiter? (or do we simply depend on the administrator
+ * ### to avoid conflicting configurations?)
+ */
+
+ /*
+ * Set up the methods mask, since that's one of the reasons this handler
+ * gets called, and lower-level things may need the info.
+ *
+ * First, set the mask to the methods we handle directly. Since by
+ * definition we own our managed space, we unconditionally set
+ * the r->allowed field rather than ORing our values with anything
+ * any other module may have put in there.
+ *
+ * These are the HTTP-defined methods that we handle directly.
+ */
+ r->allowed = 0
+ | (AP_METHOD_BIT << M_GET)
+ | (AP_METHOD_BIT << M_PUT)
+ | (AP_METHOD_BIT << M_DELETE)
+ | (AP_METHOD_BIT << M_OPTIONS)
+ | (AP_METHOD_BIT << M_INVALID);
+
+ /*
+ * These are the DAV methods we handle.
+ */
+ r->allowed |= 0
+ | (AP_METHOD_BIT << M_COPY)
+ | (AP_METHOD_BIT << M_LOCK)
+ | (AP_METHOD_BIT << M_UNLOCK)
+ | (AP_METHOD_BIT << M_MKCOL)
+ | (AP_METHOD_BIT << M_MOVE)
+ | (AP_METHOD_BIT << M_PROPFIND)
+ | (AP_METHOD_BIT << M_PROPPATCH);
+
+ /*
+ * These are methods that we don't handle directly, but let the
+ * server's default handler do for us as our agent.
+ */
+ r->allowed |= 0
+ | (AP_METHOD_BIT << M_POST);
+
+ /* ### hrm. if we return HTTP_METHOD_NOT_ALLOWED, then an Allow header
+ * ### is sent; it will need the other allowed states; since the default
+ * ### handler is not called on error, then it doesn't add the other
+ * ### allowed states, so we must
+ */
+
+ /* ### we might need to refine this for just where we return the error.
+ * ### also, there is the issue with other methods (see ISSUES)
+ */
+
+ /* dispatch the appropriate method handler */
+ if (r->method_number == M_GET) {
+ return dav_method_get(r);
+ }
+
+ if (r->method_number == M_PUT) {
+ return dav_method_put(r);
+ }
+
+ if (r->method_number == M_POST) {
+ return dav_method_post(r);
+ }
+
+ if (r->method_number == M_DELETE) {
+ return dav_method_delete(r);
+ }
+
+ if (r->method_number == M_OPTIONS) {
+ return dav_method_options(r);
+ }
+
+ if (r->method_number == M_PROPFIND) {
+ return dav_method_propfind(r);
+ }
+
+ if (r->method_number == M_PROPPATCH) {
+ return dav_method_proppatch(r);
+ }
+
+ if (r->method_number == M_MKCOL) {
+ return dav_method_mkcol(r);
+ }
+
+ if (r->method_number == M_COPY) {
+ return dav_method_copymove(r, DAV_DO_COPY);
+ }
+
+ if (r->method_number == M_MOVE) {
+ return dav_method_copymove(r, DAV_DO_MOVE);
+ }
+
+ if (r->method_number == M_LOCK) {
+ return dav_method_lock(r);
+ }
+
+ if (r->method_number == M_UNLOCK) {
+ return dav_method_unlock(r);
+ }
+
+ if (r->method_number == M_VERSION_CONTROL) {
+ return dav_method_vsn_control(r);
+ }
+
+ if (r->method_number == M_CHECKOUT) {
+ return dav_method_checkout(r);
+ }
+
+ if (r->method_number == M_UNCHECKOUT) {
+ return dav_method_uncheckout(r);
+ }
+
+ if (r->method_number == M_CHECKIN) {
+ return dav_method_checkin(r);
+ }
+
+ if (r->method_number == M_UPDATE) {
+ return dav_method_update(r);
+ }
+
+ if (r->method_number == M_LABEL) {
+ return dav_method_label(r);
+ }
+
+ if (r->method_number == M_REPORT) {
+ return dav_method_report(r);
+ }
+
+ if (r->method_number == M_MKWORKSPACE) {
+ return dav_method_make_workspace(r);
+ }
+
+ if (r->method_number == M_MKACTIVITY) {
+ return dav_method_make_activity(r);
+ }
+
+ if (r->method_number == M_BASELINE_CONTROL) {
+ return dav_method_baseline_control(r);
+ }
+
+ if (r->method_number == M_MERGE) {
+ return dav_method_merge(r);
+ }
+
+ /* BIND method */
+ if (r->method_number == dav_methods[DAV_M_BIND]) {
+ return dav_method_bind(r);
+ }
+
+ /* DASL method */
+ if (r->method_number == dav_methods[DAV_M_SEARCH]) {
+ return dav_method_search(r);
+ }
+
+ /* ### add'l methods for Advanced Collections, ACLs */
+
+ return DECLINED;
+}
+
+static int dav_fixups(request_rec *r)
+{
+ dav_dir_conf *conf;
+
+ /* quickly ignore any HTTP/0.9 requests which aren't subreqs. */
+ if (r->assbackwards && !r->main) {
+ return DECLINED;
+ }
+
+ conf = (dav_dir_conf *)ap_get_module_config(r->per_dir_config,
+ &dav_module);
+
+ /* if DAV is not enabled, then we've got nothing to do */
+ if (conf->provider == NULL) {
+ return DECLINED;
+ }
+
+ /* We are going to handle almost every request. In certain cases,
+ the provider maps to the filesystem (thus, handle_get is
+ FALSE), and core Apache will handle it. For that case, we
+ just return right away. */
+ if (r->method_number == M_GET) {
+ /*
+ * ### need some work to pull Content-Type and Content-Language
+ * ### from the property database.
+ */
+
+ /*
+ * If the repository hasn't indicated that it will handle the
+ * GET method, then just punt.
+ *
+ * ### this isn't quite right... taking over the response can break
+ * ### things like mod_negotiation. need to look into this some more.
+ */
+ if (!conf->provider->repos->handle_get) {
+ return DECLINED;
+ }
+ }
+
+ /* ### this is wrong. We should only be setting the r->handler for the
+ * requests that mod_dav knows about. If we set the handler for M_POST
+ * requests, then CGI scripts that use POST will return the source for the
+ * script. However, mod_dav DOES handle POST, so something else needs
+ * to be fixed.
+ */
+ if (r->method_number != M_POST) {
+
+ /* We are going to be handling the response for this resource. */
+ r->handler = DAV_HANDLER_NAME;
+ return OK;
+ }
+
+ return DECLINED;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_handler(dav_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_config(dav_init_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_fixups(dav_fixups, NULL, NULL, APR_HOOK_MIDDLE);
+
+ dav_hook_find_liveprop(dav_core_find_liveprop, NULL, NULL, APR_HOOK_LAST);
+ dav_hook_insert_all_liveprops(dav_core_insert_all_liveprops,
+ NULL, NULL, APR_HOOK_MIDDLE);
+
+ dav_core_register_uris(p);
+}
+
+/*---------------------------------------------------------------------------
+ *
+ * Configuration info for the module
+ */
+
+static const command_rec dav_cmds[] =
+{
+ /* per directory/location */
+ AP_INIT_TAKE1("DAV", dav_cmd_dav, NULL, ACCESS_CONF,
+ "specify the DAV provider for a directory or location"),
+
+ /* per directory/location, or per server */
+ AP_INIT_TAKE1("DAVMinTimeout", dav_cmd_davmintimeout, NULL,
+ ACCESS_CONF|RSRC_CONF,
+ "specify minimum allowed timeout"),
+
+ /* per directory/location, or per server */
+ AP_INIT_FLAG("DAVDepthInfinity", dav_cmd_davdepthinfinity, NULL,
+ ACCESS_CONF|RSRC_CONF,
+ "allow Depth infinity PROPFIND requests"),
+
+ { NULL }
+};
+
+module DAV_DECLARE_DATA dav_module =
+{
+ STANDARD20_MODULE_STUFF,
+ dav_create_dir_config, /* dir config creator */
+ dav_merge_dir_config, /* dir merger --- default is to override */
+ dav_create_server_config, /* server config */
+ dav_merge_server_config, /* merge server config */
+ dav_cmds, /* command table */
+ register_hooks, /* register hooks */
+};
+
+APR_HOOK_STRUCT(
+ APR_HOOK_LINK(gather_propsets)
+ APR_HOOK_LINK(find_liveprop)
+ APR_HOOK_LINK(insert_all_liveprops)
+ )
+
+APR_IMPLEMENT_EXTERNAL_HOOK_VOID(dav, DAV, gather_propsets,
+ (apr_array_header_t *uris),
+ (uris))
+
+APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(dav, DAV, int, find_liveprop,
+ (const dav_resource *resource,
+ const char *ns_uri, const char *name,
+ const dav_hooks_liveprop **hooks),
+ (resource, ns_uri, name, hooks), 0)
+
+APR_IMPLEMENT_EXTERNAL_HOOK_VOID(dav, DAV, insert_all_liveprops,
+ (request_rec *r, const dav_resource *resource,
+ dav_prop_insert what, apr_text_header *phdr),
+ (r, resource, what, phdr))
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/mod_dav.dsp b/rubbos/app/httpd-2.0.64/modules/dav/main/mod_dav.dsp
new file mode 100644
index 00000000..38972a11
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/mod_dav.dsp
@@ -0,0 +1,164 @@
+# Microsoft Developer Studio Project File - Name="mod_dav" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_dav - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_dav.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_dav.mak" CFG="mod_dav - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_dav - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_dav - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_dav - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../../include" /I "../../../srclib/apr/include" /I "../../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "DAV_DECLARE_EXPORT" /Fd"Release\mod_dav_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /out:"Release/mod_dav.so" /base:@..\..\..\os\win32\BaseAddr.ref,mod_dav.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_dav.so" /base:@..\..\..\os\win32\BaseAddr.ref,mod_dav.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_dav - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../../include" /I "../../../srclib/apr/include" /I "../../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "DAV_DECLARE_EXPORT" /Fd"Debug\mod_dav_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_dav.so" /base:@..\..\..\os\win32\BaseAddr.ref,mod_dav.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_dav.so" /base:@..\..\..\os\win32\BaseAddr.ref,mod_dav.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_dav - Win32 Release"
+# Name "mod_dav - Win32 Debug"
+# Begin Group "Source Files"
+
+# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;hpj;bat;for;f90"
+# Begin Source File
+
+SOURCE=.\liveprop.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_dav.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\props.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\providers.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\std_liveprop.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\util.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\util_lock.c
+# End Source File
+# End Group
+# Begin Group "Header Files"
+
+# PROP Default_Filter "h;hpp;hxx;hm;inl;fi;fd"
+# Begin Source File
+
+SOURCE=.\mod_dav.h
+# End Source File
+# End Group
+# Begin Source File
+
+SOURCE=.\mod_dav.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_dav - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\..\build\win32\win32ver.awk
+
+".\mod_dav.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../../build/win32/win32ver.awk mod_dav.so "dav_module for Apache" ../../../include/ap_release.h > .\mod_dav.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_dav - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\..\build\win32\win32ver.awk
+
+".\mod_dav.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../../build/win32/win32ver.awk mod_dav.so "dav_module for Apache" ../../../include/ap_release.h > .\mod_dav.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/mod_dav.h b/rubbos/app/httpd-2.0.64/modules/dav/main/mod_dav.h
new file mode 100644
index 00000000..0fb9ac9b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/mod_dav.h
@@ -0,0 +1,2420 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+** DAV extension module for Apache 2.0.*
+*/
+
+#ifndef _MOD_DAV_H_
+#define _MOD_DAV_H_
+
+#include "apr_hooks.h"
+#include "apr_hash.h"
+#include "apr_dbm.h"
+#include "apr_tables.h"
+
+#include "httpd.h"
+#include "util_filter.h"
+#include "util_xml.h"
+
+#include <limits.h> /* for INT_MAX */
+#include <time.h> /* for time_t */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define DAV_VERSION AP_SERVER_BASEREVISION
+
+#define DAV_XML_HEADER "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
+#define DAV_XML_CONTENT_TYPE "text/xml; charset=\"utf-8\""
+
+#define DAV_READ_BLOCKSIZE 2048 /* used for reading input blocks */
+
+#define DAV_RESPONSE_BODY_1 "<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\n<html><head>\n<title>"
+#define DAV_RESPONSE_BODY_2 "</title>\n</head><body>\n<h1>"
+#define DAV_RESPONSE_BODY_3 "</h1>\n<p>"
+#define DAV_RESPONSE_BODY_4 "</p>\n"
+#define DAV_RESPONSE_BODY_5 "</body></html>\n"
+
+#define DAV_DO_COPY 0
+#define DAV_DO_MOVE 1
+
+
+#if 1
+#define DAV_DEBUG 1
+#define DEBUG_CR "\n"
+#define DBG0(f) ap_log_error(APLOG_MARK, \
+ APLOG_ERR, 0, NULL, (f))
+#define DBG1(f,a1) ap_log_error(APLOG_MARK, \
+ APLOG_ERR, 0, NULL, f, a1)
+#define DBG2(f,a1,a2) ap_log_error(APLOG_MARK, \
+ APLOG_ERR, 0, NULL, f, a1, a2)
+#define DBG3(f,a1,a2,a3) ap_log_error(APLOG_MARK, \
+ APLOG_ERR, 0, NULL, f, a1, a2, a3)
+#else
+#undef DAV_DEBUG
+#define DEBUG_CR ""
+#endif
+
+#define DAV_INFINITY INT_MAX /* for the Depth: header */
+
+/* Create a set of DAV_DECLARE(type), DAV_DECLARE_NONSTD(type) and
+ * DAV_DECLARE_DATA with appropriate export and import tags for the platform
+ */
+#if !defined(WIN32)
+#define DAV_DECLARE(type) type
+#define DAV_DECLARE_NONSTD(type) type
+#define DAV_DECLARE_DATA
+#elif defined(DAV_DECLARE_STATIC)
+#define DAV_DECLARE(type) type __stdcall
+#define DAV_DECLARE_NONSTD(type) type
+#define DAV_DECLARE_DATA
+#elif defined(DAV_DECLARE_EXPORT)
+#define DAV_DECLARE(type) __declspec(dllexport) type __stdcall
+#define DAV_DECLARE_NONSTD(type) __declspec(dllexport) type
+#define DAV_DECLARE_DATA __declspec(dllexport)
+#else
+#define DAV_DECLARE(type) __declspec(dllimport) type __stdcall
+#define DAV_DECLARE_NONSTD(type) __declspec(dllimport) type
+#define DAV_DECLARE_DATA __declspec(dllimport)
+#endif
+
+/* --------------------------------------------------------------------
+**
+** ERROR MANAGEMENT
+*/
+
+/*
+** dav_error structure.
+**
+** In most cases, mod_dav uses a pointer to a dav_error structure. If the
+** pointer is NULL, then no error has occurred.
+**
+** In certain cases, a dav_error structure is directly used. In these cases,
+** a status value of 0 means that an error has not occurred.
+**
+** Note: this implies that status != 0 whenever an error occurs.
+**
+** The desc field is optional (it may be NULL). When NULL, it typically
+** implies that Apache has a proper description for the specified status.
+*/
+typedef struct dav_error {
+ int status; /* suggested HTTP status (0 for no error) */
+ int error_id; /* DAV-specific error ID */
+ const char *desc; /* DAV:responsedescription and error log */
+
+ int save_errno; /* copy of errno causing the error */
+
+ const char *namespace; /* [optional] namespace of error */
+ const char *tagname; /* name of error-tag */
+
+ struct dav_error *prev; /* previous error (in stack) */
+
+} dav_error;
+
+/*
+** Create a new error structure. save_errno will be filled with the current
+** errno value.
+*/
+DAV_DECLARE(dav_error*) dav_new_error(apr_pool_t *p, int status,
+ int error_id, const char *desc);
+
+
+/*
+** Create a new error structure with tagname and (optional) namespace;
+** namespace may be NULL, which means "DAV:". save_errno will be
+** filled with the current errno value.
+*/
+DAV_DECLARE(dav_error*) dav_new_error_tag(apr_pool_t *p, int status,
+ int error_id, const char *desc,
+ const char *namespace,
+ const char *tagname);
+
+
+/*
+** Push a new error description onto the stack of errors.
+**
+** This function is used to provide an additional description to an existing
+** error.
+**
+** <status> should contain the caller's view of what the current status is,
+** given the underlying error. If it doesn't have a better idea, then the
+** caller should pass prev->status.
+**
+** <error_id> can specify a new error_id since the topmost description has
+** changed.
+*/
+DAV_DECLARE(dav_error*) dav_push_error(apr_pool_t *p, int status, int error_id,
+ const char *desc, dav_error *prev);
+
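+/*
+** Example (minimal sketch): chaining a caller-level description onto an
+** underlying error before returning it. "some_provider_operation" is a
+** hypothetical call standing in for any operation that may fail.
+**
+**   dav_error *err = some_provider_operation(resource);
+**   if (err != NULL)
+**       return dav_push_error(p, err->status, 0,
+**                             "Could not complete the operation.", err);
+*/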
+
+/* error ID values... */
+
+/* IF: header errors */
+#define DAV_ERR_IF_PARSE 100 /* general parsing error */
+#define DAV_ERR_IF_MULTIPLE_NOT 101 /* multiple "Not" found */
+#define DAV_ERR_IF_UNK_CHAR 102 /* unknown char in header */
+#define DAV_ERR_IF_ABSENT 103 /* no locktokens given */
+#define DAV_ERR_IF_TAGGED 104 /* in parsing tagged-list */
+#define DAV_ERR_IF_UNCLOSED_PAREN 105 /* in no-tagged-list */
+
+/* Prop DB errors */
+#define DAV_ERR_PROP_BAD_MAJOR 200 /* major version was wrong */
+#define DAV_ERR_PROP_READONLY 201 /* prop is read-only */
+#define DAV_ERR_PROP_NO_DATABASE 202 /* writable db not avail */
+#define DAV_ERR_PROP_NOT_FOUND 203 /* prop not found */
+#define DAV_ERR_PROP_BAD_LOCKDB 204 /* could not open lockdb */
+#define DAV_ERR_PROP_OPENING 205 /* problem opening propdb */
+#define DAV_ERR_PROP_EXEC 206 /* problem exec'ing patch */
+
+/* Predefined DB errors */
+/* ### any to define?? */
+
+/* Predefined locking system errors */
+#define DAV_ERR_LOCK_OPENDB 400 /* could not open lockdb */
+#define DAV_ERR_LOCK_NO_DB 401 /* no database defined */
+#define DAV_ERR_LOCK_CORRUPT_DB 402 /* DB is corrupt */
+#define DAV_ERR_LOCK_UNK_STATE_TOKEN 403 /* unknown State-token */
+#define DAV_ERR_LOCK_PARSE_TOKEN 404 /* bad opaquelocktoken */
+#define DAV_ERR_LOCK_SAVE_LOCK 405 /* err saving locks */
+
+/*
+** Some comments on Error ID values:
+**
+** The numbers do not necessarily need to be unique. Uniqueness simply means
+** that two errors that have not been predefined above can be distinguished
+** from each other. At the moment, mod_dav does not use this distinguishing
+** feature, but it could be used in the future to collapse <response> elements
+** into groups based on the error ID (and associated responsedescription).
+**
+** If a compute_desc is provided, then the error ID should be unique within
+** the context of the compute_desc function (so the function can figure out
+** what to fill into the desc).
+**
+** Basically, subsystems can ignore defining new error ID values if they want
+** to. The subsystems *do* need to return the predefined errors when
+** appropriate, so that mod_dav can figure out what to do. Subsystems can
+** simply leave the error ID field unfilled (zero) if there isn't an error
+** that must be placed there.
+*/
+
+
+/* --------------------------------------------------------------------
+**
+** HOOK STRUCTURES
+**
+** These are here for forward-declaration purposes. For more information,
+** see the section titled "HOOK HANDLING", plus each structure definition.
+*/
+
+/* forward-declare this structure */
+typedef struct dav_hooks_propdb dav_hooks_propdb;
+typedef struct dav_hooks_locks dav_hooks_locks;
+typedef struct dav_hooks_vsn dav_hooks_vsn;
+typedef struct dav_hooks_repository dav_hooks_repository;
+typedef struct dav_hooks_liveprop dav_hooks_liveprop;
+typedef struct dav_hooks_binding dav_hooks_binding;
+typedef struct dav_hooks_search dav_hooks_search;
+
+/* ### deprecated name */
+typedef dav_hooks_propdb dav_hooks_db;
+
+
+/* --------------------------------------------------------------------
+**
+** RESOURCE HANDLING
+*/
+
+/*
+** Resource Types:
+** The base protocol defines only file and collection resources.
+** The versioning protocol defines several additional resource types
+** to represent artifacts of a version control system.
+**
+** This enumeration identifies the type of URL used to identify the
+** resource. Since the same resource may have more than one type of
+** URL which can identify it, dav_resource_type cannot be used
+** alone to determine the type of the resource; attributes of the
+** dav_resource object must also be consulted.
+*/
+typedef enum {
+ DAV_RESOURCE_TYPE_UNKNOWN,
+
+ DAV_RESOURCE_TYPE_REGULAR, /* file or collection; could be
+ * unversioned, or version selector,
+ * or baseline selector */
+
+ DAV_RESOURCE_TYPE_VERSION, /* version or baseline URL */
+
+ DAV_RESOURCE_TYPE_HISTORY, /* version or baseline history URL */
+
+ DAV_RESOURCE_TYPE_WORKING, /* working resource URL */
+
+ DAV_RESOURCE_TYPE_WORKSPACE, /* workspace URL */
+
+ DAV_RESOURCE_TYPE_ACTIVITY, /* activity URL */
+
+ DAV_RESOURCE_TYPE_PRIVATE /* repository-private type */
+
+} dav_resource_type;
+
+/*
+** Opaque, repository-specific information for a resource.
+*/
+typedef struct dav_resource_private dav_resource_private;
+
+/*
+** Resource descriptor, generated by a repository provider.
+**
+** Note: the lock-null state is not explicitly represented here,
+** since it may be expensive to compute. Use dav_get_resource_state()
+** to determine whether a non-existent resource is a lock-null resource.
+**
+** A quick explanation of how the flags can apply to different resources:
+**
+** unversioned file or collection:
+** type = DAV_RESOURCE_TYPE_REGULAR
+** exists = ? (1 if exists)
+** collection = ? (1 if collection)
+** versioned = 0
+** baselined = 0
+** working = 0
+**
+** version-controlled resource or configuration:
+** type = DAV_RESOURCE_TYPE_REGULAR
+** exists = 1
+** collection = ? (1 if collection)
+** versioned = 1
+** baselined = ? (1 if configuration)
+** working = ? (1 if checked out)
+**
+** version/baseline history:
+** type = DAV_RESOURCE_TYPE_HISTORY
+** exists = 1
+** collection = 0
+** versioned = 0
+** baselined = 0
+** working = 0
+**
+** version/baseline:
+** type = DAV_RESOURCE_TYPE_VERSION
+** exists = 1
+** collection = ? (1 if collection)
+** versioned = 1
+** baselined = ? (1 if baseline)
+** working = 0
+**
+** working resource:
+** type = DAV_RESOURCE_TYPE_WORKING
+** exists = 1
+** collection = ? (1 if collection)
+** versioned = 1
+** baselined = 0
+** working = 1
+**
+** workspace:
+** type = DAV_RESOURCE_TYPE_WORKSPACE
+** exists = ? (1 if exists)
+** collection = 1
+** versioned = ? (1 if version-controlled)
+** baselined = ? (1 if baseline-controlled)
+** working = ? (1 if checked out)
+**
+** activity:
+** type = DAV_RESOURCE_TYPE_ACTIVITY
+** exists = ? (1 if exists)
+** collection = 0
+** versioned = 0
+** baselined = 0
+** working = 0
+*/
+typedef struct dav_resource {
+ dav_resource_type type;
+
+ int exists; /* 0 => null resource */
+
+ int collection; /* 0 => file; can be 1 for
+ * REGULAR, VERSION, and WORKING resources,
+ * and is always 1 for WORKSPACE */
+
+ int versioned; /* 0 => unversioned; can be 1 for
+ * REGULAR and WORKSPACE resources,
+ * and is always 1 for VERSION and WORKING */
+
+ int baselined; /* 0 => not baselined; can be 1 for
+ * REGULAR, VERSION, and WORKSPACE resources;
+ * versioned == 1 when baselined == 1 */
+
+ int working; /* 0 => not checked out; can be 1 for
+ * REGULAR and WORKSPACE resources,
+ * and is always 1 for WORKING */
+
+ const char *uri; /* the URI for this resource */
+
+ dav_resource_private *info; /* the provider's private info */
+
+ const dav_hooks_repository *hooks; /* hooks used for this resource */
+
+ /* When allocating items related specifically to this resource, the
+ following pool should be used. Its lifetime will be at least as
+ long as the dav_resource structure. */
+ apr_pool_t *pool;
+
+} dav_resource;
+
+/*
+** Lock token type. Lock providers define the details of a lock token.
+** However, all providers are expected to at least be able to parse
+** the "opaquelocktoken" scheme, which is represented by a uuid_t.
+*/
+typedef struct dav_locktoken dav_locktoken;
+
+
+/* --------------------------------------------------------------------
+**
+** BUFFER HANDLING
+**
+** These buffers are used as a lightweight buffer reuse mechanism. Apache
+** provides sub-pool creation and destruction to much the same effect, but
+** the sub-pools are a bit more general and heavyweight than these buffers.
+*/
+
+/* buffer for reuse; can grow to accommodate the needed size */
+typedef struct
+{
+ apr_size_t alloc_len; /* how much has been allocated */
+ apr_size_t cur_len; /* how much is currently being used */
+ char *buf; /* buffer contents */
+} dav_buffer;
+#define DAV_BUFFER_MINSIZE 256 /* minimum size for buffer */
+#define DAV_BUFFER_PAD 64 /* amount of pad when growing */
+
+/* set the cur_len to the given size and ensure space is available */
+DAV_DECLARE(void) dav_set_bufsize(apr_pool_t *p, dav_buffer *pbuf,
+ apr_size_t size);
+
+/* initialize a buffer and copy the specified (null-term'd) string into it */
+DAV_DECLARE(void) dav_buffer_init(apr_pool_t *p, dav_buffer *pbuf,
+ const char *str);
+
+/* check that the buffer can accommodate <extra_needed> more bytes */
+DAV_DECLARE(void) dav_check_bufsize(apr_pool_t *p, dav_buffer *pbuf,
+ apr_size_t extra_needed);
+
+/* append a string to the end of the buffer, adjust length */
+DAV_DECLARE(void) dav_buffer_append(apr_pool_t *p, dav_buffer *pbuf,
+ const char *str);
+
+/* place a string on the end of the buffer, do NOT adjust length */
+DAV_DECLARE(void) dav_buffer_place(apr_pool_t *p, dav_buffer *pbuf,
+ const char *str);
+
+/* place some memory on the end of a buffer; do NOT adjust length */
+DAV_DECLARE(void) dav_buffer_place_mem(apr_pool_t *p, dav_buffer *pbuf,
+ const void *mem, apr_size_t amt,
+ apr_size_t pad);
+
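+/*
+** Example (minimal sketch): building up a string in a reusable buffer.
+** "p" is any pool with a suitable lifetime; the literal text is arbitrary.
+**
+**   dav_buffer buf = { 0 };
+**   dav_buffer_init(p, &buf, "<D:href>");
+**   dav_buffer_append(p, &buf, resource->uri);
+**   dav_buffer_append(p, &buf, "</D:href>");
+**
+** After the calls above, buf.buf holds the concatenated, null-terminated
+** string.
+*/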
+
+/* --------------------------------------------------------------------
+**
+** HANDY UTILITIES
+*/
+
+/* contains results from one of the getprop functions */
+typedef struct
+{
+ apr_text * propstats; /* <propstat> element text */
+ apr_text * xmlns; /* namespace decls for <response> elem */
+} dav_get_props_result;
+
+/* holds the contents of a <response> element */
+typedef struct dav_response
+{
+ const char *href; /* always */
+ const char *desc; /* optional description at <response> level */
+
+ /* use status if propresult.propstats is NULL. */
+ dav_get_props_result propresult;
+
+ int status;
+
+ struct dav_response *next;
+} dav_response;
+
+typedef struct
+{
+ request_rec *rnew; /* new subrequest */
+ dav_error err; /* potential error response */
+} dav_lookup_result;
+
+
+DAV_DECLARE(dav_lookup_result) dav_lookup_uri(const char *uri, request_rec *r,
+ int must_be_absolute);
+
+/* defines type of property info a provider is to return */
+typedef enum {
+ DAV_PROP_INSERT_NOTDEF, /* property is defined by this provider,
+ but nothing was inserted because the
+ (live) property is not defined for this
+ resource (it may be present as a dead
+ property). */
+ DAV_PROP_INSERT_NOTSUPP, /* property is recognized by this provider,
+ but it is not supported, and cannot be
+ treated as a dead property */
+ DAV_PROP_INSERT_NAME, /* a property name (empty elem) was
+ inserted into the text block */
+ DAV_PROP_INSERT_VALUE, /* a property name/value pair was inserted
+ into the text block */
+ DAV_PROP_INSERT_SUPPORTED /* a supported live property was added to
+ the text block as a
+ <DAV:supported-live-property> element */
+} dav_prop_insert;
+
+/* ### this stuff is private to dav/fs/repos.c; move it... */
+/* format a time string (buf must be at least DAV_TIMEBUF_SIZE chars) */
+#define DAV_STYLE_ISO8601 1
+#define DAV_STYLE_RFC822 2
+#define DAV_TIMEBUF_SIZE 30
+
+DAV_DECLARE(int) dav_get_depth(request_rec *r, int def_depth);
+
+DAV_DECLARE(int) dav_validate_root(const apr_xml_doc *doc,
+ const char *tagname);
+DAV_DECLARE(apr_xml_elem *) dav_find_child(const apr_xml_elem *elem,
+ const char *tagname);
+
+/* gather up all the CDATA into a single string */
+DAV_DECLARE(const char *) dav_xml_get_cdata(const apr_xml_elem *elem, apr_pool_t *pool,
+ int strip_white);
+
+/*
+** XML namespace handling
+**
+** This structure tracks namespace declarations (xmlns:prefix="URI").
+** It maintains a one-to-many relationship of URIs-to-prefixes. In other
+** words, one URI may be defined by many prefixes, but any specific
+** prefix will specify only one URI.
+**
+** Prefixes using the "g###" pattern can be generated automatically if
+** the caller does not have specific prefix requirements.
+*/
+typedef struct {
+ apr_pool_t *pool;
+ apr_hash_t *uri_prefix; /* map URIs to an available prefix */
+ apr_hash_t *prefix_uri; /* map all prefixes to their URIs */
+ int count; /* counter for "g###" prefixes */
+} dav_xmlns_info;
+
+/* create an empty dav_xmlns_info structure */
+DAV_DECLARE(dav_xmlns_info *) dav_xmlns_create(apr_pool_t *pool);
+
+/* add a specific prefix/URI pair. the prefix/uri should have a lifetime
+ at least that of xmlns->pool */
+DAV_DECLARE(void) dav_xmlns_add(dav_xmlns_info *xi,
+ const char *prefix, const char *uri);
+
+/* add a URI (if not present); any prefix is acceptable and is returned.
+ the uri should have a lifetime at least that of xmlns->pool */
+DAV_DECLARE(const char *) dav_xmlns_add_uri(dav_xmlns_info *xi,
+ const char *uri);
+
+/* return the URI for a specified prefix (or NULL if the prefix is unknown) */
+DAV_DECLARE(const char *) dav_xmlns_get_uri(dav_xmlns_info *xi,
+ const char *prefix);
+
+/* return an available prefix for a specified URI (or NULL if the URI
+ is unknown) */
+DAV_DECLARE(const char *) dav_xmlns_get_prefix(dav_xmlns_info *xi,
+ const char *uri);
+
+/* generate xmlns declarations (appending into the given text) */
+DAV_DECLARE(void) dav_xmlns_generate(dav_xmlns_info *xi,
+ apr_text_header *phdr);
+
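+/*
+** Example (minimal sketch): collecting namespace declarations during
+** output generation. The example.com URI is a placeholder value.
+**
+**   dav_xmlns_info *xi = dav_xmlns_create(pool);
+**   dav_xmlns_add(xi, "D", "DAV:");
+**   prefix = dav_xmlns_add_uri(xi, "http://example.com/props/");
+**   dav_xmlns_generate(xi, phdr);
+**
+** dav_xmlns_generate() then appends the xmlns declarations to phdr.
+*/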
+/* --------------------------------------------------------------------
+**
+** DAV PLUGINS
+*/
+
+/* ### docco ... */
+
+/*
+** dav_provider
+**
+** This structure wraps up all of the hooks that a mod_dav provider can
+** supply. The provider MUST supply <repos> and <propdb>. The rest are
+** optional and should contain NULL if that feature is not supplied.
+**
+** Note that a provider cannot pick and choose portions from various
+** underlying implementations (which was theoretically possible in
+** mod_dav 1.0). There are too many dependencies between a dav_resource
+** (defined by <repos>) and the other functionality.
+**
+** Live properties are not part of the dav_provider structure because they
+** are handled through the APR_HOOK interface (to allow for multiple liveprop
+** providers). The core always provides some properties, and then a given
+** provider will add more properties.
+**
+** Some providers may need to associate a context with the dav_provider
+** structure -- the ctx field is available for storing this context. Just
+** leave it NULL if it isn't required.
+*/
+typedef struct {
+ const dav_hooks_repository *repos;
+ const dav_hooks_propdb *propdb;
+ const dav_hooks_locks *locks;
+ const dav_hooks_vsn *vsn;
+ const dav_hooks_binding *binding;
+ const dav_hooks_search *search;
+
+ void *ctx;
+} dav_provider;
+
+/*
+** gather_propsets: gather all live property propset-URIs
+**
+** The hook implementor should push one or more URIs into the specified
+** array. These URIs are returned in the DAV: header to let clients know
+** what sets of live properties are supported by the installation. mod_dav
+** will place open/close angle brackets around each value (much like
+** a Coded-URL); quotes and brackets should not be in the value.
+**
+** Example: http://apache.org/dav/props/
+**
+** (of course, use your own domain to ensure a unique value)
+*/
+APR_DECLARE_EXTERNAL_HOOK(dav, DAV, void, gather_propsets,
+ (apr_array_header_t *uris))
+
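+/*
+** Example (minimal sketch): a gather_propsets implementation advertising a
+** single propset URI, and its registration. The URI and function name are
+** placeholders.
+**
+**   static void example_gather_propsets(apr_array_header_t *uris)
+**   {
+**       *(const char **)apr_array_push(uris) =
+**           "http://example.com/dav/props/";
+**   }
+**
+**   dav_hook_gather_propsets(example_gather_propsets, NULL, NULL,
+**                            APR_HOOK_MIDDLE);
+*/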
+/*
+** find_liveprop: find a live property, returning a non-zero, unique,
+** opaque identifier.
+**
+** If the hook implementor determines the specified URI/name refers to
+** one of its properties, then it should fill in HOOKS and return a
+** non-zero value. The returned value is the "property ID" and will
+** be passed to the various liveprop hook functions.
+**
+** Return 0 if the property is not defined by the hook implementor.
+*/
+APR_DECLARE_EXTERNAL_HOOK(dav, DAV, int, find_liveprop,
+ (const dav_resource *resource,
+ const char *ns_uri, const char *name,
+ const dav_hooks_liveprop **hooks))
+
+/*
+** insert_all_liveprops: insert all (known) live property names/values.
+**
+** The hook implementor should append XML text to PHDR, containing liveprop
+** names. If WHAT is DAV_PROP_INSERT_VALUE, then the property values should
+** also be inserted into the output XML stream.
+**
+** The liveprop provider should insert *all* known and *defined* live
+** properties on the specified resource. If a particular liveprop is
+** not defined for this resource, then it should not be inserted.
+*/
+APR_DECLARE_EXTERNAL_HOOK(dav, DAV, void, insert_all_liveprops,
+ (request_rec *r, const dav_resource *resource,
+ dav_prop_insert what, apr_text_header *phdr))
+
+DAV_DECLARE(const dav_hooks_locks *) dav_get_lock_hooks(request_rec *r);
+DAV_DECLARE(const dav_hooks_propdb *) dav_get_propdb_hooks(request_rec *r);
+DAV_DECLARE(const dav_hooks_vsn *) dav_get_vsn_hooks(request_rec *r);
+DAV_DECLARE(const dav_hooks_binding *) dav_get_binding_hooks(request_rec *r);
+DAV_DECLARE(const dav_hooks_search *) dav_get_search_hooks(request_rec *r);
+
+DAV_DECLARE(void) dav_register_provider(apr_pool_t *p, const char *name,
+ const dav_provider *hooks);
+DAV_DECLARE(const dav_provider *) dav_lookup_provider(const char *name);
+
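+/*
+** Example (minimal sketch): registering a provider. The hook structures
+** named here are hypothetical; only <repos> and <propdb> are required,
+** and the remaining hooks may be NULL.
+**
+**   static const dav_provider example_provider = {
+**       &example_hooks_repository,     repos (required)
+**       &example_hooks_propdb,         propdb (required)
+**       NULL, NULL, NULL, NULL,        locks, vsn, binding, search
+**       NULL                           ctx
+**   };
+**
+**   dav_register_provider(p, "example", &example_provider);
+*/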
+
+/* ### deprecated */
+#define DAV_GET_HOOKS_PROPDB(r) dav_get_propdb_hooks(r)
+#define DAV_GET_HOOKS_LOCKS(r) dav_get_lock_hooks(r)
+#define DAV_GET_HOOKS_VSN(r) dav_get_vsn_hooks(r)
+#define DAV_GET_HOOKS_BINDING(r) dav_get_binding_hooks(r)
+#define DAV_GET_HOOKS_SEARCH(r) dav_get_search_hooks(r)
+
+
+/* --------------------------------------------------------------------
+**
+** IF HEADER PROCESSING
+**
+** Here is the definition of the If: header from RFC 2518, S9.4:
+**
+** If = "If" ":" (1*No-tag-list | 1*Tagged-list)
+** No-tag-list = List
+** Tagged-list = Resource 1*List
+** Resource = Coded-URL
+** List = "(" 1*(["Not"](State-token | "[" entity-tag "]")) ")"
+** State-token = Coded-URL
+** Coded-URL = "<" absoluteURI ">" ; absoluteURI from RFC 2616
+**
+** List corresponds to dav_if_state_list. No-tag-list corresponds to
+** dav_if_header with uri==NULL. Tagged-list corresponds to a sequence of
+** dav_if_header structures with (duplicate) uri==Resource -- one
+** dav_if_header per state_list. A second Tagged-list will start a new
+** sequence of dav_if_header structures with the new URI.
+**
+** A summary of the semantics, mapped into our structures:
+** - Chained dav_if_headers: OR
+** - Chained dav_if_state_lists: AND
+** - NULL uri matches all resources
+*/
+
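+/*
+** Example (minimal sketch): a Tagged-list If header and how it maps onto
+** the structures that follow. The lock token and entity tag values are
+** placeholders.
+**
+**   If: <http://example.com/a> (<opaquelocktoken:1a2b3c> ["etag-a"])
+**
+** becomes one dav_if_header with uri == "http://example.com/a" and a
+** two-entry state chain: a dav_if_opaquelock entry holding the lock token
+** and a dav_if_etag entry holding "etag-a". Both conditions must hold,
+** since chained state lists are ANDed.
+*/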
+typedef enum
+{
+ dav_if_etag,
+ dav_if_opaquelock
+} dav_if_state_type;
+
+typedef struct dav_if_state_list
+{
+ dav_if_state_type type;
+
+ int condition;
+#define DAV_IF_COND_NORMAL 0
+#define DAV_IF_COND_NOT 1 /* "Not" was applied */
+
+ const char *etag;
+ dav_locktoken *locktoken;
+
+ struct dav_if_state_list *next;
+} dav_if_state_list;
+
+typedef struct dav_if_header
+{
+ const char *uri;
+ apr_size_t uri_len;
+ struct dav_if_state_list *state;
+ struct dav_if_header *next;
+
+ int dummy_header; /* used internally by the lock/etag validation */
+} dav_if_header;
+
+typedef struct dav_locktoken_list
+{
+ dav_locktoken *locktoken;
+ struct dav_locktoken_list *next;
+} dav_locktoken_list;
+
+DAV_DECLARE(dav_error *) dav_get_locktoken_list(request_rec *r,
+ dav_locktoken_list **ltl);
+
+
+/* --------------------------------------------------------------------
+**
+** LIVE PROPERTY HANDLING
+*/
+
+/* opaque type for PROPPATCH rollback information */
+typedef struct dav_liveprop_rollback dav_liveprop_rollback;
+
+struct dav_hooks_liveprop
+{
+ /*
+ ** Insert property information into a text block. The property to
+ ** insert is identified by the propid value. The information to insert
+ ** is identified by the "what" argument, as follows:
+ ** DAV_PROP_INSERT_NAME
+ ** property name, as an empty XML element
+ ** DAV_PROP_INSERT_VALUE
+ ** property name/value, as an XML element
+ ** DAV_PROP_INSERT_SUPPORTED
+ ** if the property is defined on the resource, then
+ ** a DAV:supported-live-property element, as defined
+ ** by the DeltaV extensions to RFC2518.
+ **
+** Providers should return DAV_PROP_INSERT_NOTDEF if the property is
+** known but not defined for this resource, so that it is handled as a
+** dead property. If a provider recognizes, but does not support, a
+ ** property, and does not want it handled as a dead property, it should
+ ** return DAV_PROP_INSERT_NOTSUPP.
+ **
+ ** Returns one of DAV_PROP_INSERT_* based on what happened.
+ **
+ ** ### we may need more context... ie. the lock database
+ */
+ dav_prop_insert (*insert_prop)(const dav_resource *resource,
+ int propid, dav_prop_insert what,
+ apr_text_header *phdr);
+
+ /*
+ ** Determine whether a given property is writable.
+ **
+ ** ### we may want a different semantic. i.e. maybe it should be
+ ** ### "can we write <value> into this property?"
+ **
+ ** Returns 1 if the live property can be written, 0 if read-only.
+ */
+ int (*is_writable)(const dav_resource *resource, int propid);
+
+ /*
+ ** This member defines the set of namespace URIs that the provider
+ ** uses for its properties. When insert_all is called, it will be
+ ** passed a list of integers that map from indices into this list
+ ** to namespace IDs for output generation.
+ **
+ ** The last entry in this list should be a NULL value (sentinel).
+ */
+ const char * const * namespace_uris;
+
+ /*
+ ** ### this is not the final design. we want an open-ended way for
+ ** ### liveprop providers to attach *new* properties. To this end,
+ ** ### we'll have a "give me a list of the props you define", a way
+ ** ### to check for a prop's existence, a way to validate a set/remove
+ ** ### of a prop, and a way to execute/commit/rollback that change.
+ */
+
+ /*
+ ** Validate that the live property can be assigned a value, and that
+ ** the provided value is valid.
+ **
+ ** elem will point to the XML element that names the property. For
+ ** example:
+ ** <lp1:executable>T</lp1:executable>
+ **
+ ** The provider can access the cdata fields and the child elements
+ ** to extract the relevant pieces.
+ **
+ ** operation is one of DAV_PROP_OP_SET or _DELETE.
+ **
+ ** The provider may return a value in *context which will be passed
+ ** to each of the exec/commit/rollback functions. For example, this
+ ** may contain an internal value which has been processed from the
+ ** input element.
+ **
+ ** The provider must set defer_to_dead to true (non-zero) or false.
+ ** If true, then the set/remove is deferred to the dead property
+ ** database. Note: it will be set to zero on entry.
+ */
+ dav_error * (*patch_validate)(const dav_resource *resource,
+ const apr_xml_elem *elem,
+ int operation,
+ void **context,
+ int *defer_to_dead);
+
+ /* ### doc... */
+ dav_error * (*patch_exec)(const dav_resource *resource,
+ const apr_xml_elem *elem,
+ int operation,
+ void *context,
+ dav_liveprop_rollback **rollback_ctx);
+
+ /* ### doc... */
+ void (*patch_commit)(const dav_resource *resource,
+ int operation,
+ void *context,
+ dav_liveprop_rollback *rollback_ctx);
+
+ /* ### doc... */
+ dav_error * (*patch_rollback)(const dav_resource *resource,
+ int operation,
+ void *context,
+ dav_liveprop_rollback *rollback_ctx);
+
+ /*
+ ** If a provider needs a context to associate with this hooks structure,
+ ** then this field may be used. In most cases, it will just be NULL.
+ */
+ void *ctx;
+};
+
+/*
+** dav_liveprop_spec: specify a live property
+**
+** This structure is used as a standard way to determine if a particular
+** property is a live property. Its use is not part of the mandated liveprop
+** interface, but can be used by liveprop providers in conjunction with the
+** utility routines below.
+**
+** spec->name == NULL is the defined end-sentinel for a list of specs.
+*/
+typedef struct {
+ int ns; /* provider-local namespace index */
+ const char *name; /* name of the property */
+
+ int propid; /* provider-local property ID */
+
+ int is_writable; /* is the property writable? */
+
+} dav_liveprop_spec;
+
+/*
+** dav_liveprop_group: specify a group of liveprops
+**
+** This structure specifies a group of live properties, their namespaces,
+** and how to handle them.
+*/
+typedef struct {
+ const dav_liveprop_spec *specs;
+ const char * const *namespace_uris;
+ const dav_hooks_liveprop *hooks;
+
+} dav_liveprop_group;
+
+/* ### docco */
+DAV_DECLARE(int) dav_do_find_liveprop(const char *ns_uri, const char *name,
+ const dav_liveprop_group *group,
+ const dav_hooks_liveprop **hooks);
+
+/* ### docco */
+DAV_DECLARE(int) dav_get_liveprop_info(int propid,
+ const dav_liveprop_group *group,
+ const dav_liveprop_spec **info);
+
+/* ### docco */
+DAV_DECLARE(void) dav_register_liveprop_group(apr_pool_t *pool,
+ const dav_liveprop_group *group);
+
+/* ### docco */
+DAV_DECLARE(int) dav_get_liveprop_ns_index(const char *uri);
+
+/* ### docco */
+DAV_DECLARE(int) dav_get_liveprop_ns_count(void);
+
+/* ### docco */
+DAV_DECLARE(void) dav_add_all_liveprop_xmlns(apr_pool_t *p,
+ apr_text_header *phdr);
+
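+/*
+** Example (minimal sketch): defining and registering a small liveprop
+** group. All names, the URI, and the hooks structure are placeholders.
+**
+**   static const char * const example_namespace_uris[] = {
+**       "http://example.com/dav/props/",
+**       NULL
+**   };
+**
+**   static const dav_liveprop_spec example_props[] = {
+**       { 0, "example-prop", 1, 0 },   ns index, name, propid, writable
+**       { 0 }                          name == NULL ends the list
+**   };
+**
+**   static const dav_liveprop_group example_group = {
+**       example_props, example_namespace_uris, &example_liveprop_hooks
+**   };
+**
+**   dav_register_liveprop_group(pool, &example_group);
+*/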
+/*
+** The following three functions are part of mod_dav's internal handling
+** for the core WebDAV properties. They are not part of mod_dav's API.
+*/
+DAV_DECLARE_NONSTD(int) dav_core_find_liveprop(
+ const dav_resource *resource,
+ const char *ns_uri,
+ const char *name,
+ const dav_hooks_liveprop **hooks);
+DAV_DECLARE_NONSTD(void) dav_core_insert_all_liveprops(
+ request_rec *r,
+ const dav_resource *resource,
+ dav_prop_insert what,
+ apr_text_header *phdr);
+DAV_DECLARE_NONSTD(void) dav_core_register_uris(apr_pool_t *p);
+
+
+/*
+** Standard WebDAV Property Identifiers
+**
+** A live property provider does not need to use these; they are simply
+** provided for convenience.
+**
+** Property identifiers need to be unique within a given provider, but not
+** *across* providers (note: this uniqueness constraint was different in
+** older versions of mod_dav).
+**
+** The identifiers start at 20000 to make it easier for providers to avoid
+** conflicts with the standard properties. The properties are arranged
+** alphabetically, and may be reordered from time to time (as properties
+** are introduced).
+**
+** NOTE: there is no problem with reordering (e.g. binary compat) since the
+** identifiers are only used within a given provider, which would pick up
+** the entire set of changes upon a recompile.
+*/
+enum {
+ DAV_PROPID_BEGIN = 20000,
+
+ /* Standard WebDAV properties (RFC 2518) */
+ DAV_PROPID_creationdate,
+ DAV_PROPID_displayname,
+ DAV_PROPID_getcontentlanguage,
+ DAV_PROPID_getcontentlength,
+ DAV_PROPID_getcontenttype,
+ DAV_PROPID_getetag,
+ DAV_PROPID_getlastmodified,
+ DAV_PROPID_lockdiscovery,
+ DAV_PROPID_resourcetype,
+ DAV_PROPID_source,
+ DAV_PROPID_supportedlock,
+
+ /* DeltaV properties (from the I-D (#14)) */
+ DAV_PROPID_activity_checkout_set,
+ DAV_PROPID_activity_set,
+ DAV_PROPID_activity_version_set,
+ DAV_PROPID_auto_merge_set,
+ DAV_PROPID_auto_version,
+ DAV_PROPID_baseline_collection,
+ DAV_PROPID_baseline_controlled_collection,
+ DAV_PROPID_baseline_controlled_collection_set,
+ DAV_PROPID_checked_in,
+ DAV_PROPID_checked_out,
+ DAV_PROPID_checkin_fork,
+ DAV_PROPID_checkout_fork,
+ DAV_PROPID_checkout_set,
+ DAV_PROPID_comment,
+ DAV_PROPID_creator_displayname,
+ DAV_PROPID_current_activity_set,
+ DAV_PROPID_current_workspace_set,
+ DAV_PROPID_default_variant,
+ DAV_PROPID_eclipsed_set,
+ DAV_PROPID_label_name_set,
+ DAV_PROPID_merge_set,
+ DAV_PROPID_precursor_set,
+ DAV_PROPID_predecessor_set,
+ DAV_PROPID_root_version,
+ DAV_PROPID_subactivity_set,
+ DAV_PROPID_subbaseline_set,
+ DAV_PROPID_successor_set,
+ DAV_PROPID_supported_method_set,
+ DAV_PROPID_supported_live_property_set,
+ DAV_PROPID_supported_report_set,
+ DAV_PROPID_unreserved,
+ DAV_PROPID_variant_set,
+ DAV_PROPID_version_controlled_binding_set,
+ DAV_PROPID_version_controlled_configuration,
+ DAV_PROPID_version_history,
+ DAV_PROPID_version_name,
+ DAV_PROPID_workspace,
+ DAV_PROPID_workspace_checkout_set,
+
+ DAV_PROPID_END
+};
+
+/*
+** Property Identifier Registration
+**
+** At the moment, mod_dav requires live property providers to ensure that
+** each property returned has a unique value. For now, this is done through
+** central registration (there are no known providers other than the default,
+** so this remains manageable).
+**
+** WARNING: the TEST ranges should never be "shipped".
+*/
+#define DAV_PROPID_CORE 10000 /* ..10099. defined by mod_dav */
+#define DAV_PROPID_FS 10100 /* ..10299.
+ mod_dav filesystem provider. */
+#define DAV_PROPID_TEST1 10300 /* ..10399 */
+#define DAV_PROPID_TEST2 10400 /* ..10499 */
+#define DAV_PROPID_TEST3 10500 /* ..10599 */
+/* Next: 10600 */
+
+
+/* --------------------------------------------------------------------
+**
+** DATABASE FUNCTIONS
+*/
+
+typedef struct dav_db dav_db;
+typedef struct dav_namespace_map dav_namespace_map;
+typedef struct dav_deadprop_rollback dav_deadprop_rollback;
+
+typedef struct {
+ const char *ns; /* "" signals "no namespace" */
+ const char *name;
+} dav_prop_name;
+
+/* hook functions to enable pluggable databases */
+struct dav_hooks_propdb
+{
+ dav_error * (*open)(apr_pool_t *p, const dav_resource *resource, int ro,
+ dav_db **pdb);
+ void (*close)(dav_db *db);
+
+ /*
+ ** In bulk, define any namespaces that the values and their name
+ ** elements may need.
+ **
+ ** Note: sometimes mod_dav will defer calling this until output_value
+ ** returns found==1. If the output process needs the dav_xmlns_info
+ ** filled for its work, then it will need to fill it on demand rather
+ ** than depending upon this hook to fill in the structure.
+ **
+ ** Note: this will *always* be called during an output sequence. Thus,
+ ** the provider may rely solely on using this to fill the xmlns info.
+ */
+ dav_error * (*define_namespaces)(dav_db *db, dav_xmlns_info *xi);
+
+ /*
+ ** Output the value from the database (i.e. add an element name and
+ ** the value into *phdr). Set *found based on whether the name/value
+ ** was found in the propdb.
+ **
+ ** Note: it is NOT an error for the key/value pair to not exist.
+ **
+ ** The dav_xmlns_info passed to define_namespaces() is also passed to
+ ** each output_value() call so that namespaces can be added on-demand.
+ ** It can also be used to look up prefixes or URIs during the output
+ ** process.
+ */
+ dav_error * (*output_value)(dav_db *db, const dav_prop_name *name,
+ dav_xmlns_info *xi,
+ apr_text_header *phdr, int *found);
+
+ /*
+ ** Build a mapping from "global" namespaces (stored in apr_xml_*)
+ ** into provider-local namespace identifiers.
+ **
+ ** This mapping should be done once per set of namespaces, and the
+ ** resulting mapping should be passed into the store() hook function.
+ **
+ ** Note: usually, there is just a single document/namespaces for all
+ ** elements passed. However, the generality of creating multiple
+ ** mappings and passing them to store() is provided here.
+ **
+ ** Note: this is only in preparation for a series of store() calls.
+ ** As a result, the propdb must be open for read/write access when
+ ** this function is called.
+ */
+ dav_error * (*map_namespaces)(dav_db *db,
+ const apr_array_header_t *namespaces,
+ dav_namespace_map **mapping);
+
+ /*
+ ** Store a property value for a given name. The value->combined field
+ ** MUST be set for this call.
+ **
+ ** ### WARNING: current providers will quote the text within ELEM.
+ ** ### this implies you can call this function only once with a given
+ ** ### element structure (a second time will quote it again).
+ */
+ dav_error * (*store)(dav_db *db, const dav_prop_name *name,
+ const apr_xml_elem *elem,
+ dav_namespace_map *mapping);
+
+ /* remove a given property */
+ dav_error * (*remove)(dav_db *db, const dav_prop_name *name);
+
+ /* returns 1 if the record specified by "key" exists; 0 otherwise */
+ int (*exists)(dav_db *db, const dav_prop_name *name);
+
+ /*
+ ** Iterate over the property names in the database.
+ **
+ ** iter->name.ns == iter->name.name == NULL when there are no more names.
+ **
+ ** Note: only one iteration may occur over the propdb at a time.
+ */
+ dav_error * (*first_name)(dav_db *db, dav_prop_name *pname);
+ dav_error * (*next_name)(dav_db *db, dav_prop_name *pname);
+
+ /*
+ ** Rollback support: get rollback context, and apply it.
+ **
+ ** struct dav_deadprop_rollback is a provider-private structure; it
+ ** should remember the name, and the name's old value (or the fact that
+ ** the value was not present, and should be deleted if a rollback occurs).
+ */
+ dav_error * (*get_rollback)(dav_db *db, const dav_prop_name *name,
+ dav_deadprop_rollback **prollback);
+ dav_error * (*apply_rollback)(dav_db *db,
+ dav_deadprop_rollback *rollback);
+
+ /*
+ ** If a provider needs a context to associate with this hooks structure,
+ ** then this field may be used. In most cases, it will just be NULL.
+ */
+ void *ctx;
+};
+
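+/*
+** Example (minimal sketch): iterating over all property names through the
+** hooks above. "db" is an already-open propdb and "hooks" points to its
+** provider's dav_hooks_propdb structure.
+**
+**   dav_prop_name name;
+**   dav_error *err = (*hooks->first_name)(db, &name);
+**   while (err == NULL && name.name != NULL) {
+**       ... examine name.ns and name.name ...
+**       err = (*hooks->next_name)(db, &name);
+**   }
+*/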
+
+/* --------------------------------------------------------------------
+**
+** LOCK FUNCTIONS
+*/
+
+/* Used to represent a Timeout header of "Infinity" */
+#define DAV_TIMEOUT_INFINITE 0
+
+DAV_DECLARE(time_t) dav_get_timeout(request_rec *r);
+
+/*
+** Opaque, provider-specific information for a lock database.
+*/
+typedef struct dav_lockdb_private dav_lockdb_private;
+
+/*
+** Opaque, provider-specific information for a lock record.
+*/
+typedef struct dav_lock_private dav_lock_private;
+
+/*
+** Lock database type. Lock providers are urged to implement a "lazy" open, so
+** doing an "open" is cheap until something is actually needed from the DB.
+*/
+typedef struct
+{
+ const dav_hooks_locks *hooks; /* the hooks used for this lockdb */
+ int ro; /* was it opened readonly? */
+
+ dav_lockdb_private *info;
+
+} dav_lockdb;
+
+typedef enum {
+ DAV_LOCKSCOPE_UNKNOWN,
+ DAV_LOCKSCOPE_EXCLUSIVE,
+ DAV_LOCKSCOPE_SHARED
+} dav_lock_scope;
+
+typedef enum {
+ DAV_LOCKTYPE_UNKNOWN,
+ DAV_LOCKTYPE_WRITE
+} dav_lock_type;
+
+typedef enum {
+ DAV_LOCKREC_DIRECT, /* lock asserted on this resource */
+ DAV_LOCKREC_INDIRECT, /* lock inherited from a parent */
+ DAV_LOCKREC_INDIRECT_PARTIAL /* most info is not filled in */
+} dav_lock_rectype;
+
+/*
+** dav_lock: hold information about a lock on a resource.
+**
+** This structure is used for both direct and indirect locks. A direct lock
+** is a lock applied to a specific resource by the client. An indirect lock
+** is one that is inherited from a parent resource by virtue of a non-zero
+** Depth: header when the lock was applied.
+**
+** mod_dav records both types of locks in the lock database, managing their
+** addition/removal as resources are moved about the namespace.
+**
+** Note that the lockdb is free to marshal this structure in any form that
+** it likes.
+**
+** For a "partial" lock, the <rectype> and <locktoken> fields must be filled
+** in. All other (user) fields should be zeroed. The lock provider will
+** usually fill in the <info> field, and the <next> field may be used to
+** construct a list of partial locks.
+**
+** The lock provider MUST use the info field to store a value such that a
+** dav_lock structure can locate itself in the underlying lock database.
+** This requirement is needed for refreshing: when an indirect dav_lock is
+** refreshed, its reference to the direct lock does not specify the direct's
+** resource, so the only way to locate the (refreshed, direct) lock in the
+** database is to use the info field.
+**
+** Note that <is_locknull> only refers to the resource where this lock was
+** found.
+** ### hrm. that says the abstraction is wrong. is_locknull may disappear.
+*/
+typedef struct dav_lock
+{
+ dav_lock_rectype rectype; /* type of lock record */
+ int is_locknull; /* lock establishes a locknull resource */
+
+ /* ### put the resource in here? */
+
+ dav_lock_scope scope; /* scope of the lock */
+ dav_lock_type type; /* type of lock */
+ int depth; /* depth of the lock */
+ time_t timeout; /* when the lock will timeout */
+
+ const dav_locktoken *locktoken; /* the token that was issued */
+
+ const char *owner; /* (XML) owner of the lock */
+ const char *auth_user; /* auth'd username owning lock */
+
+ dav_lock_private *info; /* private to the lockdb */
+
+ struct dav_lock *next; /* for managing a list of locks */
+} dav_lock;
+
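+/*
+** EXAMPLE (illustrative sketch only): how a lock provider might build a
+** "partial" lock record as described above -- only the rectype and
+** locktoken fields are filled in, everything else stays zeroed. Here "p"
+** (an apr_pool_t *), "token" (a locktoken already unmarshalled by the
+** provider) and "partial_list" (the list being accumulated) are assumed
+** names, not part of this interface.
+**
+**    dav_lock *lock = apr_pcalloc(p, sizeof(*lock));
+**
+**    lock->rectype = DAV_LOCKREC_INDIRECT_PARTIAL;
+**    lock->locktoken = token;
+**    lock->next = partial_list;
+**    partial_list = lock;
+*/
+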
+/* Property-related public lock functions */
+DAV_DECLARE(const char *)dav_lock_get_activelock(request_rec *r,
+ dav_lock *locks,
+ dav_buffer *pbuf);
+
+/* LockDB-related public lock functions */
+DAV_DECLARE(dav_error *) dav_lock_parse_lockinfo(request_rec *r,
+ const dav_resource *resource,
+ dav_lockdb *lockdb,
+ const apr_xml_doc *doc,
+ dav_lock **lock_request);
+DAV_DECLARE(int) dav_unlock(request_rec *r,
+ const dav_resource *resource,
+ const dav_locktoken *locktoken);
+DAV_DECLARE(dav_error *) dav_add_lock(request_rec *r,
+ const dav_resource *resource,
+ dav_lockdb *lockdb, dav_lock *request,
+ dav_response **response);
+DAV_DECLARE(dav_error *) dav_notify_created(request_rec *r,
+ dav_lockdb *lockdb,
+ const dav_resource *resource,
+ int resource_state,
+ int depth);
+
+DAV_DECLARE(dav_error *) dav_lock_query(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ dav_lock **locks);
+
+DAV_DECLARE(dav_error *) dav_validate_request(request_rec *r,
+ dav_resource *resource,
+ int depth,
+ dav_locktoken *locktoken,
+ dav_response **response,
+ int flags,
+ dav_lockdb *lockdb);
+/*
+** flags:
+** 0x0F -- reserved for <dav_lock_scope> values
+**
+** other flags, detailed below
+*/
+#define DAV_VALIDATE_RESOURCE 0x0010 /* validate just the resource */
+#define DAV_VALIDATE_PARENT 0x0020 /* validate resource AND its parent */
+#define DAV_VALIDATE_ADD_LD 0x0040 /* add DAV:lockdiscovery into
+ the 424 DAV:response */
+#define DAV_VALIDATE_USE_424 0x0080 /* return 424 status, not 207 */
+#define DAV_VALIDATE_IS_PARENT 0x0100 /* for internal use */
+
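+/*
+** EXAMPLE (illustrative sketch only): a method handler might validate a
+** resource and its parent before a destructive operation, asking for a
+** 424 multistatus on failure. "resource" and "multi_response" are local
+** variables of this hypothetical caller; the trailing NULL means the
+** caller has not opened a lock database itself.
+**
+**    dav_error *err;
+**    dav_response *multi_response;
+**
+**    err = dav_validate_request(r, resource, DAV_INFINITY, NULL,
+**                               &multi_response,
+**                               DAV_VALIDATE_PARENT
+**                               | DAV_VALIDATE_USE_424, NULL);
+**    if (err != NULL) {
+**        ... deliver the error and/or the multistatus responses ...
+**    }
+*/
+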
+/* Lock-null related public lock functions */
+DAV_DECLARE(int) dav_get_resource_state(request_rec *r,
+ const dav_resource *resource);
+
+/* Lock provider hooks. Locking is optional, so there may be no
+ * lock provider for a given repository.
+ */
+struct dav_hooks_locks
+{
+ /* Return the supportedlock property for a resource */
+ const char * (*get_supportedlock)(
+ const dav_resource *resource
+ );
+
+ /* Parse a lock token URI, returning a lock token object allocated
+ * in the given pool.
+ */
+ dav_error * (*parse_locktoken)(
+ apr_pool_t *p,
+ const char *char_token,
+ dav_locktoken **locktoken_p
+ );
+
+ /* Format a lock token object into a URI string, allocated in
+ * the given pool.
+ *
+ * Always returns non-NULL.
+ */
+ const char * (*format_locktoken)(
+ apr_pool_t *p,
+ const dav_locktoken *locktoken
+ );
+
+ /* Compare two lock tokens.
+ *
+ * Result < 0 => lt1 < lt2
+ * Result == 0 => lt1 == lt2
+ * Result > 0 => lt1 > lt2
+ */
+ int (*compare_locktoken)(
+ const dav_locktoken *lt1,
+ const dav_locktoken *lt2
+ );
+
+ /* Open the provider's lock database.
+ *
+ * The provider may or may not use a "real" database for locks
+ * (a lock could be an attribute on a resource, for example).
+ *
+ * The provider may choose to use the value of the DAVLockDB directive
+ * (as returned by dav_get_lockdb_path()) to decide where to place
+ * any storage it may need.
+ *
+ * The request storage pool should be associated with the lockdb,
+ * so it can be used in subsequent operations.
+ *
+ * If ro != 0, only readonly operations will be performed.
+ * If force == 0, the open can be "lazy", since subsequent locking
+ * operations might never occur.
+ * If force != 0, locking operations will definitely occur.
+ */
+ dav_error * (*open_lockdb)(
+ request_rec *r,
+ int ro,
+ int force,
+ dav_lockdb **lockdb
+ );
+
+ /* Indicates completion of locking operations */
+ void (*close_lockdb)(
+ dav_lockdb *lockdb
+ );
+
+ /* Take a resource out of the lock-null state. */
+ dav_error * (*remove_locknull_state)(
+ dav_lockdb *lockdb,
+ const dav_resource *resource
+ );
+
+ /*
+ ** Create a (direct) lock structure for the given resource. A locktoken
+ ** will be created.
+ **
+ ** The lock provider may store private information into lock->info.
+ */
+ dav_error * (*create_lock)(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ dav_lock **lock);
+
+ /*
+ ** Get the locks associated with the specified resource.
+ **
+ ** If calltype is DAV_GETLOCKS_RESOLVED, then any indirect locks are
+ ** resolved to their actual, direct lock (i.e. the reference is followed
+ ** to the original lock).
+ **
+ ** The locks, if any, are returned as a linked list in no particular
+ ** order. If no locks are present, then *locks will be NULL.
+ */
+ dav_error * (*get_locks)(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ int calltype,
+ dav_lock **locks);
+
+#define DAV_GETLOCKS_RESOLVED 0 /* resolve indirects to directs */
+#define DAV_GETLOCKS_PARTIAL 1 /* leave indirects partially filled */
+#define DAV_GETLOCKS_COMPLETE 2 /* fill out indirect locks */
+
+ /*
+ ** Find a particular lock on a resource (specified by its locktoken).
+ **
+ ** *lock will be set to NULL if the lock is not found.
+ **
+ ** Note that the provider can optimize the unmarshalling -- only one
+ ** lock (or none) must be constructed and returned.
+ **
+ ** If partial_ok is true (non-zero), then an indirect lock can be
+ ** partially filled in. Otherwise, another lookup is done and the
+ ** lock structure will be filled out as a DAV_LOCKREC_INDIRECT.
+ */
+ dav_error * (*find_lock)(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ const dav_locktoken *locktoken,
+ int partial_ok,
+ dav_lock **lock);
+
+ /*
+ ** Quick test to see if the resource has *any* locks on it.
+ **
+ ** This is typically used to determine if a non-existent resource
+ ** has a lock and is (therefore) a locknull resource.
+ **
+ ** WARNING: this function may return TRUE even when timed-out locks
+ ** exist (i.e. it may not perform timeout checks).
+ */
+ dav_error * (*has_locks)(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ int *locks_present);
+
+ /*
+ ** Append the specified lock(s) to the set of locks on this resource.
+ **
+ ** If "make_indirect" is true (non-zero), then the specified lock(s)
+ ** should be converted to an indirect lock (if it is a direct lock)
+ ** before appending. Note that the conversion to an indirect lock does
+ ** not alter the passed-in lock -- the change is internal to the
+ ** append_locks function.
+ **
+ ** Multiple locks are specified using the lock->next links.
+ */
+ dav_error * (*append_locks)(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ int make_indirect,
+ const dav_lock *lock);
+
+ /*
+ ** Remove any lock that has the specified locktoken.
+ **
+ ** If locktoken == NULL, then ALL locks are removed.
+ */
+ dav_error * (*remove_lock)(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ const dav_locktoken *locktoken);
+
+ /*
+ ** Refresh all locks found on the specified resource which have a
+ ** locktoken in the provided list.
+ **
+ ** If the lock is indirect, then the direct lock is referenced and
+ ** refreshed.
+ **
+ ** Each lock that is updated is returned in the <locks> argument.
+ ** Note that the locks will be fully resolved.
+ */
+ dav_error * (*refresh_locks)(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ const dav_locktoken_list *ltl,
+ time_t new_time,
+ dav_lock **locks);
+
+ /*
+ ** Look up the resource associated with a particular locktoken.
+ **
+ ** The search begins at the specified <start_resource> and the lock
+ ** specified by <locktoken>.
+ **
+ ** If the resource/token specifies an indirect lock, then the direct
+ ** lock will be looked up, and THAT resource will be returned. In other
+ ** words, this function always returns the resource where a particular
+ ** lock (token) was asserted.
+ **
+ ** NOTE: this function pointer is allowed to be NULL, indicating that
+ ** the provider does not support this type of functionality. The
+ ** caller should then traverse up the repository hierarchy looking
+ ** for the resource defining a lock with this locktoken.
+ */
+ dav_error * (*lookup_resource)(dav_lockdb *lockdb,
+ const dav_locktoken *locktoken,
+ const dav_resource *start_resource,
+ const dav_resource **resource);
+
+ /*
+ ** If a provider needs a context to associate with this hooks structure,
+ ** then this field may be used. In most cases, it will just be NULL.
+ */
+ void *ctx;
+};
+
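+/*
+** EXAMPLE (illustrative sketch only): testing whether a resource carries
+** any locks via the hooks above. "hooks" stands for the repository's
+** dav_hooks_locks structure, "resource" for the resource of interest and
+** "r" for the current request (all obtained elsewhere). The lockdb is
+** opened read-only (ro == 1) and lazily (force == 0), and closed again
+** once the answer is known; error handling is abbreviated.
+**
+**    dav_lockdb *lockdb;
+**    int locks_present = 0;
+**
+**    if ((*hooks->open_lockdb)(r, 1, 0, &lockdb) == NULL) {
+**        (void) (*hooks->has_locks)(lockdb, resource, &locks_present);
+**        (*hooks->close_lockdb)(lockdb);
+**    }
+*/
+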
+/* what types of resources can be discovered by dav_get_resource_state() */
+#define DAV_RESOURCE_LOCK_NULL 10 /* resource lock-null */
+#define DAV_RESOURCE_NULL 11 /* resource null */
+#define DAV_RESOURCE_EXISTS 12 /* resource exists */
+#define DAV_RESOURCE_ERROR 13 /* an error occurred */
+
+
+/* --------------------------------------------------------------------
+**
+** PROPERTY HANDLING
+*/
+
+typedef struct dav_propdb dav_propdb;
+
+
+DAV_DECLARE(dav_error *) dav_open_propdb(
+ request_rec *r,
+ dav_lockdb *lockdb,
+ const dav_resource *resource,
+ int ro,
+ apr_array_header_t *ns_xlate,
+ dav_propdb **propdb);
+
+DAV_DECLARE(void) dav_close_propdb(dav_propdb *db);
+
+DAV_DECLARE(dav_get_props_result) dav_get_props(
+ dav_propdb *db,
+ apr_xml_doc *doc);
+
+DAV_DECLARE(dav_get_props_result) dav_get_allprops(
+ dav_propdb *db,
+ dav_prop_insert what);
+
+DAV_DECLARE(void) dav_get_liveprop_supported(
+ dav_propdb *propdb,
+ const char *ns_uri,
+ const char *propname,
+ apr_text_header *body);
+
+/*
+** 3-phase property modification.
+**
+** 1) validate props. readable? unlocked? ACLs allow access?
+** 2) execute operation (set/delete)
+** 3) commit or rollback
+**
+** ### eventually, auth must be available. a ref to the request_rec (which
+** ### contains the auth info) should be in the shared context struct.
+**
+** Each function may alter the error values and information contained within
+** the context record. This should be done as an "increasing" level of
+** error, rather than overwriting any previous error.
+**
+** Note that commit() cannot generate errors. It should simply free the
+** rollback information.
+**
+** rollback() may generate additional errors because the rollback operation
+** can sometimes fail(!).
+**
+** The caller should allocate an array of these, one per operation. It should
+** be zero-initialized, then the db, operation, and prop fields should be
+** filled in before calling dav_prop_validate. Note that the set/delete
+** operations are order-dependent. For a given (logical) context, the same
+** pointer must be passed to each phase.
+**
+** error_type is an internal value, but will have the same numeric value
+** for each possible "desc" value. This allows the caller to group the
+** descriptions via the error_type variable, rather than through string
+** comparisons. Note that "status" does not provide enough granularity to
+** differentiate/group the "desc" values.
+**
+** Note that the propdb will maintain some (global) context across all
+** of the property change contexts. This implies that you can have only
+** one open transaction per propdb.
+*/
+typedef struct dav_prop_ctx
+{
+ dav_propdb *propdb;
+
+ int operation;
+#define DAV_PROP_OP_SET 1 /* set a property value */
+#define DAV_PROP_OP_DELETE 2 /* delete a prop value */
+/* ### add a GET? */
+
+ apr_xml_elem *prop; /* property to affect */
+
+ dav_error *err; /* error (if any) */
+
+ /* private items to the propdb */
+ int is_liveprop;
+ void *liveprop_ctx;
+ struct dav_rollback_item *rollback; /* optional rollback info */
+
+ /* private to mod_dav.c */
+ request_rec *r;
+
+} dav_prop_ctx;
+
+DAV_DECLARE_NONSTD(void) dav_prop_validate(dav_prop_ctx *ctx);
+DAV_DECLARE_NONSTD(void) dav_prop_exec(dav_prop_ctx *ctx);
+DAV_DECLARE_NONSTD(void) dav_prop_commit(dav_prop_ctx *ctx);
+DAV_DECLARE_NONSTD(void) dav_prop_rollback(dav_prop_ctx *ctx);
+
+#define DAV_PROP_CTX_HAS_ERR(dpc) ((dpc).err && (dpc).err->status >= 300)
+
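+/*
+** EXAMPLE (a simplified, illustrative sketch of one way to drive the three
+** phases; actual PROPPATCH handling may differ in detail): "ctx" is assumed
+** to be a zero-initialized array of "nctx" dav_prop_ctx entries whose
+** propdb, operation and prop fields have already been filled in.
+**
+**    int i, failed = 0;
+**
+**    for (i = 0; i < nctx; ++i)
+**        dav_prop_validate(&ctx[i]);
+**    for (i = 0; i < nctx; ++i)
+**        if (DAV_PROP_CTX_HAS_ERR(ctx[i]))
+**            failed = 1;
+**
+**    if (!failed) {
+**        for (i = 0; i < nctx; ++i)
+**            dav_prop_exec(&ctx[i]);
+**        for (i = 0; i < nctx; ++i)
+**            if (DAV_PROP_CTX_HAS_ERR(ctx[i]))
+**                failed = 1;
+**    }
+**
+**    for (i = 0; i < nctx; ++i)
+**        if (failed)
+**            dav_prop_rollback(&ctx[i]);
+**        else
+**            dav_prop_commit(&ctx[i]);
+*/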
+
+/* --------------------------------------------------------------------
+**
+** WALKER STRUCTURE
+*/
+
+enum {
+ DAV_CALLTYPE_MEMBER = 1, /* called for a member resource */
+ DAV_CALLTYPE_COLLECTION, /* called for a collection */
+ DAV_CALLTYPE_LOCKNULL /* called for a locknull resource */
+};
+
+typedef struct
+{
+ /* the client-provided context */
+ void *walk_ctx;
+
+ /* pool to use for allocations in the callback */
+ apr_pool_t *pool;
+
+ /* the current resource */
+ const dav_resource *resource;
+
+ /* OUTPUT: add responses to this */
+ dav_response *response;
+
+} dav_walk_resource;
+
+typedef struct
+{
+ int walk_type;
+#define DAV_WALKTYPE_AUTH 0x0001 /* limit to authorized files */
+#define DAV_WALKTYPE_NORMAL 0x0002 /* walk normal files */
+#define DAV_WALKTYPE_LOCKNULL 0x0004 /* walk locknull resources */
+
+ /* callback function and a client context for the walk */
+ dav_error * (*func)(dav_walk_resource *wres, int calltype);
+ void *walk_ctx;
+
+ /* what pool to use for allocations needed by walk logic */
+ apr_pool_t *pool;
+
+ /* beginning root of the walk */
+ const dav_resource *root;
+
+ /* lock database to enable walking LOCKNULL resources */
+ dav_lockdb *lockdb;
+
+} dav_walk_params;
+
+/* directory tree walking context */
+typedef struct dav_walker_ctx
+{
+ /* input: */
+ dav_walk_params w;
+
+
+ /* ### client data... phasing out this big glom */
+
+ /* this brigade buffers data being sent to r->output_filters */
+ apr_bucket_brigade *bb;
+
+ /* a scratch pool, used to stream responses and iteratively cleared. */
+ apr_pool_t *scratchpool;
+
+ request_rec *r; /* original request */
+
+ /* for PROPFIND operations */
+ apr_xml_doc *doc;
+ int propfind_type;
+#define DAV_PROPFIND_IS_ALLPROP 1
+#define DAV_PROPFIND_IS_PROPNAME 2
+#define DAV_PROPFIND_IS_PROP 3
+
+ apr_text *propstat_404; /* (cached) propstat giving a 404 error */
+
+ const dav_if_header *if_header; /* for validation */
+ const dav_locktoken *locktoken; /* for UNLOCK */
+ const dav_lock *lock; /* for LOCK */
+ int skip_root; /* for dav_inherit_locks() */
+
+ int flags;
+
+ dav_buffer work_buf; /* for dav_validate_request() */
+
+} dav_walker_ctx;
+
+DAV_DECLARE(void) dav_add_response(dav_walk_resource *wres,
+ int status,
+ dav_get_props_result *propstats);
+
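+/*
+** EXAMPLE (illustrative sketch only): a minimal walk that adds a response
+** for each member visited. The callback name "example_walker" and the
+** variables "repos_hooks" (the repository's dav_hooks_repository, declared
+** further below) and "resource" are assumptions of this sketch.
+**
+**    static dav_error *example_walker(dav_walk_resource *wres, int calltype)
+**    {
+**        dav_add_response(wres, HTTP_OK, NULL);
+**        return NULL;
+**    }
+**
+**    dav_walk_params params = { 0 };
+**    dav_response *response;
+**    dav_error *err;
+**
+**    params.walk_type = DAV_WALKTYPE_NORMAL | DAV_WALKTYPE_AUTH;
+**    params.func = example_walker;
+**    params.pool = r->pool;
+**    params.root = resource;
+**
+**    err = (*repos_hooks->walk)(&params, DAV_INFINITY, &response);
+*/
+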
+
+/* --------------------------------------------------------------------
+**
+** "STREAM" STRUCTURE
+**
+** mod_dav uses this abstraction for interacting with the repository
+** while fetching/storing resources. mod_dav views resources as a stream
+** of bytes.
+**
+** Note that the structure is opaque -- it is private to the repository
+** that created the stream in the repository's "open" function.
+**
+** ### THIS STUFF IS GOING AWAY ... GET/read requests are handled by
+** ### having the provider jam stuff straight into the filter stack.
+** ### this is only left for handling PUT/write requests.
+*/
+
+typedef struct dav_stream dav_stream;
+
+typedef enum {
+ DAV_MODE_WRITE_TRUNC, /* truncate and open for writing */
+ DAV_MODE_WRITE_SEEKABLE /* open for writing; random access */
+} dav_stream_mode;
+
+
+/* --------------------------------------------------------------------
+**
+** REPOSITORY FUNCTIONS
+*/
+
+/* Repository provider hooks */
+struct dav_hooks_repository
+{
+ /* Flag for whether repository requires special GET handling.
+ * If resources in the repository are not visible in the
+ * filesystem location which URLs map to, then special handling
+ * is required to first fetch a resource from the repository,
+ * respond to the GET request, then free the resource copy.
+ */
+ int handle_get;
+
+ /* Get a resource descriptor for the URI in a request. A descriptor
+ * should always be returned even if the resource does not exist. This
+ * repository has been identified as handling the resource given by
+ * the URI, so an answer must be given. If there is a problem with the
+ * URI or accessing the resource or whatever, then an error should be
+ * returned.
+ *
+ * root_dir:
+ * the root of the directory for which this repository is configured.
+ *
+ * label:
+ * if a Label: header is present (and allowed), this is the label
+ * to use to identify a version resource from the resource's
+ * corresponding version history. Otherwise, it will be NULL.
+ *
+ * use_checked_in:
+ * use the DAV:checked-in property of the resource identified by the
+ * Request-URI to identify and return a version resource
+ *
+ * The provider may associate the request storage pool with the resource
+ * (in the resource->pool field), to use in other operations on that
+ * resource.
+ */
+ dav_error * (*get_resource)(
+ request_rec *r,
+ const char *root_dir,
+ const char *label,
+ int use_checked_in,
+ dav_resource **resource
+ );
+
+ /* Get a resource descriptor for the parent of the given resource.
+ * The resources need not exist. NULL is returned if the resource
+ * is the root collection.
+ *
+ * An error should be returned only if there is a fatal error in
+ * fetching information about the parent resource.
+ */
+ dav_error * (*get_parent_resource)(
+ const dav_resource *resource,
+ dav_resource **parent_resource
+ );
+
+ /* Determine whether two resource descriptors refer to the same resource.
+ *
+ * Result != 0 => the resources are the same.
+ */
+ int (*is_same_resource)(
+ const dav_resource *res1,
+ const dav_resource *res2
+ );
+
+ /* Determine whether one resource is a parent (immediate or otherwise)
+ * of another.
+ *
+ * Result != 0 => res1 is a parent of res2.
+ */
+ int (*is_parent_resource)(
+ const dav_resource *res1,
+ const dav_resource *res2
+ );
+
+ /*
+ ** Open a stream for this resource, using the specified mode. The
+ ** stream will be returned in *stream.
+ */
+ dav_error * (*open_stream)(const dav_resource *resource,
+ dav_stream_mode mode,
+ dav_stream **stream);
+
+ /*
+ ** Close the specified stream.
+ **
+ ** mod_dav will (ideally) make sure to call this. For safety purposes,
+ ** a provider should (ideally) register a cleanup function with the
+ ** request pool to get this closed and cleaned up.
+ **
+ ** Note the possibility of an error from the close -- it is entirely
+ ** feasible that the close does a "commit" of some kind, which can
+ ** produce an error.
+ **
+ ** For a stream opened for writing, commit should be TRUE (non-zero)
+ ** to retain the file or FALSE (0) to discard it.
+ ** Note: the commit flag is ignored for streams opened for reading.
+ */
+ dav_error * (*close_stream)(dav_stream *stream, int commit);
+
+ /*
+ ** Write data to the stream.
+ **
+ ** All of the bytes must be written, or an error should be returned.
+ */
+ dav_error * (*write_stream)(dav_stream *stream,
+ const void *buf, apr_size_t bufsize);
+
+ /*
+ ** Seek to an absolute position in the stream. This is used to support
+ ** Content-Range in a GET/PUT.
+ **
+ ** NOTE: if this function is NULL (which is allowed), then any
+ ** operations using Content-Range will be refused.
+ */
+ dav_error * (*seek_stream)(dav_stream *stream, apr_off_t abs_position);
+
+ /*
+ ** If a GET is processed using a stream (open_stream, read_stream)
+ ** rather than via a sub-request (on get_pathname), then this function
+ ** is used to provide the repository with a way to set the headers
+ ** in the response.
+ **
+ ** This function may be called without a following deliver(), to
+ ** handle a HEAD request.
+ **
+ ** This may be NULL if handle_get is FALSE.
+ */
+ dav_error * (*set_headers)(request_rec *r,
+ const dav_resource *resource);
+
+ /*
+ ** The provider should deliver the resource into the specified filter.
+ ** Basically, this is the response to the GET method.
+ **
+ ** Note that this is called for all resources, including collections.
+ ** The provider should determine what has content to deliver or not.
+ **
+ ** set_headers will be called prior to this function, allowing the
+ ** provider to set the appropriate response headers.
+ **
+ ** This may be NULL if handle_get is FALSE.
+ ** ### maybe toss handle_get and just use this function as the marker
+ */
+ dav_error * (*deliver)(const dav_resource *resource,
+ ap_filter_t *output);
+
+ /* Create a collection resource. The resource must not already exist.
+ *
+ * Result == NULL if the collection was created successfully. Also, the
+ * resource object is updated to reflect that the resource exists, and
+ * is a collection.
+ */
+ dav_error * (*create_collection)(
+ dav_resource *resource
+ );
+
+ /* Copy one resource to another. The destination may exist, if it is
+ * versioned.
+ * Handles both files and collections. Properties are copied as well.
+ * If the destination exists and is versioned, the provider must update
+ * the destination to have identical content to the source,
+ * recursively for collections.
+ * The depth argument is ignored for a file, and can be either 0 or
+ * DAV_INFINITY for a collection.
+ * If an error occurs in a child resource, then the return value is
+ * non-NULL, and *response is set to a multistatus response.
+ * If the copy is successful, the dst resource object is
+ * updated to reflect that the resource exists.
+ */
+ dav_error * (*copy_resource)(
+ const dav_resource *src,
+ dav_resource *dst,
+ int depth,
+ dav_response **response
+ );
+
+ /* Move one resource to another. The destination must not exist.
+ * Handles both files and collections. Properties are moved as well.
+ * If an error occurs in a child resource, then the return value is
+ * non-NULL, and *response is set to a multistatus response.
+ * If the move is successful, the src and dst resource objects are
+ * updated to reflect that the source no longer exists, and the
+ * destination does.
+ */
+ dav_error * (*move_resource)(
+ dav_resource *src,
+ dav_resource *dst,
+ dav_response **response
+ );
+
+ /* Remove a resource. Handles both files and collections.
+ * Removes any associated properties as well.
+ * If an error occurs in a child resource, then the return value is
+ * non-NULL, and *response is set to a multistatus response.
+ * If the delete is successful, the resource object is updated to
+ * reflect that the resource no longer exists.
+ */
+ dav_error * (*remove_resource)(
+ dav_resource *resource,
+ dav_response **response
+ );
+
+ /* Walk a resource hierarchy.
+ *
+ * Iterates over the resource hierarchy specified by params->root.
+ * Control of the walk and the callback are specified by 'params'.
+ *
+ * An error may be returned. *response will contain multistatus
+ * responses (if any) suitable for the body of the error. It is also
+ * possible to return NULL, yet still have multistatus responses.
+ * In this case, typically the caller should return a 207 (Multistatus)
+ * and the responses (in the body) as the HTTP response.
+ */
+ dav_error * (*walk)(const dav_walk_params *params, int depth,
+ dav_response **response);
+
+ /* Get the entity tag for a resource */
+ const char * (*getetag)(const dav_resource *resource);
+
+ /*
+ ** If a provider needs a context to associate with this hooks structure,
+ ** then this field may be used. In most cases, it will just be NULL.
+ */
+ void *ctx;
+};
+
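+/*
+** EXAMPLE (illustrative sketch only): the general shape of a PUT-style
+** write through the stream hooks above. "repos_hooks" stands for the
+** resource's dav_hooks_repository and "buf"/"len" for a chunk of request
+** body obtained elsewhere; the stream is committed only if every write
+** succeeded.
+**
+**    dav_stream *stream;
+**    dav_error *err, *close_err;
+**
+**    err = (*repos_hooks->open_stream)(resource, DAV_MODE_WRITE_TRUNC,
+**                                      &stream);
+**    if (err == NULL) {
+**        while (err == NULL && ...more request body remains...)
+**            err = (*repos_hooks->write_stream)(stream, buf, len);
+**
+**        close_err = (*repos_hooks->close_stream)(stream, err == NULL);
+**    }
+*/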
+
+/* --------------------------------------------------------------------
+**
+** VERSIONING FUNCTIONS
+*/
+
+
+/* dav_add_vary_header
+ *
+ * If there were any headers in the request which require a Vary header
+ * in the response, add it.
+ */
+DAV_DECLARE(void) dav_add_vary_header(request_rec *in_req,
+ request_rec *out_req,
+ const dav_resource *resource);
+
+/*
+** Flags specifying auto-versioning behavior, returned by
+** the auto_versionable hook. The value returned depends
+** on both the state of the resource and the value of the
+** DAV:auto-versioning property for the resource.
+**
+** If the resource does not exist (null or lock-null),
+** DAV_AUTO_VERSION_ALWAYS causes creation of a new version-controlled resource
+**
+** If the resource is checked in,
+** DAV_AUTO_VERSION_ALWAYS causes it to be checked out always,
+** DAV_AUTO_VERSION_LOCKED causes it to be checked out only when locked
+**
+** If the resource is checked out,
+** DAV_AUTO_VERSION_ALWAYS causes it to be checked in always,
+** DAV_AUTO_VERSION_LOCKED causes it to be checked in when unlocked
+** (note: a provider should allow auto-checkin only for resources which
+** were automatically checked out)
+**
+** In all cases, DAV_AUTO_VERSION_NEVER results in no auto-versioning behavior.
+*/
+typedef enum {
+ DAV_AUTO_VERSION_NEVER,
+ DAV_AUTO_VERSION_ALWAYS,
+ DAV_AUTO_VERSION_LOCKED
+} dav_auto_version;
+
+/*
+** This structure is used to record what auto-versioning operations
+** were done to make a resource writable, so that they can be undone
+** at the end of a request.
+*/
+typedef struct {
+ int resource_versioned; /* 1 => resource was auto-version-controlled */
+ int resource_checkedout; /* 1 => resource was auto-checked-out */
+ int parent_checkedout; /* 1 => parent was auto-checked-out */
+ dav_resource *parent_resource; /* parent resource, if it was needed */
+} dav_auto_version_info;
+
+/* Ensure that a resource is writable. If there is no versioning
+ * provider, then this is essentially a no-op. Versioning repositories
+ * require explicit resource creation and checkout before they can
+ * be written to. If a new resource is to be created, or an existing
+ * resource deleted, the parent collection must be checked out as well.
+ *
+ * Set the parent_only flag to only make the parent collection writable.
+ * Otherwise, both parent and child are made writable as needed. If the
+ * child does not exist, then a new versioned resource is created and
+ * checked out.
+ *
+ * If auto-versioning is not enabled for a versioned resource, then an error is
+ * returned, since the resource cannot be modified.
+ *
+ * The dav_auto_version_info structure is filled in with enough information
+ * to restore both parent and child resources to the state they were in
+ * before the auto-versioning operations occurred.
+ */
+DAV_DECLARE(dav_error *) dav_auto_checkout(
+ request_rec *r,
+ dav_resource *resource,
+ int parent_only,
+ dav_auto_version_info *av_info);
+
+/* Revert the writability of resources back to what they were
+ * before they were modified. If undo == 0, then the resource
+ * modifications are maintained (i.e. they are checked in).
+ * If undo != 0, then resource modifications are discarded
+ * (i.e. they are unchecked out).
+ *
+ * Set the unlock flag to indicate that the resource is about
+ * to be unlocked; it will be checked in if the resource
+ * auto-versioning property indicates it should be. In this case,
+ * av_info is ignored, so it can be NULL.
+ *
+ * The resource argument may be NULL if only the parent resource
+ * was checked out (i.e. the parent_only was != 0 in the
+ * dav_auto_checkout call).
+ */
+DAV_DECLARE(dav_error *) dav_auto_checkin(
+ request_rec *r,
+ dav_resource *resource,
+ int undo,
+ int unlock,
+ dav_auto_version_info *av_info);
+
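+/*
+** EXAMPLE (illustrative sketch only): the usual bracketing of a modifying
+** operation with the two calls above. "do_the_modification" is a stand-in
+** for whatever actually changes the resource. The undo flag passed to
+** dav_auto_checkin is non-zero exactly when the modification failed, and
+** the unlock flag is 0 because no UNLOCK is being processed here.
+**
+**    dav_auto_version_info av_info;
+**    dav_error *err, *err2;
+**
+**    err = dav_auto_checkout(r, resource, 0, &av_info);
+**    if (err == NULL) {
+**        err = do_the_modification(resource);
+**
+**        err2 = dav_auto_checkin(r, resource, err != NULL, 0, &av_info);
+**    }
+*/
+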
+/*
+** This structure is used to describe available reports
+**
+** "nmspace" should be valid XML and URL-quoted. mod_dav will place
+** double-quotes around it and use it in an xmlns declaration.
+*/
+typedef struct {
+ const char *nmspace; /* namespace of the XML report element */
+ const char *name; /* element name for the XML report */
+} dav_report_elem;
+
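+/*
+** EXAMPLE (illustrative sketch only): a provider's static report table,
+** terminated with a NULL-namespace entry as the avail_reports versioning
+** hook (below) expects. The second report name is made up.
+**
+**    static const dav_report_elem example_reports[] = {
+**        { "DAV:", "version-tree" },
+**        { "http://example.com/ns/", "example-report" },
+**        { NULL, NULL }
+**    };
+*/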
+
+/* Versioning provider hooks */
+struct dav_hooks_vsn
+{
+ /*
+ ** MANDATORY HOOKS
+ ** The following hooks are mandatory for all versioning providers;
+ ** they define the functionality needed to implement "core" versioning.
+ */
+
+ /* Return supported versioning options.
+ * Each dav_text item in the list will be returned as a separate
+ * DAV header. Providers are advised to limit the length of an
+ * individual text item to 63 characters, to conform to the limit
+ * used by MS Web Folders.
+ */
+ void (*get_vsn_options)(apr_pool_t *p, apr_text_header *phdr);
+
+ /* Get the value of a specific option for an OPTIONS request.
+ * The option being requested is given by the parsed XML
+ * element object "elem". The value of the option should be
+ * appended to the "option" text object.
+ */
+ dav_error * (*get_option)(const dav_resource *resource,
+ const apr_xml_elem *elem,
+ apr_text_header *option);
+
+ /* Determine whether a non-versioned (or non-existent) resource
+ * is versionable. Returns != 0 if resource can be versioned.
+ */
+ int (*versionable)(const dav_resource *resource);
+
+ /* Determine whether auto-versioning is enabled for a resource
+ * (which may not exist, or may not be versioned). If the resource
+ * is a checked-out resource, the provider must only enable
+ * auto-checkin if the resource was automatically checked out.
+ *
+ * The value returned depends on both the state of the resource
+ * and the value of its DAV:auto-version property. See the description
+ * of the dav_auto_version enumeration above for the details.
+ */
+ dav_auto_version (*auto_versionable)(const dav_resource *resource);
+
+ /* Put a resource under version control. If the resource already
+ * exists unversioned, then it becomes the initial version of the
+ * new version history, and it is replaced by a version selector
+ * which targets the new version.
+ *
+ * If the resource does not exist, then a new version-controlled
+ * resource is created which either targets an existing version (if the
+ * "target" argument is not NULL), or the initial, empty version
+ * in a new history resource (if the "target" argument is NULL).
+ *
+ * If successful, the resource object state is updated appropriately
+ * (that is, changed to refer to the new version-controlled resource).
+ */
+ dav_error * (*vsn_control)(dav_resource *resource,
+ const char *target);
+
+ /* Checkout a resource. If successful, the resource
+ * object state is updated appropriately.
+ *
+ * The auto_checkout flag will be set if this checkout is being
+ * done automatically, as part of some method which modifies
+ * the resource. The provider must remember that the resource
+ * was automatically checked out, so it can determine whether it
+ * can be automatically checked in. (Auto-checkin should only be
+ * enabled for resources which were automatically checked out.)
+ *
+ * If the working resource has a different URL from the
+ * target resource, a dav_resource descriptor is returned
+ * for the new working resource. Otherwise, the original
+ * resource descriptor will refer to the working resource.
+ * The working_resource argument can be NULL if the caller
+ * is not interested in the working resource.
+ *
+ * If the client has specified DAV:unreserved or DAV:fork-ok in the
+ * checkout request, then the corresponding flags are set. If
+ * DAV:activity-set has been specified, then create_activity is set
+ * if DAV:new was specified; otherwise, the DAV:href elements' CDATA
+ * (the actual href text) is passed in the "activities" array (each
+ * element of the array is a const char *). activities will be NULL when
+ * no DAV:activity-set was provided or when create_activity is set.
+ */
+ dav_error * (*checkout)(dav_resource *resource,
+ int auto_checkout,
+ int is_unreserved, int is_fork_ok,
+ int create_activity,
+ apr_array_header_t *activities,
+ dav_resource **working_resource);
+
+ /* Uncheckout a checked-out resource. If successful, the resource
+ * object state is updated appropriately.
+ */
+ dav_error * (*uncheckout)(dav_resource *resource);
+
+ /* Checkin a checked-out resource. If successful, the resource
+ * object state is updated appropriately, and the
+ * version_resource descriptor will refer to the new version.
+ * The version_resource argument can be NULL if the caller
+ * is not interested in the new version resource.
+ *
+ * If the client has specified DAV:keep-checked-out in the checkin
+ * request, then the keep_checked_out flag is set. The provider
+ * should create a new version, but keep the resource in the
+ * checked-out state.
+ */
+ dav_error * (*checkin)(dav_resource *resource,
+ int keep_checked_out,
+ dav_resource **version_resource);
+
+ /*
+ ** Return the set of reports available at this resource.
+ **
+ ** An array of report elements should be returned, with an end-marker
+ ** element containing namespace==NULL. The value of the
+ ** DAV:supported-report-set property will be constructed and
+ ** returned.
+ */
+ dav_error * (*avail_reports)(const dav_resource *resource,
+ const dav_report_elem **reports);
+
+ /*
+ ** Determine whether a Label header can be used
+ ** with a particular report. The dav_xml_doc structure
+ ** contains the parsed report request body.
+ ** Returns 0 if the Label header is not allowed.
+ */
+ int (*report_label_header_allowed)(const apr_xml_doc *doc);
+
+ /*
+ ** Generate a report on a resource. Since a provider is free
+ ** to define its own reports, and the value of request headers
+ ** may affect the interpretation of a report, the request record
+ ** must be passed to this routine.
+ **
+ ** The dav_xml_doc structure contains the parsed report request
+ ** body. The report response should be generated into the specified
+ ** output filter.
+ **
+ ** If an error occurs, and a response has not yet been generated,
+ ** then an error can be returned from this function. mod_dav will
+ ** construct an appropriate error response. Once some output has
+ ** been placed into the filter, however, the provider should not
+ ** return an error -- there is no way that mod_dav can deliver it
+ ** properly.
+ **
+ ** ### maybe we need a way to signal an error anyways, and then
+ ** ### apache can abort the connection?
+ */
+ dav_error * (*deliver_report)(request_rec *r,
+ const dav_resource *resource,
+ const apr_xml_doc *doc,
+ ap_filter_t *output);
+
+ /*
+ ** OPTIONAL HOOKS
+ ** The following hooks are optional; if not defined, then the
+ ** corresponding protocol methods will be unsupported.
+ */
+
+ /*
+ ** Set the state of a checked-in version-controlled resource.
+ **
+ ** If the request specified a version, the version resource
+ ** represents that version. If the request specified a label,
+ ** then "version" is NULL, and "label" is the label.
+ **
+ ** The depth argument is ignored for a file, and can be 0, 1, or
+ ** DAV_INFINITY for a collection. The depth argument only applies
+ ** with a label, not a version.
+ **
+ ** If an error occurs in a child resource, then the return value is
+ ** non-NULL, and *response is set to a multistatus response.
+ **
+ ** This hook is optional; if not defined, then the UPDATE method
+ ** will not be supported.
+ */
+ dav_error * (*update)(const dav_resource *resource,
+ const dav_resource *version,
+ const char *label,
+ int depth,
+ dav_response **response);
+
+ /*
+ ** Add a label to a version. The resource is either a specific
+ ** version, or a version selector, in which case the label should
+ ** be added to the current target of the version selector. The
+ ** version selector cannot be checked out.
+ **
+ ** If replace != 0, any existing label by the same name is
+ ** effectively deleted first. Otherwise, it is an error to
+ ** attempt to add a label which already exists on some version
+ ** of the same history resource.
+ **
+ ** This hook is optional; if not defined, then the LABEL method
+ ** will not be supported. If it is defined, then the remove_label
+ ** hook must be defined also.
+ */
+ dav_error * (*add_label)(const dav_resource *resource,
+ const char *label,
+ int replace);
+
+ /*
+ ** Remove a label from a version. The resource is either a specific
+ ** version, or a version selector, in which case the label should
+ ** be removed from the current target of the version selector. The
+ ** version selector cannot be checked out.
+ **
+ ** It is an error if no such label exists on the specified version.
+ **
+ ** This hook is optional, but if defined, the add_label hook
+ ** must be defined also.
+ */
+ dav_error * (*remove_label)(const dav_resource *resource,
+ const char *label);
+
+ /*
+ ** Determine whether a null resource can be created as a workspace.
+ ** The provider may restrict workspaces to certain locations.
+ ** Returns 0 if the resource cannot be a workspace.
+ **
+ ** This hook is optional; if the provider does not support workspaces,
+ ** it should be set to NULL.
+ */
+ int (*can_be_workspace)(const dav_resource *resource);
+
+ /*
+ ** Create a workspace resource. The resource must not already
+ ** exist. Any <DAV:mkworkspace> element is passed to the provider
+ ** in the "doc" structure; it may be empty.
+ **
+ ** If workspace creation is successful, the state of the resource
+ ** object is updated appropriately.
+ **
+ ** This hook is optional; if the provider does not support workspaces,
+ ** it should be set to NULL.
+ */
+ dav_error * (*make_workspace)(dav_resource *resource,
+ apr_xml_doc *doc);
+
+ /*
+ ** Determine whether a null resource can be created as an activity.
+ ** The provider may restrict activities to certain locations.
+ ** Returns 0 if the resource cannot be an activity.
+ **
+ ** This hook is optional; if the provider does not support activities,
+ ** it should be set to NULL.
+ */
+ int (*can_be_activity)(const dav_resource *resource);
+
+ /*
+ ** Create an activity resource. The resource must not already
+ ** exist.
+ **
+ ** If activity creation is successful, the state of the resource
+ ** object is updated appropriately.
+ **
+ ** This hook is optional; if the provider does not support activities,
+ ** it should be set to NULL.
+ */
+ dav_error * (*make_activity)(dav_resource *resource);
+
+ /*
+ ** Merge a resource (tree) into target resource (tree).
+ **
+ ** ### more doc...
+ **
+ ** This hook is optional; if the provider does not support merging,
+ ** then this should be set to NULL.
+ */
+ dav_error * (*merge)(dav_resource *target, dav_resource *source,
+ int no_auto_merge, int no_checkout,
+ apr_xml_elem *prop_elem,
+ ap_filter_t *output);
+
+ /*
+ ** If a provider needs a context to associate with this hooks structure,
+ ** then this field may be used. In most cases, it will just be NULL.
+ */
+ void *ctx;
+};
+
+
+/* --------------------------------------------------------------------
+**
+** BINDING FUNCTIONS
+*/
+
+/* binding provider hooks */
+struct dav_hooks_binding {
+
+ /* Determine whether a resource can be the target of a binding.
+ * Returns 0 if the resource cannot be a binding target.
+ */
+ int (*is_bindable)(const dav_resource *resource);
+
+ /* Create a binding to a resource.
+ * The resource argument is the target of the binding;
+ * the binding argument must be a resource which does not already
+ * exist.
+ */
+ dav_error * (*bind_resource)(const dav_resource *resource,
+ dav_resource *binding);
+
+ /*
+ ** If a provider needs a context to associate with this hooks structure,
+ ** then this field may be used. In most cases, it will just be NULL.
+ */
+ void *ctx;
+
+};
+
+
+/* --------------------------------------------------------------------
+**
+** SEARCH(DASL) FUNCTIONS
+*/
+
+/* search provider hooks */
+struct dav_hooks_search {
+ /* Set headers for an OPTIONS method.
+ * An error may be returned.
+ * To set a header, this function might call
+ * apr_table_setn(r->headers_out, "DASL", dasl_optin1);
+ *
+ * Examples:
+ * DASL: <DAV:basicsearch>
+ * DASL: <http://foo.bar.com/syntax1>
+ * DASL: <http://akuma.com/syntax2>
+ */
+ dav_error * (*set_option_head)(request_rec *r);
+
+ /* Search resources
+ * An error may be returned. *response will contain multistatus
+ * responses (if any) suitable for the body of the error. It is also
+ * possible to return NULL, yet still have multistatus responses.
+ * In this case, typically the caller should return a 207 (Multistatus)
+ * and the responses (in the body) as the HTTP response.
+ */
+ dav_error * (*search_resource)(request_rec *r,
+ dav_response **response);
+
+ /*
+ ** If a provider needs a context to associate with this hooks structure,
+ ** then this field may be used. In most cases, it will just be NULL.
+ */
+ void *ctx;
+
+};
+
+
+/* --------------------------------------------------------------------
+**
+** MISCELLANEOUS STUFF
+*/
+
+/* fetch the "LimitXMLRequestBody" in force for this resource */
+DAV_DECLARE(apr_size_t) dav_get_limit_xml_body(const request_rec *r);
+
+typedef struct {
+ int propid; /* live property ID */
+ const dav_hooks_liveprop *provider; /* the provider defining this prop */
+} dav_elem_private;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MOD_DAV_H_ */
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/modules.mk b/rubbos/app/httpd-2.0.64/modules/dav/main/modules.mk
new file mode 100644
index 00000000..ceb52a1b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/modules.mk
@@ -0,0 +1,3 @@
+DISTCLEAN_TARGETS = modules.mk
+static =
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/props.c b/rubbos/app/httpd-2.0.64/modules/dav/main/props.c
new file mode 100644
index 00000000..fc28532d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/props.c
@@ -0,0 +1,1116 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+** DAV extension module for Apache 2.0.*
+** - Property database handling (repository-independent)
+**
+** NOTES:
+**
+** PROPERTY DATABASE
+**
+** This version assumes that there is a per-resource database provider
+** to record properties. The database provider decides how and where to
+** store these databases.
+**
+** The DBM keys for the properties have the following form:
+**
+** namespace ":" propname
+**
+** For example: 5:author
+**
+** The namespace provides an integer index into the namespace table
+** (see below). propname is simply the property name, without a namespace
+** prefix.
+**
+** A special case exists for properties that had a prefix starting with
+** "xml". The XML Specification reserves these for future use. mod_dav
+** stores and retrieves them unchanged. The keys for these properties
+** have the form:
+**
+** ":" propname
+**
+** The propname will contain the prefix and the property name. For
+** example, a key might be ":xmlfoo:name"
+**
+** The ":name" style will also be used for properties that do not
+** exist within a namespace.
+**
+** The DBM values consist of two null-terminated strings, appended
+** together (the null-terms are retained and stored in the database).
+** The first string is the xml:lang value for the property. An empty
+** string signifies that a lang value was not in context for the value.
+** The second string is the property value itself.
+**
+**
+** NAMESPACE TABLE
+**
+** The namespace table is an array that lists each of the namespaces
+** that are in use by the properties in the given propdb. Each entry
+** in the array is a simple URI.
+**
+** For example: http://www.foo.bar/standards/props/
+**
+** The prefix used for the property is stripped and the URI for it
+** is entered into the namespace table. Also, any namespaces used
+** within the property value will be entered into the table (and
+** stripped from the child elements).
+**
+** The namespaces are stored in the DBM database under the "METADATA" key.
+**
+**
+** STRIPPING NAMESPACES
+**
+** Within the property values, the namespace declarations (xmlns...)
+** are stripped. Each element and attribute will have its prefix removed
+** and a new prefix inserted.
+**
+** This must be done so that we can return multiple properties in a
+** PROPFIND which may have (originally) used conflicting prefixes. For
+** that case, we must bind all property value elements to new namespace
+** values.
+**
+** This implies that clients must NOT be sensitive to the namespace
+** prefix used for their properties. It WILL change when the properties
+** are returned (we return them as "ns<index>", e.g. "ns5"). Also, the
+** property value can contain ONLY XML elements and CDATA. PI and comment
+** elements will be stripped. CDATA whitespace will be preserved, but
+** whitespace within element tags will be altered. Attribute ordering
+** may be altered. Element and CDATA ordering will be preserved.
+**
+**
+** ATTRIBUTES ON PROPERTY NAME ELEMENTS
+**
+** When getting/setting properties, the XML used looks like:
+**
+** <prop>
+** <propname1>value</propname1>
+** <propname2>value</propname2>
+** </prop>
+**
+** This implementation (mod_dav) DOES NOT save any attributes that are
+** associated with the <propname1> element. The property value is deemed
+** to be only the contents ("value" in the above example).
+**
+** We do store the xml:lang value (if any) that applies to the context
+** of the <propname1> element. Whether the xml:lang attribute is on
+** <propname1> itself, or from a higher level element, we will store it
+** with the property value.
+**
+**
+** VERSIONING
+**
+** The DBM db contains a key named "METADATA" that holds database-level
+** information, such as the namespace table. The record also contains the
+** db's version number as the very first 16-bit value. This first number
+** is actually stored as two single bytes: the first byte is a "major"
+** version number. The second byte is a "minor" number.
+**
+** If the major number is not what mod_dav expects, then the db is closed
+** immediately and an error is returned. A minor number change is
+** acceptable -- it is presumed that old/new dav_props.c can deal with
+** the database format. For example, a newer dav_props might update the
+** minor value and append information to the end of the metadata record
+** (which would be ignored by previous versions).
+**
+**
+** ISSUES:
+**
+** At the moment, for the dav_get_allprops() and dav_get_props() functions,
+** we must return a set of xmlns: declarations for ALL known namespaces
+** in the file. There isn't a way to filter this because we don't know
+** which are going to be used or not. Examining property names is not
+** sufficient because the property values could use entirely different
+** namespaces.
+**
+** ==> we must devise a scheme where we can "garbage collect" the namespace
+** entries from the property database.
+*/
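+
+/*
+** EXAMPLE (illustrative sketch only): composing a key and a value in the
+** layout described above, for a property "author" stored under namespace
+** index 5 with no xml:lang in effect and the value "Jane". "p" is an
+** apr_pool_t *; a real provider may build these however it likes.
+**
+**    key:   apr_psprintf(p, "%d:%s", 5, "author")     yields "5:author"
+**    value: '\0' 'J' 'a' 'n' 'e' '\0'
+**           (empty xml:lang string and its NUL, then the value and its NUL)
+*/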
+
+#include "apr.h"
+#include "apr_strings.h"
+
+#define APR_WANT_STDIO
+#define APR_WANT_BYTEFUNC
+#include "apr_want.h"
+
+#include "mod_dav.h"
+
+#include "http_log.h"
+#include "http_request.h"
+
+/*
+** There is some rough support for writable DAV:getcontenttype and
+** DAV:getcontentlanguage properties. If this #define is (1), then
+** this support is disabled.
+**
+** We are disabling it because of a lack of support in GET and PUT
+** operations. For GET, it would be "expensive" to look for a propdb,
+** open it, and attempt to extract the Content-Type and Content-Language
+** values for the response.
+** (Handling the PUT would not be difficult, though)
+*/
+#define DAV_DISABLE_WRITABLE_PROPS 1
+
+#define DAV_EMPTY_VALUE "\0" /* TWO null terms */
+
+struct dav_propdb {
+ apr_pool_t *p; /* the pool we should use */
+ request_rec *r; /* the request record */
+
+ const dav_resource *resource; /* the target resource */
+
+ int deferred; /* open of db has been deferred */
+ dav_db *db; /* underlying database containing props */
+
+ apr_array_header_t *ns_xlate; /* translation of an elem->ns to URI */
+ dav_namespace_map *mapping; /* namespace mapping */
+
+ dav_lockdb *lockdb; /* the lock database */
+
+ dav_buffer wb_lock; /* work buffer for lockdiscovery property */
+
+ /* if we ever run a GET subreq, it will be stored here */
+ request_rec *subreq;
+
+ /* hooks we should use for processing (based on the target resource) */
+ const dav_hooks_db *db_hooks;
+};
+
+/* NOTE: dav_core_props[] and the following enum must stay in sync. */
+/* ### move these into a "core" liveprop provider? */
+static const char * const dav_core_props[] =
+{
+ "getcontenttype",
+ "getcontentlanguage",
+ "lockdiscovery",
+ "supportedlock",
+
+ NULL /* sentinel */
+};
+enum {
+ DAV_PROPID_CORE_getcontenttype = DAV_PROPID_CORE,
+ DAV_PROPID_CORE_getcontentlanguage,
+ DAV_PROPID_CORE_lockdiscovery,
+ DAV_PROPID_CORE_supportedlock,
+
+ DAV_PROPID_CORE_UNKNOWN
+};
+
+/*
+** This structure is used to track information needed for a rollback.
+*/
+typedef struct dav_rollback_item {
+ /* select one of the two rollback context structures based on the
+ value of dav_prop_ctx.is_liveprop */
+ dav_deadprop_rollback *deadprop;
+ dav_liveprop_rollback *liveprop;
+
+} dav_rollback_item;
+
+
+static int dav_find_liveprop_provider(dav_propdb *propdb,
+ const char *ns_uri,
+ const char *propname,
+ const dav_hooks_liveprop **provider)
+{
+ int propid;
+
+ *provider = NULL;
+
+ if (ns_uri == NULL) {
+ /* policy: liveprop providers cannot define no-namespace properties */
+ return DAV_PROPID_CORE_UNKNOWN;
+ }
+
+ /* check liveprop providers first, so they can define core properties */
+ propid = dav_run_find_liveprop(propdb->resource, ns_uri, propname,
+ provider);
+ if (propid != 0) {
+ return propid;
+ }
+
+ /* check for core property */
+ if (strcmp(ns_uri, "DAV:") == 0) {
+ const char * const *p = dav_core_props;
+
+ for (propid = DAV_PROPID_CORE; *p != NULL; ++p, ++propid)
+ if (strcmp(propname, *p) == 0) {
+ return propid;
+ }
+ }
+
+ /* no provider for this property */
+ return DAV_PROPID_CORE_UNKNOWN;
+}
+
+static void dav_find_liveprop(dav_propdb *propdb, apr_xml_elem *elem)
+{
+ const char *ns_uri;
+ dav_elem_private *priv = elem->priv;
+ const dav_hooks_liveprop *hooks;
+
+
+ if (elem->ns == APR_XML_NS_NONE)
+ ns_uri = NULL;
+ else if (elem->ns == APR_XML_NS_DAV_ID)
+ ns_uri = "DAV:";
+ else
+ ns_uri = APR_XML_GET_URI_ITEM(propdb->ns_xlate, elem->ns);
+
+ priv->propid = dav_find_liveprop_provider(propdb, ns_uri, elem->name,
+ &hooks);
+
+ /* ### this test seems redundant... */
+ if (priv->propid != DAV_PROPID_CORE_UNKNOWN) {
+ priv->provider = hooks;
+ }
+}
+
+/* is the live property read/write? */
+static int dav_rw_liveprop(dav_propdb *propdb, dav_elem_private *priv)
+{
+ int propid = priv->propid;
+
+ /*
+ ** Check the liveprop provider (if this is a provider-defined prop)
+ */
+ if (priv->provider != NULL) {
+ return (*priv->provider->is_writable)(propdb->resource, propid);
+ }
+
+ /* these are defined as read-only */
+ if (propid == DAV_PROPID_CORE_lockdiscovery
+#if DAV_DISABLE_WRITABLE_PROPS
+ || propid == DAV_PROPID_CORE_getcontenttype
+ || propid == DAV_PROPID_CORE_getcontentlanguage
+#endif
+ || propid == DAV_PROPID_CORE_supportedlock
+ ) {
+
+ return 0;
+ }
+
+ /* these are defined as read/write */
+ if (propid == DAV_PROPID_CORE_getcontenttype
+ || propid == DAV_PROPID_CORE_getcontentlanguage
+ || propid == DAV_PROPID_CORE_UNKNOWN) {
+
+ return 1;
+ }
+
+ /*
+ ** We don't recognize the property, so it must be dead (and writable)
+ */
+ return 1;
+}
+
+/* do a sub-request to fetch properties for the target resource's URI. */
+static void dav_do_prop_subreq(dav_propdb *propdb)
+{
+ /* perform a "GET" on the resource's URI (note that the resource
+ may not correspond to the current request!). */
+ propdb->subreq = ap_sub_req_lookup_uri(propdb->resource->uri, propdb->r,
+ NULL);
+}
+
+static dav_error * dav_insert_coreprop(dav_propdb *propdb,
+ int propid, const char *name,
+ dav_prop_insert what,
+ apr_text_header *phdr,
+ dav_prop_insert *inserted)
+{
+ const char *value = NULL;
+ dav_error *err;
+
+ *inserted = DAV_PROP_INSERT_NOTDEF;
+
+ /* fast-path the common case */
+ if (propid == DAV_PROPID_CORE_UNKNOWN)
+ return NULL;
+
+ switch (propid) {
+
+ case DAV_PROPID_CORE_lockdiscovery:
+ if (propdb->lockdb != NULL) {
+ dav_lock *locks;
+
+ if ((err = dav_lock_query(propdb->lockdb, propdb->resource,
+ &locks)) != NULL) {
+ return dav_push_error(propdb->p, err->status, 0,
+ "DAV:lockdiscovery could not be "
+ "determined due to a problem fetching "
+ "the locks for this resource.",
+ err);
+ }
+
+ /* fast-path the no-locks case */
+ if (locks == NULL) {
+ value = "";
+ }
+ else {
+ /*
+ ** This may modify the buffer. value may point to
+ ** wb_lock.pbuf or a string constant.
+ */
+ value = dav_lock_get_activelock(propdb->r, locks,
+ &propdb->wb_lock);
+
+ /* make a copy to isolate it from changes to wb_lock */
+ value = apr_pstrdup(propdb->p, propdb->wb_lock.buf);
+ }
+ }
+ break;
+
+ case DAV_PROPID_CORE_supportedlock:
+ if (propdb->lockdb != NULL) {
+ value = (*propdb->lockdb->hooks->get_supportedlock)(propdb->resource);
+ }
+ break;
+
+ case DAV_PROPID_CORE_getcontenttype:
+ if (propdb->subreq == NULL) {
+ dav_do_prop_subreq(propdb);
+ }
+ if (propdb->subreq->content_type != NULL) {
+ value = propdb->subreq->content_type;
+ }
+ break;
+
+ case DAV_PROPID_CORE_getcontentlanguage:
+ {
+ const char *lang;
+
+ if (propdb->subreq == NULL) {
+ dav_do_prop_subreq(propdb);
+ }
+ if ((lang = apr_table_get(propdb->subreq->headers_out,
+ "Content-Language")) != NULL) {
+ value = lang;
+ }
+ break;
+ }
+
+ default:
+ /* fall through to interpret as a dead property */
+ break;
+ }
+
+ /* if something was supplied, then insert it */
+ if (value != NULL) {
+ const char *s;
+
+ if (what == DAV_PROP_INSERT_SUPPORTED) {
+ /* use D: prefix to refer to the DAV: namespace URI,
+ * and let the namespace attribute default to "DAV:"
+ */
+ s = apr_psprintf(propdb->p,
+ "<D:supported-live-property D:name=\"%s\"/>" DEBUG_CR,
+ name);
+ }
+ else if (what == DAV_PROP_INSERT_VALUE && *value != '\0') {
+ /* use D: prefix to refer to the DAV: namespace URI */
+ s = apr_psprintf(propdb->p, "<D:%s>%s</D:%s>" DEBUG_CR,
+ name, value, name);
+ }
+ else {
+ /* use D: prefix to refer to the DAV: namespace URI */
+ s = apr_psprintf(propdb->p, "<D:%s/>" DEBUG_CR, name);
+ }
+ apr_text_append(propdb->p, phdr, s);
+
+ *inserted = what;
+ }
+
+ return NULL;
+}
+
+static dav_error * dav_insert_liveprop(dav_propdb *propdb,
+ const apr_xml_elem *elem,
+ dav_prop_insert what,
+ apr_text_header *phdr,
+ dav_prop_insert *inserted)
+{
+ dav_elem_private *priv = elem->priv;
+
+ *inserted = DAV_PROP_INSERT_NOTDEF;
+
+ if (priv->provider == NULL) {
+ /* this is a "core" property that we define */
+ return dav_insert_coreprop(propdb, priv->propid, elem->name,
+ what, phdr, inserted);
+ }
+
+ /* ask the provider (that defined this prop) to insert the prop */
+ *inserted = (*priv->provider->insert_prop)(propdb->resource, priv->propid,
+ what, phdr);
+
+ return NULL;
+}
+
+static void dav_output_prop_name(apr_pool_t *pool,
+ const dav_prop_name *name,
+ dav_xmlns_info *xi,
+ apr_text_header *phdr)
+{
+ const char *s;
+
+ if (*name->ns == '\0')
+ s = apr_psprintf(pool, "<%s/>" DEBUG_CR, name->name);
+ else {
+ const char *prefix = dav_xmlns_add_uri(xi, name->ns);
+
+ s = apr_psprintf(pool, "<%s:%s/>" DEBUG_CR, prefix, name->name);
+ }
+
+ apr_text_append(pool, phdr, s);
+}
+
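+/* Emit a namespace declaration of the form  xmlns:<pre_prefix><ns>="<ns_uri>"
+ * (for example, xmlns:lp0="..." when a caller passes the "lp" pre_prefix for
+ * the first liveprop namespace index). */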
+static void dav_insert_xmlns(apr_pool_t *p, const char *pre_prefix, int ns,
+ const char *ns_uri, apr_text_header *phdr)
+{
+ const char *s;
+
+ s = apr_psprintf(p, " xmlns:%s%d=\"%s\"", pre_prefix, ns, ns_uri);
+ apr_text_append(p, phdr, s);
+}
+
+static dav_error *dav_really_open_db(dav_propdb *propdb, int ro)
+{
+ dav_error *err;
+
+ /* we're trying to open the db; turn off the 'deferred' flag */
+ propdb->deferred = 0;
+
+ /* ask the DB provider to open the thing */
+ err = (*propdb->db_hooks->open)(propdb->p, propdb->resource, ro,
+ &propdb->db);
+ if (err != NULL) {
+ return dav_push_error(propdb->p, HTTP_INTERNAL_SERVER_ERROR,
+ DAV_ERR_PROP_OPENING,
+ "Could not open the property database.",
+ err);
+ }
+
+ /*
+ ** NOTE: propdb->db could be NULL if we attempted to open a readonly
+ ** database that doesn't exist. If we require read/write
+ ** access, then a database was created and opened.
+ */
+
+ return NULL;
+}
+
+DAV_DECLARE(dav_error *)dav_open_propdb(request_rec *r, dav_lockdb *lockdb,
+ const dav_resource *resource,
+ int ro,
+ apr_array_header_t * ns_xlate,
+ dav_propdb **p_propdb)
+{
+ dav_propdb *propdb = apr_pcalloc(r->pool, sizeof(*propdb));
+
+ *p_propdb = NULL;
+
+#if DAV_DEBUG
+ if (resource->uri == NULL) {
+ return dav_new_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "INTERNAL DESIGN ERROR: resource must define "
+ "its URI.");
+ }
+#endif
+
+ propdb->r = r;
+ propdb->p = r->pool; /* ### get rid of this */
+ propdb->resource = resource;
+ propdb->ns_xlate = ns_xlate;
+
+ propdb->db_hooks = DAV_GET_HOOKS_PROPDB(r);
+
+ propdb->lockdb = lockdb;
+
+ /* always defer actual open, to avoid expense of accessing db
+ * when only live properties are involved
+ */
+ propdb->deferred = 1;
+
+ /* ### what to do about closing the propdb on server failure? */
+
+ *p_propdb = propdb;
+ return NULL;
+}
+
+DAV_DECLARE(void) dav_close_propdb(dav_propdb *propdb)
+{
+ if (propdb->db == NULL)
+ return;
+
+ (*propdb->db_hooks->close)(propdb->db);
+}
+
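+/* Gather every property for an allprop/propname request (or the set of
+ * supported live properties). The returned propstats text is, roughly:
+ *     <D:propstat><D:prop> ...one entry per property... </D:prop>
+ *     <D:status>HTTP/1.1 200 OK</D:status></D:propstat>
+ * with the collected xmlns declarations returned separately in result.xmlns. */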
+DAV_DECLARE(dav_get_props_result) dav_get_allprops(dav_propdb *propdb,
+ dav_prop_insert what)
+{
+ const dav_hooks_db *db_hooks = propdb->db_hooks;
+ apr_text_header hdr = { 0 };
+ apr_text_header hdr_ns = { 0 };
+ dav_get_props_result result = { 0 };
+ int found_contenttype = 0;
+ int found_contentlang = 0;
+ dav_prop_insert unused_inserted;
+
+ /* if not just getting supported live properties,
+ * scan all properties in the dead prop database
+ */
+ if (what != DAV_PROP_INSERT_SUPPORTED) {
+ if (propdb->deferred) {
+ /* ### what to do with db open error? */
+ (void) dav_really_open_db(propdb, 1 /*ro*/);
+ }
+
+ /* initialize the result with some start tags... */
+ apr_text_append(propdb->p, &hdr,
+ "<D:propstat>" DEBUG_CR
+ "<D:prop>" DEBUG_CR);
+
+ /* if there ARE properties, then scan them */
+ if (propdb->db != NULL) {
+ dav_xmlns_info *xi = dav_xmlns_create(propdb->p);
+ dav_prop_name name;
+
+ /* define (up front) any namespaces the db might need */
+ (void) (*db_hooks->define_namespaces)(propdb->db, xi);
+
+ /* get the first property name, beginning the scan */
+ (void) (*db_hooks->first_name)(propdb->db, &name);
+ while (name.ns != NULL) {
+
+ /*
+ ** We also look for <DAV:getcontenttype> and
+ ** <DAV:getcontentlanguage>. If they are not stored as dead
+ ** properties, then we need to perform a subrequest to get
+ ** their values (if any).
+ */
+ if (*name.ns == 'D' && strcmp(name.ns, "DAV:") == 0
+ && *name.name == 'g') {
+ if (strcmp(name.name, "getcontenttype") == 0) {
+ found_contenttype = 1;
+ }
+ else if (strcmp(name.name, "getcontentlanguage") == 0) {
+ found_contentlang = 1;
+ }
+ }
+
+ if (what == DAV_PROP_INSERT_VALUE) {
+ dav_error *err;
+ int found;
+
+ if ((err = (*db_hooks->output_value)(propdb->db, &name,
+ xi, &hdr,
+ &found)) != NULL) {
+ /* ### anything better to do? */
+ /* ### probably should enter a 500 error */
+ goto next_key;
+ }
+ /* assert: found == 1 */
+ }
+ else {
+ /* the value was not requested, so just add an empty
+ tag specifying the property name. */
+ dav_output_prop_name(propdb->p, &name, xi, &hdr);
+ }
+
+ next_key:
+ (void) (*db_hooks->next_name)(propdb->db, &name);
+ }
+
+ /* all namespaces have been entered into xi. generate them into
+ the output now. */
+ dav_xmlns_generate(xi, &hdr_ns);
+
+ } /* propdb->db != NULL */
+
+ /* add namespaces for all the liveprop providers */
+ dav_add_all_liveprop_xmlns(propdb->p, &hdr_ns);
+ }
+
+ /* ask the liveprop providers to insert their properties */
+ dav_run_insert_all_liveprops(propdb->r, propdb->resource, what, &hdr);
+
+ /* insert the standard properties */
+ /* ### should be handling the return errors here */
+ (void)dav_insert_coreprop(propdb,
+ DAV_PROPID_CORE_supportedlock, "supportedlock",
+ what, &hdr, &unused_inserted);
+ (void)dav_insert_coreprop(propdb,
+ DAV_PROPID_CORE_lockdiscovery, "lockdiscovery",
+ what, &hdr, &unused_inserted);
+
+ /* if we didn't find these, then do the whole subreq thing. */
+ if (!found_contenttype) {
+ /* ### should be handling the return error here */
+ (void)dav_insert_coreprop(propdb,
+ DAV_PROPID_CORE_getcontenttype,
+ "getcontenttype",
+ what, &hdr, &unused_inserted);
+ }
+ if (!found_contentlang) {
+ /* ### should be handling the return error here */
+ (void)dav_insert_coreprop(propdb,
+ DAV_PROPID_CORE_getcontentlanguage,
+ "getcontentlanguage",
+ what, &hdr, &unused_inserted);
+ }
+
+ /* if not just reporting on supported live props,
+ * terminate the result */
+ if (what != DAV_PROP_INSERT_SUPPORTED) {
+ apr_text_append(propdb->p, &hdr,
+ "</D:prop>" DEBUG_CR
+ "<D:status>HTTP/1.1 200 OK</D:status>" DEBUG_CR
+ "</D:propstat>" DEBUG_CR);
+ }
+
+ result.propstats = hdr.first;
+ result.xmlns = hdr_ns.first;
+ return result;
+}
+
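+/* Handle an explicit <D:prop> list (PROPFIND by name). Each requested
+ * property is tried first as a live property, then as a dead property;
+ * values that are found go into a 200 OK propstat, and names that are not
+ * found go into a 404 Not Found propstat. */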
+DAV_DECLARE(dav_get_props_result) dav_get_props(dav_propdb *propdb,
+ apr_xml_doc *doc)
+{
+ const dav_hooks_db *db_hooks = propdb->db_hooks;
+ apr_xml_elem *elem = dav_find_child(doc->root, "prop");
+ apr_text_header hdr_good = { 0 };
+ apr_text_header hdr_bad = { 0 };
+ apr_text_header hdr_ns = { 0 };
+ int have_good = 0;
+ dav_get_props_result result = { 0 };
+ char *marks_liveprop;
+ dav_xmlns_info *xi;
+ int xi_filled = 0;
+
+ /* ### NOTE: we should pass in TWO buffers -- one for keys, one for
+ the marks */
+
+ /* we will ALWAYS provide a "good" result, even if it is EMPTY */
+ apr_text_append(propdb->p, &hdr_good,
+ "<D:propstat>" DEBUG_CR
+ "<D:prop>" DEBUG_CR);
+
+ /* ### the marks should be in a buffer! */
+ /* allocate zeroed-memory for the marks. These marks indicate which
+ liveprop namespaces we've generated into the output xmlns buffer */
+
+ /* same for the liveprops */
+ marks_liveprop = apr_pcalloc(propdb->p, dav_get_liveprop_ns_count() + 1);
+
+ xi = dav_xmlns_create(propdb->p);
+
+ for (elem = elem->first_child; elem; elem = elem->next) {
+ dav_elem_private *priv;
+ dav_error *err;
+ dav_prop_insert inserted;
+ dav_prop_name name;
+
+ /*
+ ** First try live property providers; if they don't handle
+ ** the property, then try looking it up in the propdb.
+ */
+
+ if (elem->priv == NULL) {
+ elem->priv = apr_pcalloc(propdb->p, sizeof(*priv));
+ }
+ priv = elem->priv;
+
+ /* cache the propid; dav_get_props() could be called many times */
+ if (priv->propid == 0)
+ dav_find_liveprop(propdb, elem);
+
+ if (priv->propid != DAV_PROPID_CORE_UNKNOWN) {
+
+ /* insert the property; *inserted reports whether an insertion was done. */
+ if ((err = dav_insert_liveprop(propdb, elem, DAV_PROP_INSERT_VALUE,
+ &hdr_good, &inserted)) != NULL) {
+ /* ### need to propagate the error to the caller... */
+ /* ### skip it for now, as if nothing was inserted */
+ }
+ if (inserted == DAV_PROP_INSERT_VALUE) {
+ have_good = 1;
+
+ /*
+ ** Add the liveprop's namespace URIs. Note that provider==NULL
+ ** for core properties.
+ */
+ if (priv->provider != NULL) {
+ const char * const * scan_ns_uri;
+
+ for (scan_ns_uri = priv->provider->namespace_uris;
+ *scan_ns_uri != NULL;
+ ++scan_ns_uri) {
+ int ns;
+
+ ns = dav_get_liveprop_ns_index(*scan_ns_uri);
+ if (marks_liveprop[ns])
+ continue;
+ marks_liveprop[ns] = 1;
+
+ dav_insert_xmlns(propdb->p, "lp", ns, *scan_ns_uri,
+ &hdr_ns);
+ }
+ }
+
+ /* property added. move on to the next property. */
+ continue;
+ }
+ else if (inserted == DAV_PROP_INSERT_NOTDEF) {
+ /* nothing to do. fall thru to allow property to be handled
+ as a dead property */
+ }
+#if DAV_DEBUG
+ else {
+#if 0
+ /* ### need to change signature to return an error */
+ return dav_new_error(propdb->p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "INTERNAL DESIGN ERROR: insert_liveprop "
+ "did not insert what was asked for.");
+#endif
+ }
+#endif
+ }
+
+ /* The property wasn't a live property, so look in the dead property
+ database. */
+
+ /* make sure propdb is really open */
+ if (propdb->deferred) {
+ /* ### what to do with db open error? */
+ (void) dav_really_open_db(propdb, 1 /*ro*/);
+ }
+
+ if (elem->ns == APR_XML_NS_NONE)
+ name.ns = "";
+ else
+ name.ns = APR_XML_GET_URI_ITEM(propdb->ns_xlate, elem->ns);
+ name.name = elem->name;
+
+ /* only bother to look if a database exists */
+ if (propdb->db != NULL) {
+ int found;
+
+ if ((err = (*db_hooks->output_value)(propdb->db, &name,
+ xi, &hdr_good,
+ &found)) != NULL) {
+ /* ### what to do? continue doesn't seem right... */
+ continue;
+ }
+
+ if (found) {
+ have_good = 1;
+
+ /* if we haven't added the db's namespaces, then do so... */
+ if (!xi_filled) {
+ (void) (*db_hooks->define_namespaces)(propdb->db, xi);
+ xi_filled = 1;
+ }
+ continue;
+ }
+ }
+
+ /* not found as a live OR dead property. add a record to the "bad"
+ propstats */
+
+ /* make sure we've started our "bad" propstat */
+ if (hdr_bad.first == NULL) {
+ apr_text_append(propdb->p, &hdr_bad,
+ "<D:propstat>" DEBUG_CR
+ "<D:prop>" DEBUG_CR);
+ }
+
+ /* output this property's name (into the bad propstats) */
+ dav_output_prop_name(propdb->p, &name, xi, &hdr_bad);
+ }
+
+ apr_text_append(propdb->p, &hdr_good,
+ "</D:prop>" DEBUG_CR
+ "<D:status>HTTP/1.1 200 OK</D:status>" DEBUG_CR
+ "</D:propstat>" DEBUG_CR);
+
+ /* default to start with the good */
+ result.propstats = hdr_good.first;
+
+ /* we may not have any "bad" results */
+ if (hdr_bad.first != NULL) {
+ /* "close" the bad propstat */
+ apr_text_append(propdb->p, &hdr_bad,
+ "</D:prop>" DEBUG_CR
+ "<D:status>HTTP/1.1 404 Not Found</D:status>" DEBUG_CR
+ "</D:propstat>" DEBUG_CR);
+
+ /* if there are no good props, then just return the bad */
+ if (!have_good) {
+ result.propstats = hdr_bad.first;
+ }
+ else {
+ /* hook the bad propstat to the end of the good one */
+ hdr_good.last->next = hdr_bad.first;
+ }
+ }
+
+ /* add in all the various namespaces, and return them */
+ dav_xmlns_generate(xi, &hdr_ns);
+ result.xmlns = hdr_ns.first;
+
+ return result;
+}
+
+DAV_DECLARE(void) dav_get_liveprop_supported(dav_propdb *propdb,
+ const char *ns_uri,
+ const char *propname,
+ apr_text_header *body)
+{
+ int propid;
+ const dav_hooks_liveprop *hooks;
+
+ propid = dav_find_liveprop_provider(propdb, ns_uri, propname, &hooks);
+
+ if (propid != DAV_PROPID_CORE_UNKNOWN) {
+ if (hooks == NULL) {
+ /* this is a "core" property that we define */
+ dav_prop_insert unused_inserted;
+ dav_insert_coreprop(propdb, propid, propname,
+ DAV_PROP_INSERT_SUPPORTED, body, &unused_inserted);
+ }
+ else {
+ (*hooks->insert_prop)(propdb->resource, propid,
+ DAV_PROP_INSERT_SUPPORTED, body);
+ }
+ }
+}
+
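+/* PROPPATCH processing runs in stages: dav_prop_validate() checks each set
+ * or remove instruction, dav_prop_exec() applies it while recording rollback
+ * information, and dav_prop_commit() or dav_prop_rollback() then finalizes
+ * or undoes the change. */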
+DAV_DECLARE_NONSTD(void) dav_prop_validate(dav_prop_ctx *ctx)
+{
+ dav_propdb *propdb = ctx->propdb;
+ apr_xml_elem *prop = ctx->prop;
+ dav_elem_private *priv;
+
+ priv = ctx->prop->priv = apr_pcalloc(propdb->p, sizeof(*priv));
+
+ /*
+ ** Check to see if this is a live property, and fill the fields
+ ** in the XML elem, as appropriate.
+ **
+ ** Verify that the property is read/write. If not, then it cannot
+ ** be SET or DELETEd.
+ */
+ if (priv->propid == 0) {
+ dav_find_liveprop(propdb, prop);
+
+ /* it's a liveprop if a provider was found */
+ /* ### actually the "core" props should really be liveprops, but
+ ### there is no "provider" for those and the r/w props are
+ ### treated as dead props anyhow */
+ ctx->is_liveprop = priv->provider != NULL;
+ }
+
+ if (!dav_rw_liveprop(propdb, priv)) {
+ ctx->err = dav_new_error(propdb->p, HTTP_CONFLICT,
+ DAV_ERR_PROP_READONLY,
+ "Property is read-only.");
+ return;
+ }
+
+ if (ctx->is_liveprop) {
+ int defer_to_dead = 0;
+
+ ctx->err = (*priv->provider->patch_validate)(propdb->resource,
+ prop, ctx->operation,
+ &ctx->liveprop_ctx,
+ &defer_to_dead);
+ if (ctx->err != NULL || !defer_to_dead)
+ return;
+
+ /* clear is_liveprop -- act as a dead prop now */
+ ctx->is_liveprop = 0;
+ }
+
+ /*
+ ** The property is supposed to be stored into the dead-property
+ ** database. Make sure the thing is truly open (and writable).
+ */
+ if (propdb->deferred
+ && (ctx->err = dav_really_open_db(propdb, 0 /* ro */)) != NULL) {
+ return;
+ }
+
+ /*
+ ** There should be an open, writable database in here!
+ **
+ ** Note: the database would be NULL if it was opened readonly and it
+ ** did not exist.
+ */
+ if (propdb->db == NULL) {
+ ctx->err = dav_new_error(propdb->p, HTTP_INTERNAL_SERVER_ERROR,
+ DAV_ERR_PROP_NO_DATABASE,
+ "Attempted to set/remove a property "
+ "without a valid, open, read/write "
+ "property database.");
+ return;
+ }
+
+ if (ctx->operation == DAV_PROP_OP_SET) {
+ /*
+ ** Prep the element => propdb namespace index mapping, inserting
+ ** namespace URIs into the propdb that don't exist.
+ */
+ (void) (*propdb->db_hooks->map_namespaces)(propdb->db,
+ propdb->ns_xlate,
+ &propdb->mapping);
+ }
+ else if (ctx->operation == DAV_PROP_OP_DELETE) {
+ /*
+ ** There are no checks to perform here. If a property exists, then
+ ** we will delete it. If it does not exist, then it does not matter
+ ** (see S12.13.1).
+ **
+ ** Note that if a property does not exist, that does not rule out
+ ** that a SET will occur during this PROPPATCH (thus creating it).
+ */
+ }
+}
+
+DAV_DECLARE_NONSTD(void) dav_prop_exec(dav_prop_ctx *ctx)
+{
+ dav_propdb *propdb = ctx->propdb;
+ dav_error *err = NULL;
+ dav_elem_private *priv = ctx->prop->priv;
+
+ ctx->rollback = apr_pcalloc(propdb->p, sizeof(*ctx->rollback));
+
+ if (ctx->is_liveprop) {
+ err = (*priv->provider->patch_exec)(propdb->resource,
+ ctx->prop, ctx->operation,
+ ctx->liveprop_ctx,
+ &ctx->rollback->liveprop);
+ }
+ else {
+ dav_prop_name name;
+
+ if (ctx->prop->ns == APR_XML_NS_NONE)
+ name.ns = "";
+ else
+ name.ns = APR_XML_GET_URI_ITEM(propdb->ns_xlate, ctx->prop->ns);
+ name.name = ctx->prop->name;
+
+ /* save the old value so that we can do a rollback. */
+ if ((err = (*propdb->db_hooks
+ ->get_rollback)(propdb->db, &name,
+ &ctx->rollback->deadprop)) != NULL)
+ goto error;
+
+ if (ctx->operation == DAV_PROP_OP_SET) {
+
+ /* Note: propdb->mapping was set in dav_prop_validate() */
+ err = (*propdb->db_hooks->store)(propdb->db, &name, ctx->prop,
+ propdb->mapping);
+
+ /*
+ ** If an error occurred, then assume that we didn't change the
+ ** value. Remove the rollback item so that we don't try to set
+ ** its value during the rollback.
+ */
+ /* ### euh... where is the removal? */
+ }
+ else if (ctx->operation == DAV_PROP_OP_DELETE) {
+
+ /*
+ ** Delete the property. Ignore errors -- the property is there, or
+ ** we are deleting it for a second time.
+ */
+ /* ### but what about other errors? */
+ (void) (*propdb->db_hooks->remove)(propdb->db, &name);
+ }
+ }
+
+ error:
+ /* push a more specific error here */
+ if (err != NULL) {
+ /*
+ ** Use HTTP_INTERNAL_SERVER_ERROR because we shouldn't have seen
+ ** any errors at this point.
+ */
+ ctx->err = dav_push_error(propdb->p, HTTP_INTERNAL_SERVER_ERROR,
+ DAV_ERR_PROP_EXEC,
+ "Could not execute PROPPATCH.", err);
+ }
+}
+
+DAV_DECLARE_NONSTD(void) dav_prop_commit(dav_prop_ctx *ctx)
+{
+ dav_elem_private *priv = ctx->prop->priv;
+
+ /*
+ ** Note that a commit implies ctx->err is NULL. The caller should assume
+ ** a status of HTTP_OK for this case.
+ */
+
+ if (ctx->is_liveprop) {
+ (*priv->provider->patch_commit)(ctx->propdb->resource,
+ ctx->operation,
+ ctx->liveprop_ctx,
+ ctx->rollback->liveprop);
+ }
+}
+
+DAV_DECLARE_NONSTD(void) dav_prop_rollback(dav_prop_ctx *ctx)
+{
+ dav_error *err = NULL;
+ dav_elem_private *priv = ctx->prop->priv;
+
+ /* do nothing if there is no rollback information. */
+ if (ctx->rollback == NULL)
+ return;
+
+ /*
+ ** ### if we have an error, and a rollback occurs, then the namespace
+ ** ### mods should not happen at all. Basically, the namespace management
+ ** ### is simply a pain.
+ */
+
+ if (ctx->is_liveprop) {
+ err = (*priv->provider->patch_rollback)(ctx->propdb->resource,
+ ctx->operation,
+ ctx->liveprop_ctx,
+ ctx->rollback->liveprop);
+ }
+ else {
+ err = (*ctx->propdb->db_hooks
+ ->apply_rollback)(ctx->propdb->db, ctx->rollback->deadprop);
+ }
+
+ if (err != NULL) {
+ if (ctx->err == NULL)
+ ctx->err = err;
+ else {
+ dav_error *scan = err;
+
+ /* hook previous errors at the end of the rollback error */
+ while (scan->prev != NULL)
+ scan = scan->prev;
+ scan->prev = ctx->err;
+ ctx->err = err;
+ }
+ }
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/providers.c b/rubbos/app/httpd-2.0.64/modules/dav/main/providers.c
new file mode 100644
index 00000000..a2ccd1ca
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/providers.c
@@ -0,0 +1,33 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_pools.h"
+#include "apr_hash.h"
+#include "ap_provider.h"
+#include "mod_dav.h"
+
+#define DAV_PROVIDER_GROUP "dav"
+
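+/* Repository back-ends register themselves here by name (mod_dav_fs, for
+ * instance, registers a "filesystem" provider); mod_dav later fetches the
+ * provider with dav_lookup_provider() for locations that enable DAV. */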
+DAV_DECLARE(void) dav_register_provider(apr_pool_t *p, const char *name,
+ const dav_provider *provider)
+{
+ ap_register_provider(p, DAV_PROVIDER_GROUP, name, "0", provider);
+}
+
+DAV_DECLARE(const dav_provider *) dav_lookup_provider(const char *name)
+{
+ return ap_lookup_provider(DAV_PROVIDER_GROUP, name, "0");
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/std_liveprop.c b/rubbos/app/httpd-2.0.64/modules/dav/main/std_liveprop.c
new file mode 100644
index 00000000..e97d0fda
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/std_liveprop.c
@@ -0,0 +1,194 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "httpd.h"
+#include "util_xml.h"
+#include "apr_strings.h"
+
+#include "mod_dav.h"
+
+/* forward-declare */
+static const dav_hooks_liveprop dav_core_hooks_liveprop;
+
+/*
+** The namespace URIs that we use. There will only ever be "DAV:".
+*/
+static const char * const dav_core_namespace_uris[] =
+{
+ "DAV:",
+ NULL /* sentinel */
+};
+
+/*
+** Define each of the core properties that this provider will handle.
+** Note that all of them are in the DAV: namespace, which has a
+** provider-local index of 0.
+*/
+static const dav_liveprop_spec dav_core_props[] =
+{
+ { 0, "comment", DAV_PROPID_comment, 1 },
+ { 0, "creator-displayname", DAV_PROPID_creator_displayname, 1 },
+ { 0, "displayname", DAV_PROPID_displayname, 1 },
+ { 0, "resourcetype", DAV_PROPID_resourcetype, 0 },
+ { 0, "source", DAV_PROPID_source, 1 },
+
+ { 0 } /* sentinel */
+};
+
+static const dav_liveprop_group dav_core_liveprop_group =
+{
+ dav_core_props,
+ dav_core_namespace_uris,
+ &dav_core_hooks_liveprop
+};
+
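+/* Insert the value (or name) of a core live property. For DAV:resourcetype
+ * on a collection, for example, this emits roughly
+ *     <lpN:resourcetype><D:collection/></lpN:resourcetype>
+ * where N is the global namespace index assigned to "DAV:". */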
+static dav_prop_insert dav_core_insert_prop(const dav_resource *resource,
+ int propid, dav_prop_insert what,
+ apr_text_header *phdr)
+{
+ const char *value;
+ const char *s;
+ apr_pool_t *p = resource->pool;
+ const dav_liveprop_spec *info;
+ int global_ns;
+
+ switch (propid)
+ {
+ case DAV_PROPID_resourcetype:
+ switch (resource->type) {
+ case DAV_RESOURCE_TYPE_VERSION:
+ if (resource->baselined) {
+ value = "<D:baseline/>";
+ break;
+ }
+ /* fall through */
+ case DAV_RESOURCE_TYPE_REGULAR:
+ case DAV_RESOURCE_TYPE_WORKING:
+ if (resource->collection) {
+ value = "<D:collection/>";
+ }
+ else {
+ /* ### should we denote lock-null resources? */
+
+ value = ""; /* becomes: <D:resourcetype/> */
+ }
+ break;
+ case DAV_RESOURCE_TYPE_HISTORY:
+ value = "<D:version-history/>";
+ break;
+ case DAV_RESOURCE_TYPE_WORKSPACE:
+ value = "<D:collection/>";
+ break;
+ case DAV_RESOURCE_TYPE_ACTIVITY:
+ value = "<D:activity/>";
+ break;
+
+ default:
+ /* ### bad juju */
+ return DAV_PROP_INSERT_NOTDEF;
+ }
+ break;
+
+ case DAV_PROPID_comment:
+ case DAV_PROPID_creator_displayname:
+ case DAV_PROPID_displayname:
+ case DAV_PROPID_source:
+ default:
+ /*
+ ** This property is known, but not defined as a liveprop. However,
+ ** it may be a dead property.
+ */
+ return DAV_PROP_INSERT_NOTDEF;
+ }
+
+ /* assert: value != NULL */
+
+ /* get the information and global NS index for the property */
+ global_ns = dav_get_liveprop_info(propid, &dav_core_liveprop_group, &info);
+
+ /* assert: info != NULL && info->name != NULL */
+
+ if (what == DAV_PROP_INSERT_SUPPORTED) {
+ s = apr_psprintf(p,
+ "<D:supported-live-property D:name=\"%s\" "
+ "D:namespace=\"%s\"/>" DEBUG_CR,
+ info->name, dav_core_namespace_uris[info->ns]);
+ }
+ else if (what == DAV_PROP_INSERT_VALUE && *value != '\0') {
+ s = apr_psprintf(p, "<lp%d:%s>%s</lp%d:%s>" DEBUG_CR,
+ global_ns, info->name, value, global_ns, info->name);
+ }
+ else {
+ s = apr_psprintf(p, "<lp%d:%s/>" DEBUG_CR, global_ns, info->name);
+ }
+ apr_text_append(p, phdr, s);
+
+ /* we inserted what was asked for */
+ return what;
+}
+
+static int dav_core_is_writable(const dav_resource *resource, int propid)
+{
+ const dav_liveprop_spec *info;
+
+ (void) dav_get_liveprop_info(propid, &dav_core_liveprop_group, &info);
+ return info->is_writable;
+}
+
+static dav_error * dav_core_patch_validate(const dav_resource *resource,
+ const apr_xml_elem *elem,
+ int operation, void **context,
+ int *defer_to_dead)
+{
+ /* all of our writable props go in the dead prop database */
+ *defer_to_dead = 1;
+
+ return NULL;
+}
+
+static const dav_hooks_liveprop dav_core_hooks_liveprop = {
+ dav_core_insert_prop,
+ dav_core_is_writable,
+ dav_core_namespace_uris,
+ dav_core_patch_validate,
+ NULL, /* patch_exec */
+ NULL, /* patch_commit */
+ NULL, /* patch_rollback */
+};
+
+DAV_DECLARE_NONSTD(int) dav_core_find_liveprop(
+ const dav_resource *resource,
+ const char *ns_uri, const char *name,
+ const dav_hooks_liveprop **hooks)
+{
+ return dav_do_find_liveprop(ns_uri, name, &dav_core_liveprop_group, hooks);
+}
+
+DAV_DECLARE_NONSTD(void) dav_core_insert_all_liveprops(
+ request_rec *r,
+ const dav_resource *resource,
+ dav_prop_insert what,
+ apr_text_header *phdr)
+{
+ (void) dav_core_insert_prop(resource, DAV_PROPID_resourcetype,
+ what, phdr);
+}
+
+DAV_DECLARE_NONSTD(void) dav_core_register_uris(apr_pool_t *p)
+{
+ /* register the namespace URIs */
+ dav_register_liveprop_group(p, &dav_core_liveprop_group);
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/util.c b/rubbos/app/httpd-2.0.64/modules/dav/main/util.c
new file mode 100644
index 00000000..3ff3a19f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/util.c
@@ -0,0 +1,2021 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+** DAV extension module for Apache 2.0.*
+** - various utilities, repository-independent
+*/
+
+#include "apr_strings.h"
+#include "apr_lib.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "mod_dav.h"
+
+#include "http_request.h"
+#include "http_config.h"
+#include "http_vhost.h"
+#include "http_log.h"
+#include "http_protocol.h"
+
+DAV_DECLARE(dav_error*) dav_new_error(apr_pool_t *p, int status,
+ int error_id, const char *desc)
+{
+ int save_errno = errno;
+ dav_error *err = apr_pcalloc(p, sizeof(*err));
+
+ /* DBG3("dav_new_error: %d %d %s", status, error_id, desc ? desc : "(no desc)"); */
+
+ err->status = status;
+ err->error_id = error_id;
+ err->desc = desc;
+ err->save_errno = save_errno;
+
+ return err;
+}
+
+DAV_DECLARE(dav_error*) dav_new_error_tag(apr_pool_t *p, int status,
+ int error_id, const char *desc,
+ const char *namespace,
+ const char *tagname)
+{
+ dav_error *err = dav_new_error(p, status, error_id, desc);
+
+ err->tagname = tagname;
+ err->namespace = namespace;
+
+ return err;
+}
+
+
+DAV_DECLARE(dav_error*) dav_push_error(apr_pool_t *p, int status,
+ int error_id, const char *desc,
+ dav_error *prev)
+{
+ dav_error *err = apr_pcalloc(p, sizeof(*err));
+
+ err->status = status;
+ err->error_id = error_id;
+ err->desc = desc;
+ err->prev = prev;
+
+ return err;
+}
+
+DAV_DECLARE(void) dav_check_bufsize(apr_pool_t * p, dav_buffer *pbuf,
+ apr_size_t extra_needed)
+{
+ /* grow the buffer if necessary */
+ if (pbuf->cur_len + extra_needed > pbuf->alloc_len) {
+ char *newbuf;
+
+ pbuf->alloc_len += extra_needed + DAV_BUFFER_PAD;
+ newbuf = apr_palloc(p, pbuf->alloc_len);
+ memcpy(newbuf, pbuf->buf, pbuf->cur_len);
+ pbuf->buf = newbuf;
+ }
+}
+
+DAV_DECLARE(void) dav_set_bufsize(apr_pool_t * p, dav_buffer *pbuf,
+ apr_size_t size)
+{
+ /* NOTE: this does not retain prior contents */
+
+ /* NOTE: this function is used to init the first pointer, too, since
+ the PAD will be larger than alloc_len (0) for zeroed structures */
+
+ /* grow if we don't have enough for the requested size plus padding */
+ if (size + DAV_BUFFER_PAD > pbuf->alloc_len) {
+ /* set the new length; min of MINSIZE */
+ pbuf->alloc_len = size + DAV_BUFFER_PAD;
+ if (pbuf->alloc_len < DAV_BUFFER_MINSIZE)
+ pbuf->alloc_len = DAV_BUFFER_MINSIZE;
+
+ pbuf->buf = apr_palloc(p, pbuf->alloc_len);
+ }
+ pbuf->cur_len = size;
+}
+
+
+/* initialize a buffer and copy the specified (null-term'd) string into it */
+DAV_DECLARE(void) dav_buffer_init(apr_pool_t *p, dav_buffer *pbuf,
+ const char *str)
+{
+ dav_set_bufsize(p, pbuf, strlen(str));
+ memcpy(pbuf->buf, str, pbuf->cur_len + 1);
+}
+
+/* append a string to the end of the buffer, adjust length */
+DAV_DECLARE(void) dav_buffer_append(apr_pool_t *p, dav_buffer *pbuf,
+ const char *str)
+{
+ apr_size_t len = strlen(str);
+
+ dav_check_bufsize(p, pbuf, len + 1);
+ memcpy(pbuf->buf + pbuf->cur_len, str, len + 1);
+ pbuf->cur_len += len;
+}
+
+/* place a string on the end of the buffer, do NOT adjust length */
+DAV_DECLARE(void) dav_buffer_place(apr_pool_t *p, dav_buffer *pbuf,
+ const char *str)
+{
+ apr_size_t len = strlen(str);
+
+ dav_check_bufsize(p, pbuf, len + 1);
+ memcpy(pbuf->buf + pbuf->cur_len, str, len + 1);
+}
+
+/* place some memory on the end of a buffer; do NOT adjust length */
+DAV_DECLARE(void) dav_buffer_place_mem(apr_pool_t *p, dav_buffer *pbuf,
+ const void *mem, apr_size_t amt,
+ apr_size_t pad)
+{
+ dav_check_bufsize(p, pbuf, amt + pad);
+ memcpy(pbuf->buf + pbuf->cur_len, mem, amt);
+}
+
+/*
+** dav_lookup_uri()
+**
+** Extension for ap_sub_req_lookup_uri() which can't handle absolute
+** URIs properly.
+**
+** If NULL is returned, then an error occurred with parsing the URI or
+** the URI does not match the current server.
+*/
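+/*
+** Example (illustrative only): a COPY or MOVE request carrying
+**     Destination: http://this.server/dest/path
+** is parsed, checked against the current scheme/port/vhost, and then
+** re-dispatched as a sub-request on "/dest/path" using the same method.
+*/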
+DAV_DECLARE(dav_lookup_result) dav_lookup_uri(const char *uri,
+ request_rec * r,
+ int must_be_absolute)
+{
+ dav_lookup_result result = { 0 };
+ const char *scheme;
+ apr_port_t port;
+ apr_uri_t comp;
+ char *new_file;
+ const char *domain;
+
+ /* first thing to do is parse the URI into various components */
+ if (apr_uri_parse(r->pool, uri, &comp) != APR_SUCCESS) {
+ result.err.status = HTTP_BAD_REQUEST;
+ result.err.desc = "Invalid syntax in Destination URI.";
+ return result;
+ }
+
+ /* the URI must be an absoluteURI (WEBDAV S9.3) */
+ if (comp.scheme == NULL && must_be_absolute) {
+ result.err.status = HTTP_BAD_REQUEST;
+ result.err.desc = "Destination URI must be an absolute URI.";
+ return result;
+ }
+
+ /* the URI must not have a query (args) or a fragment */
+ if (comp.query != NULL || comp.fragment != NULL) {
+ result.err.status = HTTP_BAD_REQUEST;
+ result.err.desc =
+ "Destination URI contains invalid components "
+ "(a query or a fragment).";
+ return result;
+ }
+
+ /* If the scheme or port was provided, then make sure that it matches
+ the scheme/port of this request. If the request must be absolute,
+ then require the (explicit/implicit) scheme/port be matching.
+
+ ### hmm. if a port wasn't provided (does the parse return port==0?),
+ ### but we're on a non-standard port, then we won't detect that the
+ ### URI's port implies the wrong one.
+ */
+ if (comp.scheme != NULL || comp.port != 0 || must_be_absolute)
+ {
+ /* ### not sure this works if the current request came in via https: */
+ scheme = r->parsed_uri.scheme;
+ if (scheme == NULL)
+ scheme = ap_http_method(r);
+
+ /* insert a port if the URI did not contain one */
+ if (comp.port == 0)
+ comp.port = apr_uri_port_of_scheme(comp.scheme);
+
+ /* now, verify that the URI uses the same scheme as the current
+ request. The port must match our port.
+ */
+ apr_sockaddr_port_get(&port, r->connection->local_addr);
+ if (strcasecmp(comp.scheme, scheme) != 0
+#ifdef APACHE_PORT_HANDLING_IS_BUSTED
+ || comp.port != port
+#endif
+ ) {
+ result.err.status = HTTP_BAD_GATEWAY;
+ result.err.desc = apr_psprintf(r->pool,
+ "Destination URI refers to "
+ "different scheme or port "
+ "(%s://hostname:%d)" APR_EOL_STR
+ "(want: %s://hostname:%d)",
+ comp.scheme ? comp.scheme : scheme,
+ comp.port ? comp.port : port,
+ scheme, port);
+ return result;
+ }
+ }
+
+ /* we have verified the scheme, port, and general structure */
+
+ /*
+ ** Hrm. IE5 will pass unqualified hostnames for both the
+ ** Host: and Destination: headers. This breaks the
+ ** http_vhost.c::matches_aliases function.
+ **
+ ** For now, qualify unqualified comp.hostnames with
+ ** r->server->server_hostname.
+ **
+ ** ### this is a big hack. Apache should provide a better way.
+ ** ### maybe the admin should list the unqualified hosts in a
+ ** ### <ServerAlias> block?
+ */
+ if (comp.hostname != NULL
+ && strrchr(comp.hostname, '.') == NULL
+ && (domain = strchr(r->server->server_hostname, '.')) != NULL) {
+ comp.hostname = apr_pstrcat(r->pool, comp.hostname, domain, NULL);
+ }
+
+ /* now, if a hostname was provided, then verify that it represents the
+ same server as the current connection. note that we just use our
+ port, since we've verified the URI matches ours */
+#ifdef APACHE_PORT_HANDLING_IS_BUSTED
+ if (comp.hostname != NULL &&
+ !ap_matches_request_vhost(r, comp.hostname, port)) {
+ result.err.status = HTTP_BAD_GATEWAY;
+ result.err.desc = "Destination URI refers to a different server.";
+ return result;
+ }
+#endif
+
+ /* we have verified that the requested URI denotes the same server as
+ the current request. Therefore, we can use ap_sub_req_lookup_uri() */
+
+ /* reconstruct a URI as just the path */
+ new_file = apr_uri_unparse(r->pool, &comp, APR_URI_UNP_OMITSITEPART);
+
+ /*
+ * Lookup the URI and return the sub-request. Note that we use the
+ * same HTTP method on the destination. This allows the destination
+ * to apply appropriate restrictions (e.g. readonly).
+ */
+ result.rnew = ap_sub_req_method_uri(r->method, new_file, r, NULL);
+
+ return result;
+}
+
+/* ---------------------------------------------------------------
+**
+** XML UTILITY FUNCTIONS
+*/
+
+/* validate that the root element uses a given DAV: tagname (TRUE==valid) */
+DAV_DECLARE(int) dav_validate_root(const apr_xml_doc *doc,
+ const char *tagname)
+{
+ return doc->root &&
+ doc->root->ns == APR_XML_NS_DAV_ID &&
+ strcmp(doc->root->name, tagname) == 0;
+}
+
+/* find and return the (unique) child with a given DAV: tagname */
+DAV_DECLARE(apr_xml_elem *) dav_find_child(const apr_xml_elem *elem,
+ const char *tagname)
+{
+ apr_xml_elem *child = elem->first_child;
+
+ for (; child; child = child->next)
+ if (child->ns == APR_XML_NS_DAV_ID && !strcmp(child->name, tagname))
+ return child;
+ return NULL;
+}
+
+/* gather up all the CDATA into a single string */
+DAV_DECLARE(const char *) dav_xml_get_cdata(const apr_xml_elem *elem, apr_pool_t *pool,
+ int strip_white)
+{
+ apr_size_t len = 0;
+ apr_text *scan;
+ const apr_xml_elem *child;
+ char *cdata;
+ char *s;
+ apr_size_t tlen;
+ const char *found_text = NULL; /* initialize to avoid gcc warning */
+ int found_count = 0;
+
+ for (scan = elem->first_cdata.first; scan != NULL; scan = scan->next) {
+ found_text = scan->text;
+ ++found_count;
+ len += strlen(found_text);
+ }
+
+ for (child = elem->first_child; child != NULL; child = child->next) {
+ for (scan = child->following_cdata.first;
+ scan != NULL;
+ scan = scan->next) {
+ found_text = scan->text;
+ ++found_count;
+ len += strlen(found_text);
+ }
+ }
+
+ /* some fast-path cases:
+ * 1) zero-length cdata
+ * 2) a single piece of cdata with no whitespace to strip
+ */
+ if (len == 0)
+ return "";
+ if (found_count == 1) {
+ if (!strip_white
+ || (!apr_isspace(*found_text)
+ && !apr_isspace(found_text[len - 1])))
+ return found_text;
+ }
+
+ cdata = s = apr_palloc(pool, len + 1);
+
+ for (scan = elem->first_cdata.first; scan != NULL; scan = scan->next) {
+ tlen = strlen(scan->text);
+ memcpy(s, scan->text, tlen);
+ s += tlen;
+ }
+
+ for (child = elem->first_child; child != NULL; child = child->next) {
+ for (scan = child->following_cdata.first;
+ scan != NULL;
+ scan = scan->next) {
+ tlen = strlen(scan->text);
+ memcpy(s, scan->text, tlen);
+ s += tlen;
+ }
+ }
+
+ *s = '\0';
+
+ if (strip_white) {
+ /* trim leading whitespace */
+ while (apr_isspace(*cdata)) /* assume: return false for '\0' */
+ ++cdata;
+
+ /* trim trailing whitespace */
+ while (len-- > 0 && apr_isspace(cdata[len]))
+ continue;
+ cdata[len + 1] = '\0';
+ }
+
+ return cdata;
+}
+
+DAV_DECLARE(dav_xmlns_info *) dav_xmlns_create(apr_pool_t *pool)
+{
+ dav_xmlns_info *xi = apr_pcalloc(pool, sizeof(*xi));
+
+ xi->pool = pool;
+ xi->uri_prefix = apr_hash_make(pool);
+ xi->prefix_uri = apr_hash_make(pool);
+
+ return xi;
+}
+
+DAV_DECLARE(void) dav_xmlns_add(dav_xmlns_info *xi,
+ const char *prefix, const char *uri)
+{
+ /* this "should" not overwrite a prefix mapping */
+ apr_hash_set(xi->prefix_uri, prefix, APR_HASH_KEY_STRING, uri);
+
+ /* note: this may overwrite an existing URI->prefix mapping, but it
+ doesn't matter -- any prefix is usable to specify the URI. */
+ apr_hash_set(xi->uri_prefix, uri, APR_HASH_KEY_STRING, prefix);
+}
+
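+/* Return the prefix mapped to this URI, adding a generated prefix
+ * ("g0", "g1", ...) if the URI has not been seen before. */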
+DAV_DECLARE(const char *) dav_xmlns_add_uri(dav_xmlns_info *xi,
+ const char *uri)
+{
+ const char *prefix;
+
+ if ((prefix = apr_hash_get(xi->uri_prefix, uri,
+ APR_HASH_KEY_STRING)) != NULL)
+ return prefix;
+
+ prefix = apr_psprintf(xi->pool, "g%d", xi->count++);
+ dav_xmlns_add(xi, prefix, uri);
+ return prefix;
+}
+
+DAV_DECLARE(const char *) dav_xmlns_get_uri(dav_xmlns_info *xi,
+ const char *prefix)
+{
+ return apr_hash_get(xi->prefix_uri, prefix, APR_HASH_KEY_STRING);
+}
+
+DAV_DECLARE(const char *) dav_xmlns_get_prefix(dav_xmlns_info *xi,
+ const char *uri)
+{
+ return apr_hash_get(xi->uri_prefix, uri, APR_HASH_KEY_STRING);
+}
+
+DAV_DECLARE(void) dav_xmlns_generate(dav_xmlns_info *xi,
+ apr_text_header *phdr)
+{
+ apr_hash_index_t *hi = apr_hash_first(xi->pool, xi->prefix_uri);
+
+ for (; hi != NULL; hi = apr_hash_next(hi)) {
+ const void *prefix;
+ void *uri;
+ const char *s;
+
+ apr_hash_this(hi, &prefix, NULL, &uri);
+
+ s = apr_psprintf(xi->pool, " xmlns:%s=\"%s\"",
+ (const char *)prefix, (const char *)uri);
+ apr_text_append(xi->pool, phdr, s);
+ }
+}
+
+/* ---------------------------------------------------------------
+**
+** Timeout header processing
+**
+*/
+
+/* dav_get_timeout: If the Timeout: header exists, return a time_t
+ * when this lock is expected to expire. Otherwise, return
+ * a time_t of DAV_TIMEOUT_INFINITE.
+ *
+ * It's unclear if DAV clients are required to understand
+ * Second-xxx and Infinite time values. We assume that they do.
+ * In addition, for now, that's all we understand, too.
+ */
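+/* Example (illustrative): "Timeout: Second-3600" yields now + 3600 seconds,
+ * while "Timeout: Infinite" -- or anything unrecognized -- yields
+ * DAV_TIMEOUT_INFINITE. */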
+DAV_DECLARE(time_t) dav_get_timeout(request_rec *r)
+{
+ time_t now, expires = DAV_TIMEOUT_INFINITE;
+
+ const char *timeout_const = apr_table_get(r->headers_in, "Timeout");
+ const char *timeout = apr_pstrdup(r->pool, timeout_const), *val;
+
+ if (timeout == NULL)
+ return DAV_TIMEOUT_INFINITE;
+
+ /* Use the first thing we understand, or infinity if
+ * we don't understand anything.
+ */
+
+ while ((val = ap_getword_white(r->pool, &timeout)) && strlen(val)) {
+ if (!strncmp(val, "Infinite", 8)) {
+ return DAV_TIMEOUT_INFINITE;
+ }
+
+ if (!strncmp(val, "Second-", 7)) {
+ val += 7;
+ /* ### We need to handle overflow better:
+ * ### timeout will be <= 2^32 - 1
+ */
+ expires = atol(val);
+ now = time(NULL);
+ return now + expires;
+ }
+ }
+
+ return DAV_TIMEOUT_INFINITE;
+}
+
+/* ---------------------------------------------------------------
+**
+** If Header processing
+**
+*/
+
+/* add_if_resource returns a new if_header, linking it to next_ih.
+ */
+static dav_if_header *dav_add_if_resource(apr_pool_t *p, dav_if_header *next_ih,
+ const char *uri, apr_size_t uri_len)
+{
+ dav_if_header *ih;
+
+ if ((ih = apr_pcalloc(p, sizeof(*ih))) == NULL)
+ return NULL;
+
+ ih->uri = uri;
+ ih->uri_len = uri_len;
+ ih->next = next_ih;
+
+ return ih;
+}
+
+/* add_if_state adds a condition to an if_header.
+ */
+static dav_error * dav_add_if_state(apr_pool_t *p, dav_if_header *ih,
+ const char *state_token,
+ dav_if_state_type t, int condition,
+ const dav_hooks_locks *locks_hooks)
+{
+ dav_if_state_list *new_sl;
+
+ new_sl = apr_pcalloc(p, sizeof(*new_sl));
+
+ new_sl->condition = condition;
+ new_sl->type = t;
+
+ if (t == dav_if_opaquelock) {
+ dav_error *err;
+
+ if ((err = (*locks_hooks->parse_locktoken)(p, state_token,
+ &new_sl->locktoken)) != NULL) {
+ /* In cases where the state token is invalid, we'll just skip
+ * it rather than return 400.
+ */
+ if (err->error_id == DAV_ERR_LOCK_UNK_STATE_TOKEN) {
+ return NULL;
+ }
+ else {
+ /* ### maybe add a higher-level description */
+ return err;
+ }
+ }
+ }
+ else
+ new_sl->etag = state_token;
+
+ new_sl->next = ih->state;
+ ih->state = new_sl;
+
+ return NULL;
+}
+
+/* fetch_next_token returns the substring from str+1 up to (but not
+ * including) the next occurrence of the character term, or NULL if term
+ * does not occur. Leading whitespace is ignored.
+ */
+static char *dav_fetch_next_token(char **str, char term)
+{
+ char *sp;
+ char *token;
+
+ token = *str + 1;
+
+ while (*token && (*token == ' ' || *token == '\t'))
+ token++;
+
+ if ((sp = strchr(token, term)) == NULL)
+ return NULL;
+
+ *sp = '\0';
+ *str = sp;
+ return token;
+}
+
+/* dav_process_if_header:
+ *
+ * If NULL (no error) is returned, then **if_header points to the
+ * "If" productions structure (or NULL if "If" is not present).
+ *
+ * ### this part is bogus:
+ * If an error is encountered, the error is logged. Parent should
+ * return err->status.
+ */
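+/* Illustrative example (not taken from a real request): a tagged-list header
+ * such as
+ *     If: <http://host/docs/file.txt> (<opaquelocktoken:xyz> ["some-etag"])
+ * yields one dav_if_header whose uri is "/docs/file.txt" and whose state
+ * list holds a locktoken condition and an etag condition, both with the
+ * DAV_IF_COND_NORMAL sense. */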
+static dav_error * dav_process_if_header(request_rec *r, dav_if_header **p_ih)
+{
+ dav_error *err;
+ char *str;
+ char *list;
+ const char *state_token;
+ const char *uri = NULL; /* scope of current production; NULL=no-tag */
+ apr_size_t uri_len = 0;
+ dav_if_header *ih = NULL;
+ apr_uri_t parsed_uri;
+ const dav_hooks_locks *locks_hooks = DAV_GET_HOOKS_LOCKS(r);
+ enum {no_tagged, tagged, unknown} list_type = unknown;
+ int condition;
+
+ *p_ih = NULL;
+
+ if ((str = apr_pstrdup(r->pool, apr_table_get(r->headers_in, "If"))) == NULL)
+ return NULL;
+
+ while (*str) {
+ switch(*str) {
+ case '<':
+ /* Tagged-list production - following states apply to this uri */
+ if (list_type == no_tagged
+ || ((uri = dav_fetch_next_token(&str, '>')) == NULL)) {
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST,
+ DAV_ERR_IF_TAGGED,
+ "Invalid If-header: unclosed \"<\" or "
+ "unexpected tagged-list production.");
+ }
+
+ /* 2518 specifies this must be an absolute URI; just take the
+ * relative part for later comparison against r->uri */
+ if (apr_uri_parse(r->pool, uri, &parsed_uri) != APR_SUCCESS
+ || !parsed_uri.path) {
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST,
+ DAV_ERR_IF_TAGGED,
+ "Invalid URI in tagged If-header.");
+ }
+ /* note that parsed_uri.path is allocated; we can trash it */
+
+ /* clean up the URI a bit */
+ ap_getparents(parsed_uri.path);
+ uri_len = strlen(parsed_uri.path);
+ if (uri_len > 1 && parsed_uri.path[uri_len - 1] == '/')
+ parsed_uri.path[--uri_len] = '\0';
+
+ uri = parsed_uri.path;
+ list_type = tagged;
+ break;
+
+ case '(':
+ /* List production */
+
+ /* If a uri has not been encountered, this is a No-Tagged-List */
+ if (list_type == unknown)
+ list_type = no_tagged;
+
+ if ((list = dav_fetch_next_token(&str, ')')) == NULL) {
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST,
+ DAV_ERR_IF_UNCLOSED_PAREN,
+ "Invalid If-header: unclosed \"(\".");
+ }
+
+ if ((ih = dav_add_if_resource(r->pool, ih, uri, uri_len)) == NULL) {
+ /* ### dav_add_if_resource() should return an error for us! */
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST,
+ DAV_ERR_IF_PARSE,
+ "Internal server error parsing \"If:\" "
+ "header.");
+ }
+
+ condition = DAV_IF_COND_NORMAL;
+
+ while (*list) {
+ /* List is the entire production (in a uri scope) */
+
+ switch (*list) {
+ case '<':
+ if ((state_token = dav_fetch_next_token(&list, '>')) == NULL) {
+ /* ### add a description to this error */
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST,
+ DAV_ERR_IF_PARSE, NULL);
+ }
+
+ if ((err = dav_add_if_state(r->pool, ih, state_token, dav_if_opaquelock,
+ condition, locks_hooks)) != NULL) {
+ /* ### maybe add a higher level description */
+ return err;
+ }
+ condition = DAV_IF_COND_NORMAL;
+ break;
+
+ case '[':
+ if ((state_token = dav_fetch_next_token(&list, ']')) == NULL) {
+ /* ### add a description to this error */
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST,
+ DAV_ERR_IF_PARSE, NULL);
+ }
+
+ if ((err = dav_add_if_state(r->pool, ih, state_token, dav_if_etag,
+ condition, locks_hooks)) != NULL) {
+ /* ### maybe add a higher level description */
+ return err;
+ }
+ condition = DAV_IF_COND_NORMAL;
+ break;
+
+ case 'N':
+ if (list[1] == 'o' && list[2] == 't') {
+ if (condition != DAV_IF_COND_NORMAL) {
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST,
+ DAV_ERR_IF_MULTIPLE_NOT,
+ "Invalid \"If:\" header: "
+ "Multiple \"not\" entries "
+ "for the same state.");
+ }
+ condition = DAV_IF_COND_NOT;
+ }
+ list += 2;
+ break;
+
+ case ' ':
+ case '\t':
+ break;
+
+ default:
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST,
+ DAV_ERR_IF_UNK_CHAR,
+ apr_psprintf(r->pool,
+ "Invalid \"If:\" "
+ "header: Unexpected "
+ "character encountered "
+ "(0x%02x, '%c').",
+ *list, *list));
+ }
+
+ list++;
+ }
+ break;
+
+ case ' ':
+ case '\t':
+ break;
+
+ default:
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST,
+ DAV_ERR_IF_UNK_CHAR,
+ apr_psprintf(r->pool,
+ "Invalid \"If:\" header: "
+ "Unexpected character "
+ "encountered (0x%02x, '%c').",
+ *str, *str));
+ }
+
+ str++;
+ }
+
+ *p_ih = ih;
+ return NULL;
+}
+
+static int dav_find_submitted_locktoken(const dav_if_header *if_header,
+ const dav_lock *lock_list,
+ const dav_hooks_locks *locks_hooks)
+{
+ for (; if_header != NULL; if_header = if_header->next) {
+ const dav_if_state_list *state_list;
+
+ for (state_list = if_header->state;
+ state_list != NULL;
+ state_list = state_list->next) {
+
+ if (state_list->type == dav_if_opaquelock) {
+ const dav_lock *lock;
+
+ /* given state_list->locktoken, match it */
+
+ /*
+ ** The resource will have one or more lock tokens. We only
+ ** need to match one of them against any token in the
+ ** If: header.
+ **
+ ** One token case: It is an exclusive or shared lock. Either
+ ** way, we must find it.
+ **
+ ** N token case: They are shared locks. By policy, we need
+ ** to match only one. The resource's other
+ ** tokens may belong to somebody else (so we
+ ** shouldn't see them in the If: header anyway)
+ */
+ for (lock = lock_list; lock != NULL; lock = lock->next) {
+
+ if (!(*locks_hooks->compare_locktoken)(state_list->locktoken, lock->locktoken)) {
+ return 1;
+ }
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* dav_validate_resource_state:
+ * Returns NULL if path/uri meets if-header and lock requirements
+ */
+static dav_error * dav_validate_resource_state(apr_pool_t *p,
+ const dav_resource *resource,
+ dav_lockdb *lockdb,
+ const dav_if_header *if_header,
+ int flags,
+ dav_buffer *pbuf,
+ request_rec *r)
+{
+ dav_error *err;
+ const char *uri;
+ const char *etag;
+ const dav_hooks_locks *locks_hooks = (lockdb ? lockdb->hooks : NULL);
+ const dav_if_header *ifhdr_scan;
+ dav_if_state_list *state_list;
+ dav_lock *lock_list;
+ dav_lock *lock;
+ int num_matched;
+ int num_that_apply;
+ int seen_locktoken;
+ apr_size_t uri_len;
+ const char *reason = NULL;
+
+ /* DBG1("validate: <%s>", resource->uri); */
+
+ /*
+ ** The resource will have one of three states:
+ **
+ ** 1) No locks. We have no special requirements that the user supply
+ ** specific locktokens. One of the state lists must match, and
+ ** we're done.
+ **
+ ** 2) One exclusive lock. The locktoken must appear *anywhere* in the
+ ** If: header. Of course, asserting the token in a "Not" term will
+ ** quickly fail that state list :-). If the locktoken appears in
+ ** one of the state lists *and* one state list matches, then we're
+ ** done.
+ **
+ ** 3) One or more shared locks. One of the locktokens must appear
+ ** *anywhere* in the If: header. If one of the locktokens appears,
+ ** and we match one state list, then we are done.
+ **
+ ** The <seen_locktoken> variable determines whether we have seen one
+ ** of this resource's locktokens in the If: header.
+ */
+
+ /*
+ ** If this is a new lock request, <flags> will contain the requested
+ ** lock scope. Three rules apply:
+ **
+ ** 1) Do not require a (shared) locktoken to be seen (when we are
+ ** applying another shared lock)
+ ** 2) If the scope is exclusive and we see any locks, fail.
+ ** 3) If the scope is shared and we see an exclusive lock, fail.
+ */
+
+ if (lockdb == NULL) {
+ /* we're in State 1. no locks. */
+ lock_list = NULL;
+ }
+ else {
+ /*
+ ** ### hrm... we don't need to have these fully
+ ** ### resolved since we're only looking at the
+ ** ### locktokens...
+ **
+ ** ### use get_locks w/ calltype=PARTIAL
+ */
+ if ((err = dav_lock_query(lockdb, resource, &lock_list)) != NULL) {
+ return dav_push_error(p,
+ HTTP_INTERNAL_SERVER_ERROR, 0,
+ "The locks could not be queried for "
+ "verification against a possible \"If:\" "
+ "header.",
+ err);
+ }
+
+ /* lock_list now determines whether we're in State 1, 2, or 3. */
+ }
+
+ /*
+ ** For a new, exclusive lock: if any locks exist, fail.
+ ** For a new, shared lock: if an exclusive lock exists, fail.
+ ** else, do not require a token to be seen.
+ */
+ if (flags & DAV_LOCKSCOPE_EXCLUSIVE) {
+ if (lock_list != NULL) {
+ return dav_new_error(p, HTTP_LOCKED, 0,
+ "Existing lock(s) on the requested resource "
+ "prevent an exclusive lock.");
+ }
+
+ /*
+ ** There are no locks, so we can pretend that we've already met
+ ** any requirement to find the resource's locks in an If: header.
+ */
+ seen_locktoken = 1;
+ }
+ else if (flags & DAV_LOCKSCOPE_SHARED) {
+ /*
+ ** Strictly speaking, we don't need this loop. Either the first
+ ** (and only) lock will be EXCLUSIVE, or none of them will be.
+ */
+ for (lock = lock_list; lock != NULL; lock = lock->next) {
+ if (lock->scope == DAV_LOCKSCOPE_EXCLUSIVE) {
+ return dav_new_error(p, HTTP_LOCKED, 0,
+ "The requested resource is already "
+ "locked exclusively.");
+ }
+ }
+
+ /*
+ ** The locks on the resource (if any) are all shared. Set the
+ ** <seen_locktoken> flag to indicate that we do not need to find
+ ** the locks in an If: header.
+ */
+ seen_locktoken = 1;
+ }
+ else {
+ /*
+ ** For methods other than LOCK:
+ **
+ ** If we have no locks, then <seen_locktoken> can be set to true --
+ ** pretending that we've already met the requirement of seeing one
+ ** of the resource's locks in the If: header.
+ **
+ ** Otherwise, it must be cleared and we'll look for one.
+ */
+ seen_locktoken = (lock_list == NULL);
+ }
+
+ /*
+ ** If there is no If: header, then we can shortcut some logic:
+ **
+ ** 1) if we do not need to find a locktoken in the (non-existent) If:
+ ** header, then we are successful.
+ **
+ ** 2) if we must find a locktoken in the (non-existent) If: header, then
+ ** we fail.
+ */
+ if (if_header == NULL) {
+ if (seen_locktoken)
+ return NULL;
+
+ return dav_new_error(p, HTTP_LOCKED, 0,
+ "This resource is locked and an \"If:\" header "
+ "was not supplied to allow access to the "
+ "resource.");
+ }
+ /* the If: header is present */
+
+ /*
+ ** If a dummy header is present (because of a Lock-Token: header), then
+ ** we are required to find that token in this resource's set of locks.
+ ** If we have no locks, then we immediately fail.
+ **
+ ** This is a 400 (Bad Request) since they should only submit a locktoken
+ ** that actually exists.
+ **
+ ** Don't issue this response if we're talking about the parent resource.
+ ** It is okay for that resource to NOT have this locktoken.
+ ** (in fact, it certainly will not: a dummy_header only occurs for the
+ ** UNLOCK method, the parent is checked only for locknull resources,
+ ** and the parent certainly does not have the (locknull's) locktoken)
+ */
+ if (lock_list == NULL && if_header->dummy_header) {
+ if (flags & DAV_VALIDATE_IS_PARENT)
+ return NULL;
+ return dav_new_error(p, HTTP_BAD_REQUEST, 0,
+ "The locktoken specified in the \"Lock-Token:\" "
+ "header is invalid because this resource has no "
+ "outstanding locks.");
+ }
+
+ /*
+ ** Prepare the input URI. We want the URI to never have a trailing slash.
+ **
+ ** When URIs are placed into the dav_if_header structure, they are
+ ** guaranteed to never have a trailing slash. If the URIs are equivalent,
+ ** then it doesn't matter if they both lack a trailing slash -- they're
+ ** still equivalent.
+ **
+ ** Note: we could also ensure that a trailing slash is present on both
+ ** URIs, but the majority of URIs provided to us via a resource walk
+ ** will not contain that trailing slash.
+ */
+ uri = resource->uri;
+ uri_len = strlen(uri);
+ if (uri[uri_len - 1] == '/') {
+ dav_set_bufsize(p, pbuf, uri_len);
+ memcpy(pbuf->buf, uri, uri_len);
+ pbuf->buf[--uri_len] = '\0';
+ uri = pbuf->buf;
+ }
+
+ /* get the resource's etag; we may need it during the checks */
+ etag = (*resource->hooks->getetag)(resource);
+
+ /* how many state_lists apply to this URI? */
+ num_that_apply = 0;
+
+ /* If there are if-headers, fail if this resource
+ * does not match at least one state_list.
+ */
+ for (ifhdr_scan = if_header;
+ ifhdr_scan != NULL;
+ ifhdr_scan = ifhdr_scan->next) {
+
+ /* DBG2("uri=<%s> if_uri=<%s>", uri, ifhdr_scan->uri ? ifhdr_scan->uri : "(no uri)"); */
+
+ if (ifhdr_scan->uri != NULL
+ && (uri_len != ifhdr_scan->uri_len
+ || memcmp(uri, ifhdr_scan->uri, uri_len) != 0)) {
+ /*
+ ** A tagged-list's URI doesn't match this resource's URI.
+ ** Skip to the next state_list to see if it will match.
+ */
+ continue;
+ }
+
+ /* this state_list applies to this resource */
+
+ /*
+ ** ### only one state_list should ever apply! a no-tag, or a tagged
+ ** ### where S9.4.2 states only one can match.
+ **
+ ** ### revamp this code to loop thru ifhdr_scan until we find the
+ ** ### matching state_list. process it. stop.
+ */
+ ++num_that_apply;
+
+ /* To succeed, resource must match *all* of the states
+ * specified in the state_list.
+ */
+ for (state_list = ifhdr_scan->state;
+ state_list != NULL;
+ state_list = state_list->next) {
+
+ switch(state_list->type) {
+ case dav_if_etag:
+ {
+ int mismatch = strcmp(state_list->etag, etag);
+
+ if (state_list->condition == DAV_IF_COND_NORMAL && mismatch) {
+ /*
+ ** The specified entity-tag does not match the
+ ** entity-tag on the resource. This state_list is
+ ** not going to match. Bust outta here.
+ */
+ reason =
+ "an entity-tag was specified, but the resource's "
+ "actual ETag does not match.";
+ goto state_list_failed;
+ }
+ else if (state_list->condition == DAV_IF_COND_NOT
+ && !mismatch) {
+ /*
+ ** The specified entity-tag DOES match the
+ ** entity-tag on the resource. This state_list is
+ ** not going to match. Bust outta here.
+ */
+ reason =
+ "an entity-tag was specified using the \"Not\" form, "
+ "but the resource's actual ETag matches the provided "
+ "entity-tag.";
+ goto state_list_failed;
+ }
+ break;
+ }
+
+ case dav_if_opaquelock:
+ if (lockdb == NULL) {
+ if (state_list->condition == DAV_IF_COND_NOT) {
+ /* the locktoken is definitely not there! (success) */
+ continue;
+ }
+
+ /* condition == DAV_IF_COND_NORMAL */
+
+ /*
+ ** If no lockdb is provided, then validation fails for
+ ** this state_list (NORMAL means we were supposed to
+ ** find the token, which we obviously cannot do without
+ ** a lock database).
+ **
+ ** Go and try the next state list.
+ */
+ reason =
+ "a State-token was supplied, but a lock database "
+ "is not available for to provide the required lock.";
+ goto state_list_failed;
+ }
+
+ /* Resource validation 'fails' if:
+ * ANY of the lock->locktokens match
+ * a NOT state_list->locktoken,
+ * OR
+ * NONE of the lock->locktokens match
+ * a NORMAL state_list->locktoken.
+ */
+ num_matched = 0;
+ for (lock = lock_list; lock != NULL; lock = lock->next) {
+
+ /*
+ DBG2("compare: rsrc=%s ifhdr=%s",
+ (*locks_hooks->format_locktoken)(p, lock->locktoken),
+ (*locks_hooks->format_locktoken)(p, state_list->locktoken));
+ */
+
+ /* nothing to do if the locktokens do not match. */
+ if ((*locks_hooks->compare_locktoken)(state_list->locktoken, lock->locktoken)) {
+ continue;
+ }
+
+ /*
+ ** We have now matched up one of the resource's locktokens
+ ** to a locktoken in a State-token in the If: header.
+ ** Note this fact, so that we can pass the overall
+ ** requirement of seeing at least one of the resource's
+ ** locktokens.
+ */
+ seen_locktoken = 1;
+
+ if (state_list->condition == DAV_IF_COND_NOT) {
+ /*
+ ** This state requires that the specified locktoken
+ ** is NOT present on the resource. But we just found
+ ** it. There is no way this state-list can now
+ ** succeed, so go try another one.
+ */
+ reason =
+ "a State-token was supplied, which used a "
+ "\"Not\" condition. The State-token was found "
+ "in the locks on this resource";
+ goto state_list_failed;
+ }
+
+ /* condition == DAV_IF_COND_NORMAL */
+
+ /* Validate auth_user: If an authenticated user created
+ ** the lock, only the same user may submit that locktoken
+ ** to manipulate a resource.
+ */
+ if (lock->auth_user &&
+ (!r->user ||
+ strcmp(lock->auth_user, r->user))) {
+ const char *errmsg;
+
+ errmsg = apr_pstrcat(p, "User \"",
+ r->user,
+ "\" submitted a locktoken created "
+ "by user \"",
+ lock->auth_user, "\".", NULL);
+ return dav_new_error(p, HTTP_FORBIDDEN, 0, errmsg);
+ }
+
+ /*
+ ** We just matched a specified State-Token to one of the
+ ** resource's locktokens.
+ **
+ ** Break out of the lock scan -- we only needed to find
+ ** one match (actually, there shouldn't be any other
+ ** matches in the lock list).
+ */
+ num_matched = 1;
+ break;
+ }
+
+ if (num_matched == 0
+ && state_list->condition == DAV_IF_COND_NORMAL) {
+ /*
+ ** We had a NORMAL state, meaning that we should have
+ ** found the State-Token within the locks on this
+ ** resource. We didn't, so this state_list must fail.
+ */
+ reason =
+ "a State-token was supplied, but it was not found "
+ "in the locks on this resource.";
+ goto state_list_failed;
+ }
+
+ break;
+
+ } /* switch */
+ } /* foreach ( state_list ) */
+
+ /*
+ ** We've checked every state in this state_list and none of them
+ ** have failed. Since all of them succeeded, then we have a matching
+ ** state list and we may be done.
+ **
+ ** The next requirement is that we have seen one of the resource's
+ ** locktokens (if any). If we have, then we can just exit. If we
+ ** haven't, then we need to keep looking.
+ */
+ if (seen_locktoken) {
+ /* woo hoo! */
+ return NULL;
+ }
+
+ /*
+ ** Haven't seen one. Let's break out of the search and just look
+ ** for a matching locktoken.
+ */
+ break;
+
+ /*
+ ** This label is used when we detect that a state_list is not
+ ** going to match this resource. We bust out and try the next
+ ** state_list.
+ */
+ state_list_failed:
+ ;
+
+ } /* foreach ( ifhdr_scan ) */
+
+ /*
+ ** The above loop exits for one of two reasons:
+ ** 1) a state_list matched and seen_locktoken is false.
+ ** 2) all if_header structures were scanned, without (1) occurring
+ */
+
+ if (ifhdr_scan == NULL) {
+ /*
+ ** We finished the loop without finding any matching state lists.
+ */
+
+ /*
+ ** If none of the state_lists apply to this resource, then we
+ ** may have succeeded. Note that this scenario implies a
+ ** tagged-list with no matching state_lists. If the If: header
+ ** was a no-tag-list, then it would have applied to this resource.
+ **
+ ** S9.4.2 states that when no state_lists apply, then the header
+ ** should be ignored.
+ **
+ ** If we saw one of the resource's locktokens, then we're done.
+ ** If we did not see a locktoken, then we fail.
+ */
+ if (num_that_apply == 0) {
+ if (seen_locktoken)
+ return NULL;
+
+ /*
+ ** We may have aborted the scan before seeing the locktoken.
+ ** Rescan the If: header to see if we can find the locktoken
+ ** somewhere.
+ **
+ ** Note that seen_locktoken == 0 implies lock_list != NULL
+ ** which implies locks_hooks != NULL.
+ */
+ if (dav_find_submitted_locktoken(if_header, lock_list,
+ locks_hooks)) {
+ /*
+ ** We found a match! We're set... none of the If: header
+ ** assertions apply (implicit success), and the If: header
+ ** specified the locktoken somewhere. We're done.
+ */
+ return NULL;
+ }
+
+ return dav_new_error(p, HTTP_LOCKED, 0 /* error_id */,
+ "This resource is locked and the \"If:\" "
+ "header did not specify one of the "
+ "locktokens for this resource's lock(s).");
+ }
+ /* else: one or more state_lists were applicable, but failed. */
+
+ /*
+ ** If the dummy_header did not match, then they specified an
+ ** incorrect token in the Lock-Token header. Forget whether the
+ ** If: statement matched or not... we'll tell them about the
+ ** bad Lock-Token first. That is considered a 400 (Bad Request).
+ */
+ if (if_header->dummy_header) {
+ return dav_new_error(p, HTTP_BAD_REQUEST, 0,
+ "The locktoken specified in the "
+ "\"Lock-Token:\" header did not specify one "
+ "of this resource's locktoken(s).");
+ }
+
+ if (reason == NULL) {
+ return dav_new_error(p, HTTP_PRECONDITION_FAILED, 0,
+ "The preconditions specified by the \"If:\" "
+ "header did not match this resource.");
+ }
+
+ return dav_new_error(p, HTTP_PRECONDITION_FAILED, 0,
+ apr_psprintf(p,
+ "The precondition(s) specified by "
+ "the \"If:\" header did not match "
+ "this resource. At least one "
+ "failure is because: %s", reason));
+ }
+
+ /* assert seen_locktoken == 0 */
+
+ /*
+ ** ifhdr_scan != NULL implies we found a matching state_list.
+ **
+ ** Since we're still here, it also means that we have not yet found
+    ** one of the resource's locktokens in the If: header.
+ **
+ ** Scan all the if_headers and states looking for one of this
+ ** resource's locktokens. Note that we need to go back and scan them
+ ** all -- we may have aborted a scan with a failure before we saw a
+ ** matching token.
+ **
+ ** Note that seen_locktoken == 0 implies lock_list != NULL which implies
+ ** locks_hooks != NULL.
+ */
+ if (dav_find_submitted_locktoken(if_header, lock_list, locks_hooks)) {
+ /*
+ ** We found a match! We're set... we have a matching state list,
+ ** and the If: header specified the locktoken somewhere. We're done.
+ */
+ return NULL;
+ }
+
+ /*
+ ** We had a matching state list, but the user agent did not specify one
+ ** of this resource's locktokens. Tell them so.
+ **
+ ** Note that we need to special-case the message on whether a "dummy"
+ ** header exists. If it exists, yet we didn't see a needed locktoken,
+ ** then that implies the dummy header (Lock-Token header) did NOT
+ ** specify one of this resource's locktokens. (this implies something
+ ** in the real If: header matched)
+ **
+    ** We report the 400 (Bad Request) in preference to a 423 (Locked).
+ */
+ if (if_header->dummy_header) {
+ return dav_new_error(p, HTTP_BAD_REQUEST, 0,
+ "The locktoken specified in the "
+ "\"Lock-Token:\" header did not specify one "
+ "of this resource's locktoken(s).");
+ }
+
+ return dav_new_error(p, HTTP_LOCKED, 1 /* error_id */,
+ "This resource is locked and the \"If:\" header "
+ "did not specify one of the "
+ "locktokens for this resource's lock(s).");
+}
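+
+/*
+** Editorial illustration (not part of the original source): the state-list
+** evaluation above operates on parsed "If:" headers. A tagged-list header
+** such as
+**
+**   If: <http://www.example.com/locked/member>
+**       (<opaquelocktoken:e71d4fae-5dec-22d6-fea5-00a0c91e6be4> ["etag-1"])
+**
+** produces one dav_if_header whose state list holds a dav_if_opaquelock
+** entry (the State-token) and a dav_if_etag entry ("etag-1"), both with
+** condition DAV_IF_COND_NORMAL; a leading "Not" in the list would yield
+** DAV_IF_COND_NOT instead. The URI, token, and etag values are hypothetical.
+*/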
+
+/* dav_validate_walker: Walker callback function to validate resource state */
+static dav_error * dav_validate_walker(dav_walk_resource *wres, int calltype)
+{
+ dav_walker_ctx *ctx = wres->walk_ctx;
+ dav_error *err;
+
+ if ((err = dav_validate_resource_state(ctx->w.pool, wres->resource,
+ ctx->w.lockdb,
+ ctx->if_header, ctx->flags,
+ &ctx->work_buf, ctx->r)) == NULL) {
+ /* There was no error, so just bug out. */
+ return NULL;
+ }
+
+ /*
+ ** If we have a serious server error, or if the request itself failed,
+ ** then just return error (not a multistatus).
+ */
+ if (ap_is_HTTP_SERVER_ERROR(err->status)
+ || (*wres->resource->hooks->is_same_resource)(wres->resource,
+ ctx->w.root)) {
+ /* ### maybe push a higher-level description? */
+ return err;
+ }
+
+ /* associate the error with the current URI */
+ dav_add_response(wres, err->status, NULL);
+
+ return NULL;
+}
+
+/*
+** dav_validate_request: Validate if-headers (and check for locks) on:
+** (1) r->filename @ depth;
+** (2) Parent of r->filename if check_parent == 1
+**
+** The parent check should be done when it is necessary to verify that
+** the parent collection will accept a new member (i.e. the current
+** resource state is null).
+**
+** Returns NULL on successful validation, else a dav_error describing the
+** failure. If a multistatus response is necessary, *response will point
+** to it; otherwise *response is set to NULL.
+*/
+DAV_DECLARE(dav_error *) dav_validate_request(request_rec *r,
+ dav_resource *resource,
+ int depth,
+ dav_locktoken *locktoken,
+ dav_response **response,
+ int flags,
+ dav_lockdb *lockdb)
+{
+ dav_error *err;
+ int result;
+ dav_if_header *if_header;
+ int lock_db_opened_locally = 0;
+ const dav_hooks_locks *locks_hooks = DAV_GET_HOOKS_LOCKS(r);
+ const dav_hooks_repository *repos_hooks = resource->hooks;
+ dav_buffer work_buf = { 0 };
+ dav_response *new_response;
+
+#if DAV_DEBUG
+ if (depth && response == NULL) {
+ /*
+ ** ### bleck. we can't return errors for other URIs unless we have
+ ** ### a "response" ptr.
+ */
+ return dav_new_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "DESIGN ERROR: dav_validate_request called "
+ "with depth>0, but no response ptr.");
+ }
+#endif
+
+ if (response != NULL)
+ *response = NULL;
+
+ /* Do the standard checks for conditional requests using
+ * If-..-Since, If-Match etc */
+ if ((result = ap_meets_conditions(r)) != OK) {
+ /* ### fix this up... how? */
+ return dav_new_error(r->pool, result, 0, NULL);
+ }
+
+ /* always parse (and later process) the If: header */
+ if ((err = dav_process_if_header(r, &if_header)) != NULL) {
+ /* ### maybe add higher-level description */
+ return err;
+ }
+
+ /* If a locktoken was specified, create a dummy if_header with which
+ * to validate resources. In the interim, figure out why DAV uses
+ * locktokens in an if-header without a Lock-Token header to refresh
+ * locks, but a Lock-Token header without an if-header to remove them.
+ */
+ if (locktoken != NULL) {
+ dav_if_header *ifhdr_new;
+
+ ifhdr_new = apr_pcalloc(r->pool, sizeof(*ifhdr_new));
+ ifhdr_new->uri = resource->uri;
+ ifhdr_new->uri_len = strlen(resource->uri);
+ ifhdr_new->dummy_header = 1;
+
+ ifhdr_new->state = apr_pcalloc(r->pool, sizeof(*ifhdr_new->state));
+ ifhdr_new->state->type = dav_if_opaquelock;
+ ifhdr_new->state->condition = DAV_IF_COND_NORMAL;
+ ifhdr_new->state->locktoken = locktoken;
+
+ ifhdr_new->next = if_header;
+ if_header = ifhdr_new;
+ }
+
+ /*
+ ** If necessary, open the lock database (read-only, lazily);
+ ** the validation process may need to retrieve or update lock info.
+ ** Otherwise, assume provided lockdb is valid and opened rw.
+ */
+ if (lockdb == NULL) {
+ if (locks_hooks != NULL) {
+ if ((err = (*locks_hooks->open_lockdb)(r, 0, 0, &lockdb)) != NULL) {
+ /* ### maybe insert higher-level comment */
+ return err;
+ }
+ lock_db_opened_locally = 1;
+ }
+ }
+
+ /* (1) Validate the specified resource, at the specified depth */
+ if (resource->exists && depth > 0) {
+ dav_walker_ctx ctx = { { 0 } };
+ dav_response *multi_status;
+
+ ctx.w.walk_type = DAV_WALKTYPE_NORMAL;
+ ctx.w.func = dav_validate_walker;
+ ctx.w.walk_ctx = &ctx;
+ ctx.w.pool = r->pool;
+ ctx.w.root = resource;
+
+ ctx.if_header = if_header;
+ ctx.r = r;
+ ctx.flags = flags;
+
+ if (lockdb != NULL) {
+ ctx.w.lockdb = lockdb;
+ ctx.w.walk_type |= DAV_WALKTYPE_LOCKNULL;
+ }
+
+ err = (*repos_hooks->walk)(&ctx.w, DAV_INFINITY, &multi_status);
+ if (err == NULL) {
+            *response = multi_status;
+ }
+ /* else: implies a 5xx status code occurred. */
+ }
+ else {
+ err = dav_validate_resource_state(r->pool, resource, lockdb,
+ if_header, flags, &work_buf, r);
+ }
+
+ /* (2) Validate the parent resource if requested */
+ if (err == NULL && (flags & DAV_VALIDATE_PARENT)) {
+ dav_resource *parent_resource;
+
+ err = (*repos_hooks->get_parent_resource)(resource, &parent_resource);
+
+ if (err == NULL && parent_resource == NULL) {
+ err = dav_new_error(r->pool, HTTP_FORBIDDEN, 0,
+ "Cannot access parent of repository root.");
+ }
+ else if (err == NULL) {
+ err = dav_validate_resource_state(r->pool, parent_resource, lockdb,
+ if_header,
+ flags | DAV_VALIDATE_IS_PARENT,
+ &work_buf, r);
+
+ /*
+ ** This error occurred on the parent resource. This implies that
+ ** we have to create a multistatus response (to report the error
+ ** against a URI other than the Request-URI). "Convert" this error
+ ** into a multistatus response.
+ */
+ if (err != NULL) {
+ new_response = apr_pcalloc(r->pool, sizeof(*new_response));
+
+ new_response->href = parent_resource->uri;
+ new_response->status = err->status;
+ new_response->desc =
+ "A validation error has occurred on the parent resource, "
+ "preventing the operation on the resource specified by "
+ "the Request-URI.";
+ if (err->desc != NULL) {
+ new_response->desc = apr_pstrcat(r->pool,
+ new_response->desc,
+ " The error was: ",
+ err->desc, NULL);
+ }
+
+ /* assert: DAV_VALIDATE_PARENT implies response != NULL */
+ new_response->next = *response;
+ *response = new_response;
+
+ err = NULL;
+ }
+ }
+ }
+
+ if (lock_db_opened_locally)
+ (*locks_hooks->close_lockdb)(lockdb);
+
+ /*
+ ** If we don't have a (serious) error, and we have multistatus responses,
+ ** then we need to construct an "error". This error will be the overall
+ ** status returned, and the multistatus responses will go into its body.
+ **
+ ** For certain methods, the overall error will be a 424. The default is
+ ** to construct a standard 207 response.
+ */
+ if (err == NULL && response != NULL && *response != NULL) {
+ apr_text *propstat = NULL;
+
+ if ((flags & DAV_VALIDATE_USE_424) != 0) {
+ /* manufacture a 424 error to hold the multistatus response(s) */
+ return dav_new_error(r->pool, HTTP_FAILED_DEPENDENCY, 0,
+ "An error occurred on another resource, "
+ "preventing the requested operation on "
+ "this resource.");
+ }
+
+ /*
+ ** Whatever caused the error, the Request-URI should have a 424
+ ** associated with it since we cannot complete the method.
+ **
+ ** For a LOCK operation, insert an empty DAV:lockdiscovery property.
+ ** For other methods, return a simple 424.
+ */
+ if ((flags & DAV_VALIDATE_ADD_LD) != 0) {
+ propstat = apr_pcalloc(r->pool, sizeof(*propstat));
+ propstat->text =
+ "<D:propstat>" DEBUG_CR
+ "<D:prop><D:lockdiscovery/></D:prop>" DEBUG_CR
+ "<D:status>HTTP/1.1 424 Failed Dependency</D:status>" DEBUG_CR
+ "</D:propstat>" DEBUG_CR;
+ }
+
+ /* create the 424 response */
+ new_response = apr_pcalloc(r->pool, sizeof(*new_response));
+ new_response->href = resource->uri;
+ new_response->status = HTTP_FAILED_DEPENDENCY;
+ new_response->propresult.propstats = propstat;
+ new_response->desc =
+ "An error occurred on another resource, preventing the "
+ "requested operation on this resource.";
+
+ new_response->next = *response;
+ *response = new_response;
+
+ /* manufacture a 207 error for the multistatus response(s) */
+ return dav_new_error(r->pool, HTTP_MULTI_STATUS, 0,
+ "Error(s) occurred on resources during the "
+ "validation process.");
+ }
+
+ return err;
+}
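+
+/*
+** Editorial sketch (not part of the original source): a method handler
+** typically validates before modifying the resource, e.g.
+**
+**   dav_response *multi_response;
+**   dav_error *err = dav_validate_request(r, resource, DAV_INFINITY, NULL,
+**                                         &multi_response,
+**                                         DAV_VALIDATE_PARENT
+**                                         | DAV_VALIDATE_USE_424, NULL);
+**
+** Passing NULL for the locktoken and lockdb lets the function parse the
+** If: header itself and open the lock database lazily; the flag
+** combination shown is only an example of what a DELETE-style handler
+** might use.
+*/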
+
+/* dav_get_locktoken_list:
+ *
+ * Sets *ltl to a locktoken_list of all positive locktokens in the If:
+ * header. If there is no If: header, or it contains no positive
+ * locktokens, *ltl is left NULL and an error is returned.
+ */
+DAV_DECLARE(dav_error *) dav_get_locktoken_list(request_rec *r,
+ dav_locktoken_list **ltl)
+{
+ dav_error *err;
+ dav_if_header *if_header;
+ dav_if_state_list *if_state;
+ dav_locktoken_list *lock_token = NULL;
+
+ *ltl = NULL;
+
+ if ((err = dav_process_if_header(r, &if_header)) != NULL) {
+ /* ### add a higher-level description? */
+ return err;
+ }
+
+ while (if_header != NULL) {
+        if_state = if_header->state;  /* Beginning of the if_state linked list */
+ while (if_state != NULL) {
+ if (if_state->condition == DAV_IF_COND_NORMAL
+ && if_state->type == dav_if_opaquelock) {
+ lock_token = apr_pcalloc(r->pool, sizeof(dav_locktoken_list));
+ lock_token->locktoken = if_state->locktoken;
+ lock_token->next = *ltl;
+ *ltl = lock_token;
+ }
+ if_state = if_state->next;
+ }
+ if_header = if_header->next;
+ }
+ if (*ltl == NULL) {
+ /* No nodes added */
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST, DAV_ERR_IF_ABSENT,
+ "No locktokens were specified in the \"If:\" "
+ "header, so the refresh could not be performed.");
+ }
+
+ return NULL;
+}
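+
+/*
+** Editorial sketch (not part of the original source): callers walk the
+** returned list, one node per positively asserted State-token, e.g.
+**
+**   dav_locktoken_list *ltl;
+**   dav_locktoken_list *scan;
+**   int num_tokens = 0;
+**
+**   if (dav_get_locktoken_list(r, &ltl) == NULL) {
+**       for (scan = ltl; scan != NULL; scan = scan->next)
+**           ++num_tokens;
+**   }
+**
+** where each node's locktoken field holds one token to refresh or compare.
+*/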
+
+#if 0 /* not needed right now... */
+
+static const char *strip_white(const char *s, apr_pool_t *pool)
+{
+ apr_size_t idx;
+
+ /* trim leading whitespace */
+ while (apr_isspace(*s)) /* assume: return false for '\0' */
+ ++s;
+
+ /* trim trailing whitespace */
+ idx = strlen(s) - 1;
+ if (apr_isspace(s[idx])) {
+ char *s2 = apr_pstrdup(pool, s);
+
+ while (apr_isspace(s2[idx]) && idx > 0)
+ --idx;
+ s2[idx + 1] = '\0';
+ return s2;
+ }
+
+ return s;
+}
+#endif
+
+#define DAV_LABEL_HDR "Label"
+
+/* dav_add_vary_header
+ *
+ * If there were any headers in the request which require a Vary header
+ * in the response, add it.
+ */
+DAV_DECLARE(void) dav_add_vary_header(request_rec *in_req,
+ request_rec *out_req,
+ const dav_resource *resource)
+{
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(in_req);
+
+ /* ### this is probably all wrong... I think there is a function in
+ ### the Apache API to add things to the Vary header. need to check */
+
+ /* Only versioning headers require a Vary response header,
+ * so only do this check if there is a versioning provider */
+ if (vsn_hooks != NULL) {
+ const char *target = apr_table_get(in_req->headers_in, DAV_LABEL_HDR);
+ const char *vary = apr_table_get(out_req->headers_out, "Vary");
+
+        /* If a Label header was specified, add it to the Vary header */
+ if (target != NULL) {
+ if (vary == NULL)
+ vary = DAV_LABEL_HDR;
+ else
+ vary = apr_pstrcat(out_req->pool, vary, "," DAV_LABEL_HDR,
+ NULL);
+
+ apr_table_setn(out_req->headers_out, "Vary", vary);
+ }
+ }
+}
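+
+/*
+** Editorial note (not part of the original source): the usual idiom for the
+** concern raised above is apr_table_merge(), which appends to an existing
+** header value with a comma separator, e.g.
+**
+**   apr_table_merge(out_req->headers_out, "Vary", DAV_LABEL_HDR);
+**
+** which could replace the manual get/pstrcat/setn sequence here.
+*/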
+
+/* dav_can_auto_checkout
+ *
+ * Determine whether auto-checkout is enabled for a resource.
+ * r - the request_rec
+ * resource - the resource
+ * auto_version - the value of the auto_versionable hook for the resource
+ * lockdb - pointer to lock database (opened if necessary)
+ * auto_checkout - set to 1 if auto-checkout enabled
+ */
+static dav_error * dav_can_auto_checkout(
+ request_rec *r,
+ dav_resource *resource,
+ dav_auto_version auto_version,
+ dav_lockdb **lockdb,
+ int *auto_checkout)
+{
+ dav_error *err;
+ dav_lock *lock_list;
+
+ *auto_checkout = 0;
+
+ if (auto_version == DAV_AUTO_VERSION_ALWAYS) {
+ *auto_checkout = 1;
+ }
+ else if (auto_version == DAV_AUTO_VERSION_LOCKED) {
+ if (*lockdb == NULL) {
+ const dav_hooks_locks *locks_hooks = DAV_GET_HOOKS_LOCKS(r);
+
+ if (locks_hooks == NULL) {
+ return dav_new_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Auto-checkout is only enabled for locked resources, "
+ "but there is no lock provider.");
+ }
+
+ if ((err = (*locks_hooks->open_lockdb)(r, 0, 0, lockdb)) != NULL) {
+ return dav_push_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Cannot open lock database to determine "
+ "auto-versioning behavior.",
+ err);
+ }
+ }
+
+ if ((err = dav_lock_query(*lockdb, resource, &lock_list)) != NULL) {
+ return dav_push_error(r->pool,
+ HTTP_INTERNAL_SERVER_ERROR, 0,
+ "The locks could not be queried for "
+ "determining auto-versioning behavior.",
+ err);
+ }
+
+ if (lock_list != NULL)
+ *auto_checkout = 1;
+ }
+
+ return NULL;
+}
+
+/* see mod_dav.h for docco */
+DAV_DECLARE(dav_error *) dav_auto_checkout(
+ request_rec *r,
+ dav_resource *resource,
+ int parent_only,
+ dav_auto_version_info *av_info)
+{
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r);
+ dav_lockdb *lockdb = NULL;
+ dav_error *err = NULL;
+
+ /* Initialize results */
+ memset(av_info, 0, sizeof(*av_info));
+
+ /* if no versioning provider, just return */
+ if (vsn_hooks == NULL)
+ return NULL;
+
+ /* check parent resource if requested or if resource must be created */
+ if (!resource->exists || parent_only) {
+ dav_resource *parent;
+
+ if ((err = (*resource->hooks->get_parent_resource)(resource,
+ &parent)) != NULL)
+ goto done;
+
+ if (parent == NULL || !parent->exists) {
+ err = dav_new_error(r->pool, HTTP_CONFLICT, 0,
+ apr_psprintf(r->pool,
+ "Missing one or more intermediate "
+ "collections. Cannot create resource %s.",
+ ap_escape_html(r->pool, resource->uri)));
+ goto done;
+ }
+
+ av_info->parent_resource = parent;
+
+ /* if parent versioned and not checked out, see if it can be */
+ if (parent->versioned && !parent->working) {
+ int checkout_parent;
+
+ if ((err = dav_can_auto_checkout(r, parent,
+ (*vsn_hooks->auto_versionable)(parent),
+ &lockdb, &checkout_parent))
+ != NULL) {
+ goto done;
+ }
+
+ if (!checkout_parent) {
+ err = dav_new_error(r->pool, HTTP_CONFLICT, 0,
+ "<DAV:cannot-modify-checked-in-parent>");
+ goto done;
+ }
+
+ /* Try to checkout the parent collection.
+ * Note that auto-versioning can only be applied to a version selector,
+ * so no separate working resource will be created.
+ */
+ if ((err = (*vsn_hooks->checkout)(parent, 1 /*auto_checkout*/,
+ 0, 0, 0, NULL, NULL))
+ != NULL)
+ {
+ err = dav_push_error(r->pool, HTTP_CONFLICT, 0,
+ apr_psprintf(r->pool,
+ "Unable to auto-checkout parent collection. "
+ "Cannot create resource %s.",
+ ap_escape_html(r->pool, resource->uri)),
+ err);
+ goto done;
+ }
+
+ /* remember that parent was checked out */
+ av_info->parent_checkedout = 1;
+ }
+ }
+
+ /* if only checking parent, we're done */
+ if (parent_only)
+ goto done;
+
+ /* if creating a new resource, see if it should be version-controlled */
+ if (!resource->exists
+ && (*vsn_hooks->auto_versionable)(resource) == DAV_AUTO_VERSION_ALWAYS) {
+
+ if ((err = (*vsn_hooks->vsn_control)(resource, NULL)) != NULL) {
+ err = dav_push_error(r->pool, HTTP_CONFLICT, 0,
+ apr_psprintf(r->pool,
+ "Unable to create versioned resource %s.",
+ ap_escape_html(r->pool, resource->uri)),
+ err);
+ goto done;
+ }
+
+ /* remember that resource was created */
+ av_info->resource_versioned = 1;
+ }
+
+ /* if resource is versioned, make sure it is checked out */
+ if (resource->versioned && !resource->working) {
+ int checkout_resource;
+
+ if ((err = dav_can_auto_checkout(r, resource,
+ (*vsn_hooks->auto_versionable)(resource),
+ &lockdb, &checkout_resource)) != NULL) {
+ goto done;
+ }
+
+ if (!checkout_resource) {
+ err = dav_new_error(r->pool, HTTP_CONFLICT, 0,
+ "<DAV:cannot-modify-version-controlled-content>");
+ goto done;
+ }
+
+ /* Auto-versioning can only be applied to version selectors, so
+ * no separate working resource will be created. */
+ if ((err = (*vsn_hooks->checkout)(resource, 1 /*auto_checkout*/,
+ 0, 0, 0, NULL, NULL))
+ != NULL)
+ {
+ err = dav_push_error(r->pool, HTTP_CONFLICT, 0,
+ apr_psprintf(r->pool,
+ "Unable to checkout resource %s.",
+ ap_escape_html(r->pool, resource->uri)),
+ err);
+ goto done;
+ }
+
+ /* remember that resource was checked out */
+ av_info->resource_checkedout = 1;
+ }
+
+done:
+
+ /* make sure lock database is closed */
+ if (lockdb != NULL)
+ (*lockdb->hooks->close_lockdb)(lockdb);
+
+ /* if an error occurred, undo any auto-versioning operations already done */
+ if (err != NULL) {
+ dav_auto_checkin(r, resource, 1 /*undo*/, 0 /*unlock*/, av_info);
+ return err;
+ }
+
+ return NULL;
+}
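+
+/*
+** Editorial sketch (not part of the original source): callers pair the two
+** auto-versioning helpers around the actual modification, roughly
+**
+**   dav_auto_version_info av_info;
+**   dav_error *err = dav_auto_checkout(r, resource, 0, &av_info);
+**
+**   if (err == NULL) {
+**       err = do_the_write(r, resource);
+**       dav_auto_checkin(r, resource, err != NULL, 0, &av_info);
+**   }
+**
+** where do_the_write() is a hypothetical stand-in for the handler's real
+** modification; a failed write takes the "undo" path (third argument), so
+** any auto-checkout is rolled back rather than checked in.
+*/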
+
+/* see mod_dav.h for docco */
+DAV_DECLARE(dav_error *) dav_auto_checkin(
+ request_rec *r,
+ dav_resource *resource,
+ int undo,
+ int unlock,
+ dav_auto_version_info *av_info)
+{
+ const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r);
+ dav_error *err = NULL;
+ dav_auto_version auto_version;
+
+ /* If no versioning provider, this is a no-op */
+ if (vsn_hooks == NULL)
+ return NULL;
+
+ /* If undoing auto-checkouts, then do uncheckouts */
+ if (undo) {
+ if (resource != NULL) {
+ if (av_info->resource_checkedout) {
+ if ((err = (*vsn_hooks->uncheckout)(resource)) != NULL) {
+ return dav_push_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ apr_psprintf(r->pool,
+ "Unable to undo auto-checkout "
+ "of resource %s.",
+ ap_escape_html(r->pool, resource->uri)),
+ err);
+ }
+ }
+
+ if (av_info->resource_versioned) {
+ dav_response *response;
+
+ /* ### should we do anything with the response? */
+ if ((err = (*resource->hooks->remove_resource)(resource,
+ &response)) != NULL) {
+ return dav_push_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ apr_psprintf(r->pool,
+ "Unable to undo auto-version-control "
+ "of resource %s.",
+ ap_escape_html(r->pool, resource->uri)),
+ err);
+ }
+ }
+ }
+
+ if (av_info->parent_resource != NULL && av_info->parent_checkedout) {
+ if ((err = (*vsn_hooks->uncheckout)(av_info->parent_resource)) != NULL) {
+ return dav_push_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ apr_psprintf(r->pool,
+ "Unable to undo auto-checkout "
+ "of parent collection %s.",
+ ap_escape_html(r->pool, av_info->parent_resource->uri)),
+ err);
+ }
+ }
+
+ return NULL;
+ }
+
+ /* If the resource was checked out, and auto-checkin is enabled,
+ * then check it in.
+ */
+ if (resource != NULL && resource->working
+ && (unlock || av_info->resource_checkedout)) {
+
+ auto_version = (*vsn_hooks->auto_versionable)(resource);
+
+ if (auto_version == DAV_AUTO_VERSION_ALWAYS ||
+ (unlock && (auto_version == DAV_AUTO_VERSION_LOCKED))) {
+
+ if ((err = (*vsn_hooks->checkin)(resource,
+ 0 /*keep_checked_out*/, NULL))
+ != NULL) {
+ return dav_push_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ apr_psprintf(r->pool,
+ "Unable to auto-checkin resource %s.",
+ ap_escape_html(r->pool, resource->uri)),
+ err);
+ }
+ }
+ }
+
+ /* If parent resource was checked out, and auto-checkin is enabled,
+ * then check it in.
+ */
+ if (!unlock
+ && av_info->parent_checkedout
+ && av_info->parent_resource != NULL
+ && av_info->parent_resource->working) {
+
+ auto_version = (*vsn_hooks->auto_versionable)(av_info->parent_resource);
+
+ if (auto_version == DAV_AUTO_VERSION_ALWAYS) {
+ if ((err = (*vsn_hooks->checkin)(av_info->parent_resource,
+ 0 /*keep_checked_out*/, NULL))
+ != NULL) {
+ return dav_push_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ apr_psprintf(r->pool,
+ "Unable to auto-checkin parent collection %s.",
+ ap_escape_html(r->pool, av_info->parent_resource->uri)),
+ err);
+ }
+ }
+ }
+
+ return NULL;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/dav/main/util_lock.c b/rubbos/app/httpd-2.0.64/modules/dav/main/util_lock.c
new file mode 100644
index 00000000..cd39f067
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/dav/main/util_lock.c
@@ -0,0 +1,791 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+** DAV repository-independent lock functions
+*/
+
+#include "apr.h"
+#include "apr_strings.h"
+
+#if APR_HAVE_STDIO_H
+#include <stdio.h> /* for sprintf() */
+#endif
+
+#include "mod_dav.h"
+#include "http_log.h"
+#include "http_config.h"
+#include "http_protocol.h"
+#include "http_core.h"
+
+
+/* ---------------------------------------------------------------
+**
+** Property-related lock functions
+**
+*/
+
+/*
+** dav_lock_get_activelock: Returns a <lockdiscovery> containing
+** an activelock element for every item in the lock_discovery tree
+*/
+DAV_DECLARE(const char *) dav_lock_get_activelock(request_rec *r,
+ dav_lock *lock,
+ dav_buffer *pbuf)
+{
+ dav_lock *lock_scan;
+ const dav_hooks_locks *hooks = DAV_GET_HOOKS_LOCKS(r);
+ int count = 0;
+ dav_buffer work_buf = { 0 };
+ apr_pool_t *p = r->pool;
+
+ /* If no locks or no lock provider, there are no locks */
+ if (lock == NULL || hooks == NULL) {
+ /*
+        ** Since lockdiscovery is defined with (activelock)*,
+ ** <D:activelock/> shouldn't be necessary for an empty lock.
+ */
+ return "";
+ }
+
+ /*
+ ** Note: it could be interesting to sum the lengths of the owners
+ ** and locktokens during this loop. However, the buffer
+ ** mechanism provides some rough padding so that we don't
+ ** really need to have an exact size. Further, constructing
+ ** locktoken strings could be relatively expensive.
+ */
+ for (lock_scan = lock; lock_scan != NULL; lock_scan = lock_scan->next)
+ count++;
+
+ /* if a buffer was not provided, then use an internal buffer */
+ if (pbuf == NULL)
+ pbuf = &work_buf;
+
+ /* reset the length before we start appending stuff */
+ pbuf->cur_len = 0;
+
+ /* prep the buffer with a "good" size */
+ dav_check_bufsize(p, pbuf, count * 300);
+
+ for (; lock != NULL; lock = lock->next) {
+ char tmp[100];
+
+#if DAV_DEBUG
+ if (lock->rectype == DAV_LOCKREC_INDIRECT_PARTIAL) {
+ /* ### crap. design error */
+ dav_buffer_append(p, pbuf,
+                              "DESIGN ERROR: attempted to produce an "
+ "activelock element from a partial, indirect "
+ "lock record. Creating an XML parsing error "
+ "to ease detection of this situation: <");
+ }
+#endif
+
+ dav_buffer_append(p, pbuf, "<D:activelock>" DEBUG_CR "<D:locktype>");
+ switch (lock->type) {
+ case DAV_LOCKTYPE_WRITE:
+ dav_buffer_append(p, pbuf, "<D:write/>");
+ break;
+ default:
+ /* ### internal error. log something? */
+ break;
+ }
+ dav_buffer_append(p, pbuf, "</D:locktype>" DEBUG_CR "<D:lockscope>");
+ switch (lock->scope) {
+ case DAV_LOCKSCOPE_EXCLUSIVE:
+ dav_buffer_append(p, pbuf, "<D:exclusive/>");
+ break;
+ case DAV_LOCKSCOPE_SHARED:
+ dav_buffer_append(p, pbuf, "<D:shared/>");
+ break;
+ default:
+ /* ### internal error. log something? */
+ break;
+ }
+ dav_buffer_append(p, pbuf, "</D:lockscope>" DEBUG_CR);
+ sprintf(tmp, "<D:depth>%s</D:depth>" DEBUG_CR,
+ lock->depth == DAV_INFINITY ? "infinity" : "0");
+ dav_buffer_append(p, pbuf, tmp);
+
+ if (lock->owner) {
+ /*
+ ** This contains a complete, self-contained <DAV:owner> element,
+ ** with namespace declarations and xml:lang handling. Just drop
+ ** it in.
+ */
+ dav_buffer_append(p, pbuf, lock->owner);
+ }
+
+ dav_buffer_append(p, pbuf, "<D:timeout>");
+ if (lock->timeout == DAV_TIMEOUT_INFINITE) {
+ dav_buffer_append(p, pbuf, "Infinite");
+ }
+ else {
+ time_t now = time(NULL);
+ sprintf(tmp, "Second-%lu", (long unsigned int)(lock->timeout - now));
+ dav_buffer_append(p, pbuf, tmp);
+ }
+
+ dav_buffer_append(p, pbuf,
+ "</D:timeout>" DEBUG_CR
+ "<D:locktoken>" DEBUG_CR
+ "<D:href>");
+ dav_buffer_append(p, pbuf,
+ (*hooks->format_locktoken)(p, lock->locktoken));
+ dav_buffer_append(p, pbuf,
+ "</D:href>" DEBUG_CR
+ "</D:locktoken>" DEBUG_CR
+ "</D:activelock>" DEBUG_CR);
+ }
+
+ return pbuf->buf;
+}
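+
+/*
+** Editorial illustration (not part of the original source): for a single
+** exclusive, depth-infinity write lock with a recorded owner, the function
+** above emits markup of roughly this shape (token value hypothetical):
+**
+**   <D:activelock>
+**     <D:locktype><D:write/></D:locktype>
+**     <D:lockscope><D:exclusive/></D:lockscope>
+**     <D:depth>infinity</D:depth>
+**     <D:owner>...</D:owner>
+**     <D:timeout>Second-3600</D:timeout>
+**     <D:locktoken>
+**       <D:href>opaquelocktoken:e71d4fae-5dec-22d6-fea5-00a0c91e6be4</D:href>
+**     </D:locktoken>
+**   </D:activelock>
+**
+** The real output is unindented and separated only by DEBUG_CR newlines.
+*/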
+
+/*
+** dav_lock_parse_lockinfo: Validates the given xml_doc to contain a
+** lockinfo XML element, then populates a dav_lock structure
+** with its contents.
+*/
+DAV_DECLARE(dav_error *) dav_lock_parse_lockinfo(request_rec *r,
+ const dav_resource *resource,
+ dav_lockdb *lockdb,
+ const apr_xml_doc *doc,
+ dav_lock **lock_request)
+{
+ apr_pool_t *p = r->pool;
+ dav_error *err;
+ apr_xml_elem *child;
+ dav_lock *lock;
+
+ if (!dav_validate_root(doc, "lockinfo")) {
+ return dav_new_error(p, HTTP_BAD_REQUEST, 0,
+ "The request body contains an unexpected "
+ "XML root element.");
+ }
+
+ if ((err = (*lockdb->hooks->create_lock)(lockdb, resource,
+ &lock)) != NULL) {
+ return dav_push_error(p, err->status, 0,
+ "Could not parse the lockinfo due to an "
+ "internal problem creating a lock structure.",
+ err);
+ }
+
+ lock->depth = dav_get_depth(r, DAV_INFINITY);
+ if (lock->depth == -1) {
+ return dav_new_error(p, HTTP_BAD_REQUEST, 0,
+ "An invalid Depth header was specified.");
+ }
+ lock->timeout = dav_get_timeout(r);
+
+ /* Parse elements in the XML body */
+ for (child = doc->root->first_child; child; child = child->next) {
+ if (strcmp(child->name, "locktype") == 0
+ && child->first_child
+ && lock->type == DAV_LOCKTYPE_UNKNOWN) {
+ if (strcmp(child->first_child->name, "write") == 0) {
+ lock->type = DAV_LOCKTYPE_WRITE;
+ continue;
+ }
+ }
+ if (strcmp(child->name, "lockscope") == 0
+ && child->first_child
+ && lock->scope == DAV_LOCKSCOPE_UNKNOWN) {
+ if (strcmp(child->first_child->name, "exclusive") == 0)
+ lock->scope = DAV_LOCKSCOPE_EXCLUSIVE;
+ else if (strcmp(child->first_child->name, "shared") == 0)
+ lock->scope = DAV_LOCKSCOPE_SHARED;
+ if (lock->scope != DAV_LOCKSCOPE_UNKNOWN)
+ continue;
+ }
+
+ if (strcmp(child->name, "owner") == 0 && lock->owner == NULL) {
+ const char *text;
+
+ /* quote all the values in the <DAV:owner> element */
+ apr_xml_quote_elem(p, child);
+
+ /*
+ ** Store a full <DAV:owner> element with namespace definitions
+ ** and an xml:lang definition, if applicable.
+ */
+ apr_xml_to_text(p, child, APR_XML_X2T_FULL_NS_LANG, doc->namespaces,
+ NULL, &text, NULL);
+ lock->owner = text;
+
+ continue;
+ }
+
+ return dav_new_error(p, HTTP_PRECONDITION_FAILED, 0,
+ apr_psprintf(p,
+ "The server cannot satisfy the "
+ "LOCK request due to an unknown XML "
+ "element (\"%s\") within the "
+ "DAV:lockinfo element.",
+ child->name));
+ }
+
+ *lock_request = lock;
+ return NULL;
+}
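+
+/*
+** Editorial illustration (not part of the original source): the parser
+** above accepts a standard RFC 2518 LOCK body such as
+**
+**   <?xml version="1.0" encoding="utf-8" ?>
+**   <D:lockinfo xmlns:D="DAV:">
+**     <D:lockscope><D:exclusive/></D:lockscope>
+**     <D:locktype><D:write/></D:locktype>
+**     <D:owner>http://www.example.com/~user/contact.html</D:owner>
+**   </D:lockinfo>
+**
+** mapping lockscope/locktype/owner onto the dav_lock fields, while the
+** Depth and Timeout request headers supply lock->depth and lock->timeout.
+*/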
+
+/* ---------------------------------------------------------------
+**
+** General lock functions
+**
+*/
+
+/* dav_lock_walker: Walker callback function to record indirect locks */
+static dav_error * dav_lock_walker(dav_walk_resource *wres, int calltype)
+{
+ dav_walker_ctx *ctx = wres->walk_ctx;
+ dav_error *err;
+
+ /* We don't want to set indirects on the target */
+ if ((*wres->resource->hooks->is_same_resource)(wres->resource,
+ ctx->w.root))
+ return NULL;
+
+ if ((err = (*ctx->w.lockdb->hooks->append_locks)(ctx->w.lockdb,
+ wres->resource, 1,
+ ctx->lock)) != NULL) {
+ if (ap_is_HTTP_SERVER_ERROR(err->status)) {
+ /* ### add a higher-level description? */
+ return err;
+ }
+
+ /* add to the multistatus response */
+ dav_add_response(wres, err->status, NULL);
+
+ /*
+ ** ### actually, this is probably wrong: we want to fail the whole
+ ** ### LOCK process if something goes bad. maybe the caller should
+ ** ### do a dav_unlock() (e.g. a rollback) if any errors occurred.
+ */
+ }
+
+ return NULL;
+}
+
+/*
+** dav_add_lock: Add a direct lock for resource, and indirect locks for
+** all children, bounded by depth.
+** ### assume request only contains one lock
+*/
+DAV_DECLARE(dav_error *) dav_add_lock(request_rec *r,
+ const dav_resource *resource,
+ dav_lockdb *lockdb, dav_lock *lock,
+ dav_response **response)
+{
+ dav_error *err;
+ int depth = lock->depth;
+
+ *response = NULL;
+
+ /* Requested lock can be:
+ * Depth: 0 for null resource, existing resource, or existing collection
+ * Depth: Inf for existing collection
+ */
+
+ /*
+ ** 2518 9.2 says to ignore depth if target is not a collection (it has
+ ** no internal children); pretend the client gave the correct depth.
+ */
+ if (!resource->collection) {
+ depth = 0;
+ }
+
+ /* In all cases, first add direct entry in lockdb */
+
+ /*
+ ** Append the new (direct) lock to the resource's existing locks.
+ **
+ ** Note: this also handles locknull resources
+ */
+ if ((err = (*lockdb->hooks->append_locks)(lockdb, resource, 0,
+ lock)) != NULL) {
+ /* ### maybe add a higher-level description */
+ return err;
+ }
+
+ if (depth > 0) {
+ /* Walk existing collection and set indirect locks */
+ dav_walker_ctx ctx = { { 0 } };
+ dav_response *multi_status;
+
+ ctx.w.walk_type = DAV_WALKTYPE_NORMAL | DAV_WALKTYPE_AUTH;
+ ctx.w.func = dav_lock_walker;
+ ctx.w.walk_ctx = &ctx;
+ ctx.w.pool = r->pool;
+ ctx.w.root = resource;
+ ctx.w.lockdb = lockdb;
+
+ ctx.r = r;
+ ctx.lock = lock;
+
+ err = (*resource->hooks->walk)(&ctx.w, DAV_INFINITY, &multi_status);
+ if (err != NULL) {
+ /* implies a 5xx status code occurred. screw the multistatus */
+ return err;
+ }
+
+ if (multi_status != NULL) {
+ /* manufacture a 207 error for the multistatus response */
+ *response = multi_status;
+ return dav_new_error(r->pool, HTTP_MULTI_STATUS, 0,
+ "Error(s) occurred on resources during the "
+ "addition of a depth lock.");
+ }
+ }
+
+ return NULL;
+}
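+
+/*
+** Editorial sketch (not part of the original source): the LOCK handler
+** combines the helpers in this file roughly as follows (doc is the parsed
+** request body, lockdb is already open read/write, error handling elided):
+**
+**   dav_lock *lock;
+**   dav_response *multi_response;
+**   dav_error *err;
+**
+**   err = dav_lock_parse_lockinfo(r, resource, lockdb, doc, &lock);
+**   if (err == NULL)
+**       err = dav_add_lock(r, resource, lockdb, lock, &multi_response);
+**
+** A non-NULL multi_response reports per-child failures (207 Multi-Status);
+** NULL returns from both calls mean the direct and indirect locks were
+** recorded.
+*/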
+
+/*
+** dav_lock_query: Opens the lock database. Returns a linked list of
+** dav_lock structures for all direct locks on path.
+*/
+DAV_DECLARE(dav_error*) dav_lock_query(dav_lockdb *lockdb,
+ const dav_resource *resource,
+ dav_lock **locks)
+{
+ /* If no lock database, return empty result */
+ if (lockdb == NULL) {
+ *locks = NULL;
+ return NULL;
+ }
+
+ /* ### insert a higher-level description? */
+ return (*lockdb->hooks->get_locks)(lockdb, resource,
+ DAV_GETLOCKS_RESOLVED,
+ locks);
+}
+
+/* dav_unlock_walker: Walker callback function to remove indirect locks */
+static dav_error * dav_unlock_walker(dav_walk_resource *wres, int calltype)
+{
+ dav_walker_ctx *ctx = wres->walk_ctx;
+ dav_error *err;
+
+ /* Before removing the lock, do any auto-checkin required */
+ if (wres->resource->working) {
+ /* ### get rid of this typecast */
+ if ((err = dav_auto_checkin(ctx->r, (dav_resource *) wres->resource,
+ 0 /*undo*/, 1 /*unlock*/, NULL))
+ != NULL) {
+ return err;
+ }
+ }
+
+ if ((err = (*ctx->w.lockdb->hooks->remove_lock)(ctx->w.lockdb,
+ wres->resource,
+ ctx->locktoken)) != NULL) {
+ /* ### should we stop or return a multistatus? looks like STOP */
+ /* ### add a higher-level description? */
+ return err;
+ }
+
+ return NULL;
+}
+
+/*
+** dav_get_direct_resource:
+**
+** Find a lock on the specified resource, then return the resource the
+** lock was applied to (in other words, given a (possibly) indirect lock,
+** return the direct lock's corresponding resource).
+**
+** If the lock is an indirect lock, this usually means traversing up the
+** namespace [repository] hierarchy. Note that some lock providers may be
+** able to return this information without a traversal.
+*/
+static dav_error * dav_get_direct_resource(apr_pool_t *p,
+ dav_lockdb *lockdb,
+ const dav_locktoken *locktoken,
+ const dav_resource *resource,
+ const dav_resource **direct_resource)
+{
+ if (lockdb->hooks->lookup_resource != NULL) {
+ return (*lockdb->hooks->lookup_resource)(lockdb, locktoken,
+ resource, direct_resource);
+ }
+
+ *direct_resource = NULL;
+
+    /* Find the top of this lock:
+ * If r->filename's direct locks include locktoken, use r->filename.
+ * If r->filename's indirect locks include locktoken, retry r->filename/..
+ * Else fail.
+ */
+ while (resource != NULL) {
+ dav_error *err;
+ dav_lock *lock;
+ dav_resource *parent;
+
+ /*
+ ** Find the lock specified by <locktoken> on <resource>. If it is
+ ** an indirect lock, then partial results are okay. We're just
+ ** trying to find the thing and know whether it is a direct or
+ ** an indirect lock.
+ */
+ if ((err = (*lockdb->hooks->find_lock)(lockdb, resource, locktoken,
+ 1, &lock)) != NULL) {
+ /* ### add a higher-level desc? */
+ return err;
+ }
+
+ /* not found! that's an error. */
+ if (lock == NULL) {
+ return dav_new_error(p, HTTP_BAD_REQUEST, 0,
+ "The specified locktoken does not correspond "
+ "to an existing lock on this resource.");
+ }
+
+ if (lock->rectype == DAV_LOCKREC_DIRECT) {
+ /* we found the direct lock. return this resource. */
+
+ *direct_resource = resource;
+ return NULL;
+ }
+
+ /* the lock was indirect. move up a level in the URL namespace */
+ if ((err = (*resource->hooks->get_parent_resource)(resource,
+ &parent)) != NULL) {
+ /* ### add a higher-level desc? */
+ return err;
+ }
+ resource = parent;
+ }
+
+ return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "The lock database is corrupt. A direct lock could "
+ "not be found for the corresponding indirect lock "
+ "on this resource.");
+}
+
+/*
+** dav_unlock: Removes all direct and indirect locks for r->filename,
+** with given locktoken. If locktoken == null_locktoken, all locks
+** are removed. If r->filename represents an indirect lock,
+** we must unlock the appropriate direct lock.
+** Returns OK or appropriate HTTP_* response and logs any errors.
+**
+** ### We've already crawled the tree to ensure everything was locked
+** by us; there should be no need to incorporate a rollback.
+*/
+DAV_DECLARE(int) dav_unlock(request_rec *r, const dav_resource *resource,
+ const dav_locktoken *locktoken)
+{
+ int result;
+ dav_lockdb *lockdb;
+ const dav_resource *lock_resource = resource;
+ const dav_hooks_locks *hooks = DAV_GET_HOOKS_LOCKS(r);
+ const dav_hooks_repository *repos_hooks = resource->hooks;
+ dav_walker_ctx ctx = { { 0 } };
+ dav_response *multi_status;
+ dav_error *err;
+
+ /* If no locks provider, then there is nothing to unlock. */
+ if (hooks == NULL) {
+ return OK;
+ }
+
+ /* 2518 requires the entire lock to be removed if resource/locktoken
+ * point to an indirect lock. We need resource of the _direct_
+ * lock in order to walk down the tree and remove the locks. So,
+ * If locktoken != null_locktoken,
+ * Walk up the resource hierarchy until we see a direct lock.
+ * Or, we could get the direct lock's db/key, pick out the URL
+ * and do a subrequest. I think walking up is faster and will work
+ * all the time.
+ * Else
+ * Just start removing all locks at and below resource.
+ */
+
+ if ((err = (*hooks->open_lockdb)(r, 0, 1, &lockdb)) != NULL) {
+ /* ### return err! maybe add a higher-level desc */
+ /* ### map result to something nice; log an error */
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ if (locktoken != NULL
+ && (err = dav_get_direct_resource(r->pool, lockdb,
+ locktoken, resource,
+ &lock_resource)) != NULL) {
+ /* ### add a higher-level desc? */
+ /* ### should return err! */
+ return err->status;
+ }
+
+    /* At this point, lock_resource/locktoken refers to a direct lock (key), i.e.
+ * the root of a depth > 0 lock, or locktoken is null.
+ */
+ ctx.w.walk_type = DAV_WALKTYPE_NORMAL | DAV_WALKTYPE_LOCKNULL;
+ ctx.w.func = dav_unlock_walker;
+ ctx.w.walk_ctx = &ctx;
+ ctx.w.pool = r->pool;
+ ctx.w.root = lock_resource;
+ ctx.w.lockdb = lockdb;
+
+ ctx.r = r;
+ ctx.locktoken = locktoken;
+
+ err = (*repos_hooks->walk)(&ctx.w, DAV_INFINITY, &multi_status);
+
+ /* ### fix this! */
+ /* ### do something with multi_status */
+ result = err == NULL ? OK : err->status;
+
+ (*hooks->close_lockdb)(lockdb);
+
+ return result;
+}
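+
+/*
+** Editorial sketch (not part of the original source): the UNLOCK handler
+** obtains the token from the Lock-Token request header (turning it into a
+** dav_locktoken is provider-specific, via the parse_locktoken hook) and
+** then calls dav_unlock(), e.g.
+**
+**   const char *txt = apr_table_get(r->headers_in, "Lock-Token");
+**   ...parse txt into locktoken...
+**   result = dav_unlock(r, resource, locktoken);
+**
+** Note that dav_unlock() returns an HTTP status (OK on success) rather
+** than a dav_error.
+*/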
+
+/* dav_inherit_walker: Walker callback function to inherit locks */
+static dav_error * dav_inherit_walker(dav_walk_resource *wres, int calltype)
+{
+ dav_walker_ctx *ctx = wres->walk_ctx;
+
+ if (ctx->skip_root
+ && (*wres->resource->hooks->is_same_resource)(wres->resource,
+ ctx->w.root)) {
+ return NULL;
+ }
+
+ /* ### maybe add a higher-level desc */
+ return (*ctx->w.lockdb->hooks->append_locks)(ctx->w.lockdb,
+ wres->resource, 1,
+ ctx->lock);
+}
+
+/*
+** dav_inherit_locks: When a resource or collection is added to a collection,
+** locks on the collection should be inherited by the new resource/collection
+** (MOVE, MKCOL, etc). Here we propagate any direct or indirect locks from
+** the parent of the resource to the resource and below.
+*/
+static dav_error * dav_inherit_locks(request_rec *r, dav_lockdb *lockdb,
+ const dav_resource *resource,
+ int use_parent)
+{
+ dav_error *err;
+ const dav_resource *which_resource;
+ dav_lock *locks;
+ dav_lock *scan;
+ dav_lock *prev;
+ dav_walker_ctx ctx = { { 0 } };
+ const dav_hooks_repository *repos_hooks = resource->hooks;
+ dav_response *multi_status;
+
+ if (use_parent) {
+ dav_resource *parent;
+ if ((err = (*repos_hooks->get_parent_resource)(resource,
+ &parent)) != NULL) {
+ /* ### add a higher-level desc? */
+ return err;
+ }
+ if (parent == NULL) {
+ /* ### map result to something nice; log an error */
+ return dav_new_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+ "Could not fetch parent resource. Unable to "
+ "inherit locks from the parent and apply "
+ "them to this resource.");
+ }
+ which_resource = parent;
+ }
+ else {
+ which_resource = resource;
+ }
+
+ if ((err = (*lockdb->hooks->get_locks)(lockdb, which_resource,
+ DAV_GETLOCKS_PARTIAL,
+ &locks)) != NULL) {
+ /* ### maybe add a higher-level desc */
+ return err;
+ }
+
+ if (locks == NULL) {
+ /* No locks to propagate, just return */
+ return NULL;
+ }
+
+ /*
+ ** (1) Copy all indirect locks from our parent;
+ ** (2) Create indirect locks for the depth infinity, direct locks
+ ** in our parent.
+ **
+ ** The append_locks call in the walker callback will do the indirect
+ ** conversion, but we need to remove any direct locks that are NOT
+ ** depth "infinity".
+ */
+ for (scan = locks, prev = NULL;
+ scan != NULL;
+ prev = scan, scan = scan->next) {
+
+ if (scan->rectype == DAV_LOCKREC_DIRECT
+ && scan->depth != DAV_INFINITY) {
+
+ if (prev == NULL)
+ locks = scan->next;
+ else
+ prev->next = scan->next;
+ }
+ }
+
+ /* <locks> has all our new locks. Walk down and propagate them. */
+
+ ctx.w.walk_type = DAV_WALKTYPE_NORMAL | DAV_WALKTYPE_LOCKNULL;
+ ctx.w.func = dav_inherit_walker;
+ ctx.w.walk_ctx = &ctx;
+ ctx.w.pool = r->pool;
+ ctx.w.root = resource;
+ ctx.w.lockdb = lockdb;
+
+ ctx.r = r;
+ ctx.lock = locks;
+ ctx.skip_root = !use_parent;
+
+ /* ### do something with multi_status */
+ return (*repos_hooks->walk)(&ctx.w, DAV_INFINITY, &multi_status);
+}
+
+/* ---------------------------------------------------------------
+**
+** Functions dealing with lock-null resources
+**
+*/
+
+/*
+** dav_get_resource_state: Returns the state of the resource
+** r->filename: DAV_RESOURCE_NULL, DAV_RESOURCE_LOCK_NULL,
+** or DAV_RESOURCE_EXIST.
+**
+** Returns DAV_RESOURCE_ERROR if an error occurs.
+*/
+DAV_DECLARE(int) dav_get_resource_state(request_rec *r,
+ const dav_resource *resource)
+{
+ const dav_hooks_locks *hooks = DAV_GET_HOOKS_LOCKS(r);
+
+ if (resource->exists)
+ return DAV_RESOURCE_EXISTS;
+
+ if (hooks != NULL) {
+ dav_error *err;
+ dav_lockdb *lockdb;
+ int locks_present;
+
+ /*
+ ** A locknull resource has the form:
+ **
+ ** known-dir "/" locknull-file
+ **
+ ** It would be nice to look into <resource> to verify this form,
+ ** but it does not have enough information for us. Instead, we
+ ** can look at the path_info. If the form does not match, then
+ ** there is no way we could have a locknull resource -- it must
+ ** be a plain, null resource.
+ **
+ ** Apache sets r->filename to known-dir/unknown-file and r->path_info
+ ** to "" for the "proper" case. If anything is in path_info, then
+ ** it can't be a locknull resource.
+ **
+ ** ### I bet this path_info hack doesn't work for repositories.
+ ** ### Need input from repository implementors! What kind of
+ ** ### restructure do we need? New provider APIs?
+ */
+ if (r->path_info != NULL && *r->path_info != '\0') {
+ return DAV_RESOURCE_NULL;
+ }
+
+ if ((err = (*hooks->open_lockdb)(r, 1, 1, &lockdb)) == NULL) {
+ /* note that we might see some expired locks... *shrug* */
+ err = (*hooks->has_locks)(lockdb, resource, &locks_present);
+ (*hooks->close_lockdb)(lockdb);
+ }
+
+ if (err != NULL) {
+ /* ### don't log an error. return err. add higher-level desc. */
+
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Failed to query lock-null status for %s",
+ r->filename);
+
+ return DAV_RESOURCE_ERROR;
+ }
+
+ if (locks_present)
+ return DAV_RESOURCE_LOCK_NULL;
+ }
+
+ return DAV_RESOURCE_NULL;
+}
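+
+/*
+** Editorial sketch (not part of the original source): a create-style
+** handler (PUT, MKCOL) typically captures the state before creating the
+** resource and then lets dav_notify_created() below fix up the lock
+** bookkeeping afterwards (lockdb assumed open, error handling elided):
+**
+**   int resource_state = dav_get_resource_state(r, resource);
+**
+**   ...create or overwrite the resource...
+**
+**   err = dav_notify_created(r, lockdb, resource, resource_state, 0);
+**
+** DAV_RESOURCE_ERROR indicates the lock-null query itself failed and is
+** handled before the creation step.
+*/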
+
+DAV_DECLARE(dav_error *) dav_notify_created(request_rec *r,
+ dav_lockdb *lockdb,
+ const dav_resource *resource,
+ int resource_state,
+ int depth)
+{
+ dav_error *err;
+
+ if (resource_state == DAV_RESOURCE_LOCK_NULL) {
+
+ /*
+ ** The resource is no longer a locknull resource. This will remove
+ ** the special marker.
+ **
+ ** Note that a locknull resource has already inherited all of the
+ ** locks from the parent. We do not need to call dav_inherit_locks.
+ **
+ ** NOTE: some lock providers record locks for locknull resources using
+ ** a different key than for regular resources. this will shift
+ ** the lock information between the two key types.
+ */
+ (void)(*lockdb->hooks->remove_locknull_state)(lockdb, resource);
+
+ /*
+ ** There are resources under this one, which are new. We must
+ ** propagate the locks down to the new resources.
+ */
+ if (depth > 0 &&
+ (err = dav_inherit_locks(r, lockdb, resource, 0)) != NULL) {
+ /* ### add a higher level desc? */
+ return err;
+ }
+ }
+ else if (resource_state == DAV_RESOURCE_NULL) {
+
+ /* ### should pass depth to dav_inherit_locks so that it can
+ ** ### optimize for the depth==0 case.
+ */
+
+ /* this resource should inherit locks from its parent */
+ if ((err = dav_inherit_locks(r, lockdb, resource, 1)) != NULL) {
+
+ err = dav_push_error(r->pool, err->status, 0,
+ "The resource was created successfully, but "
+ "there was a problem inheriting locks from "
+ "the parent resource.",
+ err);
+ return err;
+ }
+ }
+ /* else the resource already exists and its locks are correct. */
+
+ return NULL;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/echo/.deps b/rubbos/app/httpd-2.0.64/modules/echo/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/echo/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/echo/.indent.pro b/rubbos/app/httpd-2.0.64/modules/echo/.indent.pro
new file mode 100644
index 00000000..a9fbe9f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/echo/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/rubbos/app/httpd-2.0.64/modules/echo/Makefile b/rubbos/app/httpd-2.0.64/modules/echo/Makefile
new file mode 100644
index 00000000..353de416
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/echo/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/echo
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/echo
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/echo
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/echo/Makefile.in b/rubbos/app/httpd-2.0.64/modules/echo/Makefile.in
new file mode 100644
index 00000000..167b343d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/echo/Makefile.in
@@ -0,0 +1,3 @@
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/echo/NWGNUmakefile b/rubbos/app/httpd-2.0.64/modules/echo/NWGNUmakefile
new file mode 100644
index 00000000..a2eecb32
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/echo/NWGNUmakefile
@@ -0,0 +1,257 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = echo
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Echo Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Echo Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/echo.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_echo.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ echo_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Update this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/echo/config.m4 b/rubbos/app/httpd-2.0.64/modules/echo/config.m4
new file mode 100644
index 00000000..234a7d5e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/echo/config.m4
@@ -0,0 +1,11 @@
+dnl modules enabled in this directory by default
+
+dnl APACHE_MODULE(name, helptext[, objects[, structname[, default[, config]]]])
+
+APACHE_MODPATH_INIT(echo)
+
+APACHE_MODULE(echo, ECHO server, , , no)
+
+APR_ADDTO(LT_LDFLAGS,-export-dynamic)
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/echo/mod_echo.c b/rubbos/app/httpd-2.0.64/modules/echo/mod_echo.c
new file mode 100644
index 00000000..e9e6c33a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/echo/mod_echo.c
@@ -0,0 +1,102 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ap_config.h"
+#include "ap_mmn.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_connection.h"
+
+#include "apr_buckets.h"
+#include "util_filter.h"
+
+module AP_MODULE_DECLARE_DATA echo_module;
+
+typedef struct {
+ int bEnabled;
+} EchoConfig;
+
+static void *create_echo_server_config(apr_pool_t *p, server_rec *s)
+{
+ EchoConfig *pConfig = apr_pcalloc(p, sizeof *pConfig);
+
+ pConfig->bEnabled = 0;
+
+ return pConfig;
+}
+
+static const char *echo_on(cmd_parms *cmd, void *dummy, int arg)
+{
+ EchoConfig *pConfig = ap_get_module_config(cmd->server->module_config,
+ &echo_module);
+ pConfig->bEnabled = arg;
+
+ return NULL;
+}
+
+static int process_echo_connection(conn_rec *c)
+{
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ apr_status_t rv;
+ EchoConfig *pConfig = ap_get_module_config(c->base_server->module_config,
+ &echo_module);
+
+ if (!pConfig->bEnabled) {
+ return DECLINED;
+ }
+
+ bb = apr_brigade_create(c->pool, c->bucket_alloc);
+
+ for ( ; ; ) {
+ /* Get a single line of input from the client */
+ rv = ap_get_brigade(c->input_filters, bb, AP_MODE_GETLINE,
+ APR_BLOCK_READ, 0);
+ if (rv != APR_SUCCESS || APR_BRIGADE_EMPTY(bb)) {
+ apr_brigade_destroy(bb);
+ break;
+ }
+
+ /* Make sure the data is flushed to the client */
+ b = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ ap_pass_brigade(c->output_filters, bb);
+ }
+ return OK;
+}
+
+static const command_rec echo_cmds[] =
+{
+ AP_INIT_FLAG("ProtocolEcho", echo_on, NULL, RSRC_CONF,
+ "Run an echo server on this host"),
+ { NULL }
+};
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_process_connection(process_echo_connection, NULL, NULL,
+ APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA echo_module = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ create_echo_server_config, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ echo_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
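Because mod_echo hooks the connection-processing phase, enabling it turns an
entire listening port into a raw echo service instead of an HTTP endpoint. A
minimal httpd.conf sketch is shown below; the port number and the shared
object name are illustrative assumptions, not part of this change:

    # assumes mod_echo was built as a DSO named mod_echo.so
    Listen 8081
    LoadModule echo_module modules/mod_echo.so
    <VirtualHost *:8081>
        ProtocolEcho On
    </VirtualHost>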
diff --git a/rubbos/app/httpd-2.0.64/modules/echo/mod_echo.dsp b/rubbos/app/httpd-2.0.64/modules/echo/mod_echo.dsp
new file mode 100644
index 00000000..5f1543cf
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/echo/mod_echo.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_echo" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_echo - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_echo.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_echo.mak" CFG="mod_echo - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_echo - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_echo - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_echo - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_echo_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_echo.so" /base:@..\..\os\win32\BaseAddr.ref,mod_echo.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_echo.so" /base:@..\..\os\win32\BaseAddr.ref,mod_echo.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_echo - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_echo_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_echo.so" /base:@..\..\os\win32\BaseAddr.ref,mod_echo.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_echo.so" /base:@..\..\os\win32\BaseAddr.ref,mod_echo.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_echo - Win32 Release"
+# Name "mod_echo - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_echo.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_echo.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_echo - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_echo.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_echo.so "echo_module for Apache" ../../include/ap_release.h > .\mod_echo.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_echo - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_echo.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_echo.so "echo_module for Apache" ../../include/ap_release.h > .\mod_echo.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/echo/modules.mk b/rubbos/app/httpd-2.0.64/modules/echo/modules.mk
new file mode 100644
index 00000000..ceb52a1b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/echo/modules.mk
@@ -0,0 +1,3 @@
+DISTCLEAN_TARGETS = modules.mk
+static =
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/.deps b/rubbos/app/httpd-2.0.64/modules/experimental/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/.indent.pro b/rubbos/app/httpd-2.0.64/modules/experimental/.indent.pro
new file mode 100644
index 00000000..a9fbe9f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/Makefile b/rubbos/app/httpd-2.0.64/modules/experimental/Makefile
new file mode 100644
index 00000000..9d5e211d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/experimental
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/experimental
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/experimental
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/Makefile.in b/rubbos/app/httpd-2.0.64/modules/experimental/Makefile.in
new file mode 100644
index 00000000..7c5c149d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/Makefile.in
@@ -0,0 +1,3 @@
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUauthldap b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUauthldap
new file mode 100644
index 00000000..4963a1dd
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUauthldap
@@ -0,0 +1,262 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(LDAPSDK)/inc \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = authldap
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) LDAP Authentication Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = AuthLDAP Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified they will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/authldap.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_auth_ldap.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ lldapsdk \
+ lldapssl \
+ lldapx \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ util_ldap_connection_find \
+ util_ldap_connection_close \
+ util_ldap_connection_unbind \
+ util_ldap_connection_cleanup \
+ util_ldap_cache_checkuserid \
+ util_ldap_cache_compare \
+ util_ldap_cache_comparedn \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ @$(LDAPSDK)/imports/lldapsdk.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ auth_ldap_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+ copy charset.conv $(INSTALL)\Apache2\conf\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUcharsetl b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUcharsetl
new file mode 100644
index 00000000..b4e4f595
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUcharsetl
@@ -0,0 +1,257 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ -DAP_WANT_DIR_TRANSLATION \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = charsetl
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Charset Lite Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = charsetl
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified they will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/charsetl.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_charset_lite.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ charset_lite_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUdsk_cach b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUdsk_cach
new file mode 100644
index 00000000..879dd86e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUdsk_cach
@@ -0,0 +1,261 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = dsk_cach
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Disk Cache Sub-Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = dsk_cach
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/dsk_cach.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_disk_cache.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Apache2 \
+ Libc \
+ mod_cach \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @$(APR)/aprlib.imp \
+ @httpd.imp \
+ @mod_cache.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ disk_cache_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUexample b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUexample
new file mode 100644
index 00000000..01b7b85e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUexample
@@ -0,0 +1,256 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = example
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Example Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Example Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified they will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/example.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_example.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ example_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmakefile b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmakefile
new file mode 100644
index 00000000..d6584514
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmakefile
@@ -0,0 +1,256 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME =
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION =
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME =
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE =
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified they will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/charsetl.nlm \
+ $(OBJDIR)/example.nlm \
+ $(OBJDIR)/moddumpio.nlm \
+ $(OBJDIR)/mod_cach.nlm \
+ $(OBJDIR)/mem_cach.nlm \
+ $(OBJDIR)/dsk_cach.nlm \
+ $(EOLIST)
+
+# If LDAPSDK has been defined then build the auth_ldap module
+ifneq "$(LDAPSDK)" ""
+TARGET_nlm += $(OBJDIR)/authldap.nlm \
+ $(OBJDIR)/utilldap.nlm \
+ $(EOLIST)
+endif
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmem_cach b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmem_cach
new file mode 100644
index 00000000..236d867a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmem_cach
@@ -0,0 +1,265 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ -DDEBUG \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = mem_cach
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Memory Cache Sub-Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = mem_cach
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/mem_cach.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_mem_cache.o \
+ $(OBJDIR)/cache_hash.o \
+ $(OBJDIR)/cache_pqueue.o \
+ $(OBJDIR)/cache_cache.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Apache2 \
+ Libc \
+ mod_cach \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @$(APR)/aprlib.imp \
+ @httpd.imp \
+ @mod_cache.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ mem_cache_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmod_cach b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmod_cach
new file mode 100644
index 00000000..3665b764
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmod_cach
@@ -0,0 +1,264 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ -DDEBUG \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = mod_cach
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Cache module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = mod_cach
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/mod_cach.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/cache_util.o \
+ $(OBJDIR)/cache_storage.o \
+ $(OBJDIR)/mod_cache.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Apache2 \
+ Libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @$(APR)/aprlib.imp \
+ @httpd.imp \
+ @netware.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ @mod_cache.imp \
+ cache_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmoddumpio b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmoddumpio
new file mode 100644
index 00000000..53c74bb5
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUmoddumpio
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = moddumpio
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Debugging IO Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = DumpIO Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified they will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/moddumpio.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_dumpio.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ dumpio_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUutilldap b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUutilldap
new file mode 100644
index 00000000..376325a2
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/NWGNUutilldap
@@ -0,0 +1,266 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(LDAPSDK)/inc \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = utilldap
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) LDAP Utility Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = UtilLDAP Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified they will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/utilldap.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/util_ldap.o \
+ $(OBJDIR)/util_ldap_cache.o \
+ $(OBJDIR)/util_ldap_cache_mgr.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ lldapsdk \
+ lldapssl \
+ lldapx \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ @$(LDAPSDK)/imports/lldapsdk.imp \
+ @$(LDAPSDK)/imports/lldapssl.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ ldap_module \
+ util_ldap_connection_find \
+ util_ldap_connection_close \
+ util_ldap_connection_unbind \
+ util_ldap_connection_cleanup \
+ util_ldap_cache_checkuserid \
+ util_ldap_cache_getuserdn \
+ util_ldap_cache_compare \
+ util_ldap_cache_comparedn \
+ util_ldap_ssl_supported \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/README b/rubbos/app/httpd-2.0.64/modules/experimental/README
new file mode 100644
index 00000000..447c16ee
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/README
@@ -0,0 +1,41 @@
+README for Apache 2.0 Example Module
+[April, 1997, updated May 2000]
+
+The files in the modules/experimental directory under the Apache
+distribution directory tree are provided as an example to those who
+wish to write modules that use the Apache API.
+
+The main file is mod_example.c, which illustrates all the different
+callback mechanisms and call syntaxes. By no means does an add-on
+module need to include routines for all of the callbacks - quite the
+contrary!
+
+The example module is an actual working module. If you link it into
+your server, enable the "example-handler" handler for a location, and then
+browse to that location, you will see a display of some of the tracing
+the example module did as the various callbacks were made.
+
+To include the example module in your server, add --enable-example to the
+other ./configure arguments when running ./configure from the httpd-2.0
+directory. After that, run 'make'.
+
+To add another module of your own:
+
+ A. cp modules/experimental/mod_example.c modules/experimental/mod_myexample.c
+ B. Modify the file
+ C. Build the server with --enable-myexample
+
+To activate the example module, include a block similar to the
+following in your httpd.conf file:
+
+ <Location /example-info>
+ SetHandler example-handler
+ </Location>
+
+As an alternative, you can put the following into a .htaccess file and
+then request the file "test.example" from that location:
+
+ AddHandler example-handler .example
+
+After reloading/restarting your server, you should be able to browse
+to this location and see the brief display mentioned earlier.
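If the example module is built as a DSO rather than linked statically, it
also has to be loaded before the handler can be mapped. A minimal sketch,
assuming a shared build that produced mod_example.so:

    LoadModule example_module modules/mod_example.so
    <Location /example-info>
        SetHandler example-handler
    </Location>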
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/README.ldap b/rubbos/app/httpd-2.0.64/modules/experimental/README.ldap
new file mode 100644
index 00000000..c9445b81
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/README.ldap
@@ -0,0 +1,47 @@
+Quick installation instructions (UNIX):
+
+- Building on generic Unix:
+
+ Add generic ldap support and the TWO ldap modules to the build, like this:
+
+ ./configure --with-ldap --enable-ldap --enable-auth-ldap
+
+ The --with-ldap switches on LDAP library linking in apr-util. Make
+ sure that you have an LDAP client library available such as those
+ from Netscape/iPlanet/Sun One or the OpenLDAP project.
+
+ The --enable-ldap option switches on the LDAP caching module. This
+ module is a support module for other LDAP modules, and is not useful
+ on its own. This module is required, but caching can be disabled
+ via the configuration directive LDAPCacheEntries.
+
+ The --enable-auth-ldap option switches on the LDAP authentication
+ module.
+
+- Building on AIX:
+
+ The following ./configure line is reported to work for AIX:
+
+ CC=cc_r; export CC
+ CPPFLAGS=-qcpluscmt;export CPPFLAGS
+ ./configure --with-mpm=worker --prefix=/usr/local/apache \
+ --enable-dav=static --enable-dav_fs=static --enable-ssl=static \
+ --with-ldap=yes --with-ldap-include=/usr/local/include \
+ --with-ldap-lib=/usr/local/lib --enable-ldap=static \
+ --enable-auth_ldap=static
+
+
+Quick installation instructions (win32):
+
+1. Copy the file srclib\apr-util\include\apr_ldap.hw to apr_ldap.h
+2. The Netscape/iPlanet LDAP libraries are installed in srclib\ldap
+3. Compile the two modules util_ldap and mod_auth_ldap using the dsp files
+4. You get a mod_auth_ldap.so and a util_ldap.so module
+5. Put them in the modules directory, and don't forget to copy
+   nsldap32v50.dll somewhere apache.exe will find it
+6. Load the two modules in your httpd.conf, like below:
+ LoadModule ldap_module modules/util_ldap.so
+ LoadModule auth_ldap_module modules/mod_auth_ldap.so
+7. Configure the directories as described in the docs.
+
+
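+As a quick sanity check that both modules are loaded and working, a
+minimal httpd.conf fragment along the following lines can be used to
+protect a location with LDAP authentication. The host name and base DN
+below are placeholders only; adjust them for your directory, and consult
+the mod_auth_ldap documentation for the full list of directives:
+
+  <Location /ldap-protected>
+    AuthType Basic
+    AuthName "LDAP protected area"
+    AuthLDAPURL ldap://ldap.example.com/ou=People,dc=example,dc=com?uid
+    require valid-user
+  </Location>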
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.c b/rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.c
new file mode 100644
index 00000000..6db98f71
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.c
@@ -0,0 +1,171 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_general.h"
+
+#include "mod_cache.h"
+#include "cache_hash.h"
+#include "cache_pqueue.h"
+#include "cache_cache.h"
+
+#if APR_HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#if APR_HAVE_STRING_H
+#include <string.h>
+#endif
+
+struct cache_cache_t {
+ int max_entries;
+ apr_size_t max_size;
+ apr_size_t current_size;
+ int total_purges;
+ long queue_clock;
+ cache_hash_t *ht;
+ cache_pqueue_t *pq;
+ cache_pqueue_set_priority set_pri;
+ cache_pqueue_get_priority get_pri;
+ cache_cache_inc_frequency *inc_entry;
+ cache_cache_get_size *size_entry;
+ cache_cache_get_key *key_entry;
+ cache_cache_free *free_entry;
+};
+
+CACHE_DECLARE(cache_cache_t *)cache_init(int max_entries,
+ apr_size_t max_size,
+ cache_pqueue_get_priority get_pri,
+ cache_pqueue_set_priority set_pri,
+ cache_pqueue_getpos get_pos,
+ cache_pqueue_setpos set_pos,
+ cache_cache_inc_frequency *inc_entry,
+ cache_cache_get_size *size_entry,
+ cache_cache_get_key* key_entry,
+ cache_cache_free *free_entry)
+{
+ cache_cache_t *tmp;
+ tmp = malloc(sizeof(cache_cache_t));
+ tmp->max_entries = max_entries;
+ tmp->max_size = max_size;
+ tmp->current_size = 0;
+ tmp->total_purges = 0;
+ tmp->queue_clock = 0;
+ tmp->get_pri = get_pri;
+ tmp->set_pri = set_pri;
+ tmp->inc_entry = inc_entry;
+ tmp->size_entry = size_entry;
+ tmp->key_entry = key_entry;
+ tmp->free_entry = free_entry;
+
+ tmp->ht = cache_hash_make(max_entries);
+ tmp->pq = cache_pq_init(max_entries, get_pri, get_pos, set_pos);
+
+ return tmp;
+}
+
+CACHE_DECLARE(void) cache_free(cache_cache_t *c)
+{
+ cache_pq_free(c->pq);
+ cache_hash_free(c->ht);
+ free(c);
+}
+
+
+CACHE_DECLARE(void*) cache_find(cache_cache_t* c, const char *key)
+{
+ void *e;
+
+ e = cache_hash_get(c->ht, key, CACHE_HASH_KEY_STRING);
+ if (!e)
+ return NULL;
+
+ return e;
+}
+
+CACHE_DECLARE(void) cache_update(cache_cache_t* c, void *entry)
+{
+ long old_priority;
+ long new_priority;
+
+ old_priority = c->set_pri(c->queue_clock, entry);
+ c->inc_entry(entry);
+ new_priority = c->set_pri(c->queue_clock, entry);
+ cache_pq_change_priority(c->pq, old_priority, new_priority, entry);
+}
+
+CACHE_DECLARE(void) cache_insert(cache_cache_t* c, void *entry)
+{
+ void *ejected = NULL;
+ long priority;
+
+ c->set_pri(c->queue_clock, entry);
+ /* FIX: check if priority of bottom item is greater than inserted one */
+ while ((cache_pq_size(c->pq) >= c->max_entries) ||
+ ((c->current_size + c->size_entry(entry)) > c->max_size)) {
+
+ ejected = cache_pq_pop(c->pq);
+ /* FIX: If ejected is NULL, we'll segfault here */
+ priority = c->get_pri(ejected);
+
+ if (c->queue_clock > priority)
+ c->queue_clock = priority;
+
+ cache_hash_set(c->ht,
+ c->key_entry(ejected),
+ CACHE_HASH_KEY_STRING,
+ NULL);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, "Cache Purge of %s",c->key_entry(ejected));
+ c->current_size -= c->size_entry(ejected);
+ c->free_entry(ejected);
+ c->total_purges++;
+ }
+ c->current_size += c->size_entry(entry);
+
+ cache_pq_insert(c->pq, entry);
+ cache_hash_set(c->ht, c->key_entry(entry), CACHE_HASH_KEY_STRING, entry);
+}
+
+CACHE_DECLARE(void *) cache_pop(cache_cache_t *c)
+{
+ void *entry;
+
+ if (!c)
+ return NULL;
+
+ entry = cache_pq_pop(c->pq);
+
+ if (!entry)
+ return NULL;
+
+ c->current_size -= c->size_entry(entry);
+ cache_hash_set(c->ht, c->key_entry(entry), CACHE_HASH_KEY_STRING, NULL);
+
+ return entry;
+}
+
+CACHE_DECLARE(apr_status_t) cache_remove(cache_cache_t *c, void *entry)
+{
+ apr_size_t entry_size = c->size_entry(entry);
+ apr_status_t rc;
+ rc = cache_pq_remove(c->pq, entry);
+ if (rc != APR_SUCCESS)
+ return rc;
+
+ cache_hash_set(c->ht, c->key_entry(entry), CACHE_HASH_KEY_STRING, NULL);
+ c->current_size -= entry_size;
+
+ return APR_SUCCESS;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.h b/rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.h
new file mode 100644
index 00000000..67189c5f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_cache.h
@@ -0,0 +1,112 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CACHE_CACHE_H
+#define CACHE_CACHE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mod_cache.h"
+
+/**
+ * @file cache_cache.h
+ * @brief Cache Cache Functions
+ */
+
+/**
+ * @defgroup Cache_cache Cache Functions
+ * @ingroup CACHE
+ * @{
+ */
+/** ADT for the cache */
+typedef struct cache_cache_t cache_cache_t;
+
+/** callback to increment the frequency of an item */
+typedef void cache_cache_inc_frequency(void*a);
+/** callback to get the size of an item */
+typedef apr_size_t cache_cache_get_size(void*a);
+/** callback to get the key of an item */
+typedef const char* cache_cache_get_key(void *a);
+/** callback to free an entry */
+typedef void cache_cache_free(void *a);
+
+/**
+ * initialize the cache ADT
+ * @param max_entries the maximum number of entries in the cache
+ * @param max_size the maximum total size of the cache
+ * @param get_pri callback to get the priority of an entry
+ * @param set_pri callback to set the priority of an entry
+ * @param get_pos callback to get the position of an entry in the cache
+ * @param set_pos callback to set the position of an entry in the cache
+ * @param inc_entry callback to increment the frequency of an entry
+ * @param size_entry callback to get the size of an entry
+ * @param key_entry callback to get the key of an entry
+ * @param free_entry callback to free an entry
+ */
+CACHE_DECLARE(cache_cache_t *)cache_init(int max_entries,
+ apr_size_t max_size,
+ cache_pqueue_get_priority get_pri,
+ cache_pqueue_set_priority set_pri,
+ cache_pqueue_getpos get_pos,
+ cache_pqueue_setpos set_pos,
+ cache_cache_inc_frequency *inc_entry,
+ cache_cache_get_size *size_entry,
+ cache_cache_get_key *key_entry,
+ cache_cache_free *free_entry);
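+
+/*
+ * Minimal usage sketch. The my_* callbacks and the entry object below are
+ * hypothetical stand-ins for whatever the caller (e.g. mod_mem_cache)
+ * actually provides; they are not part of this API:
+ *
+ *   cache_cache_t *c = cache_init(512, 1024 * 1024,
+ *                                 my_get_pri, my_set_pri,
+ *                                 my_get_pos, my_set_pos,
+ *                                 my_inc_frequency, my_get_size,
+ *                                 my_get_key, my_free);
+ *   cache_insert(c, entry);             (may purge entries to make room)
+ *   e = cache_find(c, my_get_key(entry));
+ *   cache_free(c);
+ */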
+
+/**
+ * free up the cache
+ * @param c the cache
+ */
+CACHE_DECLARE(void) cache_free(cache_cache_t *c);
+/**
+ * find an entry in the cache; returns NULL if no entry has the given key
+ * @param c the cache
+ * @param key the key
+ */
+CACHE_DECLARE(void*) cache_find(cache_cache_t* c, const char *key);
+/**
+ * update an entry already in the cache, incrementing its frequency
+ * and recalculating its priority
+ * @param c the cache
+ * @param entry the entry
+ */
+CACHE_DECLARE(void) cache_update(cache_cache_t* c, void *entry);
+/**
+ * insert an entry into the cache
+ * @param c the cache
+ * @param entry the entry
+ */
+CACHE_DECLARE(void) cache_insert(cache_cache_t* c, void *entry);
+/**
+ * pop the lowest priority item off
+ * @param c the cache
+ * @returns the entry or NULL
+ */
+CACHE_DECLARE(void *)cache_pop(cache_cache_t* c);
+/**
+ * remove an item from the cache
+ * @param c the cache
+ * @param entry the actual entry (from a find)
+ */
+CACHE_DECLARE(apr_status_t) cache_remove(cache_cache_t* c, void *entry);
+/** @} */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !CACHE_CACHE_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.c b/rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.c
new file mode 100644
index 00000000..89552a18
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.c
@@ -0,0 +1,290 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_general.h"
+
+#include "mod_cache.h"
+#include "cache_hash.h"
+
+#if APR_HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#if APR_HAVE_STRING_H
+#include <string.h>
+#endif
+
+
+/*
+ * The internal form of a hash table.
+ *
+ * The table is an array indexed by the hash of the key; collisions
+ * are resolved by hanging a linked list of hash entries off each
+ * element of the array. Although this is a really simple design it
+ * isn't too bad in practice.
+ */
+
+typedef struct cache_hash_entry_t cache_hash_entry_t;
+
+struct cache_hash_entry_t {
+ cache_hash_entry_t *next;
+ unsigned int hash;
+ const void *key;
+ apr_ssize_t klen;
+ const void *val;
+};
+
+/*
+ * Data structure for iterating through a hash table.
+ *
+ * We keep a pointer to the next hash entry here to allow the current
+ * hash entry to be freed or otherwise mangled between calls to
+ * cache_hash_next().
+ */
+struct cache_hash_index_t {
+ cache_hash_t *ht;
+ cache_hash_entry_t *this, *next;
+ int index;
+};
+
+/*
+ * The array has ht->max + 1 elements; an entry's bucket is chosen as
+ * hash % ht->max.
+ * The count of hash entries may be greater depending on the chosen
+ * collision rate.
+ */
+struct cache_hash_t {
+ cache_hash_entry_t **array;
+ cache_hash_index_t iterator; /* For cache_hash_first(NULL, ...) */
+ int count, max;
+};
+
+/*
+ * Hash creation functions.
+ */
+static cache_hash_entry_t **alloc_array(cache_hash_t *ht, int max)
+{
+ return calloc(1, sizeof(*ht->array) * (max + 1));
+}
+
+CACHE_DECLARE(cache_hash_t *) cache_hash_make(apr_size_t size)
+{
+ cache_hash_t *ht;
+ ht = malloc(sizeof(cache_hash_t));
+ if (!ht) {
+ return NULL;
+ }
+ ht->count = 0;
+ ht->max = size;
+ ht->array = alloc_array(ht, ht->max);
+ if (!ht->array) {
+ free(ht);
+ return NULL;
+ }
+ return ht;
+}
+
+CACHE_DECLARE(void) cache_hash_free(cache_hash_t *ht)
+{
+ if (ht) {
+ if (ht->array) {
+ free (ht->array);
+ }
+ free (ht);
+ }
+}
+/*
+ * Hash iteration functions.
+ */
+
+CACHE_DECLARE(cache_hash_index_t *) cache_hash_next(cache_hash_index_t *hi)
+{
+ hi->this = hi->next;
+ while (!hi->this) {
+ if (hi->index > hi->ht->max)
+ return NULL;
+ hi->this = hi->ht->array[hi->index++];
+ }
+ hi->next = hi->this->next;
+ return hi;
+}
+
+CACHE_DECLARE(cache_hash_index_t *) cache_hash_first(cache_hash_t *ht)
+{
+ cache_hash_index_t *hi;
+
+ hi = &ht->iterator;
+ hi->ht = ht;
+ hi->index = 0;
+ hi->this = NULL;
+ hi->next = NULL;
+ return cache_hash_next(hi);
+}
+
+CACHE_DECLARE(void) cache_hash_this(cache_hash_index_t *hi,
+ const void **key,
+ apr_ssize_t *klen,
+ void **val)
+{
+ if (key) *key = hi->this->key;
+ if (klen) *klen = hi->this->klen;
+ if (val) *val = (void *)hi->this->val;
+}
+
+
+/*
+ * This is where we keep the details of the hash function and control
+ * the maximum collision rate.
+ *
+ * If val is non-NULL it creates and initializes a new hash entry if
+ * there isn't already one there; it returns an updatable pointer so
+ * that hash entries can be removed.
+ */
+
+static cache_hash_entry_t **find_entry(cache_hash_t *ht,
+ const void *key,
+ apr_ssize_t klen,
+ const void *val)
+{
+ cache_hash_entry_t **hep, *he;
+ const unsigned char *p;
+ unsigned int hash;
+ apr_ssize_t i;
+
+ /*
+ * This is the popular `times 33' hash algorithm which is used by
+ * perl and also appears in Berkeley DB. This is one of the best
+ * known hash functions for strings because it is both computed
+ * very fast and distributes very well.
+ *
+ * The originator may be Dan Bernstein but the code in Berkeley DB
+ * cites Chris Torek as the source. The best citation I have found
+ * is "Chris Torek, Hash function for text in C, Usenet message
+ * <27038@mimsy.umd.edu> in comp.lang.c , October, 1990." in Rich
+ * Salz's USENIX 1992 paper about INN which can be found at
+ * <http://citeseer.nj.nec.com/salz92internetnews.html>.
+ *
+ * The magic of number 33, i.e. why it works better than many other
+ * constants, prime or not, has never been adequately explained by
+ * anyone. So I try an explanation: if one experimentally tests all
+ * multipliers between 1 and 256 (as I did while writing a low-level
+ * data structure library some time ago) one detects that even
+ * numbers are not useable at all. The remaining 128 odd numbers
+ * (except for the number 1) work more or less all equally well.
+ * They all distribute in an acceptable way and this way fill a hash
+ * table with an average percent of approx. 86%.
+ *
+ * If one compares the chi^2 values of the variants (see
+ * Bob Jenkins ``Hashing Frequently Asked Questions'' at
+ * http://burtleburtle.net/bob/hash/hashfaq.html for a description
+ * of chi^2), the number 33 not even has the best value. But the
+ * number 33 and a few other equally good numbers like 17, 31, 63,
+ * 127 and 129 have nevertheless a great advantage to the remaining
+ * numbers in the large set of possible multipliers: their multiply
+ * operation can be replaced by a faster operation based on just one
+ * shift plus either a single addition or subtraction operation. And
+ * because a hash function has to both distribute good _and_ has to
+ * be very fast to compute, those few numbers should be preferred.
+ *
+ * -- Ralf S. Engelschall <rse@engelschall.com>
+ */
+ hash = 0;
+ if (klen == CACHE_HASH_KEY_STRING) {
+ for (p = key; *p; p++) {
+ hash = hash * 33 + *p;
+ }
+ klen = p - (const unsigned char *)key;
+ }
+ else {
+ for (p = key, i = klen; i; i--, p++) {
+ hash = hash * 33 + *p;
+ }
+ }
+
+ /* scan linked list */
+ for (hep = &ht->array[hash % ht->max], he = *hep;
+ he;
+ hep = &he->next, he = *hep) {
+ if (he->hash == hash &&
+ he->klen == klen &&
+ memcmp(he->key, key, klen) == 0)
+ break;
+ }
+ if (he || !val)
+ return hep;
+ /* add a new entry for non-NULL values */
+ he = malloc(sizeof(*he));
+ if (!he) {
+ return NULL;
+ }
+ he->next = NULL;
+ he->hash = hash;
+ he->key = key;
+ he->klen = klen;
+ he->val = val;
+ *hep = he;
+ ht->count++;
+ return hep;
+}
+
+CACHE_DECLARE(void *) cache_hash_get(cache_hash_t *ht,
+ const void *key,
+ apr_ssize_t klen)
+{
+ cache_hash_entry_t *he;
+ he = *find_entry(ht, key, klen, NULL);
+ if (he)
+ return (void *)he->val;
+ else
+ return NULL;
+}
+
+CACHE_DECLARE(void *) cache_hash_set(cache_hash_t *ht,
+ const void *key,
+ apr_ssize_t klen,
+ const void *val)
+{
+ cache_hash_entry_t **hep, *tmp;
+ const void *tval;
+ hep = find_entry(ht, key, klen, val);
+ /* If hep == NULL, then the malloc() in find_entry failed */
+ if (hep && *hep) {
+ if (!val) {
+ /* delete entry */
+ tval = (*hep)->val;
+ tmp = *hep;
+ *hep = (*hep)->next;
+ free(tmp);
+ --ht->count;
+ }
+ else {
+ /* replace entry */
+ tval = (*hep)->val;
+ (*hep)->val = val;
+ }
+ /* Return the object just removed from the cache to let the
+ * caller clean it up. Cast the constness away upon return.
+ */
+ return (void *) tval;
+ }
+ /* else key not present and val==NULL */
+ return NULL;
+}
+
+CACHE_DECLARE(int) cache_hash_count(cache_hash_t *ht)
+{
+ return ht->count;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.h b/rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.h
new file mode 100644
index 00000000..ee3d8d12
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_hash.h
@@ -0,0 +1,161 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CACHE_HASH_H
+#define CACHE_HASH_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mod_cache.h"
+
+/**
+ * @file cache_hash.h
+ * @brief Cache Hash Tables
+ */
+
+/**
+ * @defgroup Cache_Hash Hash Tables
+ * @ingroup CACHE
+ * @{
+ */
+
+/**
+ * When passing a key to cache_hash_set or cache_hash_get, this value can be
+ * passed to indicate a string-valued key, and have cache_hash compute the
+ * length automatically.
+ *
+ * @remark cache_hash will use strlen(key) for the length. The null-terminator
+ * is not included in the hash value (why throw a constant in?).
+ * Since the hash table merely references the provided key (rather
+ * than copying it), cache_hash_this() will return the null-term'd key.
+ */
+#define CACHE_HASH_KEY_STRING (-1)
+
+/**
+ * Abstract type for hash tables.
+ */
+typedef struct cache_hash_t cache_hash_t;
+
+/**
+ * Abstract type for scanning hash tables.
+ */
+typedef struct cache_hash_index_t cache_hash_index_t;
+
+/**
+ * Create a hash table.
+ * @param size the number of hash buckets to allocate
+ * @return The hash table just created
+ */
+CACHE_DECLARE(cache_hash_t *) cache_hash_make(apr_size_t size);
+
+/**
+ * Free a hash table.
+ * @param *ht Pointer to the hash table to be freed.
+ * @return void
+ * @remark The caller should ensure that all objects have been removed
+ * from the cache prior to calling cache_hash_free(). Objects
+ * not removed from the cache prior to calling cache_hash_free()
+ * will be inaccessible.
+ */
+CACHE_DECLARE(void) cache_hash_free(cache_hash_t *ht);
+
+
+/**
+ * Associate a value with a key in a hash table.
+ * @param ht The hash table
+ * @param key Pointer to the key
+ * @param klen Length of the key. Can be CACHE_HASH_KEY_STRING to use the string length.
+ * @param val Value to associate with the key
+ * @remark If the value is NULL the hash entry is deleted.
+ * @return The value of the deleted cache entry (so the caller can clean it up).
+ */
+CACHE_DECLARE(void *) cache_hash_set(cache_hash_t *ht, const void *key,
+ apr_ssize_t klen, const void *val);
+
+/**
+ * Look up the value associated with a key in a hash table.
+ * @param ht The hash table
+ * @param key Pointer to the key
+ * @param klen Length of the key. Can be CACHE_HASH_KEY_STRING to use the string length.
+ * @return Returns NULL if the key is not present.
+ */
+CACHE_DECLARE(void *) cache_hash_get(cache_hash_t *ht, const void *key,
+ apr_ssize_t klen);
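+
+/*
+ * Sketch of typical set/get/delete usage (keys and values are
+ * illustrative; note that the table stores the key and value pointers
+ * directly and does not copy them):
+ *
+ *   cache_hash_t *ht = cache_hash_make(64);
+ *   cache_hash_set(ht, "foo", CACHE_HASH_KEY_STRING, obj);
+ *   obj = cache_hash_get(ht, "foo", CACHE_HASH_KEY_STRING);
+ *   old = cache_hash_set(ht, "foo", CACHE_HASH_KEY_STRING, NULL);
+ *   (passing NULL deletes the entry and returns the stored value)
+ *   cache_hash_free(ht);
+ */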
+
+/**
+ * Start iterating over the entries in a hash table.
+ * @param ht The hash table
+ * @example
+ */
+/**
+ * <PRE>
+ *
+ * int sum_values(cache_hash_t *ht)
+ * {
+ * cache_hash_index_t *hi;
+ * void *val;
+ * int sum = 0;
+ * for (hi = cache_hash_first(ht); hi; hi = cache_hash_next(hi)) {
+ * cache_hash_this(hi, NULL, NULL, &val);
+ * sum += *(int *)val;
+ * }
+ * return sum;
+ * }
+ *
+ * There is no restriction on adding or deleting hash entries during an
+ * iteration (although the results may be unpredictable unless all you do
+ * is delete the current entry) and multiple iterations can be in
+ * progress at the same time.
+ * </PRE>
+ */
+CACHE_DECLARE(cache_hash_index_t *) cache_hash_first(cache_hash_t *ht);
+
+/**
+ * Continue iterating over the entries in a hash table.
+ * @param hi The iteration state
+ * @return a pointer to the updated iteration state. NULL if there are no more
+ * entries.
+ */
+CACHE_DECLARE(cache_hash_index_t *) cache_hash_next(cache_hash_index_t *hi);
+
+/**
+ * Get the current entry's details from the iteration state.
+ * @param hi The iteration state
+ * @param key Return pointer for the pointer to the key.
+ * @param klen Return pointer for the key length.
+ * @param val Return pointer for the associated value.
+ * @remark The return pointers should point to a variable that will be set to the
+ * corresponding data, or they may be NULL if the data isn't interesting.
+ */
+CACHE_DECLARE(void) cache_hash_this(cache_hash_index_t *hi, const void **key,
+ apr_ssize_t *klen, void **val);
+
+/**
+ * Get the number of key/value pairs in the hash table.
+ * @param ht The hash table
+ * @return The number of key/value pairs in the hash table.
+ */
+CACHE_DECLARE(int) cache_hash_count(cache_hash_t *ht);
+
+
+/** @} */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !CACHE_HASH_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.c b/rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.c
new file mode 100644
index 00000000..580b47e7
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.c
@@ -0,0 +1,290 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_general.h"
+
+#if APR_HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#if APR_HAVE_STDIO_H
+#include <stdio.h>
+#endif
+
+#if APR_HAVE_STRING_H
+#include <string.h>
+#endif
+
+#include "cache_pqueue.h"
+#define left(i) (2*(i))
+#define right(i) ((2*(i))+1)
+#define parent(i) ((i)/2)
+/*
+ * Priority queue structure
+ */
+struct cache_pqueue_t
+{
+ apr_ssize_t size;
+ apr_ssize_t avail;
+ apr_ssize_t step;
+ cache_pqueue_get_priority pri;
+ cache_pqueue_getpos get;
+ cache_pqueue_setpos set;
+ void **d;
+};
+
+cache_pqueue_t *cache_pq_init(apr_ssize_t n,
+ cache_pqueue_get_priority pri,
+ cache_pqueue_getpos get,
+ cache_pqueue_setpos set)
+{
+ cache_pqueue_t *q;
+
+ if (!(q = malloc(sizeof(cache_pqueue_t)))) {
+ return NULL;
+ }
+
+ /* Need to allocate n+1 elements since element 0 isn't used. */
+ if (!(q->d = malloc(sizeof(void*) * (n+1)))) {
+ free(q);
+ return NULL;
+ }
+ q->avail = q->step = (n+1); /* see comment above about n+1 */
+ q->pri = pri;
+ q->size = 1;
+ q->get = get;
+ q->set = set;
+ return q;
+}
+/*
+ * cleanup
+ */
+void cache_pq_free(cache_pqueue_t *q)
+{
+ free(q->d);
+ free(q);
+}
+/*
+ * pqsize: size of the queue.
+ */
+apr_ssize_t cache_pq_size(cache_pqueue_t *q)
+{
+ /* queue element 0 exists but doesn't count since it isn't used. */
+ return (q->size - 1);
+}
+
+static void cache_pq_bubble_up(cache_pqueue_t *q, apr_ssize_t i)
+{
+ apr_ssize_t parent_node;
+ void *moving_node = q->d[i];
+ long moving_pri = q->pri(moving_node);
+
+ for (parent_node = parent(i);
+ ((i > 1) && (q->pri(q->d[parent_node]) < moving_pri));
+ i = parent_node, parent_node = parent(i))
+ {
+ q->d[i] = q->d[parent_node];
+ q->set(q->d[i], i);
+ }
+
+ q->d[i] = moving_node;
+ q->set(moving_node, i);
+}
+
+static apr_ssize_t maxchild(cache_pqueue_t *q, apr_ssize_t i)
+{
+ apr_ssize_t child_node = left(i);
+
+ if (child_node >= q->size)
+ return 0;
+
+ if ((child_node+1 < q->size) &&
+ (q->pri(q->d[child_node+1]) > q->pri(q->d[child_node])))
+ {
+ child_node++; /* use right child instead of left */
+ }
+
+ return child_node;
+}
+
+static void cache_pq_percolate_down(cache_pqueue_t *q, apr_ssize_t i)
+{
+ apr_ssize_t child_node;
+ void *moving_node = q->d[i];
+ long moving_pri = q->pri(moving_node);
+
+ while ((child_node = maxchild(q, i)) &&
+ (moving_pri < q->pri(q->d[child_node])))
+ {
+ q->d[i] = q->d[child_node];
+ q->set(q->d[i], i);
+ i = child_node;
+ }
+
+ q->d[i] = moving_node;
+ q->set(moving_node, i);
+}
+
+apr_status_t cache_pq_insert(cache_pqueue_t *q, void *d)
+{
+ void *tmp;
+ apr_ssize_t i;
+ apr_ssize_t newsize;
+
+ if (!q) return APR_EGENERAL;
+
+ /* allocate more memory if necessary */
+ if (q->size >= q->avail) {
+ newsize = q->size + q->step;
+ if (!(tmp = realloc(q->d, sizeof(void*) * newsize))) {
+ return APR_EGENERAL;
+ };
+ q->d = tmp;
+ q->avail = newsize;
+ }
+
+ /* insert item */
+ i = q->size++;
+ q->d[i] = d;
+ cache_pq_bubble_up(q, i);
+ return APR_SUCCESS;
+}
+
+/*
+ * move an existing entry to a new priority
+ */
+void cache_pq_change_priority(cache_pqueue_t *q,
+ long old_priority,
+ long new_priority,
+ void *d)
+{
+ apr_ssize_t posn;
+
+ posn = q->get(d);
+ if (new_priority > old_priority)
+ cache_pq_bubble_up(q, posn);
+ else
+ cache_pq_percolate_down(q, posn);
+}
+
+apr_status_t cache_pq_remove(cache_pqueue_t *q, void *d)
+{
+ apr_ssize_t posn = q->get(d);
+ q->d[posn] = q->d[--q->size];
+ if (q->pri(q->d[posn]) > q->pri(d))
+ cache_pq_bubble_up(q, posn);
+ else
+ cache_pq_percolate_down(q, posn);
+
+ return APR_SUCCESS;
+}
+
+void *cache_pq_pop(cache_pqueue_t *q)
+{
+ void *head;
+
+ if (!q || q->size == 1)
+ return NULL;
+
+ head = q->d[1];
+ q->d[1] = q->d[--q->size];
+ cache_pq_percolate_down(q, 1);
+
+ return head;
+}
+
+void *cache_pq_peek(cache_pqueue_t *q)
+{
+ void *d;
+ if (!q || q->size == 1)
+ return NULL;
+ d = q->d[1];
+ return d;
+}
+
+static void cache_pq_set_null( void*d, apr_ssize_t val)
+{
+ /* do nothing */
+}
+
+/*
+ * this is a debug function.. so it's EASY not fast
+ */
+void cache_pq_dump(cache_pqueue_t *q,
+ FILE*out,
+ cache_pqueue_print_entry print)
+{
+ int i;
+
+ fprintf(stdout,"posn\tleft\tright\tparent\tmaxchild\t...\n");
+ for (i = 1; i < q->size ;i++) {
+ fprintf(stdout,
+ "%d\t%d\t%d\t%d\t%" APR_SSIZE_T_FMT "\t",
+ i,
+ left(i), right(i), parent(i),
+ maxchild(q, i));
+ print(out, q->d[i]);
+ }
+}
+
+/*
+ * this is a debug function.. so it's EASY not fast
+ */
+void cache_pq_print(cache_pqueue_t *q,
+ FILE*out,
+ cache_pqueue_print_entry print)
+{
+ cache_pqueue_t *dup;
+ dup = cache_pq_init(q->size, q->pri, q->get, cache_pq_set_null);
+ dup->size = q->size;
+ dup->avail = q->avail;
+ dup->step = q->step;
+
+ memcpy(dup->d, q->d, q->size*sizeof(void*));
+
+ while (cache_pq_size(dup) > 1) {
+ void *e = NULL;
+ e = cache_pq_pop(dup);
+ if (e)
+ print(out, e);
+ else
+ break;
+ }
+ cache_pq_free(dup);
+}
+
+static int cache_pq_subtree_is_valid(cache_pqueue_t *q, int pos)
+{
+ if (left(pos) < q->size) {
+ /* has a left child */
+ if (q->pri(q->d[pos]) < q->pri(q->d[left(pos)]))
+ return 0;
+ if (!cache_pq_subtree_is_valid(q, left(pos)))
+ return 0;
+ }
+ if (right(pos) < q->size) {
+ /* has a right child */
+ if (q->pri(q->d[pos]) < q->pri(q->d[right(pos)]))
+ return 0;
+ if (!cache_pq_subtree_is_valid(q, right(pos)))
+ return 0;
+ }
+ return 1;
+}
+
+int cache_pq_is_valid(cache_pqueue_t *q)
+{
+ return cache_pq_subtree_is_valid(q, 1);
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.h b/rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.h
new file mode 100644
index 00000000..19709764
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_pqueue.h
@@ -0,0 +1,160 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CACHE_PQUEUE_H
+#define CACHE_PQUEUE_H
+
+#include <apr.h>
+#include <apr_errno.h>
+
+#if APR_HAVE_STDIO_H
+#include <stdio.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** the cache priority queue handle */
+typedef struct cache_pqueue_t cache_pqueue_t;
+
+/**
+ * callback function to assign a priority to an element
+ * @param a the element
+ * @return the score (the lower the score, the longer it is kept in the queue)
+ */
+typedef long (*cache_pqueue_set_priority)(long queue_clock, void *a);
+typedef long (*cache_pqueue_get_priority)(void *a);
+
+/** callback function to get the position of an element */
+typedef apr_ssize_t (*cache_pqueue_getpos)(void *a);
+
+/**
+ * callback function to set the position of an element
+ * @param a the element
+ * @param pos the position to set it to
+ */
+typedef void (*cache_pqueue_setpos)(void *a, apr_ssize_t pos);
+
+/** debug callback function to print an entry */
+typedef void (*cache_pqueue_print_entry)(FILE *out, void *a);
+
+/**
+ * initialize the queue
+ *
+ * @param n the initial estimate of the number of queue items for which memory
+ * should be preallocated
+ * @param pri the callback function to run to assign a score to an element
+ * @param get the callback function to get the current element's position
+ * @param set the callback function to set the current element's position
+ *
+ * @return the handle or NULL if there is insufficient memory
+ */
+cache_pqueue_t *cache_pq_init(apr_ssize_t n,
+ cache_pqueue_get_priority pri,
+ cache_pqueue_getpos get,
+ cache_pqueue_setpos set);
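+
+/*
+ * Usage sketch. The queue never looks inside the entries; it relies
+ * entirely on the callbacks, so an entry must be able to report and
+ * store its own queue position. Assuming a hypothetical entry type
+ *
+ *   struct my_entry { long priority; apr_ssize_t pos; ... };
+ *
+ * with trivial my_get_pri/my_get_pos/my_set_pos accessors, the queue
+ * would be driven as:
+ *
+ *   cache_pqueue_t *q = cache_pq_init(32, my_get_pri, my_get_pos, my_set_pos);
+ *   cache_pq_insert(q, e);
+ *   e = cache_pq_pop(q);    (highest-ranking entry, or NULL when empty)
+ *   cache_pq_free(q);
+ */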
+/**
+ * free all memory used by the queue
+ * @param q the queue
+ */
+void cache_pq_free(cache_pqueue_t *q);
+/**
+ * return the size of the queue.
+ * @param q the queue
+ */
+apr_ssize_t cache_pq_size(cache_pqueue_t *q);
+
+/**
+ * insert an item into the queue.
+ * @param q the queue
+ * @param d the item
+ * @return APR_SUCCESS on success
+ */
+apr_status_t cache_pq_insert(cache_pqueue_t *q, void *d);
+
+/**
+ * move an existing entry to a different priority
+ * @param q the queue
+ * @param old_priority the old priority
+ * @param new_priority the new priority
+ * @param d the entry
+ */
+void cache_pq_change_priority(cache_pqueue_t *q,
+ long old_priority,
+ long new_priority,
+ void *d);
+
+/**
+ * pop the highest-ranking item from the queue.
+ * @param q the queue
+ * @return NULL on error, otherwise the entry
+ */
+void *cache_pq_pop(cache_pqueue_t *q);
+
+/**
+ * remove an item from the queue.
+ * @param q the queue
+ * @param d the entry
+ * @return APR_SUCCESS on success
+ */
+apr_status_t cache_pq_remove(cache_pqueue_t *q, void *d);
+
+/**
+ * access the highest-ranking item without removing it.
+ * @param q the queue
+ * @return NULL on error, otherwise the entry
+ */
+void *cache_pq_peek(cache_pqueue_t *q);
+
+/**
+ * print the queue
+ * @internal
+ * DEBUG function only
+ * @param q the queue
+ * @param out the output handle
+ * @param print the callback function to print an entry
+ */
+void cache_pq_print(cache_pqueue_t *q,
+ FILE *out,
+ cache_pqueue_print_entry print);
+
+/**
+ * dump the queue and its internal structure
+ * @internal
+ * debug function only
+ * @param q the queue
+ * @param out the output handle
+ * @param print the callback function to print an entry
+ */
+void cache_pq_dump(cache_pqueue_t *q,
+ FILE *out,
+ cache_pqueue_print_entry print);
+
+/**
+ * checks that the pq is in the right order, etc
+ * @internal
+ * debug function only
+ * @param q the queue
+ */
+int cache_pq_is_valid(cache_pqueue_t *q);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !CACHE_PQUEUE_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_storage.c b/rubbos/app/httpd-2.0.64/modules/experimental/cache_storage.c
new file mode 100644
index 00000000..88f3d5dd
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_storage.c
@@ -0,0 +1,311 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define CORE_PRIVATE
+
+#include "mod_cache.h"
+
+extern APR_OPTIONAL_FN_TYPE(ap_cache_generate_key) *cache_generate_key;
+
+extern module AP_MODULE_DECLARE_DATA cache_module;
+
+/* -------------------------------------------------------------- */
+
+/*
+ * delete all URL entities from the cache
+ *
+ */
+int cache_remove_url(request_rec *r, char *url)
+{
+ cache_provider_list *list;
+ apr_status_t rv;
+ char *key;
+ cache_request_rec *cache = (cache_request_rec *)
+ ap_get_module_config(r->request_config, &cache_module);
+
+ rv = cache_generate_key(r,r->pool,&key);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ list = cache->providers;
+
+ /* for each specified cache type, delete the URL */
+ while(list) {
+ list->provider->remove_url(key);
+ list = list->next;
+ }
+ return OK;
+}
+
+
+/*
+ * create a new URL entity in the cache
+ *
+ * It is possible to store more than one entity per URL. This
+ * function will always create a new entity, regardless of whether
+ * other entities already exist for the same URL.
+ *
+ * The size of the entity is provided so that a cache module can
+ * decide whether or not it wants to cache this particular entity.
+ * If the size is unknown, a size of -1 should be set.
+ */
+int cache_create_entity(request_rec *r, char *url, apr_off_t size)
+{
+ cache_provider_list *list;
+ cache_handle_t *h = apr_pcalloc(r->pool, sizeof(cache_handle_t));
+ char *key;
+ apr_status_t rv;
+ cache_request_rec *cache = (cache_request_rec *)
+ ap_get_module_config(r->request_config, &cache_module);
+
+ rv = cache_generate_key(r,r->pool,&key);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ list = cache->providers;
+    /* for each specified cache type, try to create the entity */
+ while (list) {
+ switch (rv = list->provider->create_entity(h, r, key, size)) {
+ case OK: {
+ cache->handle = h;
+ cache->provider = list->provider;
+ cache->provider_name = list->provider_name;
+ return OK;
+ }
+ case DECLINED: {
+ list = list->next;
+ continue;
+ }
+ default: {
+ return rv;
+ }
+ }
+ }
+ return DECLINED;
+}
+
+static int set_cookie_doo_doo(void *v, const char *key, const char *val)
+{
+ apr_table_addn(v, key, val);
+ return 1;
+}
+
+static void accept_headers(cache_handle_t *h, request_rec *r)
+{
+ apr_table_t *cookie_table;
+ const char *v;
+
+ v = apr_table_get(h->resp_hdrs, "Content-Type");
+ if (v) {
+ ap_set_content_type(r, v);
+ apr_table_unset(h->resp_hdrs, "Content-Type");
+ }
+
+ /* If the cache gave us a Last-Modified header, we can't just
+ * pass it on blindly because of restrictions on future values.
+ */
+ v = apr_table_get(h->resp_hdrs, "Last-Modified");
+ if (v) {
+ ap_update_mtime(r, apr_date_parse_http(v));
+ ap_set_last_modified(r);
+ apr_table_unset(h->resp_hdrs, "Last-Modified");
+ }
+
+ /* The HTTP specification says that it is legal to merge duplicate
+ * headers into one. Some browsers that support Cookies don't like
+ * merged headers and prefer that each Set-Cookie header is sent
+     * separately. Let's humour those browsers by not merging.
+ * Oh what a pain it is.
+ */
+ cookie_table = apr_table_make(r->pool, 2);
+ apr_table_do(set_cookie_doo_doo, cookie_table, r->err_headers_out,
+ "Set-Cookie", NULL);
+ apr_table_do(set_cookie_doo_doo, cookie_table, h->resp_hdrs,
+ "Set-Cookie", NULL);
+ apr_table_unset(r->err_headers_out, "Set-Cookie");
+ apr_table_unset(h->resp_hdrs, "Set-Cookie");
+
+ apr_table_overlap(r->headers_out, h->resp_hdrs,
+ APR_OVERLAP_TABLES_SET);
+ apr_table_overlap(r->err_headers_out, h->resp_err_hdrs,
+ APR_OVERLAP_TABLES_SET);
+ if (!apr_is_empty_table(cookie_table)) {
+ r->err_headers_out = apr_table_overlay(r->pool, r->err_headers_out,
+ cookie_table);
+ }
+}
+
+/*
+ * select a specific URL entity in the cache
+ *
+ * It is possible to store more than one entity per URL. Content
+ * negotiation is used to select an entity. Once an entity is
+ * selected, details of it are stored in the per request
+ * config to save time when serving the request later.
+ *
+ * This function returns OK if successful, DECLINED if no
+ * cached entity fits the bill.
+ */
+int cache_select_url(request_rec *r, char *url)
+{
+ cache_provider_list *list;
+ apr_status_t rv;
+ cache_handle_t *h;
+ char *key;
+ cache_request_rec *cache = (cache_request_rec *)
+ ap_get_module_config(r->request_config, &cache_module);
+
+ rv = cache_generate_key(r, r->pool, &key);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ /* go through the cache types till we get a match */
+ h = apr_palloc(r->pool, sizeof(cache_handle_t));
+
+ list = cache->providers;
+
+ while (list) {
+ switch ((rv = list->provider->open_entity(h, r, key))) {
+ case OK: {
+ char *vary = NULL;
+ const char *varyhdr = NULL;
+ int fresh;
+
+ if (list->provider->recall_headers(h, r) != APR_SUCCESS) {
+ /* TODO: Handle this error */
+ return DECLINED;
+ }
+
+ /*
+ * Check Content-Negotiation - Vary
+ *
+ * At this point we need to make sure that the object we found in
+ * the cache is the same object that would be delivered to the
+ * client, when the effects of content negotiation are taken into
+ * effect.
+ *
+             * In plain English, we want to make sure that a language-negotiated
+ * document in one language is not given to a client asking for a
+ * language negotiated document in a different language by mistake.
+ *
+ * This code makes the assumption that the storage manager will
+ * cache the req_hdrs if the response contains a Vary
+ * header.
+ *
+ * RFC2616 13.6 and 14.44 describe the Vary mechanism.
+ */
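+            /*
+             * Illustrative case: if the cached response was stored with
+             * "Vary: Accept-Language" and the stored req_hdrs contain
+             * "Accept-Language: en", a new request carrying
+             * "Accept-Language: de" fails the comparison below and we
+             * return DECLINED instead of serving the wrong variant.
+             */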
+ if ((varyhdr = apr_table_get(h->resp_err_hdrs, "Vary")) == NULL) {
+ varyhdr = apr_table_get(h->resp_hdrs, "Vary");
+ }
+ vary = apr_pstrdup(r->pool, varyhdr);
+ while (vary && *vary) {
+ char *name = vary;
+ const char *h1, *h2;
+
+ /* isolate header name */
+ while (*vary && !apr_isspace(*vary) && (*vary != ','))
+ ++vary;
+ while (*vary && (apr_isspace(*vary) || (*vary == ','))) {
+ *vary = '\0';
+ ++vary;
+ }
+
+ /*
+ * is this header in the request and the header in the cached
+ * request identical? If not, we give up and do a straight get
+ */
+ h1 = apr_table_get(r->headers_in, name);
+ h2 = apr_table_get(h->req_hdrs, name);
+ if (h1 == h2) {
+ /* both headers NULL, so a match - do nothing */
+ }
+ else if (h1 && h2 && !strcmp(h1, h2)) {
+ /* both headers exist and are equal - do nothing */
+ }
+ else {
+ /* headers do not match, so Vary failed */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
+ r->server,
+ "cache_select_url(): Vary header mismatch.");
+ return DECLINED;
+ }
+ }
+
+ cache->provider = list->provider;
+ cache->provider_name = list->provider_name;
+
+ /* Is our cached response fresh enough? */
+ fresh = ap_cache_check_freshness(h, r);
+ if (!fresh) {
+ cache_info *info = &(h->cache_obj->info);
+
+ /* Make response into a conditional */
+ /* FIXME: What if the request is already conditional? */
+ if (info && info->etag) {
+ /* if we have a cached etag */
+ cache->stale_headers = apr_table_copy(r->pool,
+ r->headers_in);
+ apr_table_set(r->headers_in, "If-None-Match", info->etag);
+ cache->stale_handle = h;
+ }
+ else if (info && info->lastmods) {
+ /* if we have a cached Last-Modified header */
+ cache->stale_headers = apr_table_copy(r->pool,
+ r->headers_in);
+ apr_table_set(r->headers_in, "If-Modified-Since",
+ info->lastmods);
+ cache->stale_handle = h;
+ }
+
+ return DECLINED;
+ }
+
+ /* Okay, this response looks okay. Merge in our stuff and go. */
+ apr_table_setn(r->headers_out, "Content-Type",
+ ap_make_content_type(r, h->content_type));
+ r->filename = apr_pstrdup(r->pool, h->cache_obj->info.filename);
+ accept_headers(h, r);
+
+ cache->handle = h;
+ return OK;
+ }
+ case DECLINED: {
+ /* try again with next cache type */
+ list = list->next;
+ continue;
+ }
+ default: {
+ /* oo-er! an error */
+ return rv;
+ }
+ }
+ }
+ return DECLINED;
+}
+
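+/*
+ * The default key is the hostname (when known) concatenated with the URI
+ * and query string; e.g. a request for /index.html?x=1 on the
+ * (hypothetical) host www.example.com produces the key
+ * "www.example.com/index.html?x=1".
+ */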
+apr_status_t cache_generate_key_default( request_rec *r, apr_pool_t*p, char**key )
+{
+ if (r->hostname) {
+ *key = apr_pstrcat(p, r->hostname, r->uri, "?", r->args, NULL);
+ }
+ else {
+ *key = apr_pstrcat(p, r->uri, "?", r->args, NULL);
+ }
+ return APR_SUCCESS;
+}
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/cache_util.c b/rubbos/app/httpd-2.0.64/modules/experimental/cache_util.c
new file mode 100644
index 00000000..9782cb7b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/cache_util.c
@@ -0,0 +1,575 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define CORE_PRIVATE
+
+#include "mod_cache.h"
+
+#include <ap_provider.h>
+
+/* -------------------------------------------------------------- */
+
+extern module AP_MODULE_DECLARE_DATA cache_module;
+
+/* return true if the request is conditional */
+CACHE_DECLARE(int) ap_cache_request_is_conditional(apr_table_t *table)
+{
+ if (apr_table_get(table, "If-Match") ||
+ apr_table_get(table, "If-None-Match") ||
+ apr_table_get(table, "If-Modified-Since") ||
+ apr_table_get(table, "If-Unmodified-Since")) {
+ return 1;
+ }
+ return 0;
+}
+
+CACHE_DECLARE(cache_provider_list *)ap_cache_get_providers(request_rec *r,
+ cache_server_conf *conf,
+ const char *url)
+{
+ cache_provider_list *providers = NULL;
+ int i;
+
+ /* we can't cache if there's no URL */
+ /* Is this case even possible?? */
+ if (!url) return NULL;
+
+ /* loop through all the cacheenable entries */
+ for (i = 0; i < conf->cacheenable->nelts; i++) {
+ struct cache_enable *ent =
+ (struct cache_enable *)conf->cacheenable->elts;
+ if ((ent[i].url) && !strncasecmp(url, ent[i].url, ent[i].urllen)) {
+ /* Fetch from global config and add to the list. */
+ cache_provider *provider;
+ provider = ap_lookup_provider(CACHE_PROVIDER_GROUP, ent[i].type,
+ "0");
+ if (!provider) {
+ /* Log an error! */
+ }
+ else {
+ cache_provider_list *newp;
+ newp = apr_pcalloc(r->pool, sizeof(cache_provider_list));
+ newp->provider_name = ent[i].type;
+ newp->provider = provider;
+
+ if (!providers) {
+ providers = newp;
+ }
+ else {
+ cache_provider_list *last = providers;
+
+ while (last->next) {
+ last = last->next;
+ }
+ last->next = newp;
+ }
+ }
+ }
+ }
+
+ /* then loop through all the cachedisable entries
+ * Looking for urls that contain the full cachedisable url and possibly
+ * more.
+ * This means we are disabling cachedisable url and below...
+ */
+ for (i = 0; i < conf->cachedisable->nelts; i++) {
+ struct cache_disable *ent =
+ (struct cache_disable *)conf->cachedisable->elts;
+ if ((ent[i].url) && !strncasecmp(url, ent[i].url, ent[i].urllen)) {
+ /* Stop searching now. */
+ return NULL;
+ }
+ }
+
+ return providers;
+}
+
+
+/* do a HTTP/1.1 age calculation */
+CACHE_DECLARE(apr_int64_t) ap_cache_current_age(cache_info *info,
+ const apr_time_t age_value,
+ apr_time_t now)
+{
+ apr_time_t apparent_age, corrected_received_age, response_delay,
+ corrected_initial_age, resident_time, current_age,
+ age_value_usec;
+
+ age_value_usec = apr_time_from_sec(age_value);
+
+ /* Perform an HTTP/1.1 age calculation. (RFC2616 13.2.3) */
+
+ apparent_age = MAX(0, info->response_time - info->date);
+ corrected_received_age = MAX(apparent_age, age_value_usec);
+ response_delay = info->response_time - info->request_time;
+ corrected_initial_age = corrected_received_age + response_delay;
+ resident_time = now - info->response_time;
+ current_age = corrected_initial_age + resident_time;
+
+ return apr_time_sec(current_age);
+}
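+
+/*
+ * Worked example of the calculation above (numbers are illustrative):
+ * if the Date header matches the time the response arrived (apparent
+ * age 0), the response carried "Age: 100", the request/response round
+ * trip took 2 seconds and the entry has been resident for 50 seconds,
+ * the current age is max(0, 100) + 2 + 50 = 152 seconds.
+ */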
+
+CACHE_DECLARE(int) ap_cache_check_freshness(cache_handle_t *h,
+ request_rec *r)
+{
+ apr_int64_t age, maxage_req, maxage_cresp, maxage, smaxage, maxstale;
+ apr_int64_t minfresh;
+ int age_in_errhdr = 0;
+ const char *cc_cresp, *cc_ceresp, *cc_req;
+ const char *agestr = NULL;
+ const char *expstr = NULL;
+ char *val;
+ apr_time_t age_c = 0;
+ cache_info *info = &(h->cache_obj->info);
+
+ /*
+ * We now want to check if our cached data is still fresh. This depends
+ * on a few things, in this order:
+ *
+ * - RFC2616 14.9.4 End to end reload, Cache-Control: no-cache. no-cache in
+ * either the request or the cached response means that we must
+ * revalidate the request unconditionally, overriding any expiration
+ * mechanism. It's equivalent to max-age=0,must-revalidate.
+ *
+ * - RFC2616 14.32 Pragma: no-cache This is treated the same as
+ * Cache-Control: no-cache.
+ *
+ * - RFC2616 14.9.3 Cache-Control: max-stale, must-revalidate,
+ * proxy-revalidate if the max-stale request header exists, modify the
+ * stale calculations below so that an object can be at most <max-stale>
+ * seconds stale before we request a revalidation, _UNLESS_ a
+ * must-revalidate or proxy-revalidate cached response header exists to
+ * stop us doing this.
+ *
+ * - RFC2616 14.9.3 Cache-Control: s-maxage the origin server specifies the
+ * maximum age an object can be before it is considered stale. This
+ * directive has the effect of proxy|must revalidate, which in turn means
+     * that we simply ignore any max-stale setting.
+ *
+ * - RFC2616 14.9.4 Cache-Control: max-age this header can appear in both
+ * requests and responses. If both are specified, the smaller of the two
+ * takes priority.
+ *
+     * - RFC2616 14.21 Expires: if this header exists in the cached
+     * entity, and its value is in the past, it has expired.
+ *
+ */
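+    /*
+     * Illustrative numbers: a response with "Cache-Control: max-age=600"
+     * that is 300 seconds old is fresh; at 700 seconds it is stale and
+     * must be revalidated, unless the client sent "max-stale=200", in
+     * which case 700 < 600 + 200 still passes the expiration check below
+     * (and a "110 Response is stale" warning is merged in).
+     */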
+ cc_cresp = apr_table_get(h->resp_hdrs, "Cache-Control");
+ cc_ceresp = apr_table_get(h->resp_err_hdrs, "Cache-Control");
+ cc_req = apr_table_get(h->req_hdrs, "Cache-Control");
+
+ if ((agestr = apr_table_get(h->resp_hdrs, "Age"))) {
+ age_c = apr_atoi64(agestr);
+ }
+ else if ((agestr = apr_table_get(h->resp_err_hdrs, "Age"))) {
+ age_c = apr_atoi64(agestr);
+ age_in_errhdr = 1;
+ }
+
+ if (!(expstr = apr_table_get(h->resp_err_hdrs, "Expires"))) {
+ expstr = apr_table_get(h->resp_hdrs, "Expires");
+ }
+
+ /* calculate age of object */
+ age = ap_cache_current_age(info, age_c, r->request_time);
+
+ /* extract s-maxage */
+ if (cc_cresp && ap_cache_liststr(r->pool, cc_cresp, "s-maxage", &val)
+ && val != NULL) {
+ smaxage = apr_atoi64(val);
+ }
+ else if (cc_ceresp && ap_cache_liststr(r->pool, cc_ceresp, "s-maxage", &val)) {
+ smaxage = apr_atoi64(val);
+ }
+ else {
+ smaxage = -1;
+ }
+
+ /* extract max-age from request */
+ if (cc_req && ap_cache_liststr(r->pool, cc_req, "max-age", &val)
+ && val != NULL) {
+ maxage_req = apr_atoi64(val);
+ }
+ else {
+ maxage_req = -1;
+ }
+
+ /* extract max-age from response */
+ if (cc_cresp && ap_cache_liststr(r->pool, cc_cresp, "max-age", &val)
+ && val != NULL) {
+ maxage_cresp = apr_atoi64(val);
+ }
+ else if (cc_ceresp && ap_cache_liststr(r->pool, cc_ceresp, "max-age", &val)) {
+ maxage_cresp = apr_atoi64(val);
+ }
+ else
+ {
+ maxage_cresp = -1;
+ }
+
+ /*
+ * if both maxage request and response, the smaller one takes priority
+ */
+ if (-1 == maxage_req) {
+ maxage = maxage_cresp;
+ }
+ else if (-1 == maxage_cresp) {
+ maxage = maxage_req;
+ }
+ else {
+ maxage = MIN(maxage_req, maxage_cresp);
+ }
+
+ /* extract max-stale */
+ if (cc_req && ap_cache_liststr(r->pool, cc_req, "max-stale", &val)) {
+ if(val != NULL) {
+ maxstale = apr_atoi64(val);
+ }
+ else {
+ /*
+ * If no value is assigned to max-stale, then the client is willing
+ * to accept a stale response of any age (RFC2616 14.9.3). We will
+ * set it to one year in this case as this situation is somewhat
+ * similar to a "never expires" Expires header (RFC2616 14.21)
+ * which is set to a date one year from the time the response is
+ * sent in this case.
+ */
+ maxstale = APR_INT64_C(86400*365);
+ }
+ }
+ else {
+ maxstale = 0;
+ }
+
+ /* extract min-fresh */
+ if (cc_req && ap_cache_liststr(r->pool, cc_req, "min-fresh", &val)
+ && val != NULL) {
+ minfresh = apr_atoi64(val);
+ }
+ else {
+ minfresh = 0;
+ }
+
+ /* override maxstale if must-revalidate or proxy-revalidate */
+ if (maxstale && ((cc_cresp &&
+ ap_cache_liststr(NULL, cc_cresp,
+ "must-revalidate", NULL)) ||
+ (cc_cresp &&
+ ap_cache_liststr(NULL, cc_cresp,
+ "proxy-revalidate", NULL)) ||
+ (cc_ceresp &&
+ ap_cache_liststr(NULL, cc_ceresp,
+ "must-revalidate", NULL)) ||
+ (cc_ceresp &&
+ ap_cache_liststr(NULL, cc_ceresp,
+ "proxy-revalidate", NULL)))) {
+ maxstale = 0;
+ }
+
+ /* handle expiration */
+ if (((smaxage != -1) && (age < (smaxage - minfresh))) ||
+ ((maxage != -1) && (age < (maxage + maxstale - minfresh))) ||
+ ((smaxage == -1) && (maxage == -1) &&
+ (info->expire != APR_DATE_BAD) &&
+ (age < (apr_time_sec(info->expire - info->date) + maxstale - minfresh)))) {
+ const char *warn_head;
+ apr_table_t *head_ptr;
+
+ warn_head = apr_table_get(h->resp_hdrs, "Warning");
+ if (warn_head != NULL) {
+ head_ptr = h->resp_hdrs;
+ }
+ else {
+ warn_head = apr_table_get(h->resp_err_hdrs, "Warning");
+ head_ptr = h->resp_err_hdrs;
+ }
+
+ /* it's fresh darlings... */
+ /* set age header on response */
+ if (age_in_errhdr) {
+ apr_table_set(h->resp_err_hdrs, "Age",
+ apr_psprintf(r->pool, "%lu", (unsigned long)age));
+ }
+ else {
+ apr_table_set(h->resp_hdrs, "Age",
+ apr_psprintf(r->pool, "%lu", (unsigned long)age));
+ }
+
+ /* add warning if maxstale overrode freshness calculation */
+ if (!(((smaxage != -1) && age < smaxage) ||
+ ((maxage != -1) && age < maxage) ||
+ (info->expire != APR_DATE_BAD &&
+ (info->expire - info->date) > age))) {
+ /* make sure we don't stomp on a previous warning */
+ if ((warn_head == NULL) ||
+ ((warn_head != NULL) && (ap_strstr_c(warn_head, "110") == NULL))) {
+ apr_table_merge(head_ptr, "Warning", "110 Response is stale");
+ }
+ }
+ /*
+ * If none of Expires, Cache-Control: max-age, or Cache-Control:
+         * s-maxage appears in the response, and the response age
+         * calculated is more than 24 hours, add the 113 warning
+ */
+ if ((maxage_cresp == -1) && (smaxage == -1) &&
+ (expstr == NULL) && (age > 86400)) {
+
+ /* Make sure we don't stomp on a previous warning, and don't dup
+             * a 113 warning that is already present. Also, make sure to add
+ * the new warning to the correct *headers_out location.
+ */
+ if ((warn_head == NULL) ||
+ ((warn_head != NULL) && (ap_strstr_c(warn_head, "113") == NULL))) {
+ apr_table_merge(head_ptr, "Warning", "113 Heuristic expiration");
+ }
+ }
+ return 1; /* Cache object is fresh (enough) */
+ }
+ return 0; /* Cache object is stale */
+}
+
+/*
+ * list is a comma-separated list of case-insensitive tokens, with
+ * optional whitespace around the tokens.
+ * Returns 1 if the token key is found in the list (setting *val to the
+ * token's value when val is non-NULL and a value is present), or 0 otherwise.
+ */
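+/*
+ * For example, given the (illustrative) header value
+ * "max-age=300, must-revalidate":
+ *   ap_cache_liststr(p, list, "max-age", &val)            returns 1, *val = "300"
+ *   ap_cache_liststr(NULL, list, "must-revalidate", NULL) returns 1
+ *   ap_cache_liststr(p, list, "no-store", &val)           returns 0
+ */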
+CACHE_DECLARE(int) ap_cache_liststr(apr_pool_t *p, const char *list,
+ const char *key, char **val)
+{
+ apr_size_t key_len;
+ const char *next;
+
+ if (!list) {
+ return 0;
+ }
+
+ key_len = strlen(key);
+ next = list;
+
+ for (;;) {
+
+ /* skip whitespace and commas to find the start of the next key */
+ while (*next && (apr_isspace(*next) || (*next == ','))) {
+ next++;
+ }
+
+ if (!*next) {
+ return 0;
+ }
+
+ if (!strncasecmp(next, key, key_len)) {
+ /* this field matches the key (though it might just be
+ * a prefix match, so make sure the match is followed
+ * by either a space or an equals sign)
+ */
+ next += key_len;
+ if (!*next || (*next == '=') || apr_isspace(*next) ||
+ (*next == ',')) {
+ /* valid match */
+ if (val) {
+ while (*next && (*next != '=') && (*next != ',')) {
+ next++;
+ }
+ if (*next == '=') {
+ next++;
+ while (*next && apr_isspace(*next )) {
+ next++;
+ }
+ if (!*next) {
+ *val = NULL;
+ }
+ else {
+ const char *val_start = next;
+ while (*next && !apr_isspace(*next) &&
+ (*next != ',')) {
+ next++;
+ }
+ *val = apr_pstrmemdup(p, val_start,
+ next - val_start);
+ }
+ }
+ else {
+ *val = NULL;
+ }
+ }
+ return 1;
+ }
+ }
+
+ /* skip to the next field */
+ do {
+ next++;
+ if (!*next) {
+ return 0;
+ }
+ } while (*next != ',');
+ }
+}
+
+/* return each comma separated token, one at a time */
+CACHE_DECLARE(const char *)ap_cache_tokstr(apr_pool_t *p, const char *list,
+ const char **str)
+{
+ apr_size_t i;
+ const char *s;
+
+ s = ap_strchr_c(list, ',');
+ if (s != NULL) {
+ i = s - list;
+ do
+ s++;
+ while (apr_isspace(*s))
+ ; /* noop */
+ }
+ else
+ i = strlen(list);
+
+ while (i > 0 && apr_isspace(list[i - 1]))
+ i--;
+
+ *str = s;
+ if (i)
+ return apr_pstrndup(p, list, i);
+ else
+ return NULL;
+}
+
+/*
+ * Converts apr_time_t expressed as hex digits to
+ * a true apr_time_t.
+ */
+CACHE_DECLARE(apr_time_t) ap_cache_hex2usec(const char *x)
+{
+ int i, ch;
+ apr_time_t j;
+ for (i = 0, j = 0; i < sizeof(j) * 2; i++) {
+ ch = x[i];
+ j <<= 4;
+ if (apr_isdigit(ch))
+ j |= ch - '0';
+ else if (apr_isupper(ch))
+ j |= ch - ('A' - 10);
+ else
+ j |= ch - ('a' - 10);
+ }
+ return j;
+}
+
+/*
+ * Converts apr_time_t to apr_time_t expressed as hex digits.
+ */
+CACHE_DECLARE(void) ap_cache_usec2hex(apr_time_t j, char *y)
+{
+ int i, ch;
+
+ for (i = (sizeof(j) * 2)-1; i >= 0; i--) {
+ ch = (int)(j & 0xF);
+ j >>= 4;
+ if (ch >= 10)
+ y[i] = ch + ('A' - 10);
+ else
+ y[i] = ch + '0';
+ }
+ y[sizeof(j) * 2] = '\0';
+}
+
+static void cache_hash(const char *it, char *val, int ndepth, int nlength)
+{
+ apr_md5_ctx_t context;
+ unsigned char digest[16];
+ char tmp[22];
+ int i, k, d;
+ unsigned int x;
+ static const char enc_table[64] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_@";
+
+ apr_md5_init(&context);
+ apr_md5_update(&context, (const unsigned char *) it, strlen(it));
+ apr_md5_final(digest, &context);
+
+ /* encode 128 bits as 22 characters, using a modified uuencoding
+ * the encoding is 3 bytes -> 4 characters, i.e. 128 bits is
+ * 5 x 3 bytes + 1 byte -> 5 * 4 characters + 2 characters
+ */
+ for (i = 0, k = 0; i < 15; i += 3) {
+ x = (digest[i] << 16) | (digest[i + 1] << 8) | digest[i + 2];
+ tmp[k++] = enc_table[x >> 18];
+ tmp[k++] = enc_table[(x >> 12) & 0x3f];
+ tmp[k++] = enc_table[(x >> 6) & 0x3f];
+ tmp[k++] = enc_table[x & 0x3f];
+ }
+
+ /* one byte left */
+ x = digest[15];
+ tmp[k++] = enc_table[x >> 2]; /* use up 6 bits */
+ tmp[k++] = enc_table[(x << 4) & 0x3f];
+
+ /* now split into directory levels */
+ for (i = k = d = 0; d < ndepth; ++d) {
+ memcpy(&val[i], &tmp[k], nlength);
+ k += nlength;
+ val[i + nlength] = '/';
+ i += nlength + 1;
+ }
+ memcpy(&val[i], &tmp[k], 22 - k);
+ val[i + 22 - k] = '\0';
+}
+
+CACHE_DECLARE(char *)generate_name(apr_pool_t *p, int dirlevels,
+ int dirlength, const char *name)
+{
+ char hashfile[66];
+ cache_hash(name, hashfile, dirlevels, dirlength);
+ return apr_pstrdup(p, hashfile);
+}
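+
+/* Usage sketch (editor's illustration, not part of the original module):
+ * with two directory levels of two characters each, a cache key hashes to a
+ * path of 2 + 1 + 2 + 1 + 18 = 24 characters.  The URL below is an assumption.
+ */
+#if 0
+static void example_generate_name(apr_pool_t *p)
+{
+    char *key = generate_name(p, 2, 2, "http://www.example.com/index.html");
+    /* key looks like "xx/yy/zzzzzzzzzzzzzzzzzz" (22 hash chars + 2 slashes) */
+}
+#endif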
+
+/* Create a new table consisting of those elements from an input
+ * headers table that are allowed to be stored in a cache.
+ */
+CACHE_DECLARE(apr_table_t *)ap_cache_cacheable_hdrs_out(apr_pool_t *pool,
+ apr_table_t *t,
+ server_rec *s)
+{
+ cache_server_conf *conf;
+ char **header;
+ int i;
+
+ /* Make a copy of the headers, and remove from
+ * the copy any hop-by-hop headers, as defined in Section
+ * 13.5.1 of RFC 2616
+ */
+ apr_table_t *headers_out;
+ headers_out = apr_table_copy(pool, t);
+ apr_table_unset(headers_out, "Connection");
+ apr_table_unset(headers_out, "Keep-Alive");
+ apr_table_unset(headers_out, "Proxy-Authenticate");
+ apr_table_unset(headers_out, "Proxy-Authorization");
+ apr_table_unset(headers_out, "TE");
+ apr_table_unset(headers_out, "Trailers");
+ apr_table_unset(headers_out, "Transfer-Encoding");
+ apr_table_unset(headers_out, "Upgrade");
+
+ conf = (cache_server_conf *)ap_get_module_config(s->module_config,
+ &cache_module);
+ /* Remove the user defined headers set with CacheIgnoreHeaders.
+ * This may break RFC 2616 compliance on behalf of the administrator.
+ */
+ header = (char **)conf->ignore_headers->elts;
+ for (i = 0; i < conf->ignore_headers->nelts; i++) {
+ apr_table_unset(headers_out, header[i]);
+ }
+ return headers_out;
+}
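+
+/* Example (editor's note, illustrative): with "CacheIgnoreHeaders Set-Cookie"
+ * configured, conf->ignore_headers contains "Set-Cookie" and the loop above
+ * removes that header from the copy before it is handed to the cache. */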
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/charset.conv b/rubbos/app/httpd-2.0.64/modules/experimental/charset.conv
new file mode 100644
index 00000000..3cd6fa9d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/charset.conv
@@ -0,0 +1,55 @@
+
+# Lang-abbv Charset Language
+#---------------------------------
+en ISO-8859-1 English
+UTF-8 utf8 UTF-8
+Unicode ucs Unicode
+th Cp874 Thai
+ja SJIS Japanese
+ko Cp949 Korean
+zh Cp950 Chinese-Traditional
+zh-cn GB2312 Chinese-Simplified
+zh-tw Cp950 Chinese
+cs ISO-8859-2 Czech
+hu ISO-8859-2 Hungarian
+hr ISO-8859-2 Croatian
+pl ISO-8859-2 Polish
+ro ISO-8859-2 Romanian
+sr ISO-8859-2 Serbian
+sk ISO-8859-2 Slovak
+sl ISO-8859-2 Slovenian
+sq ISO-8859-2 Albanian
+bg ISO-8859-5 Bulgarian
+be ISO-8859-5 Byelorussian
+mk ISO-8859-5 Macedonian
+ru ISO-8859-5 Russian
+uk ISO-8859-5 Ukrainian
+ca ISO-8859-1 Catalan
+de ISO-8859-1 German
+da ISO-8859-1 Danish
+fi ISO-8859-1 Finnish
+fr ISO-8859-1 French
+es ISO-8859-1 Spanish
+is ISO-8859-1 Icelandic
+it ISO-8859-1 Italian
+nl ISO-8859-1 Dutch
+no ISO-8859-1 Norwegian
+pt ISO-8859-1 Portuguese
+sv ISO-8859-1 Swedish
+af ISO-8859-1 Afrikaans
+eu ISO-8859-1 Basque
+fo ISO-8859-1 Faroese
+gl ISO-8859-1 Galician
+ga ISO-8859-1 Irish
+gd ISO-8859-1 Scottish
+mt ISO-8859-3 Maltese
+eo ISO-8859-3 Esperanto
+el ISO-8859-7 Greek
+tr ISO-8859-9 Turkish
+he ISO-8859-8 Hebrew
+iw ISO-8859-8 Hebrew
+ar ISO-8859-6 Arabic
+et ISO-8859-1 Estonian
+lv ISO-8859-2 Latvian
+lt ISO-8859-2 Lithuanian
+ 
\ No newline at end of file
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/config.m4 b/rubbos/app/httpd-2.0.64/modules/experimental/config.m4
new file mode 100644
index 00000000..b9d2e7e3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/config.m4
@@ -0,0 +1,39 @@
+
+APACHE_MODPATH_INIT(experimental)
+
+if test "$ac_cv_ebcdic" = "yes"; then
+# mod_charset_lite can be very useful on an ebcdic system,
+# so include it by default
+ APACHE_MODULE(charset_lite, character set translation, , , yes)
+else
+ APACHE_MODULE(charset_lite, character set translation, , , no)
+fi
+
+dnl # list of object files for mod_cache
+cache_objs="dnl
+mod_cache.lo dnl
+cache_storage.lo dnl
+cache_util.lo dnl
+"
+dnl # list of object files for mod_mem_cache
+mem_cache_objs="dnl
+mod_mem_cache.lo dnl
+cache_cache.lo dnl
+cache_pqueue.lo dnl
+cache_hash.lo dnl
+"
+APACHE_MODULE(cache, dynamic file caching, $cache_objs, , no)
+APACHE_MODULE(disk_cache, disk caching module, , , no)
+APACHE_MODULE(mem_cache, memory caching module, $mem_cache_objs, , no)
+APACHE_MODULE(example, example and demo module, , , no)
+APACHE_MODULE(case_filter, example uppercase conversion filter, , , no)
+APACHE_MODULE(case_filter_in, example uppercase conversion input filter, , , no)
+APACHE_MODULE(dumpio, I/O dump filter, , , no)
+
+ldap_objects="util_ldap.lo util_ldap_cache.lo util_ldap_cache_mgr.lo"
+APACHE_MODULE(ldap, LDAP caching and connection pooling services, $ldap_objects, , no)
+
+auth_ldap_objects="mod_auth_ldap.lo"
+APACHE_MODULE(auth_ldap, LDAP based authentication, $auth_ldap_objects, , no)
+
+APACHE_MODPATH_FINISH
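+
+dnl # (editor's note, illustrative) the cache and LDAP modules above default
+dnl # to "no", so a configure run would typically enable them explicitly,
+dnl # along the lines of:
+dnl #   ./configure --enable-cache --enable-mem-cache --enable-ldap --enable-auth-ldap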
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.c
new file mode 100644
index 00000000..10b3f17c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.c
@@ -0,0 +1,1117 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_auth_ldap.c: LDAP authentication module
+ *
+ * Original code from auth_ldap module for Apache v1.3:
+ * Copyright 1998, 1999 Enbridge Pipelines Inc.
+ * Copyright 1999-2001 Dave Carrigan
+ */
+
+#include <apr_ldap.h>
+#include <apr_strings.h>
+#include <apr_xlate.h>
+#define APR_WANT_STRFUNC
+#include <apr_want.h>
+
+#include "ap_config.h"
+#if APR_HAVE_UNISTD_H
+/* for getpid() */
+#include <unistd.h>
+#endif
+#include <ctype.h>
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "util_ldap.h"
+
+#ifndef APU_HAS_LDAP
+#error mod_auth_ldap requires APR-util to have LDAP support built in
+#endif
+
+/* per directory configuration */
+typedef struct {
+ apr_pool_t *pool; /* Pool that this config is allocated from */
+#if APR_HAS_THREADS
+ apr_thread_mutex_t *lock; /* Lock for this config */
+#endif
+ int auth_authoritative; /* Is this auth method the one and only? */
+ int enabled; /* Is auth_ldap enabled in this directory? */
+
+ /* These parameters are all derived from the AuthLDAPURL directive */
+ char *url; /* String representation of the URL */
+
+ char *host; /* Name of the LDAP server (or space separated list) */
+ int port; /* Port of the LDAP server */
+ char *basedn; /* Base DN to do all searches from */
+ char *attribute; /* Attribute to search for */
+ char **attributes; /* Array of all the attributes to return */
+ int scope; /* Scope of the search */
+ char *filter; /* Filter to further limit the search */
+ deref_options deref; /* how to handle alias dereferencing */
+ char *binddn; /* DN to bind to server (can be NULL) */
+ char *bindpw; /* Password to bind to server (can be NULL) */
+
+ int frontpage_hack; /* Hack for frontpage support */
+ int user_is_dn; /* If true, connection->user is DN instead of userid */
+ int compare_dn_on_server; /* If true, will use server to do DN compare */
+
+ int have_ldap_url; /* Set if we have found an LDAP url */
+
+ apr_array_header_t *groupattr; /* List of Group attributes */
+ int group_attrib_is_dn; /* If true, the group attribute is the DN, otherwise,
+ it's the exact string passed by the HTTP client */
+
+ int secure; /* True if SSL connections are requested */
+} mod_auth_ldap_config_t;
+
+typedef struct mod_auth_ldap_request_t {
+ char *dn; /* The saved dn from a successful search */
+ char *user; /* The username provided by the client */
+} mod_auth_ldap_request_t;
+
+/* maximum group elements supported */
+#define GROUPATTR_MAX_ELTS 10
+
+struct mod_auth_ldap_groupattr_entry_t {
+ char *name;
+};
+
+module AP_MODULE_DECLARE_DATA auth_ldap_module;
+
+/* function prototypes */
+void mod_auth_ldap_build_filter(char *filtbuf,
+ request_rec *r,
+ mod_auth_ldap_config_t *sec);
+int mod_auth_ldap_check_user_id(request_rec *r);
+int mod_auth_ldap_auth_checker(request_rec *r);
+void *mod_auth_ldap_create_dir_config(apr_pool_t *p, char *d);
+
+/* ---------------------------------------- */
+
+static apr_hash_t *charset_conversions = NULL;
+static char *to_charset = NULL; /* UTF-8 identifier derived from the charset.conv file */
+
+/* Derive a code page ID given a language name or ID */
+static char* derive_codepage_from_lang (apr_pool_t *p, char *language)
+{
+ int lang_len;
+ int check_short = 0;
+ char *charset;
+
+ if (!language) /* our default codepage */
+ return apr_pstrdup(p, "ISO-8859-1");
+ else
+ lang_len = strlen(language);
+
+ charset = (char*) apr_hash_get(charset_conversions, language, APR_HASH_KEY_STRING);
+
+ /* Fall back to the two-letter primary tag (e.g. "en-us" -> "en"),
+ * but only when the tag is actually longer than two characters. */
+ if (!charset && (lang_len > 2)) {
+ language[2] = '\0';
+ charset = (char*) apr_hash_get(charset_conversions, language, APR_HASH_KEY_STRING);
+ }
+
+ if (charset) {
+ charset = apr_pstrdup(p, charset);
+ }
+
+ return charset;
+}
+
+static apr_xlate_t* get_conv_set (request_rec *r)
+{
+ char *lang_line = (char*)apr_table_get(r->headers_in, "accept-language");
+ char *lang;
+ apr_xlate_t *convset;
+
+ if (lang_line) {
+ lang_line = apr_pstrdup(r->pool, lang_line);
+ for (lang = lang_line;*lang;lang++) {
+ if ((*lang == ',') || (*lang == ';')) {
+ *lang = '\0';
+ break;
+ }
+ }
+ lang = derive_codepage_from_lang(r->pool, lang_line);
+
+ if (lang && (apr_xlate_open(&convset, to_charset, lang, r->pool) == APR_SUCCESS)) {
+ return convset;
+ }
+ }
+
+ return NULL;
+}
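+
+/* Example (editor's note, illustrative): for "Accept-Language: de-DE,en;q=0.8"
+ * the code above keeps "de-DE", derive_codepage_from_lang() falls back to the
+ * "de" entry of charset.conv (ISO-8859-1), and the returned translation
+ * handle converts from ISO-8859-1 to the configured UTF-8 charset. */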
+
+
+/*
+ * Build the search filter, or at least as much of the search filter as
+ * will fit in the buffer. We don't worry about the buffer not being able
+ * to hold the entire filter. If the buffer wasn't big enough to hold the
+ * filter, ldap_search_s will complain, but the only situation where this
+ * is likely to happen is if the client sent a really, really long
+ * username, most likely as part of an attack.
+ *
+ * The search filter consists of the filter provided with the URL,
+ * combined with a filter made up of the attribute provided with the URL,
+ * and the actual username passed by the HTTP client. For example, assume
+ * that the LDAP URL is
+ *
+ * ldap://ldap.airius.com/ou=People, o=Airius?uid??(posixid=*)
+ *
+ * Further, assume that the userid passed by the client was `userj'. The
+ * search filter will be (&(posixid=*)(uid=userj)).
+ */
+#define FILTER_LENGTH MAX_STRING_LEN
+void mod_auth_ldap_build_filter(char *filtbuf,
+ request_rec *r,
+ mod_auth_ldap_config_t *sec)
+{
+ char *p, *q, *filtbuf_end;
+ char *user;
+ apr_xlate_t *convset = NULL;
+ apr_size_t inbytes;
+ apr_size_t outbytes;
+ char *outbuf;
+
+ if (r->user != NULL) {
+ user = apr_pstrdup (r->pool, r->user);
+ }
+ else
+ return;
+
+ if (charset_conversions) {
+ convset = get_conv_set(r);
+ }
+
+ if (convset) {
+ inbytes = strlen(user);
+ outbytes = (inbytes+1)*3;
+ outbuf = apr_pcalloc(r->pool, outbytes);
+
+ /* Convert the user name to UTF-8. This is only valid for LDAP v3 */
+ if (apr_xlate_conv_buffer(convset, user, &inbytes, outbuf, &outbytes) == APR_SUCCESS) {
+ user = apr_pstrdup(r->pool, outbuf);
+ }
+ }
+
+ /*
+ * Create the first part of the filter, which consists of the
+ * config-supplied portions.
+ */
+ apr_snprintf(filtbuf, FILTER_LENGTH, "(&(%s)(%s=", sec->filter, sec->attribute);
+
+ /*
+ * Now add the client-supplied username to the filter, ensuring that any
+ * LDAP filter metachars are escaped.
+ */
+ filtbuf_end = filtbuf + FILTER_LENGTH - 1;
+#if APR_HAS_MICROSOFT_LDAPSDK
+ for (p = user, q=filtbuf + strlen(filtbuf);
+ *p && q < filtbuf_end; ) {
+ if (strchr("*()\\", *p) != NULL) {
+ if ( q + 3 >= filtbuf_end)
+ break; /* Don't write part of escape sequence if we can't write all of it */
+ *q++ = '\\';
+ switch ( *p++ )
+ {
+ case '*':
+ *q++ = '2';
+ *q++ = 'a';
+ break;
+ case '(':
+ *q++ = '2';
+ *q++ = '8';
+ break;
+ case ')':
+ *q++ = '2';
+ *q++ = '9';
+ break;
+ case '\\':
+ *q++ = '5';
+ *q++ = 'c';
+ break;
+ }
+ }
+ else
+ *q++ = *p++;
+ }
+#else
+ for (p = user, q=filtbuf + strlen(filtbuf);
+ *p && q < filtbuf_end; *q++ = *p++) {
+ if (strchr("*()\\", *p) != NULL) {
+ *q++ = '\\';
+ if (q >= filtbuf_end) {
+ break;
+ }
+ }
+ }
+#endif
+ *q = '\0';
+
+ /*
+ * Append the closing parens of the filter, unless doing so would
+ * overrun the buffer.
+ */
+ if (q + 2 <= filtbuf_end)
+ strcat(filtbuf, "))");
+}
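+
+/* Worked example (editor's note, illustrative): with sec->filter set to
+ * "posixid=*", sec->attribute set to "uid" and a client-supplied user of
+ * "a*b", the buffer ends up holding
+ *     (&(posixid=*)(uid=a\2ab))   with the Microsoft LDAP SDK, or
+ *     (&(posixid=*)(uid=a\*b))    with other SDKs,
+ * i.e. LDAP filter metacharacters in the user name are always escaped. */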
+
+static apr_status_t mod_auth_ldap_cleanup_connection_close(void *param)
+{
+ util_ldap_connection_t *ldc = param;
+ util_ldap_connection_close(ldc);
+ return APR_SUCCESS;
+}
+
+
+/*
+ * Authentication Phase
+ * --------------------
+ *
+ * This phase authenticates the credentials the user has sent with
+ * the request (ie the username and password are checked). This is done
+ * by making an attempt to bind to the LDAP server using this user's
+ * DN and the supplied password.
+ *
+ */
+int mod_auth_ldap_check_user_id(request_rec *r)
+{
+ int failures = 0;
+ const char **vals = NULL;
+ char filtbuf[FILTER_LENGTH];
+ mod_auth_ldap_config_t *sec =
+ (mod_auth_ldap_config_t *)ap_get_module_config(r->per_dir_config, &auth_ldap_module);
+
+ util_ldap_connection_t *ldc = NULL;
+ const char *sent_pw;
+ int result = 0;
+ const char *dn = NULL;
+
+ mod_auth_ldap_request_t *req =
+ (mod_auth_ldap_request_t *)apr_pcalloc(r->pool, sizeof(mod_auth_ldap_request_t));
+ ap_set_module_config(r->request_config, &auth_ldap_module, req);
+
+ if (!sec->enabled) {
+ return DECLINED;
+ }
+
+ /*
+ * Basic sanity checks before any LDAP operations even happen.
+ */
+ if (!sec->have_ldap_url) {
+ return DECLINED;
+ }
+
+start_over:
+
+ /* There is a good AuthLDAPURL, right? */
+ if (sec->host) {
+ ldc = util_ldap_connection_find(r, sec->host, sec->port,
+ sec->binddn, sec->bindpw, sec->deref,
+ sec->secure);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authenticate: no sec->host - weird...?", getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authenticate: using URL %s", getpid(), sec->url);
+
+ /* Get the password that the client sent */
+ if ((result = ap_get_basic_auth_pw(r, &sent_pw))) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authenticate: "
+ "ap_get_basic_auth_pw() returns %d", getpid(), result);
+ util_ldap_connection_close(ldc);
+ return result;
+ }
+
+ if (r->user == NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authenticate: no user specified", getpid());
+ util_ldap_connection_close(ldc);
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+
+ /* build the username filter */
+ mod_auth_ldap_build_filter(filtbuf, r, sec);
+
+ /* do the user search */
+ result = util_ldap_cache_checkuserid(r, ldc, sec->url, sec->basedn, sec->scope,
+ sec->attributes, filtbuf, sent_pw, &dn, &vals);
+ util_ldap_connection_close(ldc);
+
+ /* sanity check - if server is down, retry it up to 5 times */
+ if (result == LDAP_SERVER_DOWN) {
+ if (failures++ <= 5) {
+ goto start_over;
+ }
+ }
+
+ /* handle bind failure */
+ if (result != LDAP_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authenticate: "
+ "user %s authentication failed; URI %s [%s][%s]",
+ getpid(), r->user, r->uri, ldc->reason, ldap_err2string(result));
+ if ((LDAP_INVALID_CREDENTIALS == result) || sec->auth_authoritative) {
+ ap_note_basic_auth_failure(r);
+ return HTTP_UNAUTHORIZED;
+ }
+ else {
+ return DECLINED;
+ }
+ }
+
+ /* mark the user and DN */
+ req->dn = apr_pstrdup(r->pool, dn);
+ req->user = r->user;
+ if (sec->user_is_dn) {
+ r->user = req->dn;
+ }
+
+ /* add environment variables */
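+    /* e.g. an LDAP attribute "cn" becomes the environment variable
+     * AUTHENTICATE_CN holding the value returned by the search for that
+     * attribute; the offset 13 below is strlen("AUTHENTICATE_") */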
+ if (sec->attributes && vals) {
+ apr_table_t *e = r->subprocess_env;
+ int i = 0;
+ while (sec->attributes[i]) {
+ char *str = apr_pstrcat(r->pool, "AUTHENTICATE_", sec->attributes[i], NULL);
+ int j = 13;
+ while (str[j]) {
+ if (str[j] >= 'a' && str[j] <= 'z') {
+ str[j] = str[j] - ('a' - 'A');
+ }
+ j++;
+ }
+ apr_table_setn(e, str, vals[i]);
+ i++;
+ }
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authenticate: accepting %s", getpid(), r->user);
+
+ return OK;
+}
+
+
+/*
+ * Authorisation Phase
+ * -------------------
+ *
+ * After checking whether the username and password are correct, we need
+ * to check whether that user is authorised to view this resource. The
+ * require directive is used to do this:
+ *
+ * require valid-user Any authenticated user is allowed in.
+ * require user <username> This particular user is allowed in.
+ * require group <groupname> The user must be a member of this group
+ * in order to be allowed in.
+ * require dn <dn> The user must have the following DN in the
+ * LDAP tree to be let in.
+ *
+ */
+int mod_auth_ldap_auth_checker(request_rec *r)
+{
+ int result = 0;
+ mod_auth_ldap_request_t *req =
+ (mod_auth_ldap_request_t *)ap_get_module_config(r->request_config,
+ &auth_ldap_module);
+ mod_auth_ldap_config_t *sec =
+ (mod_auth_ldap_config_t *)ap_get_module_config(r->per_dir_config,
+ &auth_ldap_module);
+
+ util_ldap_connection_t *ldc = NULL;
+ int m = r->method_number;
+
+ const apr_array_header_t *reqs_arr = ap_requires(r);
+ require_line *reqs = reqs_arr ? (require_line *)reqs_arr->elts : NULL;
+
+ register int x;
+ const char *t;
+ char *w, *value;
+ int method_restricted = 0;
+
+ if (!sec->enabled) {
+ return DECLINED;
+ }
+
+ if (!sec->have_ldap_url) {
+ return DECLINED;
+ }
+
+ /*
+ * It is possible that we've skipped mod_auth_ldap's
+ * check_user_id hook, but still get here. In that
+ * case, the req request_config struct hasn't been initialized
+ * causing problems when we try to use req->dn and/or req->user
+ * below. So we simply create one.
+ *
+ * Unlike 2.2, we don't try to search or populate it.
+ */
+ if (!req) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "no req struct - skipped mod_auth_ldap_check_user_id?",
+ getpid());
+
+ req = (mod_auth_ldap_request_t *)apr_pcalloc(r->pool,
+ sizeof(mod_auth_ldap_request_t));
+ ap_set_module_config(r->request_config, &auth_ldap_module, req);
+ }
+
+ if (sec->host) {
+ ldc = util_ldap_connection_find(r, sec->host, sec->port,
+ sec->binddn, sec->bindpw, sec->deref,
+ sec->secure);
+ apr_pool_cleanup_register(r->pool, ldc,
+ mod_auth_ldap_cleanup_connection_close,
+ apr_pool_cleanup_null);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: no sec->host - weird...?", getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+
+ /*
+ * If there are no elements in the group attribute array, the default should be
+ * member and uniquemember; populate the array now.
+ */
+ if (sec->groupattr->nelts == 0) {
+ struct mod_auth_ldap_groupattr_entry_t *grp;
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(sec->lock);
+#endif
+ grp = apr_array_push(sec->groupattr);
+ grp->name = "member";
+ grp = apr_array_push(sec->groupattr);
+ grp->name = "uniquemember";
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(sec->lock);
+#endif
+ }
+
+ if (!reqs_arr) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: no requirements array", getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+
+ /* Loop through the requirements array until there's no elements
+ * left, or something causes a return from inside the loop */
+ for(x=0; x < reqs_arr->nelts; x++) {
+ if (! (reqs[x].method_mask & (1 << m))) {
+ continue;
+ }
+ method_restricted = 1;
+
+ t = reqs[x].requirement;
+ w = ap_getword_white(r->pool, &t);
+
+ if (strcmp(w, "valid-user") == 0) {
+ /*
+ * Valid user will always be true if we authenticated with ldap,
+ * but when using front page, valid user should only be true if
+ * he exists in the frontpage password file. This hack will get
+ * auth_ldap to look up the user in the pw file to really be
+ * sure that he's valid. Naturally, it requires mod_auth to be
+ * compiled in, but if mod_auth wasn't in there, then the need
+ * for this hack wouldn't exist anyway.
+ */
+ if (sec->frontpage_hack) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "deferring authorisation to mod_auth (FP Hack)",
+ getpid());
+ return OK;
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "successful authorisation because user "
+ "is valid-user", getpid());
+ return OK;
+ }
+ }
+ else if (strcmp(w, "user") == 0) {
+ if (req->dn == NULL || strlen(req->dn) == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require user: user's DN has not been defined; failing authorisation",
+ getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+ /*
+ * First do a whole-line compare, in case it's something like
+ * require user Babs Jensen
+ */
+ result = util_ldap_cache_compare(r, ldc, sec->url, req->dn, sec->attribute, t);
+ switch(result) {
+ case LDAP_COMPARE_TRUE: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require user: authorisation successful", getpid());
+ return OK;
+ }
+ default: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: require user: "
+ "authorisation failed [%s][%s]", getpid(),
+ ldc->reason, ldap_err2string(result));
+ }
+ }
+ /*
+ * Now break apart the line and compare each word on it
+ */
+ while (t[0]) {
+ w = ap_getword_conf(r->pool, &t);
+ result = util_ldap_cache_compare(r, ldc, sec->url, req->dn, sec->attribute, w);
+ switch(result) {
+ case LDAP_COMPARE_TRUE: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require user: authorisation successful", getpid());
+ return OK;
+ }
+ default: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require user: authorisation failed [%s][%s]",
+ getpid(), ldc->reason, ldap_err2string(result));
+ }
+ }
+ }
+ }
+ else if (strcmp(w, "dn") == 0) {
+ if (req->dn == NULL || strlen(req->dn) == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require dn: user's DN has not been defined; failing authorisation",
+ getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+
+ result = util_ldap_cache_comparedn(r, ldc, sec->url, req->dn, t, sec->compare_dn_on_server);
+ switch(result) {
+ case LDAP_COMPARE_TRUE: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require dn: authorisation successful", getpid());
+ return OK;
+ }
+ default: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require dn \"%s\": LDAP error [%s][%s]",
+ getpid(), t, ldc->reason, ldap_err2string(result));
+ }
+ }
+ }
+ else if (strcmp(w, "group") == 0) {
+ struct mod_auth_ldap_groupattr_entry_t *ent = (struct mod_auth_ldap_groupattr_entry_t *) sec->groupattr->elts;
+ int i;
+
+ if (sec->group_attrib_is_dn) {
+ if (req->dn == NULL || strlen(req->dn) == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: require group: user's DN has not been defined; failing authorisation",
+ getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+ }
+ else {
+ if (req->user == NULL || strlen(req->user) == 0) {
+ /* We weren't called in the authentication phase, so we didn't have a
+ * chance to set the user field. Do so now. */
+ req->user = r->user;
+ }
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: require group: testing for group membership in \"%s\"",
+ getpid(), t);
+
+ for (i = 0; i < sec->groupattr->nelts; i++) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: require group: testing for %s: %s (%s)", getpid(),
+ ent[i].name, sec->group_attrib_is_dn ? req->dn : req->user, t);
+
+ result = util_ldap_cache_compare(r, ldc, sec->url, t, ent[i].name,
+ sec->group_attrib_is_dn ? req->dn : req->user);
+ switch(result) {
+ case LDAP_COMPARE_TRUE: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: require group: "
+ "authorisation successful (attribute %s) [%s][%s]",
+ getpid(), ent[i].name, ldc->reason, ldap_err2string(result));
+ return OK;
+ }
+ default: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: require group \"%s\": "
+ "authorisation failed [%s][%s]",
+ getpid(), t, ldc->reason, ldap_err2string(result));
+ }
+ }
+ }
+ }
+ else if (strcmp(w, "ldap-attribute") == 0) {
+ if (req->dn == NULL || strlen(req->dn) == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: "
+ "require ldap-attribute: user's DN has not been defined; failing authorisation",
+ getpid());
+ return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
+ }
+ while (t[0]) {
+ w = ap_getword(r->pool, &t, '=');
+ value = ap_getword_conf(r->pool, &t);
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: checking attribute"
+ " %s has value %s", getpid(), w, value);
+ result = util_ldap_cache_compare(r, ldc, sec->url, req->dn,
+ w, value);
+ switch(result) {
+ case LDAP_COMPARE_TRUE: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO,
+ 0, r, "[%d] auth_ldap authorise: "
+ "require attribute: authorisation "
+ "successful", getpid());
+ return OK;
+ }
+ default: {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO,
+ 0, r, "[%d] auth_ldap authorise: "
+ "require attribute: authorisation "
+ "failed [%s][%s]", getpid(),
+ ldc->reason, ldap_err2string(result));
+ }
+ }
+ }
+ }
+ }
+
+ if (!method_restricted) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: agreeing because non-restricted",
+ getpid());
+ return OK;
+ }
+
+ if (!sec->auth_authoritative) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: declining to authorise", getpid());
+ return DECLINED;
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r,
+ "[%d] auth_ldap authorise: authorisation denied", getpid());
+ ap_note_basic_auth_failure (r);
+
+ return HTTP_UNAUTHORIZED;
+}
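+
+/* Configuration sketch (editor's note; the directive values below are
+ * assumptions for illustration only):
+ *
+ *   <Location /private>
+ *       AuthType Basic
+ *       AuthName "LDAP protected area"
+ *       AuthLDAPURL ldap://ldap.example.com/ou=People,o=Example?uid?sub?(objectclass=*)
+ *       require group cn=staff,ou=Groups,o=Example
+ *   </Location>
+ *
+ * mod_auth_ldap authenticates the user against the directory, and the
+ * authorisation phase above then tests the "require group" line using the
+ * default member/uniquemember group attributes. */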
+
+
+/* ---------------------------------------- */
+/* config directives */
+
+
+void *mod_auth_ldap_create_dir_config(apr_pool_t *p, char *d)
+{
+ mod_auth_ldap_config_t *sec =
+ (mod_auth_ldap_config_t *)apr_pcalloc(p, sizeof(mod_auth_ldap_config_t));
+
+ sec->pool = p;
+#if APR_HAS_THREADS
+ apr_thread_mutex_create(&sec->lock, APR_THREAD_MUTEX_DEFAULT, p);
+#endif
+ sec->auth_authoritative = 1;
+ sec->enabled = 1;
+ sec->groupattr = apr_array_make(p, GROUPATTR_MAX_ELTS,
+ sizeof(struct mod_auth_ldap_groupattr_entry_t));
+
+ sec->have_ldap_url = 0;
+ sec->url = "";
+ sec->host = NULL;
+ sec->binddn = NULL;
+ sec->bindpw = NULL;
+ sec->deref = always;
+ sec->group_attrib_is_dn = 1;
+
+ sec->frontpage_hack = 0;
+ sec->secure = 0;
+
+ sec->user_is_dn = 0;
+ sec->compare_dn_on_server = 0;
+
+ return sec;
+}
+
+/*
+ * Use the ldap url parsing routines to break up the ldap url into
+ * host and port.
+ */
+static const char *mod_auth_ldap_parse_url(cmd_parms *cmd,
+ void *config,
+ const char *url)
+{
+ int result;
+ apr_ldap_url_desc_t *urld;
+
+ mod_auth_ldap_config_t *sec = config;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: `%s'",
+ getpid(), url);
+
+ result = apr_ldap_url_parse(url, &(urld));
+ if (result != LDAP_SUCCESS) {
+ switch (result) {
+ case LDAP_URL_ERR_NOTLDAP:
+ return "LDAP URL does not begin with ldap://";
+ case LDAP_URL_ERR_NODN:
+ return "LDAP URL does not have a DN";
+ case LDAP_URL_ERR_BADSCOPE:
+ return "LDAP URL has an invalid scope";
+ case LDAP_URL_ERR_MEM:
+ return "Out of memory parsing LDAP URL";
+ default:
+ return "Could not parse LDAP URL";
+ }
+ }
+ sec->url = apr_pstrdup(cmd->pool, url);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: Host: %s", getpid(), urld->lud_host);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: Port: %d", getpid(), urld->lud_port);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: DN: %s", getpid(), urld->lud_dn);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: attrib: %s", getpid(), urld->lud_attrs? urld->lud_attrs[0] : "(null)");
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: scope: %s", getpid(),
+ (urld->lud_scope == LDAP_SCOPE_SUBTREE? "subtree" :
+ urld->lud_scope == LDAP_SCOPE_BASE? "base" :
+ urld->lud_scope == LDAP_SCOPE_ONELEVEL? "onelevel" : "unknown"));
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0,
+ cmd->server, "[%d] auth_ldap url parse: filter: %s", getpid(), urld->lud_filter);
+
+ /* Set all the values, or at least some sane defaults */
+ if (sec->host) {
+ char *p = apr_palloc(cmd->pool, strlen(sec->host) + strlen(urld->lud_host) + 2);
+ strcpy(p, urld->lud_host);
+ strcat(p, " ");
+ strcat(p, sec->host);
+ sec->host = p;
+ }
+ else {
+ sec->host = urld->lud_host? apr_pstrdup(cmd->pool, urld->lud_host) : "localhost";
+ }
+ sec->basedn = urld->lud_dn? apr_pstrdup(cmd->pool, urld->lud_dn) : "";
+ if (urld->lud_attrs && urld->lud_attrs[0]) {
+ int i = 1;
+ while (urld->lud_attrs[i]) {
+ i++;
+ }
+ sec->attributes = apr_pcalloc(cmd->pool, sizeof(char *) * (i+1));
+ i = 0;
+ while (urld->lud_attrs[i]) {
+ sec->attributes[i] = apr_pstrdup(cmd->pool, urld->lud_attrs[i]);
+ i++;
+ }
+ sec->attribute = sec->attributes[0];
+ }
+ else {
+ sec->attribute = "uid";
+ }
+
+ sec->scope = urld->lud_scope == LDAP_SCOPE_ONELEVEL ?
+ LDAP_SCOPE_ONELEVEL : LDAP_SCOPE_SUBTREE;
+
+ if (urld->lud_filter) {
+ if (urld->lud_filter[0] == '(') {
+ /*
+ * Get rid of the surrounding parens; later on when generating the
+ * filter, they'll be put back.
+ */
+ sec->filter = apr_pstrdup(cmd->pool, urld->lud_filter+1);
+ sec->filter[strlen(sec->filter)-1] = '\0';
+ }
+ else {
+ sec->filter = apr_pstrdup(cmd->pool, urld->lud_filter);
+ }
+ }
+ else {
+ sec->filter = "objectclass=*";
+ }
+
+ /* "ldaps" indicates secure ldap connections desired
+ */
+ if (strncasecmp(url, "ldaps", 5) == 0)
+ {
+ sec->secure = 1;
+ sec->port = urld->lud_port? urld->lud_port : LDAPS_PORT;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "LDAP: auth_ldap using SSL connections");
+ }
+ else
+ {
+ sec->secure = 0;
+ sec->port = urld->lud_port? urld->lud_port : LDAP_PORT;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server,
+ "LDAP: auth_ldap not using SSL connections");
+ }
+
+ sec->have_ldap_url = 1;
+ apr_ldap_free_urldesc(urld);
+ return NULL;
+}
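+
+/* Parsing sketch (editor's note, illustrative): an AuthLDAPURL of
+ *   ldap://ldap.example.com:389/ou=People,o=Example?uid?sub?(posixid=*)
+ * yields host "ldap.example.com", port 389, basedn "ou=People,o=Example",
+ * attribute "uid", a subtree scope and the filter "posixid=*" (the
+ * surrounding parens are stripped here and added back when the search
+ * filter is built). */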
+
+static const char *mod_auth_ldap_set_deref(cmd_parms *cmd, void *config, const char *arg)
+{
+ mod_auth_ldap_config_t *sec = config;
+
+ if (strcmp(arg, "never") == 0 || strcasecmp(arg, "off") == 0) {
+ sec->deref = never;
+ }
+ else if (strcmp(arg, "searching") == 0) {
+ sec->deref = searching;
+ }
+ else if (strcmp(arg, "finding") == 0) {
+ sec->deref = finding;
+ }
+ else if (strcmp(arg, "always") == 0 || strcasecmp(arg, "on") == 0) {
+ sec->deref = always;
+ }
+ else {
+ return "Unrecognized value for AuthLDAPAliasDereference directive";
+ }
+ return NULL;
+}
+
+static const char *mod_auth_ldap_add_group_attribute(cmd_parms *cmd, void *config, const char *arg)
+{
+ struct mod_auth_ldap_groupattr_entry_t *new;
+
+ mod_auth_ldap_config_t *sec = config;
+
+ if (sec->groupattr->nelts > GROUPATTR_MAX_ELTS)
+ return "Too many AuthLDAPGroupAttribute directives";
+
+ new = apr_array_push(sec->groupattr);
+ new->name = apr_pstrdup(cmd->pool, arg);
+
+ return NULL;
+}
+
+static const char *set_charset_config(cmd_parms *cmd, void *config, const char *arg)
+{
+ ap_set_module_config(cmd->server->module_config, &auth_ldap_module,
+ (void *)arg);
+ return NULL;
+}
+
+
+command_rec mod_auth_ldap_cmds[] = {
+ AP_INIT_TAKE1("AuthLDAPURL", mod_auth_ldap_parse_url, NULL, OR_AUTHCFG,
+ "URL to define LDAP connection. This should be an RFC 2255 complaint\n"
+ "URL of the form ldap://host[:port]/basedn[?attrib[?scope[?filter]]].\n"
+ "<ul>\n"
+ "<li>Host is the name of the LDAP server. Use a space separated list of hosts \n"
+ "to specify redundant servers.\n"
+ "<li>Port is optional, and specifies the port to connect to.\n"
+ "<li>basedn specifies the base DN to start searches from\n"
+ "<li>Attrib specifies what attribute to search for in the directory. If not "
+ "provided, it defaults to <b>uid</b>.\n"
+ "<li>Scope is the scope of the search, and can be either <b>sub</b> or "
+ "<b>one</b>. If not provided, the default is <b>sub</b>.\n"
+ "<li>Filter is a filter to use in the search. If not provided, "
+ "defaults to <b>(objectClass=*)</b>.\n"
+ "</ul>\n"
+ "Searches are performed using the attribute and the filter combined. "
+ "For example, assume that the\n"
+ "LDAP URL is <b>ldap://ldap.airius.com/ou=People, o=Airius?uid?sub?(posixid=*)</b>. "
+ "Searches will\n"
+ "be done using the filter <b>(&((posixid=*))(uid=<i>username</i>))</b>, "
+ "where <i>username</i>\n"
+ "is the user name passed by the HTTP client. The search will be a subtree "
+ "search on the branch <b>ou=People, o=Airius</b>."),
+
+ AP_INIT_TAKE1("AuthLDAPBindDN", ap_set_string_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, binddn), OR_AUTHCFG,
+ "DN to use to bind to LDAP server. If not provided, will do an anonymous bind."),
+
+ AP_INIT_TAKE1("AuthLDAPBindPassword", ap_set_string_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, bindpw), OR_AUTHCFG,
+ "Password to use to bind to LDAP server. If not provided, will do an anonymous bind."),
+
+ AP_INIT_FLAG("AuthLDAPRemoteUserIsDN", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, user_is_dn), OR_AUTHCFG,
+ "Set to 'on' to set the REMOTE_USER environment variable to be the full "
+ "DN of the remote user. By default, this is set to off, meaning that "
+ "the REMOTE_USER variable will contain whatever value the remote user sent."),
+
+ AP_INIT_FLAG("AuthLDAPAuthoritative", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, auth_authoritative), OR_AUTHCFG,
+ "Set to 'off' to allow access control to be passed along to lower modules if "
+ "the UserID and/or group is not known to this module"),
+
+ AP_INIT_FLAG("AuthLDAPCompareDNOnServer", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, compare_dn_on_server), OR_AUTHCFG,
+ "Set to 'on' to force auth_ldap to do DN compares (for the \"require dn\" "
+ "directive) using the server, and set it 'off' to do the compares locally "
+ "(at the expense of possible false matches). See the documentation for "
+ "a complete description of this option."),
+
+ AP_INIT_ITERATE("AuthLDAPGroupAttribute", mod_auth_ldap_add_group_attribute, NULL, OR_AUTHCFG,
+ "A list of attributes used to define group membership - defaults to "
+ "member and uniquemember"),
+
+ AP_INIT_FLAG("AuthLDAPGroupAttributeIsDN", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, group_attrib_is_dn), OR_AUTHCFG,
+ "If set to 'on', auth_ldap uses the DN that is retrieved from the server for"
+ "subsequent group comparisons. If set to 'off', auth_ldap uses the string"
+ "provided by the client directly. Defaults to 'on'."),
+
+ AP_INIT_TAKE1("AuthLDAPDereferenceAliases", mod_auth_ldap_set_deref, NULL, OR_AUTHCFG,
+ "Determines how aliases are handled during a search. Can bo one of the"
+ "values \"never\", \"searching\", \"finding\", or \"always\". "
+ "Defaults to always."),
+
+ AP_INIT_FLAG("AuthLDAPEnabled", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, enabled), OR_AUTHCFG,
+ "Set to off to disable auth_ldap, even if it's been enabled in a higher tree"),
+
+ AP_INIT_FLAG("AuthLDAPFrontPageHack", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mod_auth_ldap_config_t, frontpage_hack), OR_AUTHCFG,
+ "Set to 'on' to support Microsoft FrontPage"),
+
+ AP_INIT_TAKE1("AuthLDAPCharsetConfig", set_charset_config, NULL, RSRC_CONF,
+ "Character set conversion configuration file. If omitted, character set"
+ "conversion is disabled."),
+
+ {NULL}
+};
+
+static int auth_ldap_post_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
+{
+ ap_configfile_t *f;
+ char l[MAX_STRING_LEN];
+ const char *charset_confname = ap_get_module_config(s->module_config,
+ &auth_ldap_module);
+ apr_status_t status;
+
+ /*
+ mod_auth_ldap_config_t *sec = (mod_auth_ldap_config_t *)
+ ap_get_module_config(s->module_config,
+ &auth_ldap_module);
+
+ if (sec->secure)
+ {
+ if (!util_ldap_ssl_supported(s))
+ {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
+ "LDAP: SSL connections (ldaps://) not supported by utilLDAP");
+ return(!OK);
+ }
+ }
+ */
+
+ /* make sure that mod_ldap (util_ldap) is loaded */
+ if (ap_find_linked_module("util_ldap.c") == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR|APLOG_NOERRNO, 0, s,
+ "Module mod_ldap missing. Mod_ldap (aka. util_ldap) "
+ "must be loaded in order for mod_auth_ldap to function properly");
+ return HTTP_INTERNAL_SERVER_ERROR;
+
+ }
+
+ if (!charset_confname) {
+ return OK;
+ }
+
+ charset_confname = ap_server_root_relative(p, charset_confname);
+ if (!charset_confname) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, APR_EBADPATH, s,
+ "Invalid charset conversion config path %s",
+ (const char *)ap_get_module_config(s->module_config,
+ &auth_ldap_module));
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ if ((status = ap_pcfg_openfile(&f, ptemp, charset_confname))
+ != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, s,
+ "could not open charset conversion config file %s.",
+ charset_confname);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ charset_conversions = apr_hash_make(p);
+
+ while (!(ap_cfg_getline(l, MAX_STRING_LEN, f))) {
+ const char *ll = l;
+ char *lang;
+
+ if (l[0] == '#') {
+ continue;
+ }
+ lang = ap_getword_conf(p, &ll);
+ ap_str_tolower(lang);
+
+ if (ll[0]) {
+ char *charset = ap_getword_conf(p, &ll);
+ apr_hash_set(charset_conversions, lang, APR_HASH_KEY_STRING, charset);
+ }
+ }
+ ap_cfg_closefile(f);
+
+ to_charset = derive_codepage_from_lang (p, "utf-8");
+ if (to_charset == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, s,
+ "could not find the UTF-8 charset in the file %s.",
+ charset_confname);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ return OK;
+}
+
+static void mod_auth_ldap_register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(auth_ldap_post_config,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_check_user_id(mod_auth_ldap_check_user_id, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_auth_checker(mod_auth_ldap_auth_checker, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module auth_ldap_module = {
+ STANDARD20_MODULE_STUFF,
+ mod_auth_ldap_create_dir_config, /* dir config creater */
+ NULL, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ mod_auth_ldap_cmds, /* command table */
+ mod_auth_ldap_register_hooks, /* set up request processing hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.def b/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.def
new file mode 100644
index 00000000..599636fb
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.def
@@ -0,0 +1,6 @@
+IMPORT util_ldap_connection_find
+IMPORT util_ldap_connection_close
+IMPORT util_ldap_cache_checkuserid
+IMPORT util_ldap_cache_compare
+IMPORT util_ldap_cache_comparedn
+EXPORT auth_ldap_module
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.dsp
new file mode 100644
index 00000000..f26a31a0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_auth_ldap.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_auth_ldap" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_auth_ldap - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_auth_ldap.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_auth_ldap.mak" CFG="mod_auth_ldap - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_auth_ldap - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_auth_ldap - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_auth_ldap - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_auth_ldap_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_auth_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_ldap.so
+# ADD LINK32 kernel32.lib wldap32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_auth_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_ldap.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_auth_ldap - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_auth_ldap_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_auth_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_ldap.so
+# ADD LINK32 kernel32.lib wldap32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_auth_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,mod_auth_ldap.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_auth_ldap - Win32 Release"
+# Name "mod_auth_ldap - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_auth_ldap.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_auth_ldap.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_auth_ldap - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_auth_ldap.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_auth_ldap.so "auth_ldap_module for Apache" ../../include/ap_release.h > .\mod_auth_ldap.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_auth_ldap - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_auth_ldap.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_auth_ldap.so "auth_ldap_module for Apache" ../../include/ap_release.h > .\mod_auth_ldap.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.c
new file mode 100644
index 00000000..a208a510
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.c
@@ -0,0 +1,1006 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define CORE_PRIVATE
+
+#include "mod_cache.h"
+
+module AP_MODULE_DECLARE_DATA cache_module;
+APR_OPTIONAL_FN_TYPE(ap_cache_generate_key) *cache_generate_key;
+
+/* -------------------------------------------------------------- */
+
+
+/* Handles for cache filters, resolved at startup to eliminate
+ * a name-to-function mapping on each request
+ */
+static ap_filter_rec_t *cache_save_filter_handle;
+static ap_filter_rec_t *cache_out_filter_handle;
+
+/*
+ * CACHE handler
+ * -------------
+ *
+ * Can we deliver this request from the cache?
+ * If yes:
+ * deliver the content by installing the CACHE_OUT filter.
+ * If no:
+ * check whether we're allowed to try to cache it
+ * If yes:
+ * add CACHE_SAVE filter
+ * If No:
+ * oh well.
+ */
+
+static int cache_url_handler(request_rec *r, int lookup)
+{
+ apr_status_t rv;
+ const char *pragma, *auth;
+ apr_uri_t uri;
+ char *url;
+ char *path;
+ cache_provider_list *providers;
+ cache_info *info;
+ cache_request_rec *cache;
+ cache_server_conf *conf;
+ apr_bucket_brigade *out;
+
+ /* Delay initialization until we know we are handling a GET */
+ if (r->method_number != M_GET) {
+ return DECLINED;
+ }
+
+ uri = r->parsed_uri;
+ url = r->unparsed_uri;
+ path = uri.path;
+ info = NULL;
+
+ conf = (cache_server_conf *) ap_get_module_config(r->server->module_config,
+ &cache_module);
+
+ /*
+ * Which cache module (if any) should handle this request?
+ */
+ if (!(providers = ap_cache_get_providers(r, conf, path))) {
+ return DECLINED;
+ }
+
+ /* make space for the per request config */
+ cache = (cache_request_rec *) ap_get_module_config(r->request_config,
+ &cache_module);
+ if (!cache) {
+ cache = apr_pcalloc(r->pool, sizeof(cache_request_rec));
+ ap_set_module_config(r->request_config, &cache_module, cache);
+ }
+
+ /* save away the possible providers */
+ cache->providers = providers;
+
+ /*
+ * Are we allowed to serve cached info at all?
+ */
+
+ /* find certain cache controlling headers */
+ pragma = apr_table_get(r->headers_in, "Pragma");
+ auth = apr_table_get(r->headers_in, "Authorization");
+
+ /* first things first - does the request allow us to return
+ * cached information at all? If not, just decline the request.
+ *
+ * Note that there is a big difference between not being allowed
+ * to cache a request (no-store) and not being allowed to return
+ * a cached request without revalidation (max-age=0).
+ *
+ * Caching is forbidden under the following circumstances:
+ *
+ * - RFC2616 14.9.2 Cache-Control: no-store
+ * - Pragma: no-cache
+ * - Any requests requiring authorization.
+ */
+ if (conf->ignorecachecontrol == 1 && auth == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "incoming request is asking for a uncached version of "
+ "%s, but we know better and are ignoring it", url);
+ }
+ else {
+ if (ap_cache_liststr(NULL, pragma, "no-cache", NULL) ||
+ auth != NULL) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache: no-cache or authorization forbids caching "
+ "of %s", url);
+ return DECLINED;
+ }
+ }
+
+ /*
+ * Try to serve this request from the cache.
+ *
+ * If no existing cache file (DECLINED)
+ * add cache_save filter
+ * If cached file (OK)
+ * clear filter stack
+ * add cache_out filter
+ * return OK
+ */
+ rv = cache_select_url(r, url);
+ if (rv != OK) {
+ if (rv == DECLINED) {
+ if (!lookup) {
+ /* add cache_save filter to cache this request */
+ ap_add_output_filter_handle(cache_save_filter_handle, NULL, r,
+ r->connection);
+ }
+ }
+ else {
+ /* error */
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+ "cache: error returned while checking for cached "
+ "file by %s cache", cache->provider_name);
+ }
+ return DECLINED;
+ }
+
+ /* We have located a suitable cache file now. */
+ info = &(cache->handle->cache_obj->info);
+
+ if (info && info->lastmod) {
+ ap_update_mtime(r, info->lastmod);
+ }
+
+ rv = ap_meets_conditions(r);
+ if (rv != OK) {
+ /* Return cached status. */
+ return rv;
+ }
+
+ /* If we're a lookup, we can exit now instead of serving the content. */
+ if (lookup) {
+ return OK;
+ }
+
+ /* Serve up the content */
+
+ /* We are in the quick handler hook, which means that no output
+ * filters have been set. So lets run the insert_filter hook.
+ */
+ ap_run_insert_filter(r);
+ ap_add_output_filter_handle(cache_out_filter_handle, NULL,
+ r, r->connection);
+
+ /* kick off the filter stack */
+ out = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ rv = ap_pass_brigade(r->output_filters, out);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+ "cache: error returned while trying to return %s "
+ "cached data",
+ cache->provider_name);
+ return rv;
+ }
+
+ return OK;
+}
+
+/*
+ * CACHE_OUT filter
+ * ----------------
+ *
+ * Deliver cached content (headers and body) up the stack.
+ */
+static int cache_out_filter(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ request_rec *r = f->r;
+ cache_request_rec *cache;
+
+ cache = (cache_request_rec *) ap_get_module_config(r->request_config,
+ &cache_module);
+
+ if (!cache) {
+ /* user likely configured CACHE_OUT manually; they should use mod_cache
+ * configuration to do that */
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "CACHE_OUT enabled unexpectedly");
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r->server,
+ "cache: running CACHE_OUT filter");
+
+ /* restore status of cached response */
+ r->status = cache->handle->status;
+
+ /* recall_headers() was called in cache_select_url() */
+ cache->provider->recall_body(cache->handle, r->pool, bb);
+
+ /* This filter is done once it has served up its content */
+ ap_remove_output_filter(f);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r->server,
+ "cache: serving %s", r->uri);
+ return ap_pass_brigade(f->next, bb);
+}
+
+
+/*
+ * CACHE_SAVE filter
+ * -----------------
+ *
+ * Decide whether or not this content should be cached.
+ * If we decide no it should not:
+ * remove the filter from the chain
+ * If we decide yes it should:
+ * Have we already started saving the response?
+ * If we have started, pass the data to the storage manager via store_body
+ * Otherwise:
+ * Check to see if we *can* save this particular response.
+ * If we can, call cache_create_entity() and save the headers and body
+ * Finally, pass the data to the next filter (the network or whatever)
+ */
+
+static int cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in)
+{
+ int rv;
+ int date_in_errhdr = 0;
+ request_rec *r = f->r;
+ cache_request_rec *cache;
+ cache_server_conf *conf;
+ char *url = r->unparsed_uri;
+ const char *cc_in, *cc_out, *cl, *vary_out;
+ const char *exps, *lastmods, *dates, *etag;
+ apr_time_t exp, date, lastmod, now;
+ apr_off_t size;
+ cache_info *info;
+ char *reason;
+ apr_pool_t *p;
+
+ /* check first whether running this filter has any point or not */
+ /* If the user has Cache-Control: no-store from RFC 2616, don't store! */
+ cc_in = apr_table_get(r->headers_in, "Cache-Control");
+ vary_out = apr_table_get(r->headers_out, "Vary");
+ if (r->no_cache || ap_cache_liststr(NULL, cc_in, "no-store", NULL) ||
+ ap_cache_liststr(NULL, vary_out, "*", NULL)) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, in);
+ }
+
+ /* Setup cache_request_rec */
+ cache = (cache_request_rec *) ap_get_module_config(r->request_config,
+ &cache_module);
+ if (!cache) {
+ /* user likely configured CACHE_SAVE manually; they should really use
+ * mod_cache configuration to do that
+ */
+ cache = apr_pcalloc(r->pool, sizeof(cache_request_rec));
+ ap_set_module_config(r->request_config, &cache_module, cache);
+ }
+
+ reason = NULL;
+ p = r->pool;
+ /*
+ * Pass Data to Cache
+ * ------------------
+ * This section passes the brigades into the cache modules, but only
+ * if the setup section (see below) is complete.
+ */
+ if (cache->block_response) {
+ /* We've already sent down the response and EOS. So, ignore
+ * whatever comes now.
+ */
+ return APR_SUCCESS;
+ }
+
+ /* have we already run the cachability check and set up the
+ * cached file handle?
+ */
+ if (cache->in_checked) {
+ /* pass the brigades into the cache, then pass them
+ * up the filter stack
+ */
+ rv = cache->provider->store_body(cache->handle, r, in);
+ if (rv != APR_SUCCESS) {
+ ap_remove_output_filter(f);
+ }
+ return ap_pass_brigade(f->next, in);
+ }
+
+ /*
+ * Setup Data in Cache
+ * -------------------
+ * This section opens the cache entity and sets various caching
+ * parameters, and decides whether this URL should be cached at
+ * all. This section is run before the above section.
+ */
+
+ /* read expiry date; if a bad date, then leave it so the client can
+ * read it
+ */
+ exps = apr_table_get(r->err_headers_out, "Expires");
+ if (exps == NULL) {
+ exps = apr_table_get(r->headers_out, "Expires");
+ }
+ if (exps != NULL) {
+ if (APR_DATE_BAD == (exp = apr_date_parse_http(exps))) {
+ exps = NULL;
+ }
+ }
+ else {
+ exp = APR_DATE_BAD;
+ }
+
+ /* read the last-modified date; if the date is bad, then delete it */
+ lastmods = apr_table_get(r->err_headers_out, "Last-Modified");
+ if (lastmods == NULL) {
+ lastmods = apr_table_get(r->headers_out, "Last-Modified");
+ }
+ if (lastmods != NULL) {
+ if (APR_DATE_BAD == (lastmod = apr_date_parse_http(lastmods))) {
+ lastmods = NULL;
+ }
+ }
+ else {
+ lastmod = APR_DATE_BAD;
+ }
+
+ conf = (cache_server_conf *) ap_get_module_config(r->server->module_config, &cache_module);
+ /* read the etag and cache-control from the entity */
+ etag = apr_table_get(r->err_headers_out, "Etag");
+ if (etag == NULL) {
+ etag = apr_table_get(r->headers_out, "Etag");
+ }
+ cc_out = apr_table_get(r->err_headers_out, "Cache-Control");
+ if (cc_out == NULL) {
+ cc_out = apr_table_get(r->headers_out, "Cache-Control");
+ }
+
+ /*
+ * what responses should we not cache?
+ *
+ * At this point we decide based on the response headers whether it
+ * is appropriate _NOT_ to cache the data from the server. There are
+ * a whole lot of conditions that prevent us from caching this data.
+ * They are tested here one by one to be clear and unambiguous.
+ */
+ if (r->status != HTTP_OK && r->status != HTTP_NON_AUTHORITATIVE
+ && r->status != HTTP_MULTIPLE_CHOICES
+ && r->status != HTTP_MOVED_PERMANENTLY
+ && r->status != HTTP_NOT_MODIFIED) {
+ /* RFC2616 13.4 we are allowed to cache 200, 203, 206, 300, 301 or 410
+ * We don't cache 206, because we don't (yet) cache partial responses.
+ * We include 304 Not Modified here too as this is the origin server
+ * telling us to serve the cached copy.
+ */
+ reason = apr_psprintf(p, "Response status %d", r->status);
+ }
+ else if (exps != NULL && exp == APR_DATE_BAD) {
+ /* if a broken Expires header is present, don't cache it */
+ reason = apr_pstrcat(p, "Broken expires header: ", exps, NULL);
+ }
+ else if (r->args && exps == NULL) {
+ /* if query string present but no expiration time, don't cache it
+ * (RFC 2616/13.9)
+ */
+ reason = "Query string present but no expires header";
+ }
+ else if (r->status == HTTP_NOT_MODIFIED &&
+ !cache->handle && !cache->stale_handle) {
+ /* if the server said 304 Not Modified but we have no cache
+ * file - pass this untouched to the user agent, it's not for us.
+ */
+ reason = "HTTP Status 304 Not Modified";
+ }
+ else if (r->status == HTTP_OK && lastmods == NULL && etag == NULL
+ && (exps == NULL) && (conf->no_last_mod_ignore ==0)) {
+ /* 200 OK response from HTTP/1.0 and up without Last-Modified,
+ * Etag, or Expires headers.
+ */
+ /* Note: mod-include clears last_modified/expires/etags - this
+ * is why we have an optional function for a key-gen ;-)
+ */
+ reason = "No Last-Modified, Etag, or Expires headers";
+ }
+ else if (r->header_only) {
+ /* HEAD requests */
+ reason = "HTTP HEAD request";
+ }
+ else if (ap_cache_liststr(NULL, cc_out, "no-store", NULL)) {
+ /* RFC2616 14.9.2 Cache-Control: no-store response
+ * indicating do not cache, or stop now if you are
+ * trying to cache it */
+ reason = "Cache-Control: no-store present";
+ }
+ else if (ap_cache_liststr(NULL, cc_out, "private", NULL)) {
+ /* RFC2616 14.9.1 Cache-Control: private
+ * this object is marked for this user's eyes only. Behave
+ * as a tunnel.
+ */
+ reason = "Cache-Control: private present";
+ }
+ else if (apr_table_get(r->headers_in, "Authorization") != NULL
+ && !(ap_cache_liststr(NULL, cc_out, "s-maxage", NULL)
+ || ap_cache_liststr(NULL, cc_out, "must-revalidate", NULL)
+ || ap_cache_liststr(NULL, cc_out, "public", NULL))) {
+ /* RFC2616 14.8 Authorisation:
+ * if authorisation is included in the request, we don't cache,
+ * but we can cache if the following exceptions are true:
+ * 1) If Cache-Control: s-maxage is included
+ * 2) If Cache-Control: must-revalidate is included
+ * 3) If Cache-Control: public is included
+ */
+ reason = "Authorization required";
+ }
+ else if (r->no_cache) {
+ /* or we've been asked not to cache it above */
+ reason = "no_cache present";
+ }
+
+ if (reason) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache: %s not cached. Reason: %s", url, reason);
+ /* remove this object from the cache
+ * BillS Asks.. Why do we need to make this call to remove_url?
+ * leave it in for now..
+ */
+ cache_remove_url(r, url);
+
+ /* remove this filter from the chain */
+ ap_remove_output_filter(f);
+
+ /* ship the data up the stack */
+ return ap_pass_brigade(f->next, in);
+ }
+
+ /* Make it so that we don't execute this path again. */
+ cache->in_checked = 1;
+
+ /* Set the content length if known.
+ */
+ cl = apr_table_get(r->err_headers_out, "Content-Length");
+ if (cl == NULL) {
+ cl = apr_table_get(r->headers_out, "Content-Length");
+ }
+ if (cl) {
+#if 0
+ char *errp;
+ if (apr_strtoff(&size, cl, &errp, 10) || *errp || size < 0) {
+ cl = NULL; /* parse error, see next 'if' block */
+ }
+#else
+ size = apr_atoi64(cl);
+ if (size < 0) {
+ cl = NULL;
+ }
+#endif
+ }
+
+ if (!cl) {
+ /* if we don't get the content-length, see if we have all the
+ * buckets and use their length to calculate the size
+ */
+ apr_bucket *e;
+ int all_buckets_here=0;
+ int unresolved_length = 0;
+ size=0;
+ for (e = APR_BRIGADE_FIRST(in);
+ e != APR_BRIGADE_SENTINEL(in);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (APR_BUCKET_IS_EOS(e)) {
+ all_buckets_here=1;
+ break;
+ }
+ if (APR_BUCKET_IS_FLUSH(e)) {
+ unresolved_length = 1;
+ continue;
+ }
+ if (e->length == (apr_size_t)-1) {
+ break;
+ }
+ size += e->length;
+ }
+ if (!all_buckets_here) {
+ size = -1;
+ }
+ }
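+
+ /* Worked example (hypothetical brigade): heap buckets of 1000, 500 and
+ * 36 bytes followed by EOS give size = 1536; a brigade that never reaches
+ * EOS here, or that hits a bucket of unknown length first, leaves
+ * size = -1 and the storage provider has to handle an undetermined length.
+ */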
+
+ /* It's safe to cache the response.
+ *
+ * There are two possibilities at this point:
+ * - cache->handle == NULL. In this case there is no previously
+ * cached entity anywhere on the system. We must create a brand
+ * new entity and store the response in it.
+ * - cache->stale_handle != NULL. In this case there is a stale
+ * entity in the system which needs to be replaced by new
+ * content (unless the result was 304 Not Modified, which means
+ * the cached entity is actually fresh, and we should update
+ * the headers).
+ */
+
+ /* Did we have a stale cache entry that really is stale? */
+ if (cache->stale_handle) {
+ if (r->status == HTTP_NOT_MODIFIED) {
+ /* Oh, hey. It isn't that stale! Yay! */
+ cache->handle = cache->stale_handle;
+ info = &cache->handle->cache_obj->info;
+ }
+ else {
+ /* Oh, well. Toss it. */
+ cache->provider->remove_entity(cache->stale_handle);
+ /* Treat the request as if it wasn't conditional. */
+ cache->stale_handle = NULL;
+ }
+ }
+
+ /* no cache handle, create a new entity */
+ if (!cache->handle) {
+ rv = cache_create_entity(r, url, size);
+ info = apr_pcalloc(r->pool, sizeof(cache_info));
+ /* We only set info->status upon the initial creation. */
+ info->status = r->status;
+ }
+
+ if (rv != OK) {
+ /* Caching layer declined the opportunity to cache the response */
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, in);
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache: Caching url: %s", url);
+
+ /*
+ * We now want to update the cache file header information with
+ * the new date, last modified, expire and content length and write
+ * it away to our cache file. First, we determine these values from
+ * the response, using heuristics if appropriate.
+ *
+ * In addition, we make HTTP/1.1 age calculations and write them away
+ * too.
+ */
+
+ /* Read the date. Generate one if one is not supplied */
+ dates = apr_table_get(r->err_headers_out, "Date");
+ if (dates != NULL) {
+ date_in_errhdr = 1;
+ }
+ else {
+ dates = apr_table_get(r->headers_out, "Date");
+ }
+ if (dates != NULL) {
+ info->date = apr_date_parse_http(dates);
+ }
+ else {
+ info->date = APR_DATE_BAD;
+ }
+
+ now = apr_time_now();
+ if (info->date == APR_DATE_BAD) { /* No, or bad date */
+ char *dates;
+ /* no date header (or bad header)! */
+ /* add one; N.B. use the time _now_ rather than when we were checking
+ * the cache
+ */
+ if (date_in_errhdr == 1) {
+ apr_table_unset(r->err_headers_out, "Date");
+ }
+ date = now;
+ dates = apr_pcalloc(r->pool, MAX_STRING_LEN);
+ apr_rfc822_date(dates, now);
+ apr_table_set(r->headers_out, "Date", dates);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache: Added date header");
+ info->date = date;
+ }
+ else {
+ date = info->date;
+ }
+
+ /* set response_time for HTTP/1.1 age calculations */
+ info->response_time = now;
+
+ /* get the request time */
+ info->request_time = r->request_time;
+
+ /* check last-modified date */
+ if (lastmod != APR_DATE_BAD && lastmod > date) {
+ /* if it's in the future, then replace by date */
+ lastmod = date;
+ lastmods = dates;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
+ r->server,
+ "cache: Last modified is in the future, "
+ "replacing with now");
+ }
+ info->lastmod = lastmod;
+
+ /* if no expiry date then
+ * if lastmod
+ * expiry date = date + min((date - lastmod) * factor, maxexpire)
+ * else
+ * expire date = date + defaultexpire
+ */
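+ /* e.g. with the default CacheLastModifiedFactor of 0.1 (values
+ * hypothetical): a response whose Last-Modified is 10 hours older than
+ * its Date gets an expiry of date + 1 hour, the interval is clamped to
+ * CacheMaxExpire, and a response with no usable Last-Modified falls back
+ * to date + CacheDefaultExpire.
+ */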
+ if (exp == APR_DATE_BAD) {
+ /* if lastmod == date then you get 0*conf->factor which results in
+ * an expiration time of now. This causes some problems with
+ * freshness calculations, so we choose the else path...
+ */
+ if ((lastmod != APR_DATE_BAD) && (lastmod < date)) {
+ apr_time_t x = (apr_time_t) ((date - lastmod) * conf->factor);
+
+ if (x > conf->maxex) {
+ x = conf->maxex;
+ }
+ exp = date + x;
+ }
+ else {
+ exp = date + conf->defex;
+ }
+ }
+ info->expire = exp;
+
+ info->content_type = apr_pstrdup(r->pool, r->content_type);
+ info->etag = apr_pstrdup(r->pool, etag);
+ info->lastmods = apr_pstrdup(r->pool, lastmods);
+ info->filename = apr_pstrdup(r->pool, r->filename);
+
+ /*
+ * Write away header information to cache.
+ */
+ rv = cache->provider->store_headers(cache->handle, r, info);
+
+ /* Did we actually find an entity before, but it wasn't really stale? */
+ if (rv == APR_SUCCESS && cache->stale_handle) {
+ apr_bucket_brigade *bb;
+ apr_bucket *bkt;
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+
+ /* Were we initially a conditional request? */
+ if (ap_cache_request_is_conditional(cache->stale_headers)) {
+ /* FIXME: Should we now go and make sure it's really not
+ * modified since what the user thought?
+ */
+ bkt = apr_bucket_eos_create(bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, bkt);
+ }
+ else {
+ r->status = info->status;
+ cache->provider->recall_body(cache->handle, r->pool, bb);
+ }
+
+ cache->block_response = 1;
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ if (rv == APR_SUCCESS) {
+ rv = cache->provider->store_body(cache->handle, r, in);
+ }
+ if (rv != APR_SUCCESS) {
+ ap_remove_output_filter(f);
+ }
+
+ return ap_pass_brigade(f->next, in);
+}
+
+/* -------------------------------------------------------------- */
+/* Setup configurable data */
+
+static void * create_cache_config(apr_pool_t *p, server_rec *s)
+{
+ cache_server_conf *ps = apr_pcalloc(p, sizeof(cache_server_conf));
+
+ /* array of URL prefixes for which caching is enabled */
+ ps->cacheenable = apr_array_make(p, 10, sizeof(struct cache_enable));
+ /* array of URL prefixes for which caching is disabled */
+ ps->cachedisable = apr_array_make(p, 10, sizeof(struct cache_disable));
+ /* maximum time to cache a document */
+ ps->maxex = DEFAULT_CACHE_MAXEXPIRE;
+ ps->maxex_set = 0;
+ /* default time to cache a document */
+ ps->defex = DEFAULT_CACHE_EXPIRE;
+ ps->defex_set = 0;
+ /* factor used to estimate Expires date from LastModified date */
+ ps->factor = DEFAULT_CACHE_LMFACTOR;
+ ps->factor_set = 0;
+ /* default percentage to force cache completion */
+ ps->complete = DEFAULT_CACHE_COMPLETION;
+ ps->complete_set = 0;
+ ps->no_last_mod_ignore_set = 0;
+ ps->no_last_mod_ignore = 0;
+ ps->ignorecachecontrol = 0;
+ ps->ignorecachecontrol_set = 0 ;
+ /* array of headers that should not be stored in cache */
+ ps->ignore_headers = apr_array_make(p, 10, sizeof(char *));
+ ps->ignore_headers_set = CACHE_IGNORE_HEADERS_UNSET;
+ return ps;
+}
+
+static void * merge_cache_config(apr_pool_t *p, void *basev, void *overridesv)
+{
+ cache_server_conf *ps = apr_pcalloc(p, sizeof(cache_server_conf));
+ cache_server_conf *base = (cache_server_conf *) basev;
+ cache_server_conf *overrides = (cache_server_conf *) overridesv;
+
+ /* array of URL prefixes for which caching is disabled */
+ ps->cachedisable = apr_array_append(p,
+ base->cachedisable,
+ overrides->cachedisable);
+ /* array of URL prefixes for which caching is enabled */
+ ps->cacheenable = apr_array_append(p,
+ base->cacheenable,
+ overrides->cacheenable);
+ /* maximum time to cache a document */
+ ps->maxex = (overrides->maxex_set == 0) ? base->maxex : overrides->maxex;
+ /* default time to cache a document */
+ ps->defex = (overrides->defex_set == 0) ? base->defex : overrides->defex;
+ /* factor used to estimate Expires date from LastModified date */
+ ps->factor =
+ (overrides->factor_set == 0) ? base->factor : overrides->factor;
+ /* default percentage to force cache completion */
+ ps->complete =
+ (overrides->complete_set == 0) ? base->complete : overrides->complete;
+
+ ps->no_last_mod_ignore =
+ (overrides->no_last_mod_ignore_set == 0)
+ ? base->no_last_mod_ignore
+ : overrides->no_last_mod_ignore;
+ ps->ignorecachecontrol =
+ (overrides->ignorecachecontrol_set == 0)
+ ? base->ignorecachecontrol
+ : overrides->ignorecachecontrol;
+ ps->ignore_headers =
+ (overrides->ignore_headers_set == CACHE_IGNORE_HEADERS_UNSET)
+ ? base->ignore_headers
+ : overrides->ignore_headers;
+ return ps;
+}
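+
+/* Merge example (hypothetical values): if the main server sets
+ * CacheMaxExpire 172800 and a <VirtualHost> sets nothing, the vhost
+ * inherits 172800 because its maxex_set flag is still 0; the CacheEnable
+ * and CacheDisable lists, by contrast, are appended rather than replaced.
+ */
+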
+static const char *set_cache_ignore_no_last_mod(cmd_parms *parms, void *dummy,
+ int flag)
+{
+ cache_server_conf *conf;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ conf->no_last_mod_ignore = flag;
+ conf->no_last_mod_ignore_set = 1;
+ return NULL;
+
+}
+
+static const char *set_cache_ignore_cachecontrol(cmd_parms *parms,
+ void *dummy, int flag)
+{
+ cache_server_conf *conf;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ conf->ignorecachecontrol = flag;
+ conf->ignorecachecontrol_set = 1;
+ return NULL;
+}
+
+static const char *add_ignore_header(cmd_parms *parms, void *dummy,
+ const char *header)
+{
+ cache_server_conf *conf;
+ char **new;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ if (!strncasecmp(header, "None", 4)) {
+ /* if header None is listed clear array */
+ conf->ignore_headers->nelts = 0;
+ }
+ else {
+ if ((conf->ignore_headers_set == CACHE_IGNORE_HEADERS_UNSET) ||
+ (conf->ignore_headers->nelts)) {
+ /* Only add header if no "None" has been found in header list
+ * so far.
+ * (When 'None' is passed, IGNORE_HEADERS_SET && nelts == 0.)
+ */
+ new = (char **)apr_array_push(conf->ignore_headers);
+ (*new) = (char*)header;
+ }
+ }
+ conf->ignore_headers_set = CACHE_IGNORE_HEADERS_SET;
+ return NULL;
+}
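+
+/* e.g. "CacheIgnoreHeaders Set-Cookie Warning" pushes both names onto
+ * ignore_headers, while a later "CacheIgnoreHeaders None" empties the
+ * array again; once None has been seen (set flag on, nelts == 0), further
+ * names are ignored, as the test above implements.
+ */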
+
+static const char *add_cache_enable(cmd_parms *parms, void *dummy,
+ const char *type,
+ const char *url)
+{
+ cache_server_conf *conf;
+ struct cache_enable *new;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ new = apr_array_push(conf->cacheenable);
+ new->type = type;
+ new->url = url;
+ new->urllen = strlen(url);
+ return NULL;
+}
+
+static const char *add_cache_disable(cmd_parms *parms, void *dummy,
+ const char *url)
+{
+ cache_server_conf *conf;
+ struct cache_disable *new;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ new = apr_array_push(conf->cachedisable);
+ new->url = url;
+ new->urllen = strlen(url);
+ return NULL;
+}
+
+static const char *set_cache_maxex(cmd_parms *parms, void *dummy,
+ const char *arg)
+{
+ cache_server_conf *conf;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ conf->maxex = (apr_time_t) (atol(arg) * MSEC_ONE_SEC);
+ conf->maxex_set = 1;
+ return NULL;
+}
+
+static const char *set_cache_defex(cmd_parms *parms, void *dummy,
+ const char *arg)
+{
+ cache_server_conf *conf;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ conf->defex = (apr_time_t) (atol(arg) * MSEC_ONE_SEC);
+ conf->defex_set = 1;
+ return NULL;
+}
+
+static const char *set_cache_factor(cmd_parms *parms, void *dummy,
+ const char *arg)
+{
+ cache_server_conf *conf;
+ double val;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ if (sscanf(arg, "%lg", &val) != 1) {
+ return "CacheLastModifiedFactor value must be a float";
+ }
+ conf->factor = val;
+ conf->factor_set = 1;
+ return NULL;
+}
+
+static const char *set_cache_complete(cmd_parms *parms, void *dummy,
+ const char *arg)
+{
+ cache_server_conf *conf;
+ int val;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ if (sscanf(arg, "%d", &val) != 1) { /* %d: val is declared as int */
+ return "CacheForceCompletion value must be a percentage";
+ }
+ conf->complete = val;
+ conf->complete_set = 1;
+ return NULL;
+}
+
+static int cache_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ /* Retrieve the optional ap_cache_generate_key() function if another
+ * module registered one; fall back to the built-in default otherwise.
+ */
+ cache_generate_key = APR_RETRIEVE_OPTIONAL_FN(ap_cache_generate_key);
+ if (!cache_generate_key) {
+ cache_generate_key = cache_generate_key_default;
+ }
+ return OK;
+}
+
+static const command_rec cache_cmds[] =
+{
+ /* XXX
+ * Consider a new config directive that enables loading specific cache
+ * implementations (like mod_cache_mem, mod_cache_file, etc.).
+ * Rather than using a LoadModule directive, the admin would use something
+ * like CacheModule mem_cache_module | file_cache_module, etc.,
+ * which would cause the appropriate cache module to be loaded.
+ * This is more intuitive than requiring a LoadModule directive.
+ */
+
+ AP_INIT_TAKE2("CacheEnable", add_cache_enable, NULL, RSRC_CONF,
+ "A cache type and partial URL prefix below which "
+ "caching is enabled"),
+ AP_INIT_TAKE1("CacheDisable", add_cache_disable, NULL, RSRC_CONF,
+ "A partial URL prefix below which caching is disabled"),
+ AP_INIT_TAKE1("CacheMaxExpire", set_cache_maxex, NULL, RSRC_CONF,
+ "The maximum time in seconds to cache a document"),
+ AP_INIT_TAKE1("CacheDefaultExpire", set_cache_defex, NULL, RSRC_CONF,
+ "The default time in seconds to cache a document"),
+ AP_INIT_FLAG("CacheIgnoreNoLastMod", set_cache_ignore_no_last_mod, NULL,
+ RSRC_CONF,
+ "Ignore Responses where there is no Last Modified Header"),
+ AP_INIT_FLAG("CacheIgnoreCacheControl", set_cache_ignore_cachecontrol,
+ NULL,
+ RSRC_CONF,
+ "Ignore requests from the client for uncached content"),
+ AP_INIT_ITERATE("CacheIgnoreHeaders", add_ignore_header, NULL, RSRC_CONF,
+ "A space separated list of headers that should not be "
+ "stored by the cache"),
+ AP_INIT_TAKE1("CacheLastModifiedFactor", set_cache_factor, NULL, RSRC_CONF,
+ "The factor used to estimate Expires date from "
+ "LastModified date"),
+ AP_INIT_TAKE1("CacheForceCompletion", set_cache_complete, NULL, RSRC_CONF,
+ "Percentage of download to arrive for the cache to force "
+ "complete transfer"),
+ {NULL}
+};
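+
+/* A minimal configuration sketch using the directives above (values are
+ * hypothetical, and a storage module such as mod_disk_cache or
+ * mod_mem_cache must supply the actual backend):
+ *
+ *   CacheEnable disk /
+ *   CacheMaxExpire 86400
+ *   CacheDefaultExpire 3600
+ *   CacheLastModifiedFactor 0.1
+ *   CacheIgnoreHeaders Set-Cookie
+ */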
+
+static void register_hooks(apr_pool_t *p)
+{
+ /* cache initializer */
+ /* cache handler */
+ ap_hook_quick_handler(cache_url_handler, NULL, NULL, APR_HOOK_FIRST);
+ /* cache filters
+ * XXX The cache filters need to run right after the handlers and before
+ * any other filters. Consider creating AP_FTYPE_CACHE for this purpose.
+ * Make them AP_FTYPE_CONTENT for now.
+ * XXX ianhH:they should run AFTER all the other content filters.
+ */
+ cache_save_filter_handle =
+ ap_register_output_filter("CACHE_SAVE",
+ cache_save_filter,
+ NULL,
+ AP_FTYPE_CONTENT_SET-1);
+ /* CACHE_OUT must go into the filter chain before SUBREQ_CORE to
+ * handle subrequests. Decrementing the filter type by 1 ensures this
+ * happens.
+ */
+ cache_out_filter_handle =
+ ap_register_output_filter("CACHE_OUT",
+ cache_out_filter,
+ NULL,
+ AP_FTYPE_CONTENT_SET-1);
+ ap_hook_post_config(cache_post_config, NULL, NULL, APR_HOOK_REALLY_FIRST);
+}
+
+module AP_MODULE_DECLARE_DATA cache_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ create_cache_config, /* create per-server config structure */
+ merge_cache_config, /* merge per-server config structures */
+ cache_cmds, /* command apr_table_t */
+ register_hooks
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.dsp
new file mode 100644
index 00000000..8fea9982
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.dsp
@@ -0,0 +1,168 @@
+# Microsoft Developer Studio Project File - Name="mod_cache" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_cache - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_cache.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_cache.mak" CFG="mod_cache - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_cache - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_cache - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_cache - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "MOD_CACHE_EXPORTS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "CACHE_DECLARE_EXPORT" /D "MOD_CACHE_EXPORTS" /Fd"Release\mod_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cache.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_cache - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "CACHE_DECLARE_EXPORT" /Fd"Debug\mod_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cache.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_cache - Win32 Release"
+# Name "mod_cache - Win32 Debug"
+# Begin Group "Source Files"
+
+# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;hpj;bat;for;f90"
+# Begin Source File
+
+SOURCE=.\cache_cache.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_hash.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_pqueue.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_storage.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_cache.c
+# End Source File
+# End Group
+# Begin Group "Header Files"
+
+# PROP Default_Filter "h;hpp;hxx;hm;inl"
+# Begin Source File
+
+SOURCE=.\cache_cache.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_hash.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_pqueue.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_cache.h
+# End Source File
+# End Group
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_cache - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_cache.so "cache_module for Apache" ../../include/ap_release.h > .\mod_cache.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_cache - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_cache.so "cache_module for Apache" ../../include/ap_release.h > .\mod_cache.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.h b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.h
new file mode 100644
index 00000000..62298a50
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.h
@@ -0,0 +1,319 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MOD_CACHE_H
+#define MOD_CACHE_H
+
+/*
+ * Main include file for the Apache Transparent Cache
+ */
+
+#define CORE_PRIVATE
+
+#include "apr_hooks.h"
+#include "apr.h"
+#include "apr_lib.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_md5.h"
+#include "apr_pools.h"
+#include "apr_strings.h"
+#include "apr_optional.h"
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "ap_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_main.h"
+#include "http_log.h"
+#include "http_connection.h"
+#include "util_filter.h"
+#include "apr_date.h"
+#include "apr_uri.h"
+
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+#ifdef HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+
+#include "apr_atomic.h"
+
+#ifndef MAX
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#endif
+#ifndef MIN
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#endif
+
+/* default completion is 60% */
+#define DEFAULT_CACHE_COMPLETION (60)
+#define MSEC_ONE_DAY ((apr_time_t)(86400*APR_USEC_PER_SEC)) /* one day, in microseconds */
+#define MSEC_ONE_HR ((apr_time_t)(3600*APR_USEC_PER_SEC)) /* one hour, in microseconds */
+#define MSEC_ONE_MIN ((apr_time_t)(60*APR_USEC_PER_SEC)) /* one minute, in microseconds */
+#define MSEC_ONE_SEC ((apr_time_t)(APR_USEC_PER_SEC)) /* one second, in microseconds */
+#define DEFAULT_CACHE_MAXEXPIRE MSEC_ONE_DAY
+#define DEFAULT_CACHE_EXPIRE MSEC_ONE_HR
+#define DEFAULT_CACHE_LMFACTOR (0.1)
+
+/* Create a set of CACHE_DECLARE(type), CACHE_DECLARE_NONSTD(type) and
+ * CACHE_DECLARE_DATA with appropriate export and import tags for the platform
+ */
+#if !defined(WIN32)
+#define CACHE_DECLARE(type) type
+#define CACHE_DECLARE_NONSTD(type) type
+#define CACHE_DECLARE_DATA
+#elif defined(CACHE_DECLARE_STATIC)
+#define CACHE_DECLARE(type) type __stdcall
+#define CACHE_DECLARE_NONSTD(type) type
+#define CACHE_DECLARE_DATA
+#elif defined(CACHE_DECLARE_EXPORT)
+#define CACHE_DECLARE(type) __declspec(dllexport) type __stdcall
+#define CACHE_DECLARE_NONSTD(type) __declspec(dllexport) type
+#define CACHE_DECLARE_DATA __declspec(dllexport)
+#else
+#define CACHE_DECLARE(type) __declspec(dllimport) type __stdcall
+#define CACHE_DECLARE_NONSTD(type) __declspec(dllimport) type
+#define CACHE_DECLARE_DATA __declspec(dllimport)
+#endif
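+
+/* For example, when CACHE_DECLARE_EXPORT is defined (as in the Win32
+ * mod_cache project above), a prototype such as
+ *     CACHE_DECLARE(apr_time_t) ap_cache_hex2usec(const char *x);
+ * expands to
+ *     __declspec(dllexport) apr_time_t __stdcall ap_cache_hex2usec(const char *x);
+ * while on non-Windows builds it is just the plain declaration.
+ */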
+
+struct cache_enable {
+ const char *url;
+ const char *type;
+ apr_size_t urllen;
+};
+
+struct cache_disable {
+ const char *url;
+ apr_size_t urllen;
+};
+
+/* static information about the local cache */
+typedef struct {
+ apr_array_header_t *cacheenable; /* URLs to cache */
+ apr_array_header_t *cachedisable; /* URLs not to cache */
+ apr_time_t maxex; /* Maximum time to keep cached files in msecs */
+ int maxex_set;
+ apr_time_t defex; /* default time to keep cached file in msecs */
+ int defex_set;
+ double factor; /* factor for estimating expires date */
+ int factor_set;
+ int complete; /* Force cache completion after this point */
+ int complete_set;
+ /** ignore the last-modified header when deciding to cache this request */
+ int no_last_mod_ignore_set;
+ int no_last_mod_ignore;
+ /** ignore client's requests for uncached responses */
+ int ignorecachecontrol;
+ int ignorecachecontrol_set;
+ /** store the headers that should not be stored in the cache */
+ apr_array_header_t *ignore_headers;
+ /* flag if CacheIgnoreHeader has been set */
+ #define CACHE_IGNORE_HEADERS_SET 1
+ #define CACHE_IGNORE_HEADERS_UNSET 0
+ int ignore_headers_set;
+} cache_server_conf;
+
+/* cache info information */
+typedef struct cache_info cache_info;
+struct cache_info {
+ int status;
+ char *content_type;
+ char *etag;
+ char *lastmods; /* last modified of cache entity */
+ char *filename;
+ apr_time_t date;
+ apr_time_t lastmod;
+ char lastmod_str[APR_RFC822_DATE_LEN];
+ apr_time_t expire;
+ apr_time_t request_time;
+ apr_time_t response_time;
+ apr_size_t len;
+ apr_time_t ims; /* If-Modified-Since header value */
+ apr_time_t ius; /* If-Unmodified-Since header value */
+ const char *im; /* If-Match header value */
+ const char *inm; /* If-None-Match header value */
+};
+
+/* cache handle information */
+
+/* XXX TODO On the next structure change/MMN bump,
+ * count must become an apr_off_t, representing
+ * the potential size of disk cached objects.
+ * Then dig for
+ * "XXX Bad Temporary Cast - see cache_object_t notes"
+ */
+typedef struct cache_object cache_object_t;
+struct cache_object {
+ char *key;
+ cache_object_t *next;
+ cache_info info;
+ void *vobj; /* Opaque portion (specific to the cache implementation) of the cache object */
+ apr_size_t count; /* Number of body bytes written to the cache so far */
+ int complete;
+ apr_atomic_t refcount;
+ apr_size_t cleanup;
+};
+
+typedef struct cache_handle cache_handle_t;
+
+#define CACHE_PROVIDER_GROUP "cache"
+
+typedef struct {
+ int (*remove_entity) (cache_handle_t *h);
+ apr_status_t (*store_headers)(cache_handle_t *h, request_rec *r, cache_info *i);
+ apr_status_t (*store_body)(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b);
+ apr_status_t (*recall_headers) (cache_handle_t *h, request_rec *r);
+ apr_status_t (*recall_body) (cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
+ int (*create_entity) (cache_handle_t *h, request_rec *r,
+ const char *urlkey, apr_off_t len);
+ int (*open_entity) (cache_handle_t *h, request_rec *r,
+ const char *urlkey);
+ int (*remove_url) (const char *urlkey);
+} cache_provider;
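+
+/* A storage backend fills in this table with its own callbacks and makes
+ * it available under CACHE_PROVIDER_GROUP; a sketch with hypothetical
+ * function names:
+ *
+ *   static const cache_provider disk_cache_provider = {
+ *       &remove_entity, &store_headers, &store_body, &recall_headers,
+ *       &recall_body, &create_entity, &open_entity, &remove_url
+ *   };
+ */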
+
+/* A linked-list of cache providers. */
+typedef struct cache_provider_list cache_provider_list;
+
+struct cache_provider_list {
+ const char *provider_name;
+ const cache_provider *provider;
+ cache_provider_list *next;
+};
+
+struct cache_handle {
+ cache_object_t *cache_obj;
+ apr_table_t *req_hdrs; /* cached request headers */
+ apr_table_t *resp_hdrs; /* cached response headers */
+ apr_table_t *resp_err_hdrs; /* cached response err headers */
+ const char *content_type; /* cached content type */
+ int status; /* cached status */
+};
+
+/* per request cache information */
+typedef struct {
+ cache_provider_list *providers; /* possible cache providers */
+ const cache_provider *provider; /* current cache provider */
+ const char *provider_name; /* current cache provider name */
+ int fresh; /* is the entity fresh? */
+ cache_handle_t *handle; /* current cache handle */
+ cache_handle_t *stale_handle; /* stale cache handle */
+ apr_table_t *stale_headers; /* original request headers. */
+ int in_checked; /* CACHE_SAVE must cache the entity */
+ int block_response; /* CACHE_SAVE must block response. */
+ apr_bucket_brigade *saved_brigade; /* copy of partial response */
+ apr_off_t saved_size; /* length of saved_brigade */
+ apr_time_t exp; /* expiration */
+ apr_time_t lastmod; /* last-modified time */
+ cache_info *info; /* current cache info */
+} cache_request_rec;
+
+
+/* cache_util.c */
+/* do a HTTP/1.1 age calculation */
+CACHE_DECLARE(apr_time_t) ap_cache_current_age(cache_info *info, const apr_time_t age_value,
+ apr_time_t now);
+
+/**
+ * Check the freshness of the cache object per RFC2616 section 13.2 (Expiration Model)
+ * @param h cache_handle_t
+ * @param r request_rec
+ * @return 0 ==> cache object is stale, 1 ==> cache object is fresh
+ */
+CACHE_DECLARE(int) ap_cache_check_freshness(cache_handle_t *h, request_rec *r);
+CACHE_DECLARE(apr_time_t) ap_cache_hex2usec(const char *x);
+CACHE_DECLARE(void) ap_cache_usec2hex(apr_time_t j, char *y);
+CACHE_DECLARE(char *) generate_name(apr_pool_t *p, int dirlevels,
+ int dirlength,
+ const char *name);
+CACHE_DECLARE(int) ap_cache_request_is_conditional(apr_table_t *table);
+CACHE_DECLARE(cache_provider_list *)ap_cache_get_providers(request_rec *r, cache_server_conf *conf, const char *url);
+CACHE_DECLARE(int) ap_cache_liststr(apr_pool_t *p, const char *list,
+ const char *key, char **val);
+CACHE_DECLARE(const char *)ap_cache_tokstr(apr_pool_t *p, const char *list, const char **str);
+
+/* Create a new table consisting of those elements from a request_rec's
+ * headers_out that are allowed to be stored in a cache
+ */
+CACHE_DECLARE(apr_table_t *)ap_cache_cacheable_hdrs_out(apr_pool_t *pool,
+ apr_table_t *t,
+ server_rec *s);
+
+/**
+ * cache_storage.c
+ */
+int cache_remove_url(request_rec *r, char *url);
+int cache_create_entity(request_rec *r, char *url, apr_off_t size);
+int cache_select_url(request_rec *r, char *url);
+apr_status_t cache_generate_key_default( request_rec *r, apr_pool_t*p, char**key );
+/**
+ * create a key for the cache based on the request record
+ * this is the 'default' version, which can be overridden by an optional function
+ */
+const char* cache_create_key( request_rec*r );
+
+/*
+apr_status_t cache_store_entity_headers(cache_handle_t *h, request_rec *r, cache_info *info);
+apr_status_t cache_store_entity_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *bb);
+
+apr_status_t cache_recall_entity_headers(cache_handle_t *h, request_rec *r);
+apr_status_t cache_recall_entity_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
+*/
+
+/* hooks */
+
+/* Create a set of CACHE_DECLARE(type), CACHE_DECLARE_NONSTD(type) and
+ * CACHE_DECLARE_DATA with appropriate export and import tags for the platform
+ */
+#if !defined(WIN32)
+#define CACHE_DECLARE(type) type
+#define CACHE_DECLARE_NONSTD(type) type
+#define CACHE_DECLARE_DATA
+#elif defined(CACHE_DECLARE_STATIC)
+#define CACHE_DECLARE(type) type __stdcall
+#define CACHE_DECLARE_NONSTD(type) type
+#define CACHE_DECLARE_DATA
+#elif defined(CACHE_DECLARE_EXPORT)
+#define CACHE_DECLARE(type) __declspec(dllexport) type __stdcall
+#define CACHE_DECLARE_NONSTD(type) __declspec(dllexport) type
+#define CACHE_DECLARE_DATA __declspec(dllexport)
+#else
+#define CACHE_DECLARE(type) __declspec(dllimport) type __stdcall
+#define CACHE_DECLARE_NONSTD(type) __declspec(dllimport) type
+#define CACHE_DECLARE_DATA __declspec(dllimport)
+#endif
+
+APR_DECLARE_OPTIONAL_FN(apr_status_t,
+ ap_cache_generate_key,
+ (request_rec *r, apr_pool_t*p, char**key ));
+
+
+#endif /*MOD_CACHE_H*/
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.imp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.imp
new file mode 100644
index 00000000..6bf4db08
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_cache.imp
@@ -0,0 +1,10 @@
+ (MODCACHE)
+ ap_cache_request_is_conditional,
+ ap_cache_get_providers,
+ ap_cache_liststr,
+ ap_cache_tokstr,
+ ap_cache_hex2usec,
+ ap_cache_usec2hex,
+ ap_cache_cacheable_hdrs_out,
+ generate_name
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter.c
new file mode 100644
index 00000000..657c174b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter.c
@@ -0,0 +1,137 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "apr_buckets.h"
+#include "apr_general.h"
+#include "apr_lib.h"
+#include "util_filter.h"
+#include "http_request.h"
+
+#include <ctype.h>
+
+static const char s_szCaseFilterName[]="CaseFilter";
+module AP_MODULE_DECLARE_DATA case_filter_module;
+
+typedef struct
+ {
+ int bEnabled;
+ } CaseFilterConfig;
+
+static void *CaseFilterCreateServerConfig(apr_pool_t *p,server_rec *s)
+ {
+ CaseFilterConfig *pConfig=apr_pcalloc(p,sizeof *pConfig);
+
+ pConfig->bEnabled=0;
+
+ return pConfig;
+ }
+
+static void CaseFilterInsertFilter(request_rec *r)
+ {
+ CaseFilterConfig *pConfig=ap_get_module_config(r->server->module_config,
+ &case_filter_module);
+
+ if(!pConfig->bEnabled)
+ return;
+
+ ap_add_output_filter(s_szCaseFilterName,NULL,r,r->connection);
+ }
+
+static apr_status_t CaseFilterOutFilter(ap_filter_t *f,
+ apr_bucket_brigade *pbbIn)
+ {
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ apr_bucket *pbktIn;
+ apr_bucket_brigade *pbbOut;
+
+ pbbOut=apr_brigade_create(r->pool, c->bucket_alloc);
+ APR_BRIGADE_FOREACH(pbktIn,pbbIn)
+ {
+ const char *data;
+ apr_size_t len;
+ char *buf;
+ apr_size_t n;
+ apr_bucket *pbktOut;
+
+ if(APR_BUCKET_IS_EOS(pbktIn))
+ {
+ apr_bucket *pbktEOS=apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(pbbOut,pbktEOS);
+ continue;
+ }
+
+ /* read */
+ apr_bucket_read(pbktIn,&data,&len,APR_BLOCK_READ);
+
+ /* write */
+ buf = apr_bucket_alloc(len, c->bucket_alloc);
+ for(n=0 ; n < len ; ++n)
+ buf[n] = apr_toupper(data[n]);
+
+ pbktOut = apr_bucket_heap_create(buf, len, apr_bucket_free,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(pbbOut,pbktOut);
+ }
+
+ /* Q: is there any advantage to passing a brigade for each bucket?
+ * A: obviously, it can cut down server resource consumption; if this
+ * experimental module were fed a 4MB file, it would otherwise be using
+ * 8MB for the 'read' buckets and the 'write' buckets.
+ *
+ * Note it is more efficient to consume (destroy) each bucket as it's
+ * processed above than to do a single cleanup down here. In any case,
+ * don't let our caller pass the same buckets to us twice.
+ */
+ apr_brigade_cleanup(pbbIn);
+ return ap_pass_brigade(f->next,pbbOut);
+ }
+
+static const char *CaseFilterEnable(cmd_parms *cmd, void *dummy, int arg)
+ {
+ CaseFilterConfig *pConfig=ap_get_module_config(cmd->server->module_config,
+ &case_filter_module);
+ pConfig->bEnabled=arg;
+
+ return NULL;
+ }
+
+static const command_rec CaseFilterCmds[] =
+ {
+ AP_INIT_FLAG("CaseFilter", CaseFilterEnable, NULL, RSRC_CONF,
+ "Run a case filter on this host"),
+ { NULL }
+ };
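+
+/* Usage sketch: with this module loaded, "CaseFilter On" in a server or
+ * virtual host context upper-cases every response body served from that
+ * host (hypothetical test setup, useful only for demonstrating output
+ * filters).
+ */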
+
+static void CaseFilterRegisterHooks(apr_pool_t *p)
+ {
+ ap_hook_insert_filter(CaseFilterInsertFilter,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_register_output_filter(s_szCaseFilterName,CaseFilterOutFilter,NULL,
+ AP_FTYPE_RESOURCE);
+ }
+
+module AP_MODULE_DECLARE_DATA case_filter_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL,
+ NULL,
+ CaseFilterCreateServerConfig,
+ NULL,
+ CaseFilterCmds,
+ CaseFilterRegisterHooks
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter_in.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter_in.c
new file mode 100644
index 00000000..9cac660d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_case_filter_in.c
@@ -0,0 +1,160 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * An example input filter - this converts input to upper case. Note that,
+ * because of where in request processing it gets inserted, it does NOT
+ * convert request headers.
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "apr_buckets.h"
+#include "apr_general.h"
+#include "apr_lib.h"
+#include "util_filter.h"
+#include "http_request.h"
+
+#include <ctype.h>
+
+static const char s_szCaseFilterName[] = "CaseFilterIn";
+module AP_MODULE_DECLARE_DATA case_filter_in_module;
+
+typedef struct
+{
+ int bEnabled;
+} CaseFilterInConfig;
+
+typedef struct
+{
+ apr_bucket_brigade *pbbTmp;
+} CaseFilterInContext;
+
+static void *CaseFilterInCreateServerConfig(apr_pool_t *p, server_rec *s)
+{
+ CaseFilterInConfig *pConfig = apr_pcalloc(p, sizeof *pConfig);
+
+ pConfig->bEnabled = 0;
+
+ return pConfig;
+}
+
+static void CaseFilterInInsertFilter(request_rec *r)
+{
+ CaseFilterInConfig *pConfig=ap_get_module_config(r->server->module_config,
+ &case_filter_in_module);
+ if(!pConfig->bEnabled)
+ return;
+
+ ap_add_input_filter(s_szCaseFilterName,NULL,r,r->connection);
+}
+
+static apr_status_t CaseFilterInFilter(ap_filter_t *f,
+ apr_bucket_brigade *pbbOut,
+ ap_input_mode_t eMode,
+ apr_read_type_e eBlock,
+ apr_off_t nBytes)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ CaseFilterInContext *pCtx;
+ apr_status_t ret;
+
+ if (!(pCtx = f->ctx)) {
+ f->ctx = pCtx = apr_palloc(r->pool, sizeof *pCtx);
+ pCtx->pbbTmp = apr_brigade_create(r->pool, c->bucket_alloc);
+ }
+
+ if (APR_BRIGADE_EMPTY(pCtx->pbbTmp)) {
+ ret = ap_get_brigade(f->next, pCtx->pbbTmp, eMode, eBlock, nBytes);
+
+ if (eMode == AP_MODE_EATCRLF || ret != APR_SUCCESS)
+ return ret;
+ }
+
+ while(!APR_BRIGADE_EMPTY(pCtx->pbbTmp)) {
+ apr_bucket *pbktIn = APR_BRIGADE_FIRST(pCtx->pbbTmp);
+ apr_bucket *pbktOut;
+ const char *data;
+ apr_size_t len;
+ char *buf;
+ int n;
+
+ /* It is tempting to do this...
+ * APR_BUCKET_REMOVE(pB);
+ * APR_BRIGADE_INSERT_TAIL(pbbOut,pB);
+ * and change the case of the bucket data, but that would be wrong
+ * for a file or socket buffer, for example...
+ */
+
+ if(APR_BUCKET_IS_EOS(pbktIn)) {
+ APR_BUCKET_REMOVE(pbktIn);
+ APR_BRIGADE_INSERT_TAIL(pbbOut, pbktIn);
+ break;
+ }
+
+ ret=apr_bucket_read(pbktIn, &data, &len, eBlock);
+ if(ret != APR_SUCCESS)
+ return ret;
+
+ buf = malloc(len);
+ for(n=0 ; n < len ; ++n)
+ buf[n] = apr_toupper(data[n]);
+
+ pbktOut = apr_bucket_heap_create(buf, len, 0, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut);
+ apr_bucket_delete(pbktIn);
+ }
+
+ return APR_SUCCESS;
+}
+
+
+static const char *CaseFilterInEnable(cmd_parms *cmd, void *dummy, int arg)
+{
+ CaseFilterInConfig *pConfig
+ = ap_get_module_config(cmd->server->module_config,
+ &case_filter_in_module);
+ pConfig->bEnabled=arg;
+
+ return NULL;
+}
+
+static const command_rec CaseFilterInCmds[] =
+{
+ AP_INIT_FLAG("CaseFilterIn", CaseFilterInEnable, NULL, RSRC_CONF,
+ "Run an input case filter on this host"),
+ { NULL }
+};
+
+
+static void CaseFilterInRegisterHooks(apr_pool_t *p)
+{
+ ap_hook_insert_filter(CaseFilterInInsertFilter, NULL, NULL,
+ APR_HOOK_MIDDLE);
+ ap_register_input_filter(s_szCaseFilterName, CaseFilterInFilter, NULL,
+ AP_FTYPE_RESOURCE);
+}
+
+module AP_MODULE_DECLARE_DATA case_filter_in_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL,
+ NULL,
+ CaseFilterInCreateServerConfig,
+ NULL,
+ CaseFilterInCmds,
+ CaseFilterInRegisterHooks
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.c
new file mode 100644
index 00000000..a39261da
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.c
@@ -0,0 +1,1082 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * simple hokey charset recoding configuration module
+ *
+ * See mod_ebcdic and mod_charset for more thought-out examples. This
+ * one is just so Jeff can learn how a module works and experiment with
+ * basic character set recoding configuration.
+ *
+ * !!!This is an extremely cheap ripoff of mod_charset.c from Russian Apache!!!
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#define CORE_PRIVATE
+#include "http_core.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "util_charset.h"
+#include "apr_buckets.h"
+#include "util_filter.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+#include "apr_xlate.h"
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#define OUTPUT_XLATE_BUF_SIZE (16*1024) /* size of translation buffer used on output */
+#define INPUT_XLATE_BUF_SIZE (8*1024) /* size of translation buffer used on input */
+
+#define XLATE_MIN_BUFF_LEFT 128 /* flush once there is no more than this much
+ * space left in the translation buffer
+ */
+
+#define FATTEST_CHAR 8 /* we don't handle chars wider than this that straddle
+ * two buckets
+ */
+
+/* extended error status codes; this is used in addition to an apr_status_t to
+ * track errors in the translation filter
+ */
+typedef enum {
+ EES_INIT = 0, /* no error info yet; value must be 0 for easy init */
+ EES_LIMIT, /* built-in restriction encountered */
+ EES_INCOMPLETE_CHAR, /* incomplete multi-byte char at end of content */
+ EES_BUCKET_READ,
+ EES_DOWNSTREAM, /* something bad happened in a filter below xlate */
+ EES_BAD_INPUT /* input data invalid */
+} ees_t;
+
+/* registered name of the output translation filter */
+#define XLATEOUT_FILTER_NAME "XLATEOUT"
+/* registered name of input translation filter */
+#define XLATEIN_FILTER_NAME "XLATEIN"
+
+typedef struct charset_dir_t {
+ /** debug level; -1 means uninitialized, 0 means no debug */
+ int debug;
+ const char *charset_source; /* source encoding */
+ const char *charset_default; /* how to ship on wire */
+ /** module does ap_add_*_filter()? */
+ enum {IA_INIT, IA_IMPADD, IA_NOIMPADD} implicit_add;
+} charset_dir_t;
+
+/* charset_filter_ctx_t is created for each filter instance; because the same
+ * filter code is used for translating in both directions, we need this context
+ * data to tell the filter which translation handle to use; it also can hold a
+ * character which was split between buckets
+ */
+typedef struct charset_filter_ctx_t {
+ apr_xlate_t *xlate;
+ charset_dir_t *dc;
+ ees_t ees; /* extended error status */
+ apr_size_t saved;
+ char buf[FATTEST_CHAR]; /* we want to be able to build a complete char here */
+ int ran; /* has filter instance run before? */
+ int noop; /* should we pass brigades through unchanged? */
+ char *tmp; /* buffer for input filtering */
+ apr_bucket_brigade *bb; /* input buckets we couldn't finish translating */
+} charset_filter_ctx_t;
+
+/* charset_req_t is available via r->request_config if any translation is
+ * being performed
+ */
+typedef struct charset_req_t {
+ charset_dir_t *dc;
+ charset_filter_ctx_t *output_ctx, *input_ctx;
+} charset_req_t;
+
+/* debug level definitions */
+#define DBGLVL_GORY 9 /* gory details */
+#define DBGLVL_FLOW 4 /* enough messages to see what happens on
+ * each request */
+#define DBGLVL_PMC 2 /* messages about possible misconfiguration */
+
+module AP_MODULE_DECLARE_DATA charset_lite_module;
+
+static void *create_charset_dir_conf(apr_pool_t *p,char *dummy)
+{
+ charset_dir_t *dc = (charset_dir_t *)apr_pcalloc(p,sizeof(charset_dir_t));
+
+ dc->debug = -1;
+ return dc;
+}
+
+static void *merge_charset_dir_conf(apr_pool_t *p, void *basev, void *overridesv)
+{
+ charset_dir_t *a = (charset_dir_t *)apr_pcalloc (p, sizeof(charset_dir_t));
+ charset_dir_t *base = (charset_dir_t *)basev,
+ *over = (charset_dir_t *)overridesv;
+
+ /* If it is defined in the current container, use it. Otherwise, use the one
+ * from the enclosing container.
+ */
+
+ a->debug =
+ over->debug != -1 ? over->debug : base->debug;
+ a->charset_default =
+ over->charset_default ? over->charset_default : base->charset_default;
+ a->charset_source =
+ over->charset_source ? over->charset_source : base->charset_source;
+ a->implicit_add =
+ over->implicit_add != IA_INIT ? over->implicit_add : base->implicit_add;
+ return a;
+}
+
+/* CharsetSourceEnc charset
+ */
+static const char *add_charset_source(cmd_parms *cmd, void *in_dc,
+ const char *name)
+{
+ charset_dir_t *dc = in_dc;
+
+ dc->charset_source = name;
+ return NULL;
+}
+
+/* CharsetDefault charset
+ */
+static const char *add_charset_default(cmd_parms *cmd, void *in_dc,
+ const char *name)
+{
+ charset_dir_t *dc = in_dc;
+
+ dc->charset_default = name;
+ return NULL;
+}
+
+/* CharsetOptions optionflag...
+ */
+static const char *add_charset_options(cmd_parms *cmd, void *in_dc,
+ const char *flag)
+{
+ charset_dir_t *dc = in_dc;
+
+ if (!strcasecmp(flag, "ImplicitAdd")) {
+ dc->implicit_add = IA_IMPADD;
+ }
+ else if (!strcasecmp(flag, "NoImplicitAdd")) {
+ dc->implicit_add = IA_NOIMPADD;
+ }
+ else if (!strncasecmp(flag, "DebugLevel=", 11)) {
+ dc->debug = atoi(flag + 11);
+ }
+ else {
+ return apr_pstrcat(cmd->temp_pool,
+ "Invalid CharsetOptions option: ",
+ flag,
+ NULL);
+ }
+
+ return NULL;
+}
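+
+/* Example directory configuration tying the three directives together
+ * (charset names are hypothetical and must be understood by the
+ * underlying apr_xlate/iconv implementation):
+ *
+ *   CharsetSourceEnc  ISO-8859-1
+ *   CharsetDefault    UTF-8
+ *   CharsetOptions    ImplicitAdd DebugLevel=4
+ */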
+
+/* find_code_page() is a fixup hook that decides if translation should be
+ * enabled; if so, it sets up request data for use by the filter registration
+ * hook so that it knows what to do
+ */
+static int find_code_page(request_rec *r)
+{
+ charset_dir_t *dc = ap_get_module_config(r->per_dir_config,
+ &charset_lite_module);
+ charset_req_t *reqinfo;
+ charset_filter_ctx_t *input_ctx, *output_ctx;
+ apr_status_t rv;
+ const char *mime_type;
+
+ if (dc->debug >= DBGLVL_FLOW) {
+ ap_log_rerror(APLOG_MARK,APLOG_DEBUG, 0, r,
+ "uri: %s file: %s method: %d "
+ "imt: %s flags: %s%s%s %s->%s",
+ r->uri, r->filename, r->method_number,
+ r->content_type ? r->content_type : "(unknown)",
+ r->main ? "S" : "", /* S if subrequest */
+ r->prev ? "R" : "", /* R if redirect */
+ r->proxyreq ? "P" : "", /* P if proxy */
+ dc->charset_source, dc->charset_default);
+ }
+
+ /* If we don't have a full directory configuration, bail out.
+ */
+ if (!dc->charset_source || !dc->charset_default) {
+ if (dc->debug >= DBGLVL_PMC) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "incomplete configuration: src %s, dst %s",
+ dc->charset_source ? dc->charset_source : "unspecified",
+ dc->charset_default ? dc->charset_default : "unspecified");
+ }
+ return DECLINED;
+ }
+
+ /* catch proxy requests */
+ if (r->proxyreq) return DECLINED;
+ /* mod_rewrite indicators */
+ if (!strncmp(r->filename, "redirect:", 9)) return DECLINED;
+ if (!strncmp(r->filename, "gone:", 5)) return DECLINED;
+ if (!strncmp(r->filename, "passthrough:", 12)) return DECLINED;
+ if (!strncmp(r->filename, "forbidden:", 10)) return DECLINED;
+
+ mime_type = r->content_type ? r->content_type : ap_default_type(r);
+
+ /* If mime type isn't text or message, bail out.
+ */
+
+/* XXX When we handle translation of the request body, watch out here as
+ * 1.3 allowed additional mime types: multipart and
+ * application/x-www-form-urlencoded
+ */
+
+ if (strncasecmp(mime_type, "text/", 5) &&
+#if APR_CHARSET_EBCDIC || AP_WANT_DIR_TRANSLATION
+ /* On an EBCDIC machine, be willing to translate mod_autoindex-
+ * generated output. Otherwise, it doesn't look too cool.
+ *
+ * XXX This isn't a perfect fix because this doesn't trigger us
+ * to convert from the charset of the source code to ASCII. The
+ * general solution seems to be to allow a generator to set an
+ * indicator in the r specifying that the body is coded in the
+ * implementation character set (i.e., the charset of the source
+ * code). This would get several different types of documents
+ * translated properly: mod_autoindex output, mod_status output,
+ * mod_info output, hard-coded error documents, etc.
+ */
+ strcmp(mime_type, DIR_MAGIC_TYPE) &&
+#endif
+ strncasecmp(mime_type, "message/", 8)) {
+ if (dc->debug >= DBGLVL_GORY) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "mime type is %s; no translation selected",
+ mime_type);
+ }
+ return DECLINED;
+ }
+
+ if (dc->debug >= DBGLVL_GORY) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "charset_source: %s charset_default: %s",
+ dc && dc->charset_source ? dc->charset_source : "(none)",
+ dc && dc->charset_default ? dc->charset_default : "(none)");
+ }
+
+ /* Get storage for the request data and the output filter context.
+ * We rarely need the input filter context, so allocate that separately.
+ */
+ reqinfo = (charset_req_t *)apr_pcalloc(r->pool,
+ sizeof(charset_req_t) +
+ sizeof(charset_filter_ctx_t));
+ output_ctx = (charset_filter_ctx_t *)(reqinfo + 1);
+
+ reqinfo->dc = dc;
+ output_ctx->dc = dc;
+ ap_set_module_config(r->request_config, &charset_lite_module, reqinfo);
+
+ reqinfo->output_ctx = output_ctx;
+ rv = apr_xlate_open(&output_ctx->xlate,
+ dc->charset_default, dc->charset_source, r->pool);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "can't open translation %s->%s",
+ dc->charset_source, dc->charset_default);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ switch (r->method_number) {
+ case M_PUT:
+ case M_POST:
+ /* Set up input translation. Note: A request body can be included
+ * with the OPTIONS method, but for now we don't set up translation
+ * of it.
+ */
+ input_ctx = apr_pcalloc(r->pool, sizeof(charset_filter_ctx_t));
+ input_ctx->bb = apr_brigade_create(r->pool,
+ r->connection->bucket_alloc);
+ input_ctx->tmp = apr_palloc(r->pool, INPUT_XLATE_BUF_SIZE);
+ input_ctx->dc = dc;
+ reqinfo->input_ctx = input_ctx;
+ rv = apr_xlate_open(&input_ctx->xlate, dc->charset_source,
+ dc->charset_default, r->pool);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "can't open translation %s->%s",
+ dc->charset_default, dc->charset_source);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+
+ return DECLINED;
+}
+
+static int configured_in_list(request_rec *r, const char *filter_name,
+ struct ap_filter_t *filter_list)
+{
+ struct ap_filter_t *filter = filter_list;
+
+ while (filter) {
+ if (!strcasecmp(filter_name, filter->frec->name)) {
+ return 1;
+ }
+ filter = filter->next;
+ }
+ return 0;
+}
+
+static int configured_on_input(request_rec *r, const char *filter_name)
+{
+ return configured_in_list(r, filter_name, r->input_filters);
+}
+
+static int configured_on_output(request_rec *r, const char *filter_name)
+{
+ return configured_in_list(r, filter_name, r->output_filters);
+}
+
+/* xlate_insert_filter() is a filter hook which decides whether or not
+ * to insert a translation filter for the current request.
+ */
+static void xlate_insert_filter(request_rec *r)
+{
+ /* Hey... don't be so quick to use reqinfo->dc here; reqinfo may be NULL */
+ charset_req_t *reqinfo = ap_get_module_config(r->request_config,
+ &charset_lite_module);
+ charset_dir_t *dc = ap_get_module_config(r->per_dir_config,
+ &charset_lite_module);
+
+ if (reqinfo) {
+ if (reqinfo->output_ctx && !configured_on_output(r, XLATEOUT_FILTER_NAME)) {
+ ap_add_output_filter(XLATEOUT_FILTER_NAME, reqinfo->output_ctx, r,
+ r->connection);
+ }
+ else if (dc->debug >= DBGLVL_FLOW) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "xlate output filter not added implicitly because %s",
+ !reqinfo->output_ctx ?
+ "no output configuration available" :
+ "another module added the filter");
+ }
+
+ if (reqinfo->input_ctx && !configured_on_input(r, XLATEIN_FILTER_NAME)) {
+ ap_add_input_filter(XLATEIN_FILTER_NAME, reqinfo->input_ctx, r,
+ r->connection);
+ }
+ else if (dc->debug >= DBGLVL_FLOW) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "xlate input filter not added implicitly because %s",
+ !reqinfo->input_ctx ?
+ "no input configuration available" :
+ "another module added the filter");
+ }
+ }
+}
+
+/* stuff that sucks that I know of:
+ *
+ * bucket handling:
+ * why create an eos bucket when we see it come down the stream? just send the one
+ * passed as input... news flash: this will be fixed when xlate_out_filter() starts
+ * using the more generic xlate_brigade()
+ *
+ * translation mechanics:
+ * we don't handle characters that straddle more than two buckets; an error
+ * will be generated
+ */
+
+/* send_downstream() is passed the translated data; it puts it in a single-
+ * bucket brigade and passes the brigade to the next filter
+ */
+static apr_status_t send_downstream(ap_filter_t *f, const char *tmp, apr_size_t len)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ charset_filter_ctx_t *ctx = f->ctx;
+ apr_status_t rv;
+
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ b = apr_bucket_transient_create(tmp, len, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ rv = ap_pass_brigade(f->next, bb);
+ if (rv != APR_SUCCESS) {
+ ctx->ees = EES_DOWNSTREAM;
+ }
+ return rv;
+}
+
+static apr_status_t send_eos(ap_filter_t *f)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ charset_filter_ctx_t *ctx = f->ctx;
+ apr_status_t rv;
+
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ rv = ap_pass_brigade(f->next, bb);
+ if (rv != APR_SUCCESS) {
+ ctx->ees = EES_DOWNSTREAM;
+ }
+ return rv;
+}
+
+static apr_status_t set_aside_partial_char(charset_filter_ctx_t *ctx,
+ const char *partial,
+ apr_size_t partial_len)
+{
+ apr_status_t rv;
+
+ if (sizeof(ctx->buf) > partial_len) {
+ ctx->saved = partial_len;
+ memcpy(ctx->buf, partial, partial_len);
+ rv = APR_SUCCESS;
+ }
+ else {
+ rv = APR_INCOMPLETE;
+ ctx->ees = EES_LIMIT; /* we don't handle chars this wide which straddle
+ * buckets
+ */
+ }
+ return rv;
+}
+
+static apr_status_t finish_partial_char(charset_filter_ctx_t *ctx,
+ /* input buffer: */
+ const char **cur_str,
+ apr_size_t *cur_len,
+ /* output buffer: */
+ char **out_str,
+ apr_size_t *out_len)
+{
+ apr_status_t rv;
+ apr_size_t tmp_input_len;
+
+ /* Keep adding bytes from the input string to the saved string until we
+ * 1) finish the input char
+ * 2) get an error
+ * or 3) run out of bytes to add
+ */
+
+ do {
+ ctx->buf[ctx->saved] = **cur_str;
+ ++ctx->saved;
+ ++*cur_str;
+ --*cur_len;
+ tmp_input_len = ctx->saved;
+ rv = apr_xlate_conv_buffer(ctx->xlate,
+ ctx->buf,
+ &tmp_input_len,
+ *out_str,
+ out_len);
+ } while (rv == APR_INCOMPLETE && *cur_len);
+
+ if (rv == APR_SUCCESS) {
+ ctx->saved = 0;
+ }
+ else {
+ ctx->ees = EES_LIMIT; /* code isn't smart enough to handle chars
+ * straddling more than two buckets
+ */
+ }
+
+ return rv;
+}
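+
+/* Worked example for the two helpers above: if a two-byte character has only
+ * its first byte at the end of a bucket, apr_xlate_conv_buffer() returns
+ * APR_INCOMPLETE and set_aside_partial_char() stashes that byte in ctx->buf
+ * (ctx->saved becomes 1).  When the next bucket arrives, finish_partial_char()
+ * appends its bytes to ctx->buf one at a time and retries the conversion until
+ * the character is complete, an error occurs, or the bucket runs out of bytes.
+ */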
+
+static void log_xlate_error(ap_filter_t *f, apr_status_t rv)
+{
+ charset_filter_ctx_t *ctx = f->ctx;
+ const char *msg;
+ char msgbuf[100];
+ int cur;
+
+ switch(ctx->ees) {
+ case EES_LIMIT:
+ rv = 0;
+ msg = "xlate filter - a built-in restriction was encountered";
+ break;
+ case EES_BAD_INPUT:
+ rv = 0;
+ msg = "xlate filter - an input character was invalid";
+ break;
+ case EES_BUCKET_READ:
+ rv = 0;
+ msg = "xlate filter - bucket read routine failed";
+ break;
+ case EES_INCOMPLETE_CHAR:
+ rv = 0;
+ strcpy(msgbuf, "xlate filter - incomplete char at end of input - ");
+ cur = 0;
+ while ((apr_size_t)cur < ctx->saved) {
+ apr_snprintf(msgbuf + strlen(msgbuf), sizeof(msgbuf) - strlen(msgbuf),
+ "%02X", (unsigned)ctx->buf[cur]);
+ ++cur;
+ }
+ msg = msgbuf;
+ break;
+ case EES_DOWNSTREAM:
+ msg = "xlate filter - an error occurred in a lower filter";
+ break;
+ default:
+ msg = "xlate filter - returning error";
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r,
+ "%s", msg);
+}
+
+/* chk_filter_chain() is called once per filter instance; it tries to
+ * determine if the current filter instance should be disabled because
+ * its translation is incompatible with the translation of an existing
+ * instance of the translate filter
+ *
+ * Example bad scenario:
+ *
+ * configured filter chain for the request:
+ * INCLUDES XLATEOUT(8859-1->UTF-16)
+ * configured filter chain for the subrequest:
+ * XLATEOUT(8859-1->UTF-16)
+ *
+ * When the subrequest is processed, the filter chain will be
+ * XLATEOUT(8859-1->UTF-16) XLATEOUT(8859-1->UTF-16)
+ * This makes no sense, so the instance of XLATEOUT added for the
+ * subrequest will be noop-ed.
+ *
+ * Example good scenario:
+ *
+ * configured filter chain for the request:
+ * INCLUDES XLATEOUT(8859-1->UTF-16)
+ * configured filter chain for the subrequest:
+ * XLATEOUT(IBM-1047->8859-1)
+ *
+ * When the subrequest is processed, the filter chain will be
+ * XLATEOUT(IBM-1047->8859-1) XLATEOUT(8859-1->UTF-16)
+ * This makes sense, so the instance of XLATEOUT added for the
+ * subrequest will be left alone and it will translate from
+ * IBM-1047->8859-1.
+ */
+static void chk_filter_chain(ap_filter_t *f)
+{
+ ap_filter_t *curf;
+ charset_filter_ctx_t *curctx, *last_xlate_ctx = NULL,
+ *ctx = f->ctx;
+ int debug = ctx->dc->debug;
+ int output = !strcasecmp(f->frec->name, XLATEOUT_FILTER_NAME);
+
+ if (ctx->noop) {
+ return;
+ }
+
+ /* walk the filter chain; see if it makes sense for our filter to
+ * do any translation
+ */
+ curf = output ? f->r->output_filters : f->r->input_filters;
+ while (curf) {
+ if (!strcasecmp(curf->frec->name, f->frec->name) &&
+ curf->ctx) {
+ curctx = (charset_filter_ctx_t *)curf->ctx;
+ if (!last_xlate_ctx) {
+ last_xlate_ctx = curctx;
+ }
+ else {
+ if (strcmp(last_xlate_ctx->dc->charset_default,
+ curctx->dc->charset_source)) {
+ /* incompatible translation
+ * if our filter instance is incompatible with an instance
+ * already in place, noop our instance
+ * Notes:
+ * . We are only willing to noop our own instance.
+ * . It is possible to noop another instance which has not
+ * yet run, but this is not currently implemented.
+ * Hopefully it will not be needed.
+ * . It is not possible to noop an instance which has
+ * already run.
+ */
+ if (last_xlate_ctx == f->ctx) {
+ last_xlate_ctx->noop = 1;
+ if (debug >= DBGLVL_PMC) {
+ const char *symbol = output ? "->" : "<-";
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG,
+ 0, f->r,
+ "%s %s - disabling "
+ "translation %s%s%s; existing "
+ "translation %s%s%s",
+ f->r->uri ? "uri" : "file",
+ f->r->uri ? f->r->uri : f->r->filename,
+ last_xlate_ctx->dc->charset_source,
+ symbol,
+ last_xlate_ctx->dc->charset_default,
+ curctx->dc->charset_source,
+ symbol,
+ curctx->dc->charset_default);
+ }
+ }
+ else {
+ const char *symbol = output ? "->" : "<-";
+
+ ap_log_rerror(APLOG_MARK, APLOG_ERR,
+ 0, f->r,
+ "chk_filter_chain() - can't disable "
+ "translation %s%s%s; existing "
+ "translation %s%s%s",
+ last_xlate_ctx->dc->charset_source,
+ symbol,
+ last_xlate_ctx->dc->charset_default,
+ curctx->dc->charset_source,
+ symbol,
+ curctx->dc->charset_default);
+ }
+ break;
+ }
+ }
+ }
+ curf = curf->next;
+ }
+}
+
+/* xlate_brigade() is used to filter request and response bodies
+ *
+ * we'll stop when one of the following occurs:
+ * . we run out of buckets
+ * . we run out of space in the output buffer
+ * . we hit an error
+ *
+ * inputs:
+ * bb: brigade to process
+ * buffer: storage to hold the translated characters
+ * buffer_size: size of buffer
+ * (and a few more uninteresting parms)
+ *
+ * outputs:
+ * return value: APR_SUCCESS or some error code
+ * bb: we've removed any buckets representing the
+ * translated characters; the eos bucket, if
+ * present, will be left in the brigade
+ * buffer: filled in with translated characters
+ * buffer_size: updated with the bytes remaining
+ * hit_eos: did we hit an EOS bucket?
+ */
+static apr_status_t xlate_brigade(charset_filter_ctx_t *ctx,
+ apr_bucket_brigade *bb,
+ char *buffer,
+ apr_size_t *buffer_avail,
+ int *hit_eos)
+{
+ apr_bucket *b = NULL; /* set to NULL only to quiet some gcc */
+ apr_bucket *consumed_bucket;
+ const char *bucket;
+ apr_size_t bytes_in_bucket; /* total bytes read from current bucket */
+ apr_size_t bucket_avail; /* bytes left in current bucket */
+ apr_status_t rv = APR_SUCCESS;
+
+ *hit_eos = 0;
+ bucket_avail = 0;
+ consumed_bucket = NULL;
+ while (1) {
+ if (!bucket_avail) { /* no bytes left to process in the current bucket... */
+ if (consumed_bucket) {
+ apr_bucket_delete(consumed_bucket);
+ consumed_bucket = NULL;
+ }
+ b = APR_BRIGADE_FIRST(bb);
+ if (b == APR_BRIGADE_SENTINEL(bb) ||
+ APR_BUCKET_IS_EOS(b)) {
+ break;
+ }
+ rv = apr_bucket_read(b, &bucket, &bytes_in_bucket, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ ctx->ees = EES_BUCKET_READ;
+ break;
+ }
+ bucket_avail = bytes_in_bucket;
+ consumed_bucket = b; /* for axing when we're done reading it */
+ }
+ if (bucket_avail) {
+ /* We've got data, so translate it. */
+ if (ctx->saved) {
+ /* Rats... we need to finish a partial character from the previous
+ * bucket.
+ *
+ * Strangely, finish_partial_char() increments the input buffer
+ * pointer but does not increment the output buffer pointer.
+ */
+ apr_size_t old_buffer_avail = *buffer_avail;
+ rv = finish_partial_char(ctx,
+ &bucket, &bucket_avail,
+ &buffer, buffer_avail);
+ buffer += old_buffer_avail - *buffer_avail;
+ }
+ else {
+ apr_size_t old_buffer_avail = *buffer_avail;
+ apr_size_t old_bucket_avail = bucket_avail;
+ rv = apr_xlate_conv_buffer(ctx->xlate,
+ bucket, &bucket_avail,
+ buffer,
+ buffer_avail);
+ buffer += old_buffer_avail - *buffer_avail;
+ bucket += old_bucket_avail - bucket_avail;
+
+ if (rv == APR_INCOMPLETE) { /* partial character at end of input */
+ /* We need to save the final byte(s) for next time; we can't
+ * convert it until we look at the next bucket.
+ */
+ rv = set_aside_partial_char(ctx, bucket, bucket_avail);
+ bucket_avail = 0;
+ }
+ }
+ if (rv != APR_SUCCESS) {
+ /* bad input byte or partial char too big to store */
+ break;
+ }
+ if (*buffer_avail < XLATE_MIN_BUFF_LEFT) {
+ /* if any data remains in the current bucket, split there */
+ if (bucket_avail) {
+ apr_bucket_split(b, bytes_in_bucket - bucket_avail);
+ }
+ apr_bucket_delete(b);
+ break;
+ }
+ }
+ }
+
+ if (!APR_BRIGADE_EMPTY(bb)) {
+ b = APR_BRIGADE_FIRST(bb);
+ if (APR_BUCKET_IS_EOS(b)) {
+ /* Leave the eos bucket in the brigade for reporting to
+ * subsequent filters.
+ */
+ *hit_eos = 1;
+ if (ctx->saved) {
+ /* Oops... we have a partial char from the previous bucket
+ * that won't be completed because there's no more data.
+ */
+ rv = APR_INCOMPLETE;
+ ctx->ees = EES_INCOMPLETE_CHAR;
+ }
+ }
+ }
+
+ return rv;
+}
+
+/* xlate_out_filter() handles (almost) arbitrary conversions from one charset
+ * to another...
+ * translation is determined in the fixup hook (find_code_page), which is
+ * where the filter's context data is set up... the context data gives us
+ * the translation handle
+ */
+static apr_status_t xlate_out_filter(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ charset_req_t *reqinfo = ap_get_module_config(f->r->request_config,
+ &charset_lite_module);
+ charset_dir_t *dc = ap_get_module_config(f->r->per_dir_config,
+ &charset_lite_module);
+ charset_filter_ctx_t *ctx = f->ctx;
+ apr_bucket *dptr, *consumed_bucket;
+ const char *cur_str;
+ apr_size_t cur_len, cur_avail;
+ char tmp[OUTPUT_XLATE_BUF_SIZE];
+ apr_size_t space_avail;
+ int done;
+ apr_status_t rv = APR_SUCCESS;
+
+ if (!ctx) {
+ /* this is the SetOutputFilter path; grab the preallocated context,
+ * if any; note that if we decided not to do anything in an earlier
+ * handler, we won't even have a reqinfo
+ */
+ if (reqinfo) {
+ ctx = f->ctx = reqinfo->output_ctx;
+ reqinfo->output_ctx = NULL; /* prevent SNAFU if user coded us twice
+ * in the filter chain; we can't have two
+ * instances using the same context
+ */
+ }
+ if (!ctx) { /* no idea how to translate; don't do anything */
+ ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(charset_filter_ctx_t));
+ ctx->dc = dc;
+ ctx->noop = 1;
+ }
+ }
+
+ if (dc->debug >= DBGLVL_GORY) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
+ "xlate_out_filter() - "
+ "charset_source: %s charset_default: %s",
+ dc && dc->charset_source ? dc->charset_source : "(none)",
+ dc && dc->charset_default ? dc->charset_default : "(none)");
+ }
+
+ if (!ctx->ran) { /* filter never ran before */
+ chk_filter_chain(f);
+ ctx->ran = 1;
+ }
+
+ if (ctx->noop) {
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ dptr = APR_BRIGADE_FIRST(bb);
+ done = 0;
+ cur_len = 0;
+ space_avail = sizeof(tmp);
+ consumed_bucket = NULL;
+ while (!done) {
+ if (!cur_len) { /* no bytes left to process in the current bucket... */
+ if (consumed_bucket) {
+ apr_bucket_delete(consumed_bucket);
+ consumed_bucket = NULL;
+ }
+ if (dptr == APR_BRIGADE_SENTINEL(bb)) {
+ done = 1;
+ break;
+ }
+ if (APR_BUCKET_IS_EOS(dptr)) {
+ done = 1;
+ cur_len = -1; /* XXX yuck, but that tells us to send
+ * eos down; when we minimize our bb construction
+ * we'll fix this crap */
+ if (ctx->saved) {
+ /* Oops... we have a partial char from the previous bucket
+ * that won't be completed because there's no more data.
+ */
+ rv = APR_INCOMPLETE;
+ ctx->ees = EES_INCOMPLETE_CHAR;
+ }
+ break;
+ }
+ rv = apr_bucket_read(dptr, &cur_str, &cur_len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ done = 1;
+ ctx->ees = EES_BUCKET_READ;
+ break;
+ }
+ consumed_bucket = dptr; /* for axing when we're done reading it */
+ dptr = APR_BUCKET_NEXT(dptr); /* get ready for when we access the
+ * next bucket */
+ }
+ /* Try to fill up our tmp buffer with translated data. */
+ cur_avail = cur_len;
+
+ if (cur_len) { /* maybe we just hit the end of a pipe (len = 0) ? */
+ if (ctx->saved) {
+ /* Rats... we need to finish a partial character from the previous
+ * bucket.
+ */
+ char *tmp_tmp;
+
+ tmp_tmp = tmp + sizeof(tmp) - space_avail;
+ rv = finish_partial_char(ctx,
+ &cur_str, &cur_len,
+ &tmp_tmp, &space_avail);
+ }
+ else {
+ rv = apr_xlate_conv_buffer(ctx->xlate,
+ cur_str, &cur_avail,
+ tmp + sizeof(tmp) - space_avail, &space_avail);
+
+ /* Update input ptr and len after consuming some bytes */
+ cur_str += cur_len - cur_avail;
+ cur_len = cur_avail;
+
+ if (rv == APR_INCOMPLETE) { /* partial character at end of input */
+ /* We need to save the final byte(s) for next time; we can't
+ * convert it until we look at the next bucket.
+ */
+ rv = set_aside_partial_char(ctx, cur_str, cur_len);
+ cur_len = 0;
+ }
+ }
+ }
+
+ if (rv != APR_SUCCESS) {
+ /* bad input byte or partial char too big to store */
+ done = 1;
+ }
+
+ if (space_avail < XLATE_MIN_BUFF_LEFT) {
+ /* It is time to flush, as there is not enough space left in the
+ * current output buffer to bother with converting more data.
+ */
+ rv = send_downstream(f, tmp, sizeof(tmp) - space_avail);
+ if (rv != APR_SUCCESS) {
+ done = 1;
+ }
+
+ /* tmp is now empty */
+ space_avail = sizeof(tmp);
+ }
+ }
+
+ if (rv == APR_SUCCESS) {
+ if (space_avail < sizeof(tmp)) { /* gotta write out what we converted */
+ rv = send_downstream(f, tmp, sizeof(tmp) - space_avail);
+ }
+ }
+ if (rv == APR_SUCCESS) {
+ if (cur_len == -1) {
+ rv = send_eos(f);
+ }
+ }
+ else {
+ log_xlate_error(f, rv);
+ }
+
+ return rv;
+}
+
+static int xlate_in_filter(ap_filter_t *f, apr_bucket_brigade *bb,
+ ap_input_mode_t mode, apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ apr_status_t rv;
+ charset_req_t *reqinfo = ap_get_module_config(f->r->request_config,
+ &charset_lite_module);
+ charset_dir_t *dc = ap_get_module_config(f->r->per_dir_config,
+ &charset_lite_module);
+ charset_filter_ctx_t *ctx = f->ctx;
+ apr_size_t buffer_size;
+ int hit_eos;
+
+ if (!ctx) {
+ /* this is the SetInputFilter path; grab the preallocated context,
+ * if any; note that if we decided not to do anything in an earlier
+ * handler, we won't even have a reqinfo
+ */
+ if (reqinfo) {
+ ctx = f->ctx = reqinfo->input_ctx;
+ reqinfo->input_ctx = NULL; /* prevent SNAFU if user coded us twice
+ * in the filter chain; we can't have two
+ * instances using the same context
+ */
+ }
+ if (!ctx) { /* no idea how to translate; don't do anything */
+ ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(charset_filter_ctx_t));
+ ctx->dc = dc;
+ ctx->noop = 1;
+ }
+ }
+
+ if (dc->debug >= DBGLVL_GORY) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
+ "xlate_in_filter() - "
+ "charset_source: %s charset_default: %s",
+ dc && dc->charset_source ? dc->charset_source : "(none)",
+ dc && dc->charset_default ? dc->charset_default : "(none)");
+ }
+
+ if (!ctx->ran) { /* filter never ran before */
+ chk_filter_chain(f);
+ ctx->ran = 1;
+ }
+
+ if (ctx->noop) {
+ return ap_get_brigade(f->next, bb, mode, block, readbytes);
+ }
+
+ if (APR_BRIGADE_EMPTY(ctx->bb)) {
+ if ((rv = ap_get_brigade(f->next, bb, mode, block,
+ readbytes)) != APR_SUCCESS) {
+ return rv;
+ }
+ }
+ else {
+ APR_BRIGADE_PREPEND(bb, ctx->bb); /* first use the leftovers */
+ }
+
+ buffer_size = INPUT_XLATE_BUF_SIZE;
+ rv = xlate_brigade(ctx, bb, ctx->tmp, &buffer_size, &hit_eos);
+ if (rv == APR_SUCCESS) {
+ if (!hit_eos) {
+ /* move anything leftover into our context for next time;
+ * we don't currently "set aside" since the data came from
+ * down below, but I suspect that for long-term we need to
+ * do that
+ */
+ APR_BRIGADE_CONCAT(ctx->bb, bb);
+ }
+ if (buffer_size < INPUT_XLATE_BUF_SIZE) { /* do we have output? */
+ apr_bucket *e;
+
+ e = apr_bucket_heap_create(ctx->tmp,
+ INPUT_XLATE_BUF_SIZE - buffer_size,
+ NULL, f->r->connection->bucket_alloc);
+ /* make sure we insert at the head, because there may be
+ * an eos bucket already there, and the eos bucket should
+ * come after the data
+ */
+ APR_BRIGADE_INSERT_HEAD(bb, e);
+ }
+ else {
+ /* XXX need to get some more data... what if the last brigade
+ * we got had only the first byte of a multibyte char? we need
+ * to grab more data from the network instead of returning an
+ * empty brigade
+ */
+ }
+ }
+ else {
+ log_xlate_error(f, rv);
+ }
+
+ return rv;
+}
+
+static const command_rec cmds[] =
+{
+ AP_INIT_TAKE1("CharsetSourceEnc",
+ add_charset_source,
+ NULL,
+ OR_FILEINFO,
+ "source (html,cgi,ssi) file charset"),
+ AP_INIT_TAKE1("CharsetDefault",
+ add_charset_default,
+ NULL,
+ OR_FILEINFO,
+ "name of default charset"),
+ AP_INIT_ITERATE("CharsetOptions",
+ add_charset_options,
+ NULL,
+ OR_FILEINFO,
+ "valid options: ImplicitAdd, NoImplicitAdd, DebugLevel=n"),
+ {NULL}
+};
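+
+/* A minimal httpd.conf sketch using the directives above.  The directory
+ * path and charset names are placeholders, and the explicit SetOutputFilter
+ * line assumes XLATEOUT_FILTER_NAME is "XLATEOUT"; it is only one possible
+ * way to attach the filter (the module can also add it implicitly):
+ *
+ *   <Directory "/usr/local/apache2/htdocs/ebcdic">
+ *       CharsetSourceEnc IBM-1047
+ *       CharsetDefault   ISO-8859-1
+ *       CharsetOptions   DebugLevel=1 NoImplicitAdd
+ *       SetOutputFilter  XLATEOUT
+ *   </Directory>
+ */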
+
+static void charset_register_hooks(apr_pool_t *p)
+{
+ ap_hook_fixups(find_code_page, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_insert_filter(xlate_insert_filter, NULL, NULL, APR_HOOK_REALLY_LAST);
+ ap_register_output_filter(XLATEOUT_FILTER_NAME, xlate_out_filter, NULL,
+ AP_FTYPE_RESOURCE);
+ ap_register_input_filter(XLATEIN_FILTER_NAME, xlate_in_filter, NULL,
+ AP_FTYPE_RESOURCE);
+}
+
+module AP_MODULE_DECLARE_DATA charset_lite_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_charset_dir_conf,
+ merge_charset_dir_conf,
+ NULL,
+ NULL,
+ cmds,
+ charset_register_hooks
+};
+
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.dsp
new file mode 100644
index 00000000..8a133ff7
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.dsp
@@ -0,0 +1,124 @@
+# Microsoft Developer Studio Project File - Name="mod_charset_lite" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_charset_lite - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_charset_lite.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_charset_lite.mak" CFG="mod_charset_lite - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_charset_lite - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_charset_lite - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_charset_lite - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_charset_lite_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_charset_lite.so" /base:@..\..\os\win32\BaseAddr.ref,mod_charset_lite.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_charset_lite - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_charset_lite_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_charset_lite.so" /base:@..\..\os\win32\BaseAddr.ref,mod_charset_lite.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_charset_lite - Win32 Release"
+# Name "mod_charset_lite - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_charset_lite.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_charset_lite - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_charset_lite.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_charset_lite.so "charset_lite_module for Apache" ../../include/ap_release.h > .\mod_charset_lite.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_charset_lite - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_charset_lite.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_charset_lite.so "charset_lite_module for Apache" ../../include/ap_release.h > .\mod_charset_lite.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.exp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.exp
new file mode 100644
index 00000000..3f0bf14b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_charset_lite.exp
@@ -0,0 +1 @@
+charset_lite_module
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.c
new file mode 100644
index 00000000..f8c1642f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.c
@@ -0,0 +1,963 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_file_io.h"
+#include "apr_strings.h"
+#include "mod_cache.h"
+#include "ap_provider.h"
+#include "util_filter.h"
+#include "util_script.h"
+
+#if APR_HAVE_UNISTD_H
+#include <unistd.h> /* needed for unlink/link */
+#endif
+
+/* Our on-disk header format is:
+ *
+ * disk_cache_info_t
+ * entity name (dobj->name) [length is in disk_cache_info_t->name_len]
+ * r->headers_out (delimited by CRLF)
+ * CRLF
+ * r->headers_in (delimited by CRLF)
+ * CRLF
+ */
+#define DISK_FORMAT_VERSION 0
+typedef struct {
+ /* Indicates the format of the header struct stored on-disk. */
+ int format;
+ /* The HTTP status code returned for this response. */
+ int status;
+ /* The size of the entity name that follows. */
+ apr_size_t name_len;
+ /* The number of times we've cached this entity. */
+ apr_size_t entity_version;
+ /* Miscellaneous time values. */
+ apr_time_t date;
+ apr_time_t expire;
+ apr_time_t request_time;
+ apr_time_t response_time;
+} disk_cache_info_t;
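+
+/* Illustrative on-disk layout for an entity named "/index.html" (offsets
+ * depend on sizeof(disk_cache_info_t) for the platform; header names and
+ * values are placeholders):
+ *
+ *   disk_cache_info_t                       (with name_len = 11)
+ *   "/index.html"                           (name_len bytes, no terminator)
+ *   cacheable response headers, each "Name: value" CRLF, then a bare CRLF
+ *   cacheable request headers in the same form, then a bare CRLF
+ */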
+
+/*
+ * disk_cache_object_t
+ * Pointed to by cache_object_t::vobj
+ */
+typedef struct disk_cache_object {
+ const char *root; /* the location of the cache directory */
+ char *tempfile; /* temp file to hold the content */
+#if 0
+ int dirlevels; /* Number of levels of subdirectories */
+ int dirlength; /* Length of subdirectory names */
+#endif
+ char *datafile; /* name of file where the data will go */
+ char *hdrsfile; /* name of file where the hdrs will go */
+ char *hashfile; /* Computed hash key for this URI */
+ char *name;
+ apr_file_t *fd; /* data file */
+ apr_file_t *hfd; /* headers file */
+ apr_file_t *tfd; /* temporary file for data */
+ apr_off_t file_size; /* File size of the cached data file */
+ disk_cache_info_t disk_info; /* Header information. */
+} disk_cache_object_t;
+
+
+/*
+ * mod_disk_cache configuration
+ */
+/* TODO: Make defaults OS specific */
+#define CACHEFILE_LEN 20 /* must be less than HASH_LEN/2 */
+#define DEFAULT_DIRLEVELS 3
+#define DEFAULT_DIRLENGTH 2
+#define DEFAULT_MIN_FILE_SIZE 1
+#define DEFAULT_MAX_FILE_SIZE 1000000
+#define DEFAULT_CACHE_SIZE 1000000
+
+typedef struct {
+ const char* cache_root;
+ apr_size_t cache_root_len;
+ off_t space; /* Maximum cache size (in units of 1024 bytes) */
+ apr_time_t maxexpire; /* Maximum time to keep cached files in msecs */
+ apr_time_t defaultexpire; /* default time to keep cached file in msecs */
+ double lmfactor; /* factor for estimating expires date */
+ apr_time_t gcinterval; /* garbage collection interval, in msec */
+ int dirlevels; /* Number of levels of subdirectories */
+ int dirlength; /* Length of subdirectory names */
+ int expirychk; /* true if expiry time is observed for cached files */
+ apr_size_t minfs; /* minimum file size for cached files */
+ apr_size_t maxfs; /* maximum file size for cached files */
+ apr_time_t mintm; /* minimum time margin for caching files */
+ /* dgc_time_t gcdt; time of day for daily garbage collection */
+ apr_array_header_t *gcclnun; /* gc_retain_t entries for unused files */
+ apr_array_header_t *gcclean; /* gc_retain_t entries for all files */
+ int maxgcmem; /* maximum memory used by garbage collection */
+} disk_cache_conf;
+
+module AP_MODULE_DECLARE_DATA disk_cache_module;
+
+/* Forward declarations */
+static int remove_entity(cache_handle_t *h);
+static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info *i);
+static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b);
+static apr_status_t recall_headers(cache_handle_t *h, request_rec *r);
+static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
+
+/*
+ * Local static functions
+ */
+#define CACHE_HEADER_SUFFIX ".header"
+#define CACHE_DATA_SUFFIX ".data"
+static char *header_file(apr_pool_t *p, disk_cache_conf *conf,
+ disk_cache_object_t *dobj, const char *name)
+{
+ if (!dobj->hashfile) {
+ dobj->hashfile = generate_name(p, conf->dirlevels, conf->dirlength,
+ name);
+ }
+ return apr_pstrcat(p, conf->cache_root, "/", dobj->hashfile,
+ CACHE_HEADER_SUFFIX, NULL);
+}
+
+static char *data_file(apr_pool_t *p, disk_cache_conf *conf,
+ disk_cache_object_t *dobj, const char *name)
+{
+ if (!dobj->hashfile) {
+ dobj->hashfile = generate_name(p, conf->dirlevels, conf->dirlength,
+ name);
+ }
+ return apr_pstrcat(p, conf->cache_root, "/", dobj->hashfile,
+ CACHE_DATA_SUFFIX, NULL);
+}
+
+static void mkdir_structure(disk_cache_conf *conf, char *file, apr_pool_t *pool)
+{
+ apr_status_t rv;
+ char *p;
+
+ for (p = file + conf->cache_root_len + 1;;) {
+ p = strchr(p, '/');
+ if (!p)
+ break;
+ *p = '\0';
+
+ rv = apr_dir_make(file,
+ APR_UREAD|APR_UWRITE|APR_UEXECUTE, pool);
+ if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) {
+ /* XXX */
+ }
+ *p = '/';
+ ++p;
+ }
+}
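+
+/* For example, with cache_root "/var/cache/apache" and a (made-up) hashed
+ * name "/var/cache/apache/aB/cD/eF/xyz.header", the loop above creates
+ * /var/cache/apache/aB, then /var/cache/apache/aB/cD, then
+ * /var/cache/apache/aB/cD/eF, ignoring EEXIST errors; the final component
+ * is a file and is created later by the caller.
+ */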
+
+static apr_status_t file_cache_el_final(disk_cache_object_t *dobj,
+ request_rec *r)
+{
+ /* move the data over */
+ if (dobj->tfd) {
+ apr_status_t rv;
+
+ apr_file_close(dobj->tfd);
+
+ /* This assumes that the tempfile is on the same file system
+ * as the cache_root. If not, then we need a file copy/move
+ * rather than a rename.
+ */
+ rv = apr_file_rename(dobj->tempfile, dobj->datafile, r->pool);
+ if (rv != APR_SUCCESS) {
+ /* XXX log */
+ }
+
+ dobj->tfd = NULL;
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t file_cache_errorcleanup(disk_cache_object_t *dobj, request_rec *r)
+{
+ /* Remove the header file and the body file. */
+ apr_file_remove(dobj->hdrsfile, r->pool);
+ apr_file_remove(dobj->datafile, r->pool);
+
+ /* If we opened the temporary data file, close and remove it. */
+ if (dobj->tfd) {
+ apr_file_close(dobj->tfd);
+ apr_file_remove(dobj->tempfile, r->pool);
+ dobj->tfd = NULL;
+ }
+
+ return APR_SUCCESS;
+}
+
+
+/* These functions get and put state information into the data
+ * file for an ap_cache_el; this state information is read
+ * and written transparently to clients of this module
+ */
+static int file_cache_recall_mydata(apr_file_t *fd, cache_info *info,
+ disk_cache_object_t *dobj, request_rec *r)
+{
+ apr_status_t rv;
+ char *urlbuff;
+ disk_cache_info_t disk_info;
+ apr_size_t len;
+
+ /* read the data from the cache file */
+ len = sizeof(disk_cache_info_t);
+ rv = apr_file_read_full(fd, &disk_info, len, &len);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ if (disk_info.format != DISK_FORMAT_VERSION) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "cache_disk: URL %s had a on-disk version mismatch",
+ r->uri);
+ return APR_EGENERAL;
+ }
+
+ /* Store it away so we can get it later. */
+ dobj->disk_info = disk_info;
+
+ info->date = disk_info.date;
+ info->expire = disk_info.expire;
+ info->request_time = disk_info.request_time;
+ info->response_time = disk_info.response_time;
+
+ /* Note that we could optimize this by conditionally doing the palloc
+ * depending upon the size. */
+ urlbuff = apr_palloc(r->pool, disk_info.name_len + 1);
+ len = disk_info.name_len;
+ rv = apr_file_read_full(fd, urlbuff, len, &len);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ urlbuff[disk_info.name_len] = '\0';
+
+ /* check that we have the same URL */
+ /* Would strncmp be correct? */
+ if (strcmp(urlbuff, dobj->name) != 0) {
+ return APR_EGENERAL;
+ }
+
+ return APR_SUCCESS;
+}
+
+/*
+ * Hook and mod_cache callback functions
+ */
+#define AP_TEMPFILE "/aptmpXXXXXX"
+static int create_entity(cache_handle_t *h, request_rec *r,
+ const char *key,
+ apr_off_t len)
+{
+ disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
+ &disk_cache_module);
+ cache_object_t *obj;
+ disk_cache_object_t *dobj;
+
+ if (conf->cache_root == NULL) {
+ return DECLINED;
+ }
+
+ /* If the Content-Length is still unknown, cache anyway */
+ if (len != -1 && (len < conf->minfs || len > conf->maxfs)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache_disk: URL %s failed the size check, "
+ "or is incomplete",
+ key);
+ return DECLINED;
+ }
+
+ /* Allocate and initialize cache_object_t and disk_cache_object_t */
+ h->cache_obj = obj = apr_pcalloc(r->pool, sizeof(*obj));
+ obj->vobj = dobj = apr_pcalloc(r->pool, sizeof(*dobj));
+
+ obj->key = apr_pstrdup(r->pool, key);
+ /* XXX Bad Temporary Cast - see cache_object_t notes */
+ obj->info.len = (apr_size_t) len;
+ obj->complete = 0; /* Cache object is not complete */
+
+ dobj->name = obj->key;
+ dobj->datafile = data_file(r->pool, conf, dobj, key);
+ dobj->hdrsfile = header_file(r->pool, conf, dobj, key);
+ dobj->tempfile = apr_pstrcat(r->pool, conf->cache_root, AP_TEMPFILE, NULL);
+
+ return OK;
+}
+
+static int open_entity(cache_handle_t *h, request_rec *r, const char *key)
+{
+ apr_status_t rc;
+ static int error_logged = 0;
+ disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
+ &disk_cache_module);
+ apr_finfo_t finfo;
+ cache_object_t *obj;
+ cache_info *info;
+ disk_cache_object_t *dobj;
+ int flags;
+
+ h->cache_obj = NULL;
+
+ /* Look up entity keyed to 'url' */
+ if (conf->cache_root == NULL) {
+ if (!error_logged) {
+ error_logged = 1;
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "disk_cache: Cannot cache files to disk without a CacheRoot specified.");
+ }
+ return DECLINED;
+ }
+
+ /* Create and init the cache object */
+ h->cache_obj = obj = apr_pcalloc(r->pool, sizeof(cache_object_t));
+ obj->vobj = dobj = apr_pcalloc(r->pool, sizeof(disk_cache_object_t));
+
+ info = &(obj->info);
+ obj->key = (char *) key;
+ dobj->name = (char *) key;
+ dobj->datafile = data_file(r->pool, conf, dobj, key);
+ dobj->hdrsfile = header_file(r->pool, conf, dobj, key);
+ dobj->tempfile = apr_pstrcat(r->pool, conf->cache_root, AP_TEMPFILE, NULL);
+
+ /* Open the data file */
+ flags = APR_READ|APR_BINARY;
+#ifdef APR_SENDFILE_ENABLED
+ flags |= APR_SENDFILE_ENABLED;
+#endif
+ rc = apr_file_open(&dobj->fd, dobj->datafile, flags, 0, r->pool);
+ if (rc != APR_SUCCESS) {
+ /* XXX: Log message */
+ return DECLINED;
+ }
+
+ /* Open the headers file */
+ flags = APR_READ|APR_BINARY|APR_BUFFERED;
+ rc = apr_file_open(&dobj->hfd, dobj->hdrsfile, flags, 0, r->pool);
+ if (rc != APR_SUCCESS) {
+ /* XXX: Log message */
+ return DECLINED;
+ }
+
+ rc = apr_file_info_get(&finfo, APR_FINFO_SIZE, dobj->fd);
+ if (rc == APR_SUCCESS) {
+ dobj->file_size = finfo.size;
+ }
+
+ /* Read the bytes to setup the cache_info fields */
+ rc = file_cache_recall_mydata(dobj->hfd, info, dobj, r);
+ if (rc != APR_SUCCESS) {
+ /* XXX log message */
+ return DECLINED;
+ }
+
+ /* Initialize the cache_handle callback functions */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "disk_cache: Recalled cached URL info header %s", dobj->name);
+ return OK;
+}
+
+static int remove_entity(cache_handle_t *h)
+{
+ /* Null out the cache object pointer so next time we start from scratch */
+ h->cache_obj = NULL;
+ return OK;
+}
+
+static int remove_url(const char *key)
+{
+ /* XXX: Delete file from cache! */
+ return OK;
+}
+
+static apr_status_t read_table(cache_handle_t *handle, request_rec *r,
+ apr_table_t *table, apr_file_t *file)
+{
+ char w[MAX_STRING_LEN];
+ char *l;
+ int p;
+ apr_status_t rv;
+
+ while (1) {
+
+ /* ### What about APR_EOF? */
+ rv = apr_file_gets(w, MAX_STRING_LEN - 1, file);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Premature end of cache headers.");
+ return rv;
+ }
+
+ /* Delete terminal (CR?)LF */
+
+ p = strlen(w);
+ /* Indeed, the host's '\n':
+ '\012' for UNIX; '\015' for MacOS; '\025' for OS/390
+ -- whatever the script generates.
+ */
+ if (p > 0 && w[p - 1] == '\n') {
+ if (p > 1 && w[p - 2] == CR) {
+ w[p - 2] = '\0';
+ }
+ else {
+ w[p - 1] = '\0';
+ }
+ }
+
+ /* If we've finished reading the headers, break out of the loop. */
+ if (w[0] == '\0') {
+ break;
+ }
+
+#if APR_CHARSET_EBCDIC
+ /* Chances are that we received an ASCII header text instead of
+ * the expected EBCDIC header lines. Try to auto-detect:
+ */
+ if (!(l = strchr(w, ':'))) {
+ int maybeASCII = 0, maybeEBCDIC = 0;
+ unsigned char *cp, native;
+ apr_size_t inbytes_left, outbytes_left;
+
+ for (cp = w; *cp != '\0'; ++cp) {
+ native = apr_xlate_conv_byte(ap_hdrs_from_ascii, *cp);
+ if (apr_isprint(*cp) && !apr_isprint(native))
+ ++maybeEBCDIC;
+ if (!apr_isprint(*cp) && apr_isprint(native))
+ ++maybeASCII;
+ }
+ if (maybeASCII > maybeEBCDIC) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "CGI Interface Error: Script headers apparently ASCII: (CGI = %s)",
+ r->filename);
+ inbytes_left = outbytes_left = cp - w;
+ apr_xlate_conv_buffer(ap_hdrs_from_ascii,
+ w, &inbytes_left, w, &outbytes_left);
+ }
+ }
+#endif /*APR_CHARSET_EBCDIC*/
+
+ /* if we see a bogus header don't ignore it. Shout and scream */
+ if (!(l = strchr(w, ':'))) {
+ return APR_EGENERAL;
+ }
+
+ *l++ = '\0';
+ while (*l && apr_isspace(*l)) {
+ ++l;
+ }
+
+ apr_table_add(table, w, l);
+ }
+
+ return APR_SUCCESS;
+}
+
+/*
+ * Reads the stored headers back into the request/response header tables.
+ * Returns an error status on file error.
+ * This routine tries to deal with overly long lines and continuation lines.
+ * @@@: XXX: FIXME: currently the headers are passed through un-merged.
+ * Is that okay, or should they be collapsed where possible?
+ */
+static apr_status_t recall_headers(cache_handle_t *h, request_rec *r)
+{
+ disk_cache_object_t *dobj = (disk_cache_object_t *) h->cache_obj->vobj;
+
+ /* This case should not happen... */
+ if (!dobj->hfd) {
+ /* XXX log message */
+ return APR_NOTFOUND;
+ }
+
+ h->req_hdrs = apr_table_make(r->pool, 20);
+ h->resp_hdrs = apr_table_make(r->pool, 20);
+ h->resp_err_hdrs = apr_table_make(r->pool, 20);
+
+ /* Call routine to read the header lines/status line */
+ read_table(h, r, h->resp_hdrs, dobj->hfd);
+ read_table(h, r, h->req_hdrs, dobj->hfd);
+
+ apr_file_close(dobj->hfd);
+
+ h->status = dobj->disk_info.status;
+ h->content_type = apr_table_get(h->resp_hdrs, "Content-Type");
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "disk_cache: Recalled headers for URL %s", dobj->name);
+ return APR_SUCCESS;
+}
+
+static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb)
+{
+ apr_bucket *e;
+ disk_cache_object_t *dobj = (disk_cache_object_t*) h->cache_obj->vobj;
+
+ e = apr_bucket_file_create(dobj->fd, 0, (apr_size_t) dobj->file_size, p,
+ bb->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(bb, e);
+ e = apr_bucket_eos_create(bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t store_table(apr_file_t *fd, apr_table_t *table)
+{
+ int i;
+ apr_status_t rv;
+ struct iovec iov[4];
+ apr_size_t amt;
+ apr_table_entry_t *elts;
+
+ elts = (apr_table_entry_t *) apr_table_elts(table)->elts;
+ for (i = 0; i < apr_table_elts(table)->nelts; ++i) {
+ if (elts[i].key != NULL) {
+ iov[0].iov_base = elts[i].key;
+ iov[0].iov_len = strlen(elts[i].key);
+ iov[1].iov_base = ": ";
+ iov[1].iov_len = sizeof(": ") - 1;
+ iov[2].iov_base = elts[i].val;
+ iov[2].iov_len = strlen(elts[i].val);
+ iov[3].iov_base = CRLF;
+ iov[3].iov_len = sizeof(CRLF) - 1;
+
+ rv = apr_file_writev(fd, (const struct iovec *) &iov, 4,
+ &amt);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+ }
+ iov[0].iov_base = CRLF;
+ iov[0].iov_len = sizeof(CRLF) - 1;
+ rv = apr_file_writev(fd, (const struct iovec *) &iov, 1,
+ &amt);
+ return rv;
+}
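+
+/* For example, a table holding Content-Type and Content-Length entries is
+ * written to the file as (CRLF shown as \r\n; values are placeholders):
+ *
+ *   Content-Type: text/html\r\n
+ *   Content-Length: 42\r\n
+ *   \r\n
+ */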
+
+static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info *info)
+{
+ disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
+ &disk_cache_module);
+ apr_status_t rv;
+ apr_size_t amt;
+ disk_cache_object_t *dobj = (disk_cache_object_t*) h->cache_obj->vobj;
+
+ if (!dobj->hfd) {
+ disk_cache_info_t disk_info;
+ struct iovec iov[2];
+
+ /* This is flaky... we need to manage the cache_info differently */
+ h->cache_obj->info = *info;
+
+ /* Remove old file with the same name. If remove fails, then
+ * perhaps we need to create the directory tree where we are
+ * about to write the new headers file.
+ */
+ rv = apr_file_remove(dobj->hdrsfile, r->pool);
+ if (rv != APR_SUCCESS) {
+ mkdir_structure(conf, dobj->hdrsfile, r->pool);
+ }
+
+ rv = apr_file_open(&dobj->hfd, dobj->hdrsfile,
+ APR_WRITE | APR_CREATE | APR_EXCL,
+ APR_OS_DEFAULT, r->pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ dobj->name = h->cache_obj->key;
+
+ disk_info.format = DISK_FORMAT_VERSION;
+ disk_info.date = info->date;
+ disk_info.expire = info->expire;
+ disk_info.entity_version = dobj->disk_info.entity_version++;
+ disk_info.request_time = info->request_time;
+ disk_info.response_time = info->response_time;
+ disk_info.status = info->status;
+
+ disk_info.name_len = strlen(dobj->name);
+
+ iov[0].iov_base = (void*)&disk_info;
+ iov[0].iov_len = sizeof(disk_cache_info_t);
+ iov[1].iov_base = dobj->name;
+ iov[1].iov_len = disk_info.name_len;
+
+ rv = apr_file_writev(dobj->hfd, (const struct iovec *) &iov, 2, &amt);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ if (r->headers_out) {
+ apr_table_t *headers_out;
+
+ headers_out = ap_cache_cacheable_hdrs_out(r->pool, r->headers_out,
+ r->server);
+
+ if (!apr_table_get(headers_out, "Content-Type") &&
+ r->content_type) {
+ apr_table_setn(headers_out, "Content-Type",
+ ap_make_content_type(r, r->content_type));
+ }
+
+ rv = store_table(dobj->hfd, headers_out);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ }
+
+ /* Parse the vary header and dump those fields from the headers_in. */
+ /* Make call to the same thing cache_select_url calls to crack Vary. */
+ /* @@@ Some day, not today. */
+ if (r->headers_in) {
+ apr_table_t *headers_in;
+
+ headers_in = ap_cache_cacheable_hdrs_out(r->pool, r->headers_in,
+ r->server);
+ rv = store_table(dobj->hfd, headers_in);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+ apr_file_close(dobj->hfd); /* flush and close */
+ }
+ else {
+ /* XXX log message */
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "disk_cache: Stored headers for URL %s", dobj->name);
+ return APR_SUCCESS;
+}
+
+static apr_status_t store_body(cache_handle_t *h, request_rec *r,
+ apr_bucket_brigade *bb)
+{
+ apr_bucket *e;
+ apr_status_t rv;
+ disk_cache_object_t *dobj = (disk_cache_object_t *) h->cache_obj->vobj;
+ disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
+ &disk_cache_module);
+
+ /* We write to a temp file and then atomically rename the file over
+ * in file_cache_el_final().
+ */
+ if (!dobj->tfd) {
+ rv = apr_file_mktemp(&dobj->tfd, dobj->tempfile,
+ APR_CREATE | APR_WRITE | APR_BINARY |
+ APR_BUFFERED | APR_EXCL, r->pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ dobj->file_size = 0;
+ }
+
+ for (e = APR_BRIGADE_FIRST(bb);
+ e != APR_BRIGADE_SENTINEL(bb);
+ e = APR_BUCKET_NEXT(e))
+ {
+ const char *str;
+ apr_size_t length, written;
+ apr_bucket_read(e, &str, &length, APR_BLOCK_READ);
+ rv = apr_file_write_full(dobj->tfd, str, length, &written);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "cache_disk: Error when writing cache file for URL %s",
+ h->cache_obj->key);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ file_cache_errorcleanup(dobj, r);
+ return APR_EGENERAL;
+ }
+ dobj->file_size += written;
+ if (dobj->file_size > conf->maxfs) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache_disk: URL %s failed the size check (%lu>%lu)",
+ h->cache_obj->key, (unsigned long)dobj->file_size,
+ (unsigned long)conf->maxfs);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ file_cache_errorcleanup(dobj, r);
+ return APR_EGENERAL;
+ }
+ }
+
+ /* Was this the final bucket? If yes, close the temp file and perform
+ * sanity checks.
+ */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
+ if (h->cache_obj->info.len <= 0) {
+ /* If the target value of the content length is unknown
+ * (h->cache_obj->info.len <= 0), check whether the connection has been
+ * aborted by the client, to avoid caching incomplete response bodies.
+ *
+ * This can happen with large responses from slow backends like
+ * Tomcat via mod_jk.
+ */
+ if (r->connection->aborted) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ "disk_cache: Discarding body for URL %s "
+ "because connection has been aborted.",
+ h->cache_obj->key);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ file_cache_errorcleanup(dobj, r);
+ return APR_EGENERAL;
+ }
+ /* XXX Fixme: file_size isn't constrained by size_t. */
+ h->cache_obj->info.len = dobj->file_size;
+ }
+ else if (h->cache_obj->info.len != dobj->file_size) {
+ /* "Content-Length" and actual content disagree in size. Log that. */
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "disk_cache: URL %s failed the size check (%lu != %lu)",
+ h->cache_obj->key,
+ (unsigned long)h->cache_obj->info.len,
+ (unsigned long)dobj->file_size);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ file_cache_errorcleanup(dobj, r);
+ return APR_EGENERAL;
+ }
+ if (dobj->file_size < conf->minfs) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache_disk: URL %s failed the size check (%lu<%lu)",
+ h->cache_obj->key, (unsigned long)dobj->file_size, (unsigned long)conf->minfs);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ file_cache_errorcleanup(dobj, r);
+ return APR_EGENERAL;
+ }
+
+ /* All checks were fine. Move tempfile to final destination */
+ /* Link to the perm file, and close the descriptor */
+ file_cache_el_final(dobj, r);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "disk_cache: Body for URL %s cached.", dobj->name);
+ }
+
+ return APR_SUCCESS;
+}
+
+static void *create_config(apr_pool_t *p, server_rec *s)
+{
+ disk_cache_conf *conf = apr_pcalloc(p, sizeof(disk_cache_conf));
+
+ /* XXX: Set default values */
+ conf->dirlevels = DEFAULT_DIRLEVELS;
+ conf->dirlength = DEFAULT_DIRLENGTH;
+ conf->space = DEFAULT_CACHE_SIZE;
+ conf->maxfs = DEFAULT_MAX_FILE_SIZE;
+ conf->minfs = DEFAULT_MIN_FILE_SIZE;
+ conf->expirychk = 1;
+
+ conf->cache_root = NULL;
+ conf->cache_root_len = 0;
+
+ return conf;
+}
+
+/*
+ * mod_disk_cache configuration directives handlers.
+ */
+static const char
+*set_cache_root(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ conf->cache_root = arg;
+ conf->cache_root_len = strlen(arg);
+ /* TODO: canonicalize cache_root and strip off any trailing slashes */
+
+ return NULL;
+}
+static const char
+*set_cache_size(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ conf->space = atoi(arg);
+ return NULL;
+}
+static const char
+*set_cache_gcint(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+/*
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+*/
+ /* XXX */
+ return NULL;
+}
+/*
+ * Consider eliminating the next two directives in favor of
+ * Ian's prime number hash...
+ * key = hash_fn( r->uri)
+ * filename = "/key % prime1 /key %prime2/key %prime3"
+ */
+static const char
+*set_cache_dirlevels(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ int val = atoi(arg);
+ if (val < 1)
+ return "CacheDirLevels value must be an integer greater than 0";
+ if (val * conf->dirlength > CACHEFILE_LEN)
+ return "CacheDirLevels*CacheDirLength value must not be higher than 20";
+ conf->dirlevels = val;
+ return NULL;
+}
+static const char
+*set_cache_dirlength(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ int val = atoi(arg);
+ if (val < 1)
+ return "CacheDirLength value must be an integer greater than 0";
+ if (val * conf->dirlevels > CACHEFILE_LEN)
+ return "CacheDirLevels*CacheDirLength value must not be higher than 20";
+
+ conf->dirlength = val;
+ return NULL;
+}
+static const char
+*set_cache_exchk(cmd_parms *parms, void *in_struct_ptr, int flag)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ conf->expirychk = flag;
+
+ return NULL;
+}
+static const char
+*set_cache_minfs(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ conf->minfs = atoi(arg);
+ return NULL;
+}
+static const char
+*set_cache_maxfs(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ conf->maxfs = atoi(arg);
+ return NULL;
+}
+static const char
+*set_cache_minetm(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ /* XXX
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ */
+ return NULL;
+}
+static const char
+*set_cache_gctime(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ /* XXX
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ */
+ return NULL;
+}
+static const char
+*add_cache_gcclean(cmd_parms *parms, void *in_struct_ptr, const char *arg, const char *arg1)
+{
+ /* XXX
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ */
+ return NULL;
+}
+static const char
+*add_cache_gcclnun(cmd_parms *parms, void *in_struct_ptr, const char *arg, const char *arg1)
+{
+ /* XXX
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ */
+ return NULL;
+}
+static const char
+*set_cache_maxgcmem(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ /* XXX
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ &disk_cache_module);
+ */
+ return NULL;
+}
+
+static const command_rec disk_cache_cmds[] =
+{
+ AP_INIT_TAKE1("CacheRoot", set_cache_root, NULL, RSRC_CONF,
+ "The directory to store cache files"),
+ AP_INIT_TAKE1("CacheSize", set_cache_size, NULL, RSRC_CONF,
+ "The maximum disk space used by the cache in KB"),
+ AP_INIT_TAKE1("CacheGcInterval", set_cache_gcint, NULL, RSRC_CONF,
+ "The interval between garbage collections, in hours"),
+ AP_INIT_TAKE1("CacheDirLevels", set_cache_dirlevels, NULL, RSRC_CONF,
+ "The number of levels of subdirectories in the cache"),
+ AP_INIT_TAKE1("CacheDirLength", set_cache_dirlength, NULL, RSRC_CONF,
+ "The number of characters in subdirectory names"),
+ AP_INIT_FLAG("CacheExpiryCheck", set_cache_exchk, NULL, RSRC_CONF,
+ "on if cache observes Expires date when seeking files"),
+ AP_INIT_TAKE1("CacheMinFileSize", set_cache_minfs, NULL, RSRC_CONF,
+ "The minimum file size to cache a document"),
+ AP_INIT_TAKE1("CacheMaxFileSize", set_cache_maxfs, NULL, RSRC_CONF,
+ "The maximum file size to cache a document"),
+ AP_INIT_TAKE1("CacheTimeMargin", set_cache_minetm, NULL, RSRC_CONF,
+ "The minimum time margin to cache a document"),
+ AP_INIT_TAKE1("CacheGcDaily", set_cache_gctime, NULL, RSRC_CONF,
+ "The time of day for garbage collection (24 hour clock)"),
+ AP_INIT_TAKE2("CacheGcUnused", add_cache_gcclnun, NULL, RSRC_CONF,
+ "The time in hours to retain unused file that match a url"),
+ AP_INIT_TAKE2("CacheGcClean", add_cache_gcclean, NULL, RSRC_CONF,
+ "The time in hours to retain unchanged files that match a url"),
+ AP_INIT_TAKE1("CacheGcMemUsage", set_cache_maxgcmem, NULL, RSRC_CONF,
+ "The maximum kilobytes of memory used for garbage collection"),
+ {NULL}
+};
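+
+/* A minimal httpd.conf sketch for this provider.  The path and sizes are
+ * placeholders, and CacheEnable is supplied by mod_cache rather than by
+ * this module:
+ *
+ *   CacheEnable      disk /
+ *   CacheRoot        /usr/local/apache2/proxy
+ *   CacheSize        512000
+ *   CacheDirLevels   3
+ *   CacheDirLength   2
+ *   CacheMaxFileSize 1000000
+ */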
+
+static const cache_provider cache_disk_provider =
+{
+ &remove_entity,
+ &store_headers,
+ &store_body,
+ &recall_headers,
+ &recall_body,
+ &create_entity,
+ &open_entity,
+ &remove_url,
+};
+
+static void disk_cache_register_hook(apr_pool_t *p)
+{
+ /* cache initializer */
+ ap_register_provider(p, CACHE_PROVIDER_GROUP, "disk", "0",
+ &cache_disk_provider);
+}
+
+module AP_MODULE_DECLARE_DATA disk_cache_module = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ create_config, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ disk_cache_cmds, /* command apr_table_t */
+ disk_cache_register_hook /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.dsp
new file mode 100644
index 00000000..9e7bf622
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_disk_cache.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_disk_cache" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_disk_cache - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_disk_cache.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_disk_cache.mak" CFG="mod_disk_cache - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_disk_cache - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_disk_cache - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_disk_cache - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /Fd"Release\mod_disk_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_disk_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_disk_cache.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_disk_cache - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /Fd"Debug\mod_disk_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_disk_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_disk_cache.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_disk_cache - Win32 Release"
+# Name "mod_disk_cache - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_cache.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_disk_cache.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_disk_cache - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_disk_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_disk_cache.so "disk_cache_module for Apache" ../../include/ap_release.h > .\mod_disk_cache.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_disk_cache - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_disk_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_disk_cache.so "disk_cache_module for Apache" ../../include/ap_release.h > .\mod_disk_cache.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.c
new file mode 100644
index 00000000..06ac65f5
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.c
@@ -0,0 +1,215 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Originally written @ Covalent by Jim Jagielski
+ */
+
+/*
+ * mod_dumpio.c:
+ * Think of this as a filter sniffer for Apache 2.x. It logs
+ * all filter data right before and after it goes out on the
+ * wire (but before SSL encoding on output, and after SSL decoding on
+ * input). It can produce a *huge* amount of data.
+ */
+
+
+#include "httpd.h"
+#include "http_connection.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+
+module AP_MODULE_DECLARE_DATA dumpio_module ;
+
+typedef struct dumpio_conf_t {
+ int enable_input;
+ int enable_output;
+} dumpio_conf_t;
+
+/*
+ * Workhorse function: simply log to the current error_log
+ * info about the data in the bucket as well as the data itself
+ */
+static void dumpit(ap_filter_t *f, apr_bucket *b)
+{
+ conn_rec *c = f->c;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
+ "mod_dumpio: %s (%s-%s): %" APR_SIZE_T_FMT " bytes",
+ f->frec->name,
+ (APR_BUCKET_IS_METADATA(b)) ? "metadata" : "data",
+ b->type->name,
+ b->length) ;
+
+ if (!(APR_BUCKET_IS_METADATA(b))) {
+ const char *buf;
+ apr_size_t nbytes;
+ char *obuf;
+ if (apr_bucket_read(b, &buf, &nbytes, APR_BLOCK_READ) == APR_SUCCESS) {
+ if (nbytes) {
+ obuf = malloc(nbytes+1); /* use pool? */
+ memcpy(obuf, buf, nbytes);
+ obuf[nbytes] = '\0';
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
+ "mod_dumpio: %s (%s-%s): %s",
+ f->frec->name,
+ (APR_BUCKET_IS_METADATA(b)) ? "metadata" : "data",
+ b->type->name,
+ obuf);
+ free(obuf);
+ }
+ } else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
+ "mod_dumpio: %s (%s-%s): %s",
+ f->frec->name,
+ (APR_BUCKET_IS_METADATA(b)) ? "metadata" : "data",
+ b->type->name,
+ "error reading data");
+ }
+ }
+}
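The "use pool?" note above suggests allocating the copy from the connection pool instead of malloc(). A minimal sketch of that idea, assuming apr_strings.h is added to the includes; the helper name is hypothetical and this is not part of the patch.

    #include "apr_strings.h"

    /* Pool-based variant of the body-logging branch in dumpit() above:
     * apr_pstrmemdup() copies nbytes bytes, NUL-terminates them, and
     * allocates from the connection pool, so no free() is needed. */
    static void dumpit_body_pooled(ap_filter_t *f, apr_bucket *b)
    {
        conn_rec *c = f->c;
        const char *buf;
        apr_size_t nbytes;

        if (apr_bucket_read(b, &buf, &nbytes, APR_BLOCK_READ) == APR_SUCCESS
            && nbytes > 0) {
            char *obuf = apr_pstrmemdup(c->pool, buf, nbytes);
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         "mod_dumpio: %s (data-%s): %s",
                         f->frec->name, b->type->name, obuf);
        }
    }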
+
+#define whichmode( mode ) \
+ ( (( mode ) == AP_MODE_READBYTES) ? "readbytes" : \
+ (( mode ) == AP_MODE_GETLINE) ? "getline" : \
+ (( mode ) == AP_MODE_EATCRLF) ? "eatcrlf" : \
+ (( mode ) == AP_MODE_SPECULATIVE) ? "speculative" : \
+ (( mode ) == AP_MODE_EXHAUSTIVE) ? "exhaustive" : \
+ (( mode ) == AP_MODE_INIT) ? "init" : "unknown" \
+ )
+
+static int dumpio_input_filter (ap_filter_t *f, apr_bucket_brigade *bb,
+ ap_input_mode_t mode, apr_read_type_e block, apr_off_t readbytes)
+{
+
+ apr_bucket *b;
+ apr_status_t ret;
+ conn_rec *c = f->c;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
+ "mod_dumpio: %s [%s-%s] %" APR_OFF_T_FMT " readbytes",
+ f->frec->name,
+ whichmode(mode),
+ ((block) == APR_BLOCK_READ) ? "blocking" : "nonblocking",
+ readbytes) ;
+
+ ret = ap_get_brigade(f->next, bb, mode, block, readbytes);
+
+ if (ret == APR_SUCCESS) {
+ for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
+ dumpit(f, b);
+ }
+ } else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
+ "mod_dumpio: %s - %d", f->frec->name, ret) ;
+ }
+
+ return ret ; /* propagate any error from ap_get_brigade to the caller */
+}
+
+static int dumpio_output_filter (ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+ conn_rec *c = f->c;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server, "mod_dumpio: %s", f->frec->name) ;
+
+ for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
+ /*
+ * If we ever see an EOS, make sure to FLUSH.
+ */
+ if (APR_BUCKET_IS_EOS(b)) {
+ apr_bucket *flush = apr_bucket_flush_create(f->c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(b, flush);
+ }
+ dumpit(f, b);
+ }
+
+ return ap_pass_brigade(f->next, bb) ;
+}
+
+static int dumpio_pre_conn(conn_rec *c, void *csd)
+{
+ dumpio_conf_t *ptr =
+ (dumpio_conf_t *) ap_get_module_config(c->base_server->module_config,
+ &dumpio_module);
+
+ if (ptr->enable_input)
+ ap_add_input_filter("DUMPIO_IN", NULL, NULL, c);
+ if (ptr->enable_output)
+ ap_add_output_filter("DUMPIO_OUT", NULL, NULL, c);
+ return OK;
+}
+
+static void dumpio_register_hooks(apr_pool_t *p)
+{
+/*
+ * We know that SSL is CONNECTION + 5
+ */
+ ap_register_output_filter("DUMPIO_OUT", dumpio_output_filter,
+ NULL, AP_FTYPE_CONNECTION + 3) ;
+
+ ap_register_input_filter("DUMPIO_IN", dumpio_input_filter,
+ NULL, AP_FTYPE_CONNECTION + 3) ;
+
+ ap_hook_pre_connection(dumpio_pre_conn, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+static void *dumpio_create_sconfig(apr_pool_t *p, server_rec *s)
+{
+ dumpio_conf_t *ptr = apr_pcalloc(p, sizeof *ptr);
+ ptr->enable_input = ptr->enable_output = 0;
+ return ptr;
+}
+
+static const char *dumpio_enable_input(cmd_parms *cmd, void *dummy, int arg)
+{
+ dumpio_conf_t *ptr =
+ (dumpio_conf_t *) ap_get_module_config(cmd->server->module_config,
+ &dumpio_module);
+
+ ptr->enable_input = arg;
+ return NULL;
+}
+
+static const char *dumpio_enable_output(cmd_parms *cmd, void *dummy, int arg)
+{
+ dumpio_conf_t *ptr =
+ (dumpio_conf_t *) ap_get_module_config(cmd->server->module_config,
+ &dumpio_module);
+
+ ptr->enable_output = arg;
+ return NULL;
+}
+
+static const command_rec dumpio_cmds[] = {
+ AP_INIT_FLAG("DumpIOInput", dumpio_enable_input, NULL,
+ RSRC_CONF, "Enable I/O Dump on Input Data"),
+ AP_INIT_FLAG("DumpIOOutput", dumpio_enable_output, NULL,
+ RSRC_CONF, "Enable I/O Dump on Output Data"),
+ { NULL }
+};
+
+module AP_MODULE_DECLARE_DATA dumpio_module = {
+ STANDARD20_MODULE_STUFF,
+ NULL,
+ NULL,
+ dumpio_create_sconfig,
+ NULL,
+ dumpio_cmds,
+ dumpio_register_hooks
+};
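A hedged configuration sketch for the two directives declared above. Everything mod_dumpio writes goes to the error log at APLOG_DEBUG, so LogLevel has to be raised for any output to appear; the values are illustrative.

    LoadModule dumpio_module modules/mod_dumpio.so
    # mod_dumpio only logs at debug level
    LogLevel debug
    DumpIOInput On
    DumpIOOutput On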
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.dsp
new file mode 100644
index 00000000..1126ddc9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_dumpio.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_dumpio" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_dumpio - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_dumpio.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_dumpio.mak" CFG="mod_dumpio - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_dumpio - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_dumpio - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_dumpio - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_dumpio_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_dumpio.so" /base:@..\..\os\win32\BaseAddr.ref,mod_dumpio.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_dumpio.so" /base:@..\..\os\win32\BaseAddr.ref,mod_dumpio.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_dumpio - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_dumpio_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_dumpio.so" /base:@..\..\os\win32\BaseAddr.ref,mod_dumpio.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_dumpio.so" /base:@..\..\os\win32\BaseAddr.ref,mod_dumpio.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_dumpio - Win32 Release"
+# Name "mod_dumpio - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_dumpio.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_dumpio.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_dumpio - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_dumpio.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_dumpio.so "dumpio_module for Apache" ../../include/ap_release.h > .\mod_dumpio.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_dumpio - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_dumpio.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_dumpio.so "dumpio_module for Apache" ../../include/ap_release.h > .\mod_dumpio.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_example.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_example.c
new file mode 100644
index 00000000..5fae6a20
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_example.c
@@ -0,0 +1,1313 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Apache example module. Provides demonstrations of how modules do things.
+ * It is not meant to be used in a production server. Since it participates
+ * in all of the processing phases, it could conceivably interfere with
+ * the proper operation of other modules -- particularly the ones related
+ * to security.
+ *
+ * In the interest of brevity, all functions and structures internal to
+ * this module, but which may have counterparts in *real* modules, are
+ * prefixed with 'x_' instead of 'example_'.
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "util_script.h"
+#include "http_connection.h"
+
+#include "apr_strings.h"
+
+#include <stdio.h>
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* Data declarations. */
+/* */
+/* Here are the static cells and structure declarations private to our */
+/* module. */
+/* */
+/*--------------------------------------------------------------------------*/
+
+/*
+ * Sample configuration record. Used for both per-directory and per-server
+ * configuration data.
+ *
+ * It's perfectly reasonable to have two different structures for the two
+ * different environments. The same command handlers will be called for
+ * both, though, so the handlers need to be able to tell them apart. One
+ * possibility is for both structures to start with an int which is 0 for
+ * one and 1 for the other.
+ *
+ * Note that while the per-directory and per-server configuration records are
+ * available to most of the module handlers, they should be treated as
+ * READ-ONLY by all except the command and merge handlers. Sometimes handlers
+ * are handed a record that applies to the current location by implication or
+ * inheritance, and modifying it will change the rules for other locations.
+ */
+typedef struct x_cfg {
+ int cmode; /* Environment to which record applies
+ * (directory, server, or combination).
+ */
+#define CONFIG_MODE_SERVER 1
+#define CONFIG_MODE_DIRECTORY 2
+#define CONFIG_MODE_COMBO 3 /* Shouldn't ever happen. */
+ int local; /* Boolean: "Example" directive declared
+ * here?
+ */
+ int congenital; /* Boolean: did we inherit an "Example"? */
+ char *trace; /* Pointer to trace string. */
+ char *loc; /* Location to which this record applies. */
+} x_cfg;
+
+/*
+ * Let's set up a module-local static cell to point to the accreting callback
+ * trace. As each API callback is made to us, we'll tack on the particulars
+ * to whatever we've already recorded. To avoid massive memory bloat as
+ * directories are walked again and again, we record the routine/environment
+ * the first time (non-request context only), and ignore subsequent calls for
+ * the same routine/environment.
+ */
+static const char *trace = NULL;
+static apr_table_t *static_calls_made = NULL;
+
+/*
+ * To avoid leaking memory from pools other than the per-request one, we
+ * allocate a module-private pool, and then use a sub-pool of that which gets
+ * freed each time we modify the trace. That way previous layers of trace
+ * data don't get lost.
+ */
+static apr_pool_t *x_pool = NULL;
+static apr_pool_t *x_subpool = NULL;
+
+/*
+ * Declare ourselves so the configuration routines can find and know us.
+ * We'll fill it in at the end of the module.
+ */
+module AP_MODULE_DECLARE_DATA example_module;
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* The following pseudo-prototype declarations illustrate the parameters */
+/* passed to command handlers for the different types of directive */
+/* syntax. If an argument was specified in the directive definition */
+/* (look for "command_rec" below), it's available to the command handler */
+/* via the (void *) info field in the cmd_parms argument passed to the */
+/* handler (cmd->info for the examples below). */
+/* */
+/*--------------------------------------------------------------------------*/
+
+/*
+ * Command handler for a NO_ARGS directive. Declared in the command_rec
+ * list with
+ * AP_INIT_NO_ARGS("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_NO_ARGS(cmd_parms *cmd, void *mconfig);
+ */
+
+/*
+ * Command handler for a RAW_ARGS directive. The "args" argument is the text
+ * of the commandline following the directive itself. Declared in the
+ * command_rec list with
+ * AP_INIT_RAW_ARGS("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_RAW_ARGS(cmd_parms *cmd, void *mconfig,
+ * const char *args);
+ */
+
+/*
+ * Command handler for a FLAG directive. The single parameter is passed in
+ * "bool", which is either zero or not for Off or On respectively.
+ * Declared in the command_rec list with
+ * AP_INIT_FLAG("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_FLAG(cmd_parms *cmd, void *mconfig, int bool);
+ */
+
+/*
+ * Command handler for a TAKE1 directive. The single parameter is passed in
+ * "word1". Declared in the command_rec list with
+ * AP_INIT_TAKE1("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE1(cmd_parms *cmd, void *mconfig,
+ * char *word1);
+ */
+
+/*
+ * Command handler for a TAKE2 directive. TAKE2 commands must always have
+ * exactly two arguments. Declared in the command_rec list with
+ * AP_INIT_TAKE2("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE2(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2);
+ */
+
+/*
+ * Command handler for a TAKE3 directive. Like TAKE2, these must have exactly
+ * three arguments, or the parser complains and doesn't bother calling us.
+ * Declared in the command_rec list with
+ * AP_INIT_TAKE3("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE3(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2, char *word3);
+ */
+
+/*
+ * Command handler for a TAKE12 directive. These can take either one or two
+ * arguments.
+ * - word2 is a NULL pointer if no second argument was specified.
+ * Declared in the command_rec list with
+ * AP_INIT_TAKE12("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE12(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2);
+ */
+
+/*
+ * Command handler for a TAKE123 directive. A TAKE123 directive can be given,
+ * as might be expected, one, two, or three arguments.
+ * - word2 is a NULL pointer if no second argument was specified.
+ * - word3 is a NULL pointer if no third argument was specified.
+ * Declared in the command_rec list with
+ * AP_INIT_TAKE123("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE123(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2, char *word3);
+ */
+
+/*
+ * Command handler for a TAKE13 directive. Either one or three arguments are
+ * permitted - no two-parameters-only syntax is allowed.
+ * - word2 and word3 are NULL pointers if only one argument was specified.
+ * Declared in the command_rec list with
+ * AP_INIT_TAKE13("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE13(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2, char *word3);
+ */
+
+/*
+ * Command handler for a TAKE23 directive. At least two and as many as three
+ * arguments must be specified.
+ * - word3 is a NULL pointer if no third argument was specified.
+ * Declared in the command_rec list with
+ * AP_INIT_TAKE23("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_TAKE23(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2, char *word3);
+ */
+
+/*
+ * Command handler for an ITERATE directive.
+ * - Handler is called once for each of n arguments given to the directive.
+ * - word1 points to each argument in turn.
+ * Declared in the command_rec list with
+ * AP_INIT_ITERATE("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_ITERATE(cmd_parms *cmd, void *mconfig,
+ * char *word1);
+ */
+
+/*
+ * Command handler for an ITERATE2 directive.
+ * - Handler is called once for each of the second and subsequent arguments
+ * given to the directive.
+ * - word1 is the same for each call for a particular directive instance (the
+ * first argument).
+ * - word2 points to each of the second and subsequent arguments in turn.
+ * Declared in the command_rec list with
+ * AP_INIT_ITERATE2("directive", function, mconfig, where, help)
+ *
+ * static const char *handle_ITERATE2(cmd_parms *cmd, void *mconfig,
+ * char *word1, char *word2);
+ */
+
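To make the pseudo-prototypes above concrete, here is a hedged sketch of a TAKE2 handler and the AP_INIT_TAKE2 entry that would declare it. The directive name "ExamplePair" and the handler are hypothetical and are not part of mod_example.

    /* Hypothetical TAKE2 handler: it borrows x_cfg's trace field just to
     * have somewhere to store its two arguments. */
    static const char *handle_pair(cmd_parms *cmd, void *mconfig,
                                   const char *word1, const char *word2)
    {
        x_cfg *cfg = (x_cfg *) mconfig;

        cfg->trace = apr_pstrcat(cmd->pool, word1, " ", word2, NULL);
        return NULL;      /* NULL tells the parser the directive succeeded */
    }

    /* Declared in the command_rec list as:
     * AP_INIT_TAKE2("ExamplePair", handle_pair, NULL, OR_OPTIONS,
     *               "hypothetical two-argument directive")
     */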
+/*--------------------------------------------------------------------------*/
+/* */
+/* These routines are strictly internal to this module, and support its */
+/* operation. They are not referenced by any external portion of the */
+/* server. */
+/* */
+/*--------------------------------------------------------------------------*/
+
+/*
+ * Locate our directory configuration record for the current request.
+ */
+static x_cfg *our_dconfig(const request_rec *r)
+{
+ return (x_cfg *) ap_get_module_config(r->per_dir_config, &example_module);
+}
+
+#if 0
+/*
+ * Locate our server configuration record for the specified server.
+ */
+static x_cfg *our_sconfig(const server_rec *s)
+{
+ return (x_cfg *) ap_get_module_config(s->module_config, &example_module);
+}
+
+/*
+ * Likewise for our configuration record for the specified request.
+ */
+static x_cfg *our_rconfig(const request_rec *r)
+{
+ return (x_cfg *) ap_get_module_config(r->request_config, &example_module);
+}
+#endif
+
+/*
+ * Likewise for our configuration record for a connection.
+ */
+static x_cfg *our_cconfig(const conn_rec *c)
+{
+ return (x_cfg *) ap_get_module_config(c->conn_config, &example_module);
+}
+
+/*
+ * This routine sets up some module-wide cells if they haven't been already.
+ */
+static void setup_module_cells(void)
+{
+ /*
+ * If we haven't already allocated our module-private pool, do so now.
+ */
+ if (x_pool == NULL) {
+ apr_pool_create(&x_pool, NULL);
+ }
+ /*
+ * Likewise for the table of routine/environment pairs we visit outside of
+ * request context.
+ */
+ if (static_calls_made == NULL) {
+ static_calls_made = apr_table_make(x_pool, 16);
+ }
+}
+
+/*
+ * This routine is used to add a trace of a callback to the list. We're
+ * passed the server record (if available), the request record (if available),
+ * a pointer to our private configuration record (if available) for the
+ * environment to which the callback is supposed to apply, and some text. We
+ * turn this into a textual representation and add it to the tail of the list.
+ * The list can be displayed by the x_handler() routine.
+ *
+ * If the call occurs within a request context (i.e., we're passed a request
+ * record), we put the trace into the request apr_pool_t and attach it to the
+ * request via the notes mechanism. Otherwise, the trace gets added
+ * to the static (non-request-specific) list.
+ *
+ * Note that the r->notes table is only for storing strings; if you need to
+ * maintain per-request data of any other type, you need to use another
+ * mechanism.
+ */
+
+#define TRACE_NOTE "example-trace"
+
+static void trace_add(server_rec *s, request_rec *r, x_cfg *mconfig,
+ const char *note)
+{
+ const char *sofar;
+ char *addon;
+ char *where;
+ apr_pool_t *p;
+ const char *trace_copy;
+
+ /*
+ * Make sure our pools and tables are set up - we need 'em.
+ */
+ setup_module_cells();
+ /*
+ * Now, if we're in request-context, we use the request pool.
+ */
+ if (r != NULL) {
+ p = r->pool;
+ if ((trace_copy = apr_table_get(r->notes, TRACE_NOTE)) == NULL) {
+ trace_copy = "";
+ }
+ }
+ else {
+ /*
+ * We're not in request context, so the trace gets attached to our
+ * module-wide pool. We do the create/destroy every time we're called
+ * in non-request context; this avoids leaking memory in some of
+ * the subsequent calls that allocate memory only once (such as the
+ * key formation below).
+ *
+ * Make a new sub-pool and copy any existing trace to it. Point the
+ * trace cell at the copied value.
+ */
+ apr_pool_create(&p, x_pool);
+ if (trace != NULL) {
+ trace = apr_pstrdup(p, trace);
+ }
+ /*
+ * Now, if we have a sub-pool from before, nuke it and replace with
+ * the one we just allocated.
+ */
+ if (x_subpool != NULL) {
+ apr_pool_destroy(x_subpool);
+ }
+ x_subpool = p;
+ trace_copy = trace;
+ }
+ /*
+ * If we weren't passed a configuration record, we can't figure out to
+ * what location this call applies. This only happens for co-routines
+ * that don't operate in a particular directory or server context. If we
+ * got a valid record, extract the location (directory or server) to which
+ * it applies.
+ */
+ where = (mconfig != NULL) ? mconfig->loc : "nowhere";
+ where = (where != NULL) ? where : "";
+ /*
+ * Now, if we're not in request context, see if we've been called with
+ * this particular combination before. The apr_table_t is allocated in the
+ * module's private pool, which doesn't get destroyed.
+ */
+ if (r == NULL) {
+ char *key;
+
+ key = apr_pstrcat(p, note, ":", where, NULL);
+ if (apr_table_get(static_calls_made, key) != NULL) {
+ /*
+ * Been here, done this.
+ */
+ return;
+ }
+ else {
+ /*
+ * First time for this combination of routine and environment -
+ * log it so we don't do it again.
+ */
+ apr_table_set(static_calls_made, key, "been here");
+ }
+ }
+ addon = apr_pstrcat(p,
+ " <li>\n"
+ " <dl>\n"
+ " <dt><samp>", note, "</samp></dt>\n"
+ " <dd><samp>[", where, "]</samp></dd>\n"
+ " </dl>\n"
+ " </li>\n",
+ NULL);
+ sofar = (trace_copy == NULL) ? "" : trace_copy;
+ trace_copy = apr_pstrcat(p, sofar, addon, NULL);
+ if (r != NULL) {
+ apr_table_set(r->notes, TRACE_NOTE, trace_copy);
+ }
+ else {
+ trace = trace_copy;
+ }
+ /*
+ * You *could* change the following if you wanted to see the calling
+ * sequence reported in the server's error_log, but beware - almost all of
+ * these co-routines are called for every single request, and the impact
+ * on the size (and readability) of the error_log is considerable.
+ */
+#define EXAMPLE_LOG_EACH 0
+ if (EXAMPLE_LOG_EACH && (s != NULL)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "mod_example: %s", note);
+ }
+}
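As the comment above notes, r->notes can only hold strings. A hedged sketch of the "other mechanism" for arbitrary per-request data is ap_set_module_config() on r->request_config; the structure and helper names below are hypothetical and not part of mod_example.

    /* Hypothetical per-request state attached via r->request_config. */
    typedef struct x_req_state {
        int counter;
    } x_req_state;

    static void remember_state(request_rec *r, int value)
    {
        x_req_state *st = apr_pcalloc(r->pool, sizeof(*st));

        st->counter = value;
        /* Unlike r->notes, this can carry any pointer, not just strings. */
        ap_set_module_config(r->request_config, &example_module, st);
    }

    static x_req_state *recall_state(request_rec *r)
    {
        return (x_req_state *) ap_get_module_config(r->request_config,
                                                    &example_module);
    }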
+
+/*--------------------------------------------------------------------------*/
+/* We prototyped the various syntax for command handlers (routines that */
+/* are called when the configuration parser detects a directive declared */
+/* by our module) earlier. Now we actually declare a "real" routine that */
+/* will be invoked by the parser when our "real" directive is */
+/* encountered. */
+/* */
+/* If a command handler encounters a problem processing the directive, it */
+/* signals this fact by returning a non-NULL pointer to a string */
+/* describing the problem. */
+/* */
+/* The magic return value DECLINE_CMD is used to deal with directives */
+/* that might be declared by multiple modules. If the command handler */
+/* returns NULL, the directive was processed; if it returns DECLINE_CMD, */
+/* the next module (if any) that declares the directive is given a chance */
+/* at it. If it returns any other value, it's treated as the text of an */
+/* error message. */
+/*--------------------------------------------------------------------------*/
+/*
+ * Command handler for the NO_ARGS "Example" directive. All we do is mark the
+ * call in the trace log, and flag the applicability of the directive to the
+ * current location in that location's configuration record.
+ */
+static const char *cmd_example(cmd_parms *cmd, void *mconfig)
+{
+ x_cfg *cfg = (x_cfg *) mconfig;
+
+ /*
+ * "Example Wuz Here"
+ */
+ cfg->local = 1;
+ trace_add(cmd->server, NULL, cfg, "cmd_example()");
+ return NULL;
+}
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* Now we declare our content handlers, which are invoked when the server */
+/* encounters a document which our module is supposed to have a chance to */
+/* see. (See mod_mime's SetHandler and AddHandler directives, and the */
+/* mod_info and mod_status examples, for more details.) */
+/* */
+/* Since content handlers are dumping data directly into the connection */
+/* (using the r*() routines, such as rputs() and rprintf()) without */
+/* intervention by other parts of the server, they should set any */
+/* response headers they need (most importantly r->content_type) before */
+/* writing the first byte of the body; in 2.x the header is then sent */
+/* automatically, so no send_http_header() call is needed. */
+/*--------------------------------------------------------------------------*/
+/*
+ * Sample content handler. All this does is display the call list that has
+ * been built up so far.
+ *
+ * The return value instructs the caller concerning what happened and what to
+ * do next:
+ * OK ("we did our thing")
+ * DECLINED ("this isn't something with which we want to get involved")
+ * HTTP_mumble ("an error status should be reported")
+ */
+static int x_handler(request_rec *r)
+{
+ x_cfg *dcfg;
+
+ if (strcmp(r->handler, "example-handler")) {
+ return DECLINED;
+ }
+
+ dcfg = our_dconfig(r);
+ trace_add(r->server, r, dcfg, "x_handler()");
+ /*
+ * We're about to start sending content, so this is where we set up the
+ * response headers; they will be sent automatically ahead of the body.
+ * We can set any we like, of course. **NOTE** Here's
+ * where you set the "Content-type" header, and you do so by putting it in
+ * r->content_type, *not* r->headers_out("Content-type"). If you don't
+ * set it, it will be filled in with the server's default type (typically
+ * "text/plain"). You *must* also ensure that r->content_type is lower
+ * case.
+ *
+ * (Under Apache 1.3 a handler also had to start a timer here so the
+ * server could detect a broken connection; 2.x takes care of that itself.)
+ */
+ ap_set_content_type(r, "text/html");
+ /*
+ * If we're only supposed to send header information (HEAD request), we're
+ * already there.
+ */
+ if (r->header_only) {
+ return OK;
+ }
+
+ /*
+ * Now send our actual output. Since we tagged this as being
+ * "text/html", we need to embed any HTML.
+ */
+ ap_rputs(DOCTYPE_HTML_3_2, r);
+ ap_rputs("<HTML>\n", r);
+ ap_rputs(" <HEAD>\n", r);
+ ap_rputs(" <TITLE>mod_example Module Content-Handler Output\n", r);
+ ap_rputs(" </TITLE>\n", r);
+ ap_rputs(" </HEAD>\n", r);
+ ap_rputs(" <BODY>\n", r);
+ ap_rputs(" <H1><SAMP>mod_example</SAMP> Module Content-Handler Output\n", r);
+ ap_rputs(" </H1>\n", r);
+ ap_rputs(" <P>\n", r);
+ ap_rprintf(r, " Apache HTTP Server version: \"%s\"\n",
+ ap_get_server_version());
+ ap_rputs(" <BR>\n", r);
+ ap_rprintf(r, " Server built: \"%s\"\n", ap_get_server_built());
+ ap_rputs(" </P>\n", r);;
+ ap_rputs(" <P>\n", r);
+ ap_rputs(" The format for the callback trace is:\n", r);
+ ap_rputs(" </P>\n", r);
+ ap_rputs(" <DL>\n", r);
+ ap_rputs(" <DT><EM>n</EM>.<SAMP>&lt;routine-name&gt;", r);
+ ap_rputs("(&lt;routine-data&gt;)</SAMP>\n", r);
+ ap_rputs(" </DT>\n", r);
+ ap_rputs(" <DD><SAMP>[&lt;applies-to&gt;]</SAMP>\n", r);
+ ap_rputs(" </DD>\n", r);
+ ap_rputs(" </DL>\n", r);
+ ap_rputs(" <P>\n", r);
+ ap_rputs(" The <SAMP>&lt;routine-data&gt;</SAMP> is supplied by\n", r);
+ ap_rputs(" the routine when it requests the trace,\n", r);
+ ap_rputs(" and the <SAMP>&lt;applies-to&gt;</SAMP> is extracted\n", r);
+ ap_rputs(" from the configuration record at the time of the trace.\n", r);
+ ap_rputs(" <STRONG>SVR()</STRONG> indicates a server environment\n", r);
+ ap_rputs(" (blank means the main or default server, otherwise it's\n", r);
+ ap_rputs(" the name of the VirtualHost); <STRONG>DIR()</STRONG>\n", r);
+ ap_rputs(" indicates a location in the URL or filesystem\n", r);
+ ap_rputs(" namespace.\n", r);
+ ap_rputs(" </P>\n", r);
+ ap_rprintf(r, " <H2>Static callbacks so far:</H2>\n <OL>\n%s </OL>\n",
+ trace);
+ ap_rputs(" <H2>Request-specific callbacks so far:</H2>\n", r);
+ ap_rprintf(r, " <OL>\n%s </OL>\n", apr_table_get(r->notes, TRACE_NOTE));
+ ap_rputs(" <H2>Environment for <EM>this</EM> call:</H2>\n", r);
+ ap_rputs(" <UL>\n", r);
+ ap_rprintf(r, " <LI>Applies-to: <SAMP>%s</SAMP>\n </LI>\n", dcfg->loc);
+ ap_rprintf(r, " <LI>\"Example\" directive declared here: %s\n </LI>\n",
+ (dcfg->local ? "YES" : "NO"));
+ ap_rprintf(r, " <LI>\"Example\" inherited: %s\n </LI>\n",
+ (dcfg->congenital ? "YES" : "NO"));
+ ap_rputs(" </UL>\n", r);
+ ap_rputs(" </BODY>\n", r);
+ ap_rputs("</HTML>\n", r);
+ /*
+ * We're all done. Under Apache 1.3 we would cancel the timeout we had
+ * set above at this point; 2.x has no handler-managed timer, so there
+ * is nothing to clean up here.
+ */
+ /*
+ * We did what we wanted to do, so tell the rest of the server we
+ * succeeded.
+ */
+ return OK;
+}
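To see this handler's output, a request has to be mapped onto the quoted handler name. A hedged httpd.conf sketch (the URL path is arbitrary):

    <Location /example-info>
        SetHandler example-handler
        # The module's lone directive; OR_OPTIONS allows it here too
        Example
    </Location>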
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* Now let's declare routines for each of the callback phases in order. */
+/* (That's the order in which they're listed in the callback list, *not */
+/* the order in which the server calls them! See the command_rec */
+/* declaration near the bottom of this file.) Note that these may be */
+/* called for situations that don't relate primarily to our function - in */
+/* other words, the fixup handler shouldn't assume that the request has */
+/* to do with "example" stuff. */
+/* */
+/* With the exception of the content handler, all of our routines will be */
+/* called for each request, unless an earlier handler from another module */
+/* aborted the sequence. */
+/* */
+/* Handlers that are declared as "int" can return the following: */
+/* */
+/* OK Handler accepted the request and did its thing with it. */
+/* DECLINED Handler took no action. */
+/* HTTP_mumble Handler looked at request and found it wanting. */
+/* */
+/* What the server does after calling a module handler depends upon the */
+/* handler's return value. In all cases, if the handler returns */
+/* DECLINED, the server will continue to the next module with a handler */
+/* for the current phase. However, if the handler returns a non-OK, */
+/* non-DECLINED status, the server aborts the request right there. If */
+/* the handler returns OK, the server's next action is phase-specific; */
+/* see the individual handler comments below for details. */
+/* */
+/*--------------------------------------------------------------------------*/
+/*
+ * This function is called during server initialisation. Any information
+ * that needs to be recorded must be in static cells, since there's no
+ * configuration record.
+ *
+ * There is no return value.
+ */
+
+/*
+ * This function is called when a heavy-weight process (such as a child) is
+ * being run down or destroyed. As with the child initialisation function,
+ * any information that needs to be recorded must be in static cells, since
+ * there's no configuration record.
+ *
+ * There is no return value.
+ */
+
+/*
+ * This function is called during server initialisation when a heavy-weight
+ * process (such as a child) is being initialised. As with the
+ * module initialisation function, any information that needs to be recorded
+ * must be in static cells, since there's no configuration record.
+ *
+ * There is no return value.
+ */
+
+/*
+ * This function gets called to create a per-directory configuration
+ * record. This will be called for the "default" server environment, and for
+ * each directory for which the parser finds any of our directives applicable.
+ * If a directory doesn't have any of our directives involved (i.e., they
+ * aren't in the .htaccess file, or a <Location>, <Directory>, or related
+ * block), this routine will *not* be called - the configuration for the
+ * closest ancestor is used.
+ *
+ * The return value is a pointer to the created module-specific
+ * structure.
+ */
+static void *x_create_dir_config(apr_pool_t *p, char *dirspec)
+{
+ x_cfg *cfg;
+ char *dname = dirspec;
+
+ /*
+ * Allocate the space for our record from the pool supplied.
+ */
+ cfg = (x_cfg *) apr_pcalloc(p, sizeof(x_cfg));
+ /*
+ * Now fill in the defaults. If there are any `parent' configuration
+ * records, they'll get merged as part of a separate callback.
+ */
+ cfg->local = 0;
+ cfg->congenital = 0;
+ cfg->cmode = CONFIG_MODE_DIRECTORY;
+ /*
+ * Finally, add our trace to the callback list.
+ */
+ dname = (dname != NULL) ? dname : "";
+ cfg->loc = apr_pstrcat(p, "DIR(", dname, ")", NULL);
+ trace_add(NULL, NULL, cfg, "x_create_dir_config()");
+ return (void *) cfg;
+}
+
+/*
+ * This function gets called to merge two per-directory configuration
+ * records. This is typically done to cope with things like .htaccess files
+ * or <Location> directives for directories that are beneath one for which a
+ * configuration record was already created. The routine has the
+ * responsibility of creating a new record and merging the contents of the
+ * other two into it appropriately. If the module doesn't declare a merge
+ * routine, the record for the closest ancestor location (that has one) is
+ * used exclusively.
+ *
+ * The routine MUST NOT modify any of its arguments!
+ *
+ * The return value is a pointer to the created module-specific structure
+ * containing the merged values.
+ */
+static void *x_merge_dir_config(apr_pool_t *p, void *parent_conf,
+ void *newloc_conf)
+{
+
+ x_cfg *merged_config = (x_cfg *) apr_pcalloc(p, sizeof(x_cfg));
+ x_cfg *pconf = (x_cfg *) parent_conf;
+ x_cfg *nconf = (x_cfg *) newloc_conf;
+ char *note;
+
+ /*
+ * Some things get copied directly from the more-specific record, rather
+ * than getting merged.
+ */
+ merged_config->local = nconf->local;
+ merged_config->loc = apr_pstrdup(p, nconf->loc);
+ /*
+ * Others, like the setting of the `congenital' flag, get ORed in. The
+ * setting of that particular flag, for instance, is TRUE if it was ever
+ * true anywhere in the upstream configuration.
+ */
+ merged_config->congenital = (pconf->congenital | pconf->local);
+ /*
+ * If we're merging records for two different types of environment (server
+ * and directory), mark the new record appropriately. Otherwise, inherit
+ * the current value.
+ */
+ merged_config->cmode =
+ (pconf->cmode == nconf->cmode) ? pconf->cmode : CONFIG_MODE_COMBO;
+ /*
+ * Now just record our being called in the trace list. Include the
+ * locations we were asked to merge.
+ */
+ note = apr_pstrcat(p, "x_merge_dir_config(\"", pconf->loc, "\",\"",
+ nconf->loc, "\")", NULL);
+ trace_add(NULL, NULL, merged_config, note);
+ return (void *) merged_config;
+}
+
+/*
+ * This function gets called to create a per-server configuration
+ * record. It will always be called for the "default" server.
+ *
+ * The return value is a pointer to the created module-specific
+ * structure.
+ */
+static void *x_create_server_config(apr_pool_t *p, server_rec *s)
+{
+
+ x_cfg *cfg;
+ char *sname = s->server_hostname;
+
+ /*
+ * As with the x_create_dir_config() routine, we allocate and fill
+ * in an empty record.
+ */
+ cfg = (x_cfg *) apr_pcalloc(p, sizeof(x_cfg));
+ cfg->local = 0;
+ cfg->congenital = 0;
+ cfg->cmode = CONFIG_MODE_SERVER;
+ /*
+ * Note that we were called in the trace list.
+ */
+ sname = (sname != NULL) ? sname : "";
+ cfg->loc = apr_pstrcat(p, "SVR(", sname, ")", NULL);
+ trace_add(s, NULL, cfg, "x_create_server_config()");
+ return (void *) cfg;
+}
+
+/*
+ * This function gets called to merge two per-server configuration
+ * records. This is typically done to cope with things like virtual hosts and
+ * the default server configuration. The routine has the responsibility of
+ * creating a new record and merging the contents of the other two into it
+ * appropriately. If the module doesn't declare a merge routine, the more
+ * specific existing record is used exclusively.
+ *
+ * The routine MUST NOT modify any of its arguments!
+ *
+ * The return value is a pointer to the created module-specific structure
+ * containing the merged values.
+ */
+static void *x_merge_server_config(apr_pool_t *p, void *server1_conf,
+ void *server2_conf)
+{
+
+ x_cfg *merged_config = (x_cfg *) apr_pcalloc(p, sizeof(x_cfg));
+ x_cfg *s1conf = (x_cfg *) server1_conf;
+ x_cfg *s2conf = (x_cfg *) server2_conf;
+ char *note;
+
+ /*
+ * Our inheritance rules are our own, and part of our module's semantics.
+ * Basically, just note whence we came.
+ */
+ merged_config->cmode =
+ (s1conf->cmode == s2conf->cmode) ? s1conf->cmode : CONFIG_MODE_COMBO;
+ merged_config->local = s2conf->local;
+ merged_config->congenital = (s1conf->congenital | s1conf->local);
+ merged_config->loc = apr_pstrdup(p, s2conf->loc);
+ /*
+ * Trace our call, including what we were asked to merge.
+ */
+ note = apr_pstrcat(p, "x_merge_server_config(\"", s1conf->loc, "\",\"",
+ s2conf->loc, "\")", NULL);
+ trace_add(NULL, NULL, merged_config, note);
+ return (void *) merged_config;
+}
+
+/*
+ * This routine is called before the server processes the configuration
+ * files. The return value is OK, DECLINED, or HTTP_mumble.
+ */
+static int x_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
+ apr_pool_t *ptemp)
+{
+ /*
+ * Log the call and exit.
+ */
+ trace_add(NULL, NULL, NULL, "x_pre_config()");
+
+ return OK;
+}
+
+/*
+ * This routine is called after the configuration files have been read and
+ * all modules' configurations merged, before the server handles requests.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, the
+ * server will still call any remaining modules with a handler for this
+ * phase.
+ */
+static int x_post_config(apr_pool_t *pconf, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ /*
+ * Log the call and exit.
+ */
+ trace_add(NULL, NULL, NULL, "x_post_config()");
+ return OK;
+}
+
+/*
+ * This routine is called to perform any module-specific log file
+ * openings. It is invoked just before the post_config phase
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, the
+ * server will still call any remaining modules with a handler for this
+ * phase.
+ */
+static int x_open_logs(apr_pool_t *pconf, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ /*
+ * Log the call and exit.
+ */
+ trace_add(s, NULL, NULL, "x_open_logs()");
+ return OK;
+}
+
+/*
+ * All our process-death routine does is add its trace to the log.
+ */
+static apr_status_t x_child_exit(void *data)
+{
+ char *note;
+ server_rec *s = data;
+ char *sname = s->server_hostname;
+
+ /*
+ * The arbitrary text we add to our trace entry indicates for which server
+ * we're being called.
+ */
+ sname = (sname != NULL) ? sname : "";
+ note = apr_pstrcat(s->process->pool, "x_child_exit(", sname, ")", NULL);
+ trace_add(s, NULL, NULL, note);
+ return APR_SUCCESS;
+}
+
+/*
+ * All our process initialiser does is add its trace to the log.
+ */
+static void x_child_init(apr_pool_t *p, server_rec *s)
+{
+ char *note;
+ char *sname = s->server_hostname;
+
+ /*
+ * Set up any module cells that ought to be initialised.
+ */
+ setup_module_cells();
+ /*
+ * The arbitrary text we add to our trace entry indicates for which server
+ * we're being called.
+ */
+ sname = (sname != NULL) ? sname : "";
+ note = apr_pstrcat(p, "x_child_init(", sname, ")", NULL);
+ trace_add(s, NULL, NULL, note);
+
+ apr_pool_cleanup_register(p, s, x_child_exit, x_child_exit);
+}
+
+/*
+ * XXX: This routine is called XXX
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, the
+ * server will still call any remaining modules with a handler for this
+ * phase.
+ */
+#if 0
+static const char *x_http_method(const request_rec *r)
+{
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call and exit.
+ */
+ trace_add(r->server, NULL, cfg, "x_http_method()");
+ return "foo";
+}
+
+/*
+ * XXX: This routine is called XXX
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, the
+ * server will still call any remaining modules with a handler for this
+ * phase.
+ */
+static apr_port_t x_default_port(const request_rec *r)
+{
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call and exit.
+ */
+ trace_add(r->server, NULL, cfg, "x_default_port()");
+ return 80;
+}
+#endif /*0*/
+
+/*
+ * XXX: This routine is called XXX
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, the
+ * server will still call any remaining modules with a handler for this
+ * phase.
+ */
+static void x_insert_filter(request_rec *r)
+{
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call and exit.
+ */
+ trace_add(r->server, NULL, cfg, "x_insert_filter()");
+}
+
+/*
+ * XXX: This routine is called XXX
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, the
+ * server will still call any remaining modules with a handler for this
+ * phase.
+ */
+static int x_quick_handler(request_rec *r, int lookup_uri)
+{
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call and exit.
+ */
+ trace_add(r->server, NULL, cfg, "x_post_config()");
+ return DECLINED;
+}
+
+/*
+ * This routine is called just after the server accepts the connection,
+ * but before it is handed off to a protocol module to be served. The point
+ * of this hook is to allow modules an opportunity to modify the connection
+ * as soon as possible. The core server uses this phase to setup the
+ * connection record based on the type of connection that is being used.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, the
+ * server will still call any remaining modules with a handler for this
+ * phase.
+ */
+static int x_pre_connection(conn_rec *c, void *csd)
+{
+ x_cfg *cfg;
+
+ cfg = our_cconfig(c);
+#if 0
+ /*
+ * Log the call and exit.
+ */
+ trace_add(r->server, NULL, cfg, "x_post_config()");
+#endif
+ return OK;
+}
+
+/* This routine is used to actually process the connection that was received.
+ * Only protocol modules should implement this hook, as it gives them an
+ * opportunity to replace the standard HTTP processing with processing for
+ * some other protocol. Both echo and POP3 modules are available as
+ * examples.
+ *
+ * The return VALUE is OK, DECLINED, or HTTP_mumble. If we return OK, no
+ * further modules are called for this phase.
+ */
+static int x_process_connection(conn_rec *c)
+{
+ return DECLINED;
+}
+
+/*
+ * This routine is called after the request has been read but before any other
+ * phases have been processed. This allows us to make decisions based upon
+ * the input header fields.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, no
+ * further modules are called for this phase.
+ */
+static int x_post_read_request(request_rec *r)
+{
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * We don't actually *do* anything here, except note the fact that we were
+ * called.
+ */
+ trace_add(r->server, r, cfg, "x_post_read_request()");
+ return DECLINED;
+}
+
+/*
+ * This routine gives our module an opportunity to translate the URI into an
+ * actual filename. If we don't do anything special, the server's default
+ * rules (Alias directives and the like) will continue to be followed.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, no
+ * further modules are called for this phase.
+ */
+static int x_translate_handler(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * We don't actually *do* anything here, except note the fact that we were
+ * called.
+ */
+ trace_add(r->server, r, cfg, "x_translate_handler()");
+ return DECLINED;
+}
+
+/*
+ * This routine gives our module another chance to examine the request
+ * headers and to take special action. This is the first phase whose
+ * hooks' configuration directives can appear inside the <Directory>
+ * and similar sections, because at this stage the URI has been mapped
+ * to the filename. For example, this phase can be used to block evil
+ * clients before many resources are wasted on them.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK,
+ * the server will still call any remaining modules with a handler
+ * for this phase.
+ */
+static int x_header_parser_handler(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * We don't actually *do* anything here, except note the fact that we were
+ * called.
+ */
+ trace_add(r->server, r, cfg, "header_parser_handler()");
+ return DECLINED;
+}
+
+
+/*
+ * This routine is called to check the authentication information sent with
+ * the request (such as looking up the user in a database and verifying that
+ * the [encrypted] password sent matches the one in the database).
+ *
+ * The return value is OK, DECLINED, or some HTTP_mumble error (typically
+ * HTTP_UNAUTHORIZED). If we return OK, no other modules are given a chance
+ * at the request during this phase.
+ */
+static int x_check_user_id(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Don't do anything except log the call.
+ */
+ trace_add(r->server, r, cfg, "x_check_user_id()");
+ return DECLINED;
+}
+
+/*
+ * This routine is called to check to see if the resource being requested
+ * requires authorisation.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, no
+ * other modules are called during this phase.
+ *
+ * If *all* modules return DECLINED, the request is aborted with a server
+ * error.
+ */
+static int x_auth_checker(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call and return OK, or access will be denied (even though we
+ * didn't actually do anything).
+ */
+ trace_add(r->server, r, cfg, "x_auth_checker()");
+ return DECLINED;
+}
+
+/*
+ * This routine is called to check for any module-specific restrictions placed
+ * upon the requested resource. (See the mod_access module for an example.)
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. All modules with a
+ * handler for this phase are called regardless of whether their predecessors
+ * return OK or DECLINED. The first one to return any other status, however,
+ * will abort the sequence (and the request) as usual.
+ */
+static int x_access_checker(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ trace_add(r->server, r, cfg, "x_access_checker()");
+ return DECLINED;
+}
+
+/*
+ * This routine is called to determine and/or set the various document type
+ * information bits, like Content-type (via r->content_type), language, et
+ * cetera.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, no
+ * further modules are given a chance at the request for this phase.
+ */
+static int x_type_checker(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call, but don't do anything else - and report truthfully that
+ * we didn't do anything.
+ */
+ trace_add(r->server, r, cfg, "x_type_checker()");
+ return DECLINED;
+}
+
+/*
+ * This routine is called to perform any module-specific fixing of header
+ * fields, et cetera. It is invoked just before any content-handler.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, the
+ * server will still call any remaining modules with a handler for this
+ * phase.
+ */
+static int x_fixer_upper(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ /*
+ * Log the call and exit.
+ */
+ trace_add(r->server, r, cfg, "x_fixer_upper()");
+ return OK;
+}
+
+/*
+ * This routine is called to perform any module-specific logging activities
+ * over and above the normal server things.
+ *
+ * The return value is OK, DECLINED, or HTTP_mumble. If we return OK, any
+ * remaining modules with a handler for this phase will still be called.
+ */
+static int x_logger(request_rec *r)
+{
+
+ x_cfg *cfg;
+
+ cfg = our_dconfig(r);
+ trace_add(r->server, r, cfg, "x_logger()");
+ return DECLINED;
+}
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* Which functions are responsible for which hooks in the server. */
+/* */
+/*--------------------------------------------------------------------------*/
+/*
+ * Each function our module provides to handle a particular hook is
+ * specified here. The functions are registered using
+ * ap_hook_foo(name, predecessors, successors, position)
+ * where foo is the name of the hook.
+ *
+ * The args are as follows:
+ * name -> the name of the function to call.
+ * predecessors -> a list of modules whose calls to this hook must be
+ * invoked before this module.
+ * successors -> a list of modules whose calls to this hook must be
+ * invoked after this module.
+ * position -> The relative position of this module. One of
+ * APR_HOOK_FIRST, APR_HOOK_MIDDLE, or APR_HOOK_LAST.
+ * Most modules will use APR_HOOK_MIDDLE. If multiple
+ * modules use the same relative position, Apache will
+ * determine which to call first.
+ * If your module relies on another module running first, or
+ * on another module running after yours, use the predecessors
+ * and/or successors lists.
+ *
+ * The number in brackets indicates the order in which the routine is called
+ * during request processing. Note that not all routines are necessarily
+ * called (such as if a resource doesn't have access restrictions).
+ * The actual delivery of content to the browser [9] is not handled by
+ * a hook; see the handler declarations below.
+ */
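+
+/*
+ * Illustrative sketch (not part of the original example module): if this
+ * module's type checker had to run only after mod_mime's, the registration
+ * could pass a NULL-terminated list of predecessor module file names instead
+ * of NULL. The module name "mod_mime.c" is an assumed example; the actual
+ * registration used by this module follows in x_register_hooks() below.
+ */
+#if 0
+static void x_register_hooks_ordered(apr_pool_t *p)
+{
+ static const char * const aszPre[] = { "mod_mime.c", NULL };
+ /* Ask the server to run x_type_checker after mod_mime.c's hook */
+ ap_hook_type_checker(x_type_checker, aszPre, NULL, APR_HOOK_MIDDLE);
+}
+#endif
+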
+static void x_register_hooks(apr_pool_t *p)
+{
+ ap_hook_pre_config(x_pre_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_config(x_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_open_logs(x_open_logs, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_child_init(x_child_init, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_handler(x_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_quick_handler(x_quick_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_pre_connection(x_pre_connection, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_process_connection(x_process_connection, NULL, NULL, APR_HOOK_MIDDLE);
+ /* [1] post read_request handling */
+ ap_hook_post_read_request(x_post_read_request, NULL, NULL,
+ APR_HOOK_MIDDLE);
+ ap_hook_log_transaction(x_logger, NULL, NULL, APR_HOOK_MIDDLE);
+#if 0
+ ap_hook_http_method(x_http_method, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_default_port(x_default_port, NULL, NULL, APR_HOOK_MIDDLE);
+#endif
+ ap_hook_translate_name(x_translate_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_header_parser(x_header_parser_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_check_user_id(x_check_user_id, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_fixups(x_fixer_upper, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_type_checker(x_type_checker, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_access_checker(x_access_checker, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_auth_checker(x_auth_checker, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_insert_filter(x_insert_filter, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* All of the routines have been declared now. Here's the list of */
+/* directives specific to our module, and information about where they */
+/* may appear and how the command parser should pass them to us for */
+/* processing. Note that care must be taken to ensure that there are NO */
+/* collisions of directive names between modules. */
+/* */
+/*--------------------------------------------------------------------------*/
+/*
+ * List of directives specific to our module.
+ */
+static const command_rec x_cmds[] =
+{
+ AP_INIT_NO_ARGS(
+ "Example", /* directive name */
+ cmd_example, /* config action routine */
+ NULL, /* argument to include in call */
+ OR_OPTIONS, /* where available */
+ "Example directive - no arguments" /* directive description */
+ ),
+ {NULL}
+};
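+
+/*
+ * Usage sketch (assumed path, not taken from this tree): since the directive
+ * above is registered with OR_OPTIONS, it can appear in a <Directory>
+ * container, or in .htaccess where "AllowOverride Options" is in effect:
+ *
+ *   <Directory "/usr/local/apache2/htdocs">
+ *       Example
+ *   </Directory>
+ */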
+/*--------------------------------------------------------------------------*/
+/* */
+/* Finally, the list of callback routines and data structures that provide */
+/* the static hooks into our module from the other parts of the server. */
+/* */
+/*--------------------------------------------------------------------------*/
+/*
+ * Module definition for configuration. If a particular callback is not
+ * needed, replace its routine name below with the word NULL.
+ */
+module AP_MODULE_DECLARE_DATA example_module =
+{
+ STANDARD20_MODULE_STUFF,
+ x_create_dir_config, /* per-directory config creator */
+ x_merge_dir_config, /* dir config merger */
+ x_create_server_config, /* server config creator */
+ x_merge_server_config, /* server config merger */
+ x_cmds, /* command table */
+ x_register_hooks, /* set up other request processing hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.c b/rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.c
new file mode 100644
index 00000000..0812b976
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.c
@@ -0,0 +1,1198 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Rules for managing obj->refcount:
+ * refcount should be incremented when an object is placed in the cache. Insertion
+ * of an object into the cache and the refcount increment should happen under
+ * protection of the sconf->lock.
+ *
+ * refcount should be decremented when the object is removed from the cache.
+ * Object should be removed from the cache and the refcount decremented while
+ * under protection of the sconf->lock.
+ *
+ * refcount should be incremented when an object is retrieved from the cache
+ * by a worker thread. The retrieval/find operation and refcount increment
+ * should occur under protection of the sconf->lock.
+ *
+ * refcount can be atomically decremented w/o protection of the sconf->lock
+ * by worker threads.
+ *
+ * Any object whose refcount drops to 0 should be freed/cleaned up. A refcount
+ * of 0 means the object is not in the cache and no worker threads are accessing
+ * it.
+ */
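+/*
+ * A minimal sketch of the pattern described above, for illustration only
+ * (the real sequences live in create_entity(), open_entity() and
+ * decrement_refcount() below):
+ *
+ *   apr_thread_mutex_lock(sconf->lock);
+ *   cache_insert(sconf->cache_cache, obj);
+ *   apr_atomic_inc(&obj->refcount);         <- +1 for the cache's reference
+ *   apr_thread_mutex_unlock(sconf->lock);
+ *
+ *   ...and later, when a worker thread is done with an object it found:
+ *
+ *   if (!apr_atomic_dec(&obj->refcount)) {  <- safe without sconf->lock
+ *       cleanup_cache_object(obj);
+ *   }
+ */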
+#define CORE_PRIVATE
+#include "mod_cache.h"
+#include "cache_pqueue.h"
+#include "cache_cache.h"
+#include "ap_provider.h"
+#include "ap_mpm.h"
+#include "apr_thread_mutex.h"
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#if !APR_HAS_THREADS
+#error This module does not currently compile unless you have a thread-capable APR. Sorry!
+#endif
+
+module AP_MODULE_DECLARE_DATA mem_cache_module;
+
+typedef enum {
+ CACHE_TYPE_FILE = 1,
+ CACHE_TYPE_HEAP,
+ CACHE_TYPE_MMAP
+} cache_type_e;
+
+typedef struct {
+ char* hdr;
+ char* val;
+} cache_header_tbl_t;
+
+typedef struct mem_cache_object {
+ cache_type_e type;
+ apr_ssize_t num_header_out;
+ apr_ssize_t num_err_header_out;
+ apr_ssize_t num_subprocess_env;
+ apr_ssize_t num_notes;
+ apr_ssize_t num_req_hdrs;
+ cache_header_tbl_t *header_out;
+ cache_header_tbl_t *err_header_out;
+ cache_header_tbl_t *subprocess_env;
+ cache_header_tbl_t *notes;
+ cache_header_tbl_t *req_hdrs; /* for Vary negotiation */
+ apr_size_t m_len;
+ void *m;
+ apr_os_file_t fd;
+ apr_int32_t flags; /* File open flags */
+ long priority; /**< the priority of this entry */
+ long total_refs; /**< total number of references this entry has had */
+
+ apr_uint32_t pos; /**< the position of this entry in the cache */
+
+} mem_cache_object_t;
+
+typedef struct {
+ apr_thread_mutex_t *lock;
+ cache_cache_t *cache_cache;
+
+ /* Fields set by config directives */
+ apr_size_t min_cache_object_size; /* in bytes */
+ apr_size_t max_cache_object_size; /* in bytes */
+ apr_size_t max_cache_size; /* in bytes */
+ apr_size_t max_object_cnt;
+ cache_pqueue_set_priority cache_remove_algorithm;
+
+ /* maximum amount of data to buffer on a streamed response where
+ * we haven't yet seen EOS */
+ apr_off_t max_streaming_buffer_size;
+} mem_cache_conf;
+static mem_cache_conf *sconf;
+
+#define DEFAULT_MAX_CACHE_SIZE 100*1024
+#define DEFAULT_MIN_CACHE_OBJECT_SIZE 0
+#define DEFAULT_MAX_CACHE_OBJECT_SIZE 10000
+#define DEFAULT_MAX_OBJECT_CNT 1009
+#define DEFAULT_MAX_STREAMING_BUFFER_SIZE 100000
+#define CACHEFILE_LEN 20
+
+/* Forward declarations */
+static int remove_entity(cache_handle_t *h);
+static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info *i);
+static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b);
+static apr_status_t recall_headers(cache_handle_t *h, request_rec *r);
+static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
+
+static void cleanup_cache_object(cache_object_t *obj);
+
+static long memcache_get_priority(void*a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+
+ return mobj->priority;
+}
+
+static void memcache_inc_frequency(void*a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+
+ mobj->total_refs++;
+ mobj->priority = 0;
+}
+
+static void memcache_set_pos(void *a, apr_ssize_t pos)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+
+ apr_atomic_set(&mobj->pos, pos);
+}
+static apr_ssize_t memcache_get_pos(void *a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+
+ return apr_atomic_read(&mobj->pos);
+}
+
+static apr_size_t memcache_cache_get_size(void*a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+ return mobj->m_len;
+}
+/** callback to get the key of an item */
+static const char* memcache_cache_get_key(void*a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ return obj->key;
+}
+/**
+ * memcache_cache_free()
+ * memcache_cache_free is a callback that is only invoked by a thread
+ * running in cache_insert(). cache_insert() runs under protection
+ * of sconf->lock. By the time this function has been entered, the cache_object
+ * has been ejected from the cache. Decrement the refcount and, if it drops
+ * to 0, clean up the cache object.
+ */
+static void memcache_cache_free(void*a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+
+ /* Decrement the refcount to account for the object being ejected
+ * from the cache. If the refcount is 0, free the object.
+ */
+ if (!apr_atomic_dec(&obj->refcount)) {
+ cleanup_cache_object(obj);
+ }
+}
+/*
+ * functions return a 'negative' score since priority queues
+ * dequeue the object with the highest value first
+ */
+static long memcache_lru_algorithm(long queue_clock, void *a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+ if (mobj->priority == 0)
+ mobj->priority = queue_clock - mobj->total_refs;
+
+ /*
+ * a 'proper' LRU function would just be
+ * mobj->priority = mobj->total_refs;
+ */
+ return mobj->priority;
+}
+
+static long memcache_gdsf_algorithm(long queue_clock, void *a)
+{
+ cache_object_t *obj = (cache_object_t *)a;
+ mem_cache_object_t *mobj = obj->vobj;
+
+ if (mobj->priority == 0)
+ mobj->priority = queue_clock -
+ (long)(mobj->total_refs*1000 / mobj->m_len);
+
+ return mobj->priority;
+}
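+
+/*
+ * Worked example for the GDSF score above (hypothetical numbers, assuming
+ * queue_clock = 100): an object with total_refs = 10 and m_len = 1000 bytes
+ * scores 100 - (10*1000)/1000 = 90, while an object with total_refs = 2 and
+ * m_len = 100000 bytes scores 100 - (2*1000)/100000 = 100 (integer division).
+ * Since the queue dequeues the highest score first, the large, rarely
+ * referenced object is ejected before the small, frequently referenced one.
+ */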
+
+static void cleanup_cache_object(cache_object_t *obj)
+{
+ mem_cache_object_t *mobj = obj->vobj;
+
+ /* TODO:
+ * We desperately need a more efficient way of allocating objects. We're
+ * making way too many malloc calls to create a fully populated
+ * cache object...
+ */
+
+ /* Cleanup the cache_object_t */
+ if (obj->key) {
+ free(obj->key);
+ }
+ if (obj->info.content_type) {
+ free(obj->info.content_type);
+ }
+ if (obj->info.etag) {
+ free(obj->info.etag);
+ }
+ if (obj->info.lastmods) {
+ free(obj->info.lastmods);
+ }
+ if (obj->info.filename) {
+ free(obj->info.filename);
+ }
+
+ free(obj);
+
+ /* Cleanup the mem_cache_object_t */
+ if (mobj) {
+ if (mobj->type == CACHE_TYPE_HEAP && mobj->m) {
+ free(mobj->m);
+ }
+ if (mobj->type == CACHE_TYPE_FILE && mobj->fd) {
+#ifdef WIN32
+ CloseHandle(mobj->fd);
+#else
+ close(mobj->fd);
+#endif
+ }
+ if (mobj->header_out) {
+ if (mobj->header_out[0].hdr)
+ free(mobj->header_out[0].hdr);
+ free(mobj->header_out);
+ }
+ if (mobj->err_header_out) {
+ if (mobj->err_header_out[0].hdr)
+ free(mobj->err_header_out[0].hdr);
+ free(mobj->err_header_out);
+ }
+ if (mobj->subprocess_env) {
+ if (mobj->subprocess_env[0].hdr)
+ free(mobj->subprocess_env[0].hdr);
+ free(mobj->subprocess_env);
+ }
+ if (mobj->notes) {
+ if (mobj->notes[0].hdr)
+ free(mobj->notes[0].hdr);
+ free(mobj->notes);
+ }
+ if (mobj->req_hdrs) {
+ if (mobj->req_hdrs[0].hdr)
+ free(mobj->req_hdrs[0].hdr);
+ free(mobj->req_hdrs);
+ }
+ free(mobj);
+ }
+}
+static apr_status_t decrement_refcount(void *arg)
+{
+ cache_object_t *obj = (cache_object_t *) arg;
+
+ /* If obj->complete is not set, the cache update failed and the
+ * object needs to be removed from the cache then cleaned up.
+ * The garbage collector may have ejected the object from the
+ * cache already, so make sure it is really still in the cache
+ * before attempting to remove it.
+ */
+ if (!obj->complete) {
+ cache_object_t *tobj = NULL;
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+ tobj = cache_find(sconf->cache_cache, obj->key);
+ if (tobj == obj) {
+ cache_remove(sconf->cache_cache, obj);
+ apr_atomic_dec(&obj->refcount);
+ }
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+ }
+
+ /* If the refcount drops to 0, cleanup the cache object */
+ if (!apr_atomic_dec(&obj->refcount)) {
+ cleanup_cache_object(obj);
+ }
+ return APR_SUCCESS;
+}
+static apr_status_t cleanup_cache_mem(void *sconfv)
+{
+ cache_object_t *obj;
+ mem_cache_conf *co = (mem_cache_conf*) sconfv;
+
+ if (!co) {
+ return APR_SUCCESS;
+ }
+ if (!co->cache_cache) {
+ return APR_SUCCESS;
+ }
+
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+ obj = cache_pop(co->cache_cache);
+ while (obj) {
+ /* Iterate over the cache and clean up each unreferenced entry */
+ if (!apr_atomic_dec(&obj->refcount)) {
+ cleanup_cache_object(obj);
+ }
+ obj = cache_pop(co->cache_cache);
+ }
+
+ /* Cache is empty, free the cache table */
+ cache_free(co->cache_cache);
+
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+ return APR_SUCCESS;
+}
+/*
+ * TODO: enable directives to be overridden in various containers
+ */
+static void *create_cache_config(apr_pool_t *p, server_rec *s)
+{
+ sconf = apr_pcalloc(p, sizeof(mem_cache_conf));
+
+ sconf->min_cache_object_size = DEFAULT_MIN_CACHE_OBJECT_SIZE;
+ sconf->max_cache_object_size = DEFAULT_MAX_CACHE_OBJECT_SIZE;
+ /* Number of objects in the cache */
+ sconf->max_object_cnt = DEFAULT_MAX_OBJECT_CNT;
+ /* Size of the cache in bytes */
+ sconf->max_cache_size = DEFAULT_MAX_CACHE_SIZE;
+ sconf->cache_cache = NULL;
+ sconf->cache_remove_algorithm = memcache_gdsf_algorithm;
+ sconf->max_streaming_buffer_size = DEFAULT_MAX_STREAMING_BUFFER_SIZE;
+
+ return sconf;
+}
+
+static int create_entity(cache_handle_t *h, cache_type_e type_e,
+ request_rec *r, const char *key, apr_off_t len)
+{
+ cache_object_t *obj, *tmp_obj;
+ mem_cache_object_t *mobj;
+ apr_size_t key_len;
+
+ if (len == -1) {
+ /* Caching a streaming response. Assume the response is
+ * less than or equal to max_streaming_buffer_size. We will
+ * correct all the cache size counters in store_body once
+ * we know exactly how much we are caching.
+ */
+ len = sconf->max_streaming_buffer_size;
+ }
+
+ /* Note: cache_insert() will automatically garbage collect
+ * objects from the cache if the max_cache_size threshold is
+ * exceeded. This means mod_mem_cache does not need to implement
+ * max_cache_size checks.
+ */
+ if (len < sconf->min_cache_object_size ||
+ len > sconf->max_cache_object_size) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "mem_cache: URL %s failed the size check and will not be cached.",
+ key);
+ return DECLINED;
+ }
+
+ if (type_e == CACHE_TYPE_FILE) {
+ /* CACHE_TYPE_FILE is only valid for local content handled by the
+ * default handler. Need a better way to check if the file is
+ * local or not.
+ */
+ if (!r->filename) {
+ return DECLINED;
+ }
+ }
+
+ /* Allocate and initialize cache_object_t */
+ obj = calloc(1, sizeof(*obj));
+ if (!obj) {
+ return DECLINED;
+ }
+ key_len = strlen(key) + 1;
+ obj->key = malloc(key_len);
+ if (!obj->key) {
+ cleanup_cache_object(obj);
+ return DECLINED;
+ }
+ memcpy(obj->key, key, key_len);
+ /* Safe cast: We tested < sconf->max_cache_object_size above */
+ obj->info.len = (apr_size_t)len;
+
+ /* Allocate and init mem_cache_object_t */
+ mobj = calloc(1, sizeof(*mobj));
+ if (!mobj) {
+ cleanup_cache_object(obj);
+ return DECLINED;
+ }
+
+ /* Finish initing the cache object */
+ apr_atomic_set(&obj->refcount, 1);
+ mobj->total_refs = 1;
+ obj->complete = 0;
+ obj->vobj = mobj;
+ /* Safe cast: We tested < sconf->max_cache_object_size above */
+ mobj->m_len = (apr_size_t)len;
+ mobj->type = type_e;
+
+ /* Place the cache_object_t into the hash table.
+ * Note: Perhaps we should wait to put the object in the
+ * hash table until the object is complete? I add the object here to
+ * avoid multiple threads attempting to cache the same content only
+ * to discover at the very end that only one of them will succeed.
+ * Furthermore, adding the cache object to the table at the end could
+ * open up a subtle but easy to exploit DoS hole: someone could request
+ * a very large file with multiple requests. Better to detect this here
+ * rather than after the cache object has been completely built and
+ * initialized...
+ * XXX Need a way to insert into the cache w/o such coarse grained locking
+ */
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+ tmp_obj = (cache_object_t *) cache_find(sconf->cache_cache, key);
+
+ if (!tmp_obj) {
+ cache_insert(sconf->cache_cache, obj);
+ /* Add a refcount to account for the reference by the
+ * hashtable in the cache. Refcount should be 2 now, one
+ * for this thread, and one for the cache.
+ */
+ apr_atomic_inc(&obj->refcount);
+ }
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+
+ if (tmp_obj) {
+ /* This thread collided with another thread loading the same object
+ * into the cache at the same time. Defer to the other thread which
+ * is further along.
+ */
+ cleanup_cache_object(obj);
+ return DECLINED;
+ }
+
+ apr_pool_cleanup_register(r->pool, obj, decrement_refcount,
+ apr_pool_cleanup_null);
+
+ /* Populate the cache handle */
+ h->cache_obj = obj;
+
+ return OK;
+}
+
+static int create_mem_entity(cache_handle_t *h, request_rec *r,
+ const char *key, apr_off_t len)
+{
+ return create_entity(h, CACHE_TYPE_HEAP, r, key, len);
+}
+
+static int create_fd_entity(cache_handle_t *h, request_rec *r,
+ const char *key, apr_off_t len)
+{
+ return create_entity(h, CACHE_TYPE_FILE, r, key, len);
+}
+
+static int open_entity(cache_handle_t *h, request_rec *r, const char *key)
+{
+ cache_object_t *obj;
+
+ /* Look up entity keyed to 'url' */
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+ obj = (cache_object_t *) cache_find(sconf->cache_cache, key);
+ if (obj) {
+ if (obj->complete) {
+ request_rec *rmain=r, *rtmp;
+ apr_atomic_inc(&obj->refcount);
+ /* cache is worried about overall counts, not 'open' ones */
+ cache_update(sconf->cache_cache, obj);
+
+ /* If this is a subrequest, register the cleanup against
+ * the main request. This will prevent the cache object
+ * from being cleaned up from under the request after the
+ * subrequest is destroyed.
+ */
+ rtmp = r;
+ while (rtmp) {
+ rmain = rtmp;
+ rtmp = rmain->main;
+ }
+ apr_pool_cleanup_register(rmain->pool, obj, decrement_refcount,
+ apr_pool_cleanup_null);
+ }
+ else {
+ obj = NULL;
+ }
+ }
+
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+
+ if (!obj) {
+ return DECLINED;
+ }
+
+ /* Initialize the cache_handle */
+ h->cache_obj = obj;
+ h->req_hdrs = NULL; /* Pick these up in recall_headers() */
+ return OK;
+}
+
+/* remove_entity()
+ * Notes:
+ * refcount should be at least 1 upon entry to this function to account
+ * for this thread's reference to the object. If the refcount is 1, then
+ * object has been removed from the cache by another thread and this thread
+ * is the last thread accessing the object.
+ */
+static int remove_entity(cache_handle_t *h)
+{
+ cache_object_t *obj = h->cache_obj;
+ cache_object_t *tobj = NULL;
+
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+
+ /* If the entity is still in the cache, remove it and decrement the
+ * refcount. If the entity is not in the cache, do nothing. In both cases
+ * decrement_refcount called by the last thread referencing the object will
+ * trigger the cleanup.
+ */
+ tobj = cache_find(sconf->cache_cache, obj->key);
+ if (tobj == obj) {
+ cache_remove(sconf->cache_cache, obj);
+ apr_atomic_dec(&obj->refcount);
+ }
+
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+
+ return OK;
+}
+static apr_status_t serialize_table(cache_header_tbl_t **obj,
+ apr_ssize_t *nelts,
+ apr_table_t *table)
+{
+ const apr_array_header_t *elts_arr = apr_table_elts(table);
+ apr_table_entry_t *elts = (apr_table_entry_t *) elts_arr->elts;
+ apr_ssize_t i;
+ apr_size_t len = 0;
+ apr_size_t idx = 0;
+ char *buf;
+
+ *nelts = elts_arr->nelts;
+ if (*nelts == 0 ) {
+ *obj=NULL;
+ return APR_SUCCESS;
+ }
+ *obj = malloc(sizeof(cache_header_tbl_t) * elts_arr->nelts);
+ if (NULL == *obj) {
+ return APR_ENOMEM;
+ }
+ for (i = 0; i < elts_arr->nelts; ++i) {
+ len += strlen(elts[i].key);
+ len += strlen(elts[i].val);
+ len += 2; /* Extra space for NULL string terminator for key and val */
+ }
+
+ /* Transfer the headers into a contiguous memory block */
+ buf = malloc(len);
+ if (!buf) {
+ *obj = NULL;
+ return APR_ENOMEM;
+ }
+
+ for (i = 0; i < *nelts; ++i) {
+ (*obj)[i].hdr = &buf[idx];
+ len = strlen(elts[i].key) + 1; /* Include NULL terminator */
+ memcpy(&buf[idx], elts[i].key, len);
+ idx+=len;
+
+ (*obj)[i].val = &buf[idx];
+ len = strlen(elts[i].val) + 1;
+ memcpy(&buf[idx], elts[i].val, len);
+ idx+=len;
+ }
+ return APR_SUCCESS;
+}
+static int unserialize_table( cache_header_tbl_t *ctbl,
+ int num_headers,
+ apr_table_t *t )
+{
+ int i;
+
+ for (i = 0; i < num_headers; ++i) {
+ apr_table_addn(t, ctbl[i].hdr, ctbl[i].val);
+ }
+
+ return APR_SUCCESS;
+}
+/* Define request processing hook handlers */
+/* remove_url()
+ * Notes:
+ */
+static int remove_url(const char *key)
+{
+ cache_object_t *obj;
+ int cleanup = 0;
+
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+
+ obj = cache_find(sconf->cache_cache, key);
+ if (obj) {
+ cache_remove(sconf->cache_cache, obj);
+ /* For performance, cleanup cache object after releasing the lock */
+ cleanup = !apr_atomic_dec(&obj->refcount);
+ }
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+
+ if (cleanup) {
+ cleanup_cache_object(obj);
+ }
+
+ return OK;
+}
+
+static apr_status_t recall_headers(cache_handle_t *h, request_rec *r)
+{
+ int rc;
+ mem_cache_object_t *mobj = (mem_cache_object_t*) h->cache_obj->vobj;
+
+ h->req_hdrs = apr_table_make(r->pool, mobj->num_req_hdrs);
+ h->resp_hdrs = apr_table_make(r->pool, mobj->num_header_out);
+ h->resp_err_hdrs = apr_table_make(r->pool, mobj->num_err_header_out);
+ /* ### FIXME: These two items should not be saved. */
+ r->subprocess_env = apr_table_make(r->pool, mobj->num_subprocess_env);
+ r->notes = apr_table_make(r->pool, mobj->num_notes);
+
+ rc = unserialize_table(mobj->req_hdrs,
+ mobj->num_req_hdrs,
+ h->req_hdrs);
+ rc = unserialize_table( mobj->header_out,
+ mobj->num_header_out,
+ h->resp_hdrs);
+ rc = unserialize_table( mobj->err_header_out,
+ mobj->num_err_header_out,
+ h->resp_err_hdrs);
+ rc = unserialize_table( mobj->subprocess_env,
+ mobj->num_subprocess_env,
+ r->subprocess_env);
+ rc = unserialize_table( mobj->notes,
+ mobj->num_notes,
+ r->notes);
+
+ /* Content-Type: header may not be set if content is local since
+ * CACHE_IN runs before header filters....
+ */
+ h->content_type = h->cache_obj->info.content_type;
+ h->status = h->cache_obj->info.status;
+
+ return rc;
+}
+
+static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+ mem_cache_object_t *mobj = (mem_cache_object_t*) h->cache_obj->vobj;
+
+ if (mobj->type == CACHE_TYPE_FILE) {
+ /* CACHE_TYPE_FILE */
+ apr_file_t *file;
+ apr_os_file_put(&file, &mobj->fd, mobj->flags, p);
+ b = apr_bucket_file_create(file, 0, mobj->m_len, p, bb->bucket_alloc);
+ }
+ else {
+ /* CACHE_TYPE_HEAP */
+ b = apr_bucket_immortal_create(mobj->m, mobj->m_len, bb->bucket_alloc);
+ }
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_eos_create(bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+
+ return APR_SUCCESS;
+}
+
+
+static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info *info)
+{
+ cache_object_t *obj = h->cache_obj;
+ mem_cache_object_t *mobj = (mem_cache_object_t*) obj->vobj;
+ int rc;
+
+ /*
+ * The cache needs to keep track of the following information:
+ * - Date, LastMod, Version, ReqTime, RespTime, ContentLength
+ * - The original request headers (for Vary)
+ * - The original response headers (for returning with a cached response)
+ * - The body of the message
+ */
+ rc = serialize_table(&mobj->req_hdrs,
+ &mobj->num_req_hdrs,
+ r->headers_in);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
+
+ /* Precompute how much storage we need to hold the headers */
+ rc = serialize_table(&mobj->header_out,
+ &mobj->num_header_out,
+ ap_cache_cacheable_hdrs_out(r->pool, r->headers_out,
+ r->server));
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
+ rc = serialize_table(&mobj->err_header_out,
+ &mobj->num_err_header_out,
+ ap_cache_cacheable_hdrs_out(r->pool,
+ r->err_headers_out,
+ r->server));
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
+ rc = serialize_table(&mobj->subprocess_env,
+ &mobj->num_subprocess_env,
+ r->subprocess_env );
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
+
+ rc = serialize_table(&mobj->notes, &mobj->num_notes, r->notes);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
+
+ /* Init the info struct */
+ obj->info.status = info->status;
+ if (info->date) {
+ obj->info.date = info->date;
+ }
+ if (info->lastmod) {
+ obj->info.lastmod = info->lastmod;
+ }
+ if (info->response_time) {
+ obj->info.response_time = info->response_time;
+ }
+ if (info->request_time) {
+ obj->info.request_time = info->request_time;
+ }
+ if (info->expire) {
+ obj->info.expire = info->expire;
+ }
+ if (info->content_type) {
+ apr_size_t len = strlen(info->content_type) + 1;
+ obj->info.content_type = (char*) malloc(len);
+ if (!obj->info.content_type) {
+ return APR_ENOMEM;
+ }
+ memcpy(obj->info.content_type, info->content_type, len);
+ }
+ if (info->etag) {
+ apr_size_t len = strlen(info->etag) + 1;
+ obj->info.etag = (char*) malloc(len);
+ if (!obj->info.etag) {
+ return APR_ENOMEM;
+ }
+ memcpy(obj->info.etag, info->etag, len);
+ }
+ if (info->lastmods) {
+ apr_size_t len = strlen(info->lastmods) + 1;
+ obj->info.lastmods = (char*) malloc(len);
+ if (!obj->info.lastmods) {
+ return APR_ENOMEM;
+ }
+ memcpy(obj->info.lastmods, info->lastmods, len);
+ }
+ if ( info->filename) {
+ apr_size_t len = strlen(info->filename) + 1;
+ obj->info.filename = (char*) malloc(len);
+ if (!obj->info.filename ) {
+ return APR_ENOMEM;
+ }
+ memcpy(obj->info.filename, info->filename, len);
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b)
+{
+ apr_status_t rv;
+ cache_object_t *obj = h->cache_obj;
+ cache_object_t *tobj = NULL;
+ mem_cache_object_t *mobj = (mem_cache_object_t*) obj->vobj;
+ apr_read_type_e eblock = APR_BLOCK_READ;
+ apr_bucket *e;
+ char *cur;
+ int eos = 0;
+
+ if (mobj->type == CACHE_TYPE_FILE) {
+ apr_file_t *file = NULL;
+ int fd = 0;
+ int other = 0;
+
+ /* We can cache an open file descriptor if:
+ * - the brigade contains one and only one file_bucket &&
+ * - the brigade is complete &&
+ * - the file_bucket is the last data bucket in the brigade
+ */
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (APR_BUCKET_IS_EOS(e)) {
+ eos = 1;
+ }
+ else if (APR_BUCKET_IS_FILE(e)) {
+ apr_bucket_file *a = e->data;
+ fd++;
+ file = a->fd;
+ }
+ else {
+ other++;
+ }
+ }
+ if (fd == 1 && !other && eos) {
+ apr_file_t *tmpfile;
+ const char *name;
+ /* Open a new XTHREAD handle to the file */
+ apr_file_name_get(&name, file);
+ mobj->flags = ((APR_SENDFILE_ENABLED & apr_file_flags_get(file))
+ | APR_READ | APR_BINARY | APR_XTHREAD | APR_FILE_NOCLEANUP);
+ rv = apr_file_open(&tmpfile, name, mobj->flags,
+ APR_OS_DEFAULT, r->pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ apr_file_inherit_unset(tmpfile);
+ apr_os_file_get(&(mobj->fd), tmpfile);
+
+ /* Open for business */
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ "mem_cache: Cached file: %s with key: %s", name, obj->key);
+ obj->complete = 1;
+ return APR_SUCCESS;
+ }
+
+ /* Content not suitable for fd caching. Cache in-memory instead. */
+ mobj->type = CACHE_TYPE_HEAP;
+ }
+
+ /*
+ * FD caching is not enabled or the content was not
+ * suitable for fd caching.
+ */
+ if (mobj->m == NULL) {
+ mobj->m = malloc(mobj->m_len);
+ if (mobj->m == NULL) {
+ return APR_ENOMEM;
+ }
+ obj->count = 0;
+ }
+ cur = (char*) mobj->m + obj->count;
+
+ /* Iterate across the brigade and populate the cache storage */
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ const char *s;
+ apr_size_t len;
+
+ if (APR_BUCKET_IS_EOS(e)) {
+ if (mobj->m_len > obj->count) {
+ /* Caching a streamed response. Reallocate a buffer of the
+ * correct size and copy the streamed response into that
+ * buffer */
+ char *buf = malloc(obj->count);
+ if (!buf) {
+ return APR_ENOMEM;
+ }
+ memcpy(buf, mobj->m, obj->count);
+ free(mobj->m);
+ mobj->m = buf;
+
+ /* Now comes the crufty part... there is no way to tell the
+ * cache that the size of the object has changed. We need
+ * to remove the object, update the size and re-add the
+ * object, all under protection of the lock.
+ */
+ if (sconf->lock) {
+ apr_thread_mutex_lock(sconf->lock);
+ }
+ /* Has the object been ejected from the cache?
+ */
+ tobj = (cache_object_t *) cache_find(sconf->cache_cache, obj->key);
+ if (tobj == obj) {
+ /* Object is still in the cache, remove it, update the len field then
+ * replace it under protection of sconf->lock.
+ */
+ cache_remove(sconf->cache_cache, obj);
+ /* For illustration, cache no longer has reference to the object
+ * so decrement the refcount
+ * apr_atomic_dec(&obj->refcount);
+ */
+ mobj->m_len = obj->count;
+
+ cache_insert(sconf->cache_cache, obj);
+ /* For illustration, cache now has reference to the object, so
+ * increment the refcount
+ * apr_atomic_inc(&obj->refcount);
+ */
+ }
+ else if (tobj) {
+ /* Different object with the same key found in the cache. Doing nothing
+ * here will cause the object refcount to drop to 0 in decrement_refcount
+ * and the object will be cleaned up.
+ */
+
+ } else {
+ /* Object has been ejected from the cache, add it back to the cache */
+ mobj->m_len = obj->count;
+ cache_insert(sconf->cache_cache, obj);
+ apr_atomic_inc(&obj->refcount);
+ }
+
+ if (sconf->lock) {
+ apr_thread_mutex_unlock(sconf->lock);
+ }
+ }
+ /* Open for business */
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ "mem_cache: Cached url: %s", obj->key);
+ obj->complete = 1;
+ break;
+ }
+ rv = apr_bucket_read(e, &s, &len, eblock);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ if (len) {
+ /* Check for buffer overflow */
+ if ((obj->count + len) > mobj->m_len) {
+ return APR_ENOMEM;
+ }
+ else {
+ memcpy(cur, s, len);
+ cur+=len;
+ obj->count+=len;
+ }
+ }
+ /* This should not fail, but if it does, we are in BIG trouble
+ * because we just stomped all over the heap.
+ */
+ AP_DEBUG_ASSERT(obj->count <= mobj->m_len);
+ }
+ return APR_SUCCESS;
+}
+/**
+ * Configuration and start-up
+ */
+static int mem_cache_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ int threaded_mpm;
+
+ /* Sanity check the cache configuration */
+ if (sconf->min_cache_object_size >= sconf->max_cache_object_size) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
+ "MCacheMaxObjectSize must be greater than MCacheMinObjectSize");
+ return DONE;
+ }
+ if (sconf->max_cache_object_size >= sconf->max_cache_size) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
+ "MCacheSize must be greater than MCacheMaxObjectSize");
+ return DONE;
+ }
+ if (sconf->max_streaming_buffer_size > sconf->max_cache_object_size) {
+ /* Issue a notice only if something other than the default config
+ * is being used */
+ if (sconf->max_streaming_buffer_size != DEFAULT_MAX_STREAMING_BUFFER_SIZE &&
+ sconf->max_cache_object_size != DEFAULT_MAX_CACHE_OBJECT_SIZE) {
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "MCacheMaxStreamingBuffer must be less than or equal to MCacheMaxObjectSize. "
+ "Resetting MCacheMaxStreamingBuffer to MCacheMaxObjectSize.");
+ }
+ sconf->max_streaming_buffer_size = sconf->max_cache_object_size;
+ }
+ if (sconf->max_streaming_buffer_size < sconf->min_cache_object_size) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "MCacheMaxStreamingBuffer must be greater than or equal to MCacheMinObjectSize. "
+ "Resetting MCacheMaxStreamingBuffer to MCacheMinObjectSize.");
+ sconf->max_streaming_buffer_size = sconf->min_cache_object_size;
+ }
+ ap_mpm_query(AP_MPMQ_IS_THREADED, &threaded_mpm);
+ if (threaded_mpm) {
+ apr_thread_mutex_create(&sconf->lock, APR_THREAD_MUTEX_DEFAULT, p);
+ }
+
+ sconf->cache_cache = cache_init(sconf->max_object_cnt,
+ sconf->max_cache_size,
+ memcache_get_priority,
+ sconf->cache_remove_algorithm,
+ memcache_get_pos,
+ memcache_set_pos,
+ memcache_inc_frequency,
+ memcache_cache_get_size,
+ memcache_cache_get_key,
+ memcache_cache_free);
+ apr_pool_cleanup_register(p, sconf, cleanup_cache_mem, apr_pool_cleanup_null);
+
+ if (sconf->cache_cache)
+ return OK;
+
+ return -1;
+
+}
+
+static const char
+*set_max_cache_size(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ apr_size_t val;
+
+ if (sscanf(arg, "%" APR_SIZE_T_FMT, &val) != 1) {
+ return "MCacheSize argument must be an integer representing the max cache size in KBytes.";
+ }
+ sconf->max_cache_size = val*1024;
+ return NULL;
+}
+static const char
+*set_min_cache_object_size(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ apr_size_t val;
+
+ if (sscanf(arg, "%" APR_SIZE_T_FMT, &val) != 1) {
+ return "MCacheMinObjectSize value must be an integer (bytes)";
+ }
+ sconf->min_cache_object_size = val;
+ return NULL;
+}
+static const char
+*set_max_cache_object_size(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ apr_size_t val;
+
+ if (sscanf(arg, "%" APR_SIZE_T_FMT, &val) != 1) {
+ return "MCacheMaxObjectSize value must be an integer (bytes)";
+ }
+ sconf->max_cache_object_size = val;
+ return NULL;
+}
+static const char
+*set_max_object_count(cmd_parms *parms, void *in_struct_ptr, const char *arg)
+{
+ apr_size_t val;
+
+ if (sscanf(arg, "%" APR_SIZE_T_FMT, &val) != 1) {
+ return "MCacheMaxObjectCount value must be an integer";
+ }
+ sconf->max_object_cnt = val;
+ return NULL;
+}
+
+static const char
+*set_cache_removal_algorithm(cmd_parms *parms, void *name, const char *arg)
+{
+ if (strcasecmp("LRU", arg)) {
+ sconf->cache_remove_algorithm = memcache_lru_algorithm;
+ }
+ else {
+ if (strcasecmp("GDSF", arg)) {
+ sconf->cache_remove_algorithm = memcache_gdsf_algorithm;
+ }
+ else {
+ return "currently implemented algorithms are LRU and GDSF";
+ }
+ }
+ return NULL;
+}
+
+static const char *set_max_streaming_buffer(cmd_parms *parms, void *dummy,
+ const char *arg)
+{
+#if 0
+ char *err;
+ if (apr_strtoff(&sconf->max_streaming_buffer_size, arg, &err, 10) || *err) {
+ return "MCacheMaxStreamingBuffer value must be a number";
+ }
+#else
+ sconf->max_streaming_buffer_size = apr_atoi64(arg);
+#endif
+ return NULL;
+}
+
+static const command_rec cache_cmds[] =
+{
+ AP_INIT_TAKE1("MCacheSize", set_max_cache_size, NULL, RSRC_CONF,
+ "The maximum amount of memory used by the cache in KBytes"),
+ AP_INIT_TAKE1("MCacheMaxObjectCount", set_max_object_count, NULL, RSRC_CONF,
+ "The maximum number of objects allowed to be placed in the cache"),
+ AP_INIT_TAKE1("MCacheMinObjectSize", set_min_cache_object_size, NULL, RSRC_CONF,
+ "The minimum size (in bytes) of an object to be placed in the cache"),
+ AP_INIT_TAKE1("MCacheMaxObjectSize", set_max_cache_object_size, NULL, RSRC_CONF,
+ "The maximum size (in bytes) of an object to be placed in the cache"),
+ AP_INIT_TAKE1("MCacheRemovalAlgorithm", set_cache_removal_algorithm, NULL, RSRC_CONF,
+ "The algorithm used to remove entries from the cache (default: GDSF)"),
+ AP_INIT_TAKE1("MCacheMaxStreamingBuffer", set_max_streaming_buffer, NULL, RSRC_CONF,
+ "Maximum number of bytes of content to buffer for a streamed response"),
+ {NULL}
+};
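+
+/*
+ * Illustrative configuration sketch for the directives registered above
+ * (assumed values, not taken from this tree); the cache itself is typically
+ * switched on via mod_cache, e.g. "CacheEnable mem /":
+ *
+ *   MCacheSize                 4096
+ *   MCacheMaxObjectCount       1000
+ *   MCacheMinObjectSize        1
+ *   MCacheMaxObjectSize        65536
+ *   MCacheRemovalAlgorithm     GDSF
+ *   MCacheMaxStreamingBuffer   65536
+ */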
+
+static const cache_provider cache_mem_provider =
+{
+ &remove_entity,
+ &store_headers,
+ &store_body,
+ &recall_headers,
+ &recall_body,
+ &create_mem_entity,
+ &open_entity,
+ &remove_url,
+};
+
+static const cache_provider cache_fd_provider =
+{
+ &remove_entity,
+ &store_headers,
+ &store_body,
+ &recall_headers,
+ &recall_body,
+ &create_fd_entity,
+ &open_entity,
+ &remove_url,
+};
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(mem_cache_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ /* cache initializer */
+ /* cache_hook_init(cache_mem_init, NULL, NULL, APR_HOOK_MIDDLE); */
+ /*
+ cache_hook_create_entity(create_entity, NULL, NULL, APR_HOOK_MIDDLE);
+ cache_hook_open_entity(open_entity, NULL, NULL, APR_HOOK_MIDDLE);
+ cache_hook_remove_url(remove_url, NULL, NULL, APR_HOOK_MIDDLE);
+ */
+ ap_register_provider(p, CACHE_PROVIDER_GROUP, "mem", "0",
+ &cache_mem_provider);
+ ap_register_provider(p, CACHE_PROVIDER_GROUP, "fd", "0",
+ &cache_fd_provider);
+}
+
+module AP_MODULE_DECLARE_DATA mem_cache_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ create_cache_config, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ cache_cmds, /* command apr_table_t */
+ register_hooks
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.dsp
new file mode 100644
index 00000000..98a27b76
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/mod_mem_cache.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_mem_cache" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_mem_cache - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mem_cache.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mem_cache.mak" CFG="mod_mem_cache - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_mem_cache - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_mem_cache - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_mem_cache - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /D "_USRDLL" /D "mod_mem_cache_EXPORTS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_mem_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_mem_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mem_cache.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_mem_cache - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_mem_cache_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_mem_cache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mem_cache.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_mem_cache - Win32 Release"
+# Name "mod_mem_cache - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_cache.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_mem_cache.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_mem_cache - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_mem_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_mem_cache.so "mem_cache_module for Apache" ../../include/ap_release.h > .\mod_mem_cache.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_mem_cache - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_mem_cache.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_mem_cache.so "mem_cache_module for Apache" ../../include/ap_release.h > .\mod_mem_cache.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/modules.mk b/rubbos/app/httpd-2.0.64/modules/experimental/modules.mk
new file mode 100644
index 00000000..ceb52a1b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/modules.mk
@@ -0,0 +1,3 @@
+DISTCLEAN_TARGETS = modules.mk
+static =
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.c b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.c
new file mode 100644
index 00000000..adaccb32
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.c
@@ -0,0 +1,1758 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * util_ldap.c: LDAP things
+ *
+ * Original code from auth_ldap module for Apache v1.3:
+ * Copyright 1998, 1999 Enbridge Pipelines Inc.
+ * Copyright 1999-2001 Dave Carrigan
+ */
+
+#include <apr_ldap.h>
+#include <apr_strings.h>
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "util_ldap.h"
+#include "util_ldap_cache.h"
+
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifndef APU_HAS_LDAP
+#error mod_ldap requires APR-util to have LDAP support built in
+#endif
+
+#if !defined(OS2) && !defined(WIN32) && !defined(BEOS) && !defined(NETWARE)
+#include "unixd.h"
+#define UTIL_LDAP_SET_MUTEX_PERMS
+#endif
+
+ /* defines for certificate file types
+ */
+#define LDAP_CA_TYPE_UNKNOWN 0
+#define LDAP_CA_TYPE_DER 1
+#define LDAP_CA_TYPE_BASE64 2
+#define LDAP_CA_TYPE_CERT7_DB 3
+
+
+module AP_MODULE_DECLARE_DATA ldap_module;
+
+int util_ldap_handler(request_rec *r);
+void *util_ldap_create_config(apr_pool_t *p, server_rec *s);
+
+
+/*
+ * Some definitions to help between various versions of apache.
+ */
+
+#ifndef DOCTYPE_HTML_2_0
+#define DOCTYPE_HTML_2_0 "<!DOCTYPE HTML PUBLIC \"-//IETF//" \
+ "DTD HTML 2.0//EN\">\n"
+#endif
+
+#ifndef DOCTYPE_HTML_3_2
+#define DOCTYPE_HTML_3_2 "<!DOCTYPE HTML PUBLIC \"-//W3C//" \
+ "DTD HTML 3.2 Final//EN\">\n"
+#endif
+
+#ifndef DOCTYPE_HTML_4_0S
+#define DOCTYPE_HTML_4_0S "<!DOCTYPE HTML PUBLIC \"-//W3C//" \
+ "DTD HTML 4.0//EN\"\n" \
+ "\"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
+#endif
+
+#ifndef DOCTYPE_HTML_4_0T
+#define DOCTYPE_HTML_4_0T "<!DOCTYPE HTML PUBLIC \"-//W3C//" \
+ "DTD HTML 4.0 Transitional//EN\"\n" \
+ "\"http://www.w3.org/TR/REC-html40/loose.dtd\">\n"
+#endif
+
+#ifndef DOCTYPE_HTML_4_0F
+#define DOCTYPE_HTML_4_0F "<!DOCTYPE HTML PUBLIC \"-//W3C//" \
+ "DTD HTML 4.0 Frameset//EN\"\n" \
+ "\"http://www.w3.org/TR/REC-html40/frameset.dtd\">\n"
+#endif
+
+#define LDAP_CACHE_LOCK() \
+ if (st->util_ldap_cache_lock) \
+ apr_global_mutex_lock(st->util_ldap_cache_lock)
+#define LDAP_CACHE_UNLOCK() \
+ if (st->util_ldap_cache_lock) \
+ apr_global_mutex_unlock(st->util_ldap_cache_lock)
+
+
+static void util_ldap_strdup (char **str, const char *newstr)
+{
+ if (*str) {
+ free(*str);
+ *str = NULL;
+ }
+
+ if (newstr) {
+ *str = calloc(1, strlen(newstr)+1);
+ strcpy (*str, newstr);
+ }
+}
+
+/*
+ * Status Handler
+ * --------------
+ *
+ * This handler generates a status page about the current performance of
+ * the LDAP cache. It is enabled as follows:
+ *
+ * <Location /ldap-status>
+ * SetHandler ldap-status
+ * </Location>
+ *
+ */
+int util_ldap_handler(request_rec *r)
+{
+ util_ldap_state_t *st = (util_ldap_state_t *)ap_get_module_config(r->server->module_config, &ldap_module);
+
+ r->allowed |= (1 << M_GET);
+ if (r->method_number != M_GET)
+ return DECLINED;
+
+ if (strcmp(r->handler, "ldap-status")) {
+ return DECLINED;
+ }
+
+ r->content_type = "text/html; charset=ISO-8859-1";
+ if (r->header_only)
+ return OK;
+
+ ap_rputs(DOCTYPE_HTML_3_2
+ "<html><head><title>LDAP Cache Information</title></head>\n", r);
+ ap_rputs("<body bgcolor='#ffffff'><h1 align=center>LDAP Cache Information</h1>\n", r);
+
+ util_ald_cache_display(r, st);
+
+ return OK;
+}
+
+/* ------------------------------------------------------------------ */
+
+
+/*
+ * Closes an LDAP connection by unlocking it. The next time
+ * util_ldap_connection_find() is called this connection will be
+ * available for reuse.
+ */
+LDAP_DECLARE(void) util_ldap_connection_close(util_ldap_connection_t *ldc)
+{
+
+ /*
+ * QUESTION:
+ *
+ * Is it safe leaving bound connections floating around between the
+ * different modules? Keeping the user bound is a performance boost,
+ * but it is also a potential security problem - maybe.
+ *
+ * For now we unbind the user when we finish with a connection, but
+ * we don't have to...
+ */
+
+ /* mark our connection as available for reuse */
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(ldc->lock);
+#endif
+}
+
+
+/*
+ * Destroys an LDAP connection by unbinding and closing the connection to
+ * the LDAP server. It is used to bring the connection back to a known
+ * state after an error, and during pool cleanup.
+ */
+LDAP_DECLARE_NONSTD(apr_status_t) util_ldap_connection_unbind(void *param)
+{
+ util_ldap_connection_t *ldc = param;
+
+ if (ldc) {
+ if (ldc->ldap) {
+ ldap_unbind_s(ldc->ldap);
+ ldc->ldap = NULL;
+ }
+ ldc->bound = 0;
+ }
+
+ return APR_SUCCESS;
+}
+
+
+/*
+ * Clean up an LDAP connection by unbinding and unlocking the connection.
+ * This function is registered with the pool cleanup function - causing
+ * the LDAP connections to be shut down cleanly on graceful restart.
+ */
+LDAP_DECLARE_NONSTD(apr_status_t) util_ldap_connection_cleanup(void *param)
+{
+ util_ldap_connection_t *ldc = param;
+
+ if (ldc) {
+
+ /* unbind and disconnect from the LDAP server */
+ util_ldap_connection_unbind(ldc);
+
+ /* free the username and password */
+ if (ldc->bindpw) {
+ free((void*)ldc->bindpw);
+ }
+ if (ldc->binddn) {
+ free((void*)ldc->binddn);
+ }
+
+ /* unlock this entry */
+ util_ldap_connection_close(ldc);
+
+ }
+
+ return APR_SUCCESS;
+}
+
+
+/*
+ * Connects to the LDAP server and binds. Does not connect if already
+ * connected (i.e. ldc->ldap is non-NULL.) Does not bind if already bound.
+ *
+ * Returns LDAP_SUCCESS on success, and an error code on failure.
+ */
+LDAP_DECLARE(int) util_ldap_connection_open(request_rec *r,
+ util_ldap_connection_t *ldc)
+{
+ int result = 0;
+ int failures = 0;
+ int version = LDAP_VERSION3;
+ int rc = LDAP_SUCCESS;
+ struct timeval timeOut = {10,0}; /* 10 second connection timeout */
+
+ util_ldap_state_t *st = (util_ldap_state_t *)ap_get_module_config(
+ r->server->module_config, &ldap_module);
+
+ /* If the connection is already bound, return
+ */
+ if (ldc->bound)
+ {
+ ldc->reason = "LDAP: connection open successful (already bound)";
+ return LDAP_SUCCESS;
+ }
+
+ /* create the ldap session handle
+ */
+ if (NULL == ldc->ldap)
+ {
+ /* clear connection requested */
+ if (!ldc->secure)
+ {
+ ldc->ldap = ldap_init(const_cast(ldc->host), ldc->port);
+ }
+ else /* ssl connection requested */
+ {
+ /* check configuration to make sure it supports SSL
+ */
+ if (st->ssl_support)
+ {
+ #if APR_HAS_LDAP_SSL
+
+ #if APR_HAS_NOVELL_LDAPSDK
+ ldc->ldap = ldapssl_init(ldc->host, ldc->port, 1);
+
+ #elif APR_HAS_NETSCAPE_LDAPSDK
+ ldc->ldap = ldapssl_init(ldc->host, ldc->port, 1);
+
+ #elif APR_HAS_OPENLDAP_LDAPSDK
+ ldc->ldap = ldap_init(ldc->host, ldc->port);
+ if (NULL != ldc->ldap)
+ {
+ int SSLmode = LDAP_OPT_X_TLS_HARD;
+ result = ldap_set_option(ldc->ldap, LDAP_OPT_X_TLS, &SSLmode);
+ if (LDAP_SUCCESS != result)
+ {
+ ldap_unbind_s(ldc->ldap);
+ ldc->reason = "LDAP: ldap_set_option - LDAP_OPT_X_TLS_HARD failed";
+ ldc->ldap = NULL;
+ }
+ }
+
+ #elif APR_HAS_MICROSOFT_LDAPSDK
+ ldc->ldap = ldap_sslinit(const_cast(ldc->host), ldc->port, 1);
+
+ #else
+ ldc->reason = "LDAP: ssl connections not supported";
+ #endif /* APR_HAS_NOVELL_LDAPSDK */
+
+ #endif /* APR_HAS_LDAP_SSL */
+ }
+ else
+ ldc->reason = "LDAP: ssl connections not supported";
+ }
+
+ if (NULL == ldc->ldap)
+ {
+ ldc->bound = 0;
+ if (NULL == ldc->reason)
+ ldc->reason = "LDAP: ldap initialization failed";
+ return(-1);
+ }
+
+ /* Set the alias dereferencing option */
+ ldap_set_option(ldc->ldap, LDAP_OPT_DEREF, &(ldc->deref));
+
+ /* always default to LDAP V3 */
+ ldap_set_option(ldc->ldap, LDAP_OPT_PROTOCOL_VERSION, &version);
+
+#ifdef LDAP_OPT_NETWORK_TIMEOUT
+ if (st->connectionTimeout > 0) {
+ timeOut.tv_sec = st->connectionTimeout;
+ }
+
+ if (st->connectionTimeout >= 0) {
+ rc = ldap_set_option(ldc->ldap, LDAP_OPT_NETWORK_TIMEOUT, (void *)&timeOut);
+ if (APR_SUCCESS != rc) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "LDAP: Could not set the connection timeout" );
+ }
+ }
+#endif
+ }
+
+
+ /* loop trying to bind up to 10 times if LDAP_SERVER_DOWN error is
+ * returned. Break out of the loop on Success or any other error.
+ *
+ * NOTE: Looping is probably not a great idea. If the server isn't
+ * responding the chances it will respond after a few tries are poor.
+ * However, the original code looped and it only happens on
+ * the error condition.
+ */
+ for (failures=0; failures<10; failures++)
+ {
+ result = ldap_simple_bind_s(ldc->ldap, const_cast(ldc->binddn), const_cast(ldc->bindpw));
+ if (LDAP_SERVER_DOWN != result)
+ break;
+ }
+
+ /* free the handle if there was an error
+ */
+ if (LDAP_SUCCESS != result)
+ {
+ ldap_unbind_s(ldc->ldap);
+ ldc->ldap = NULL;
+ ldc->bound = 0;
+ ldc->reason = "LDAP: ldap_simple_bind_s() failed";
+ }
+ else {
+ ldc->bound = 1;
+ ldc->reason = "LDAP: connection open successful";
+ }
+
+ return(result);
+}
+
+
+/*
+ * Find an existing ldap connection struct that matches the
+ * provided ldap connection parameters.
+ *
+ * If not found in the cache, a new ldc structure will be allocated from st->pool
+ * and returned to the caller. If found in the cache, a pointer to the existing
+ * ldc structure will be returned.
+ */
+LDAP_DECLARE(util_ldap_connection_t *)util_ldap_connection_find(request_rec *r, const char *host, int port,
+ const char *binddn, const char *bindpw, deref_options deref,
+ int secure )
+{
+ struct util_ldap_connection_t *l, *p; /* To traverse the linked list */
+
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(r->server->module_config,
+ &ldap_module);
+
+
+#if APR_HAS_THREADS
+ /* mutex lock this function */
+ apr_thread_mutex_lock(st->mutex);
+#endif
+
+ /* Search for an exact connection match in the list that is not
+ * being used.
+ */
+ for (l=st->connections,p=NULL; l; l=l->next) {
+#if APR_HAS_THREADS
+ if (APR_SUCCESS == apr_thread_mutex_trylock(l->lock)) {
+#endif
+ if ((l->port == port) && (strcmp(l->host, host) == 0) &&
+ ((!l->binddn && !binddn) || (l->binddn && binddn && !strcmp(l->binddn, binddn))) &&
+ ((!l->bindpw && !bindpw) || (l->bindpw && bindpw && !strcmp(l->bindpw, bindpw))) &&
+ (l->deref == deref) && (l->secure == secure)) {
+
+ break;
+ }
+#if APR_HAS_THREADS
+ /* If this connection didn't match the criteria, then we
+ * need to unlock the mutex so it is available to be reused.
+ */
+ apr_thread_mutex_unlock(l->lock);
+ }
+#endif
+ p = l;
+ }
+
+ /* If nothing found, search again, but we don't care about the
+ * binddn and bindpw this time.
+ */
+ if (!l) {
+ for (l=st->connections,p=NULL; l; l=l->next) {
+#if APR_HAS_THREADS
+ if (APR_SUCCESS == apr_thread_mutex_trylock(l->lock)) {
+
+#endif
+ if ((l->port == port) && (strcmp(l->host, host) == 0) &&
+ (l->deref == deref) && (l->secure == secure)) {
+
+ /* the bind credentials have changed */
+ l->bound = 0;
+ util_ldap_strdup((char**)&(l->binddn), binddn);
+ util_ldap_strdup((char**)&(l->bindpw), bindpw);
+ break;
+ }
+#if APR_HAS_THREADS
+ /* If this connection didn't match the criteria, then we
+ * need to unlock the mutex so it is available to be reused.
+ */
+ apr_thread_mutex_unlock(l->lock);
+ }
+#endif
+ p = l;
+ }
+ }
+
+/* artificially disable cache */
+/* l = NULL; */
+
+ /* If no connection was found after the second search, we
+ * must create one.
+ */
+ if (!l) {
+
+ /*
+ * Add the new connection entry to the linked list. Note that we
+ * don't actually establish an LDAP connection yet; that happens
+ * the first time authentication is requested.
+ */
+ /* create the details to the pool in st */
+ l = apr_pcalloc(st->pool, sizeof(util_ldap_connection_t));
+#if APR_HAS_THREADS
+ apr_thread_mutex_create(&l->lock, APR_THREAD_MUTEX_DEFAULT, st->pool);
+ apr_thread_mutex_lock(l->lock);
+#endif
+ l->pool = st->pool;
+ l->bound = 0;
+ l->host = apr_pstrdup(st->pool, host);
+ l->port = port;
+ l->deref = deref;
+ util_ldap_strdup((char**)&(l->binddn), binddn);
+ util_ldap_strdup((char**)&(l->bindpw), bindpw);
+ l->secure = secure;
+
+ /* add the cleanup to the pool */
+ apr_pool_cleanup_register(l->pool, l,
+ util_ldap_connection_cleanup,
+ apr_pool_cleanup_null);
+
+ if (p) {
+ p->next = l;
+ }
+ else {
+ st->connections = l;
+ }
+ }
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(st->mutex);
+#endif
+ return l;
+}
+
+/* ------------------------------------------------------------------ */
+
+/*
+ * Compares two DNs to see if they're equal. The only way to do this correctly is to
+ * search for the dn and then do ldap_get_dn() on the result. This should match the
+ * initial dn, since it would also have been retrieved with ldap_get_dn(). This is
+ * expensive, so if the configuration value compare_dn_on_server is
+ * false, we just do an ordinary strcmp().
+ *
+ * The lock for the ldap cache should already be acquired.
+ */
+LDAP_DECLARE(int) util_ldap_cache_comparedn(request_rec *r, util_ldap_connection_t *ldc,
+ const char *url, const char *dn, const char *reqdn,
+ int compare_dn_on_server)
+{
+ int result = 0;
+ util_url_node_t *curl;
+ util_url_node_t curnode;
+ util_dn_compare_node_t *node;
+ util_dn_compare_node_t newnode;
+ int failures = 0;
+ LDAPMessage *res, *entry;
+ char *searchdn;
+
+ util_ldap_state_t *st = (util_ldap_state_t *)ap_get_module_config(r->server->module_config, &ldap_module);
+
+ /* get cache entry (or create one) */
+ LDAP_CACHE_LOCK();
+
+ curnode.url = url;
+ curl = util_ald_cache_fetch(st->util_ldap_cache, &curnode);
+ if (curl == NULL) {
+ curl = util_ald_create_caches(st, url);
+ }
+ LDAP_CACHE_UNLOCK();
+
+ /* a simple compare? */
+ if (!compare_dn_on_server) {
+ /* unlock this read lock */
+ if (strcmp(dn, reqdn)) {
+ ldc->reason = "DN Comparison FALSE (direct strcmp())";
+ return LDAP_COMPARE_FALSE;
+ }
+ else {
+ ldc->reason = "DN Comparison TRUE (direct strcmp())";
+ return LDAP_COMPARE_TRUE;
+ }
+ }
+
+ if (curl) {
+ /* no - it's a server side compare */
+ LDAP_CACHE_LOCK();
+
+ /* is it in the compare cache? */
+ newnode.reqdn = (char *)reqdn;
+ node = util_ald_cache_fetch(curl->dn_compare_cache, &newnode);
+ if (node != NULL) {
+ /* If it's in the cache, it's good */
+ /* unlock this read lock */
+ LDAP_CACHE_UNLOCK();
+ ldc->reason = "DN Comparison TRUE (cached)";
+ return LDAP_COMPARE_TRUE;
+ }
+
+ /* unlock this read lock */
+ LDAP_CACHE_UNLOCK();
+ }
+
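+ /* Any LDAP call below that fails with LDAP_SERVER_DOWN jumps back to
+ * start_over to reconnect and retry; we give up after more than ten failures.
+ */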
+start_over:
+ if (failures++ > 10) {
+ /* too many failures */
+ return result;
+ }
+
+ /* make a server connection */
+ if (LDAP_SUCCESS != (result = util_ldap_connection_open(r, ldc))) {
+ /* connect to server failed */
+ return result;
+ }
+
+ /* search for reqdn */
+ if ((result = ldap_search_ext_s(ldc->ldap, const_cast(reqdn), LDAP_SCOPE_BASE,
+ "(objectclass=*)", NULL, 1,
+ NULL, NULL, NULL, -1, &res)) == LDAP_SERVER_DOWN) {
+ ldc->reason = "DN Comparison ldap_search_ext_s() failed with server down";
+ util_ldap_connection_unbind(ldc);
+ goto start_over;
+ }
+ if (result != LDAP_SUCCESS) {
+ /* search for reqdn failed - no match */
+ ldc->reason = "DN Comparison ldap_search_ext_s() failed";
+ return result;
+ }
+
+ entry = ldap_first_entry(ldc->ldap, res);
+ searchdn = ldap_get_dn(ldc->ldap, entry);
+
+ ldap_msgfree(res);
+ if (strcmp(dn, searchdn) != 0) {
+ /* compare unsuccessful */
+ ldc->reason = "DN Comparison FALSE (checked on server)";
+ result = LDAP_COMPARE_FALSE;
+ }
+ else {
+ if (curl) {
+ /* compare successful - add to the compare cache */
+ LDAP_CACHE_LOCK();
+ newnode.reqdn = (char *)reqdn;
+ newnode.dn = (char *)dn;
+
+ node = util_ald_cache_fetch(curl->dn_compare_cache, &newnode);
+ if ((node == NULL) ||
+ (strcmp(reqdn, node->reqdn) != 0) || (strcmp(dn, node->dn) != 0)) {
+
+ util_ald_cache_insert(curl->dn_compare_cache, &newnode);
+ }
+ LDAP_CACHE_UNLOCK();
+ }
+ ldc->reason = "DN Comparison TRUE (checked on server)";
+ result = LDAP_COMPARE_TRUE;
+ }
+ ldap_memfree(searchdn);
+ return result;
+
+}
+
+/*
+ * Performs a generic ldap_compare operation, using the supplied cache to look
+ * up previous results. We cache two kinds of compares: "require group"
+ * compares and "require user" compares. Each kind uses a different cache
+ * node: the require group node includes the DN, while the require user
+ * node does not.
+ *
+ */
+LDAP_DECLARE(int) util_ldap_cache_compare(request_rec *r, util_ldap_connection_t *ldc,
+ const char *url, const char *dn,
+ const char *attrib, const char *value)
+{
+ int result = 0;
+ util_url_node_t *curl;
+ util_url_node_t curnode;
+ util_compare_node_t *compare_nodep;
+ util_compare_node_t the_compare_node;
+ apr_time_t curtime = 0; /* silence gcc -Wall */
+ int failures = 0;
+
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(r->server->module_config,
+ &ldap_module);
+
+ /* get cache entry (or create one) */
+ LDAP_CACHE_LOCK();
+ curnode.url = url;
+ curl = util_ald_cache_fetch(st->util_ldap_cache, &curnode);
+ if (curl == NULL) {
+ curl = util_ald_create_caches(st, url);
+ }
+ LDAP_CACHE_UNLOCK();
+
+ if (curl) {
+ /* make a comparison to the cache */
+ LDAP_CACHE_LOCK();
+ curtime = apr_time_now();
+
+ the_compare_node.dn = (char *)dn;
+ the_compare_node.attrib = (char *)attrib;
+ the_compare_node.value = (char *)value;
+ the_compare_node.result = 0;
+
+ compare_nodep = util_ald_cache_fetch(curl->compare_cache, &the_compare_node);
+
+ if (compare_nodep != NULL) {
+ /* found it... */
+ if (curtime - compare_nodep->lastcompare > st->compare_cache_ttl) {
+ /* ...but it is too old */
+ util_ald_cache_remove(curl->compare_cache, compare_nodep);
+ }
+ else {
+ /* ...and it is good */
+ /* unlock this read lock */
+ LDAP_CACHE_UNLOCK();
+ if (LDAP_COMPARE_TRUE == compare_nodep->result) {
+ ldc->reason = "Comparison true (cached)";
+ return compare_nodep->result;
+ }
+ else if (LDAP_COMPARE_FALSE == compare_nodep->result) {
+ ldc->reason = "Comparison false (cached)";
+ return compare_nodep->result;
+ }
+ else if (LDAP_NO_SUCH_ATTRIBUTE == compare_nodep->result) {
+ ldc->reason = "Comparison no such attribute (cached)";
+ return compare_nodep->result;
+ }
+ else {
+ ldc->reason = "Comparison undefined (cached)";
+ return compare_nodep->result;
+ }
+ }
+ }
+ /* unlock this read lock */
+ LDAP_CACHE_UNLOCK();
+ }
+
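+ /* Reconnect-and-retry point for LDAP_SERVER_DOWN; gives up after more than ten failures. */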
+start_over:
+ if (failures++ > 10) {
+ /* too many failures */
+ return result;
+ }
+ if (LDAP_SUCCESS != (result = util_ldap_connection_open(r, ldc))) {
+ /* connect failed */
+ return result;
+ }
+
+ if ((result = ldap_compare_s(ldc->ldap, const_cast(dn), const_cast(attrib), const_cast(value)))
+ == LDAP_SERVER_DOWN) {
+ /* connection failed - try again */
+ ldc->reason = "ldap_compare_s() failed with server down";
+ util_ldap_connection_unbind(ldc);
+ goto start_over;
+ }
+
+ ldc->reason = "Comparison complete";
+ if ((LDAP_COMPARE_TRUE == result) ||
+ (LDAP_COMPARE_FALSE == result) ||
+ (LDAP_NO_SUCH_ATTRIBUTE == result)) {
+ if (curl) {
+ /* compare completed; caching result */
+ LDAP_CACHE_LOCK();
+ the_compare_node.lastcompare = curtime;
+ the_compare_node.result = result;
+
+ /* If the node doesn't exist then insert it, otherwise just update it with
+ the last results */
+ compare_nodep = util_ald_cache_fetch(curl->compare_cache, &the_compare_node);
+ if ((compare_nodep == NULL) ||
+ (strcmp(the_compare_node.dn, compare_nodep->dn) != 0) ||
+ (strcmp(the_compare_node.attrib, compare_nodep->attrib) != 0) ||
+ (strcmp(the_compare_node.value, compare_nodep->value) != 0)) {
+
+ util_ald_cache_insert(curl->compare_cache, &the_compare_node);
+ }
+ else {
+ compare_nodep->lastcompare = curtime;
+ compare_nodep->result = result;
+ }
+ LDAP_CACHE_UNLOCK();
+ }
+ if (LDAP_COMPARE_TRUE == result) {
+ ldc->reason = "Comparison true (adding to cache)";
+ return LDAP_COMPARE_TRUE;
+ }
+ else if (LDAP_COMPARE_FALSE == result) {
+ ldc->reason = "Comparison false (adding to cache)";
+ return LDAP_COMPARE_FALSE;
+ }
+ else {
+ ldc->reason = "Comparison no such attribute (adding to cache)";
+ return LDAP_NO_SUCH_ATTRIBUTE;
+ }
+ }
+ return result;
+}
+
+LDAP_DECLARE(int) util_ldap_cache_checkuserid(request_rec *r, util_ldap_connection_t *ldc,
+ const char *url, const char *basedn, int scope, char **attrs,
+ const char *filter, const char *bindpw, const char **binddn,
+ const char ***retvals)
+{
+ const char **vals = NULL;
+ int numvals = 0;
+ int result = 0;
+ LDAPMessage *res, *entry;
+ char *dn;
+ int count;
+ int failures = 0;
+ util_url_node_t *curl; /* Cached URL node */
+ util_url_node_t curnode;
+ util_search_node_t *search_nodep; /* Cached search node */
+ util_search_node_t the_search_node;
+ apr_time_t curtime;
+
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(r->server->module_config,
+ &ldap_module);
+
+ /* Get the cache node for this url */
+ LDAP_CACHE_LOCK();
+ curnode.url = url;
+ curl = (util_url_node_t *)util_ald_cache_fetch(st->util_ldap_cache, &curnode);
+ if (curl == NULL) {
+ curl = util_ald_create_caches(st, url);
+ }
+ LDAP_CACHE_UNLOCK();
+
+ if (curl) {
+ LDAP_CACHE_LOCK();
+ the_search_node.username = filter;
+ search_nodep = util_ald_cache_fetch(curl->search_cache, &the_search_node);
+ if (search_nodep != NULL) {
+
+ /* found entry in search cache... */
+ curtime = apr_time_now();
+
+ /*
+ * Remove this item from the cache if it has expired.
+ * If the sent password doesn't match the stored password,
+ * the entry will be removed and re-added later if the
+ * credentials pass authentication.
+ */
+ if ((curtime - search_nodep->lastbind) > st->search_cache_ttl) {
+ /* ...but entry is too old */
+ util_ald_cache_remove(curl->search_cache, search_nodep);
+ }
+ else if ((search_nodep->bindpw) &&
+ (search_nodep->bindpw[0] != '\0') &&
+ (strcmp(search_nodep->bindpw, bindpw) == 0)) {
+ /* ...and entry is valid */
+ *binddn = search_nodep->dn;
+ *retvals = search_nodep->vals;
+ LDAP_CACHE_UNLOCK();
+ ldc->reason = "Authentication successful (cached)";
+ return LDAP_SUCCESS;
+ }
+ }
+ /* unlock this read lock */
+ LDAP_CACHE_UNLOCK();
+ }
+
+ /*
+ * At this point, there is no valid cached search, so let's do the search.
+ */
+
+ /*
+ * If any LDAP operation fails due to LDAP_SERVER_DOWN, control returns here.
+ */
+start_over:
+ if (failures++ > 10) {
+ return result;
+ }
+ if (LDAP_SUCCESS != (result = util_ldap_connection_open(r, ldc))) {
+ return result;
+ }
+
+ /* try do the search */
+ if ((result = ldap_search_ext_s(ldc->ldap,
+ const_cast(basedn), scope,
+ const_cast(filter), attrs, 0,
+ NULL, NULL, NULL, -1, &res)) == LDAP_SERVER_DOWN) {
+ ldc->reason = "ldap_search_ext_s() for user failed with server down";
+ util_ldap_connection_unbind(ldc);
+ goto start_over;
+ }
+
+ /* if there is an error (including LDAP_NO_SUCH_OBJECT) return now */
+ if (result != LDAP_SUCCESS) {
+ ldc->reason = "ldap_search_ext_s() for user failed";
+ return result;
+ }
+
+ /*
+ * We should have found exactly one entry; to find a different
+ * number is an error.
+ */
+ count = ldap_count_entries(ldc->ldap, res);
+ if (count != 1)
+ {
+ if (count == 0 )
+ ldc->reason = "User not found";
+ else
+ ldc->reason = "User is not unique (search found two or more matches)";
+ ldap_msgfree(res);
+ return LDAP_NO_SUCH_OBJECT;
+ }
+
+ entry = ldap_first_entry(ldc->ldap, res);
+
+ /* Grab the dn, copy it into the pool, and free it again */
+ dn = ldap_get_dn(ldc->ldap, entry);
+ *binddn = apr_pstrdup(r->pool, dn);
+ ldap_memfree(dn);
+
+ /*
+ * A bind to the server with an empty password always succeeds, so
+ * we check to ensure that the password is not empty. This implies
+ * that users who actually do have empty passwords will never be
+ * able to authenticate with this module. I don't see this as a big
+ * problem.
+ */
+ if (!bindpw || strlen(bindpw) <= 0) {
+ ldap_msgfree(res);
+ ldc->reason = "Empty password not allowed";
+ return LDAP_INVALID_CREDENTIALS;
+ }
+
+ /*
+ * Attempt to bind with the retrieved dn and the password. If the bind
+ * fails, it means that the password is wrong (the dn obviously
+ * exists, since we just retrieved it)
+ */
+ if ((result =
+ ldap_simple_bind_s(ldc->ldap, const_cast(*binddn), const_cast(bindpw))) ==
+ LDAP_SERVER_DOWN) {
+ ldc->reason = "ldap_simple_bind_s() to check user credentials failed with server down";
+ ldap_msgfree(res);
+ util_ldap_connection_unbind(ldc);
+ goto start_over;
+ }
+
+ /* failure? if so - return */
+ if (result != LDAP_SUCCESS) {
+ ldc->reason = "ldap_simple_bind_s() to check user credentials failed";
+ ldap_msgfree(res);
+ util_ldap_connection_unbind(ldc);
+ return result;
+ }
+ else {
+ /*
+ * We have just bound the connection to a different user and password
+ * combination, which might be reused unintentionally next time this
+ * connection is used from the connection pool. To ensure no confusion,
+ * we mark the connection as unbound.
+ */
+ ldc->bound = 0;
+ }
+
+ /*
+ * Get values for the provided attributes.
+ */
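+ /* Multiple values of a single attribute are concatenated into one
+ * "; "-separated string for the caller.
+ */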
+ if (attrs) {
+ int k = 0;
+ int i = 0;
+ while (attrs[k++]);
+ vals = apr_pcalloc(r->pool, sizeof(char *) * (k+1));
+ numvals = k;
+ while (attrs[i]) {
+ char **values;
+ int j = 0;
+ char *str = NULL;
+ /* get values */
+ values = ldap_get_values(ldc->ldap, entry, attrs[i]);
+ while (values && values[j]) {
+ str = str ? apr_pstrcat(r->pool, str, "; ", values[j], NULL) : apr_pstrdup(r->pool, values[j]);
+ j++;
+ }
+ ldap_value_free(values);
+ vals[i] = str;
+ i++;
+ }
+ *retvals = vals;
+ }
+
+ /*
+ * Add the new username to the search cache.
+ */
+ if (curl) {
+ LDAP_CACHE_LOCK();
+ the_search_node.username = filter;
+ the_search_node.dn = *binddn;
+ the_search_node.bindpw = bindpw;
+ the_search_node.lastbind = apr_time_now();
+ the_search_node.vals = vals;
+ the_search_node.numvals = numvals;
+
+ /* Search again to make sure that another thread didn't already insert this node
+ into the cache before we got here. If it does exist, just update the lastbind */
+ search_nodep = util_ald_cache_fetch(curl->search_cache, &the_search_node);
+ if ((search_nodep == NULL) ||
+ (strcmp(*binddn, search_nodep->dn) != 0)) {
+
+ /* Nothing in cache, insert new entry */
+ util_ald_cache_insert(curl->search_cache, &the_search_node);
+ }
+ else if ((!search_nodep->bindpw) ||
+ (strcmp(bindpw, search_nodep->bindpw) != 0)) {
+
+ /* Entry in cache is invalid, remove it and insert new one */
+ util_ald_cache_remove(curl->search_cache, search_nodep);
+ util_ald_cache_insert(curl->search_cache, &the_search_node);
+ }
+ else {
+ /* Cache entry is valid, update lastbind */
+ search_nodep->lastbind = the_search_node.lastbind;
+ }
+ LDAP_CACHE_UNLOCK();
+ }
+ ldap_msgfree(res);
+
+ ldc->reason = "Authentication successful";
+ return LDAP_SUCCESS;
+}
+
+/*
+ * This function will return the DN of the entry matching userid.
+ * It is used to get the DN in case some other module than mod_auth_ldap
+ * has authenticated the user.
+ * The function is basically a copy of util_ldap_cache_checkuserid
+ * with password checking removed.
+ */
+LDAP_DECLARE(int) util_ldap_cache_getuserdn(request_rec *r, util_ldap_connection_t *ldc,
+ const char *url, const char *basedn, int scope, char **attrs,
+ const char *filter, const char **binddn,
+ const char ***retvals)
+{
+ const char **vals = NULL;
+ int numvals = 0;
+ int result = 0;
+ LDAPMessage *res, *entry;
+ char *dn;
+ int count;
+ int failures = 0;
+ util_url_node_t *curl; /* Cached URL node */
+ util_url_node_t curnode;
+ util_search_node_t *search_nodep; /* Cached search node */
+ util_search_node_t the_search_node;
+ apr_time_t curtime;
+
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(r->server->module_config,
+ &ldap_module);
+
+ /* Get the cache node for this url */
+ LDAP_CACHE_LOCK();
+ curnode.url = url;
+ curl = (util_url_node_t *)util_ald_cache_fetch(st->util_ldap_cache, &curnode);
+ if (curl == NULL) {
+ curl = util_ald_create_caches(st, url);
+ }
+ LDAP_CACHE_UNLOCK();
+
+ if (curl) {
+ LDAP_CACHE_LOCK();
+ the_search_node.username = filter;
+ search_nodep = util_ald_cache_fetch(curl->search_cache, &the_search_node);
+ if (search_nodep != NULL) {
+
+ /* found entry in search cache... */
+ curtime = apr_time_now();
+
+ /*
+ * Remove this item from the cache if it has expired.
+ */
+ if ((curtime - search_nodep->lastbind) > st->search_cache_ttl) {
+ /* ...but entry is too old */
+ util_ald_cache_remove(curl->search_cache, search_nodep);
+ }
+ else {
+ /* ...and entry is valid */
+ *binddn = search_nodep->dn;
+ *retvals = search_nodep->vals;
+ LDAP_CACHE_UNLOCK();
+ ldc->reason = "Search successful (cached)";
+ return LDAP_SUCCESS;
+ }
+ }
+ /* unlock this read lock */
+ LDAP_CACHE_UNLOCK();
+ }
+
+ /*
+ * At this point, there is no valid cached search, so let's do the search.
+ */
+
+ /*
+ * If any LDAP operation fails due to LDAP_SERVER_DOWN, control returns here.
+ */
+start_over:
+ if (failures++ > 10) {
+ return result;
+ }
+ if (LDAP_SUCCESS != (result = util_ldap_connection_open(r, ldc))) {
+ return result;
+ }
+
+ /* try do the search */
+ if ((result = ldap_search_ext_s(ldc->ldap,
+ const_cast(basedn), scope,
+ const_cast(filter), attrs, 0,
+ NULL, NULL, NULL, -1, &res)) == LDAP_SERVER_DOWN) {
+ ldc->reason = "ldap_search_ext_s() for user failed with server down";
+ util_ldap_connection_unbind(ldc);
+ goto start_over;
+ }
+
+ /* if there is an error (including LDAP_NO_SUCH_OBJECT) return now */
+ if (result != LDAP_SUCCESS) {
+ ldc->reason = "ldap_search_ext_s() for user failed";
+ return result;
+ }
+
+ /*
+ * We should have found exactly one entry; to find a different
+ * number is an error.
+ */
+ count = ldap_count_entries(ldc->ldap, res);
+ if (count != 1)
+ {
+ if (count == 0 )
+ ldc->reason = "User not found";
+ else
+ ldc->reason = "User is not unique (search found two or more matches)";
+ ldap_msgfree(res);
+ return LDAP_NO_SUCH_OBJECT;
+ }
+
+ entry = ldap_first_entry(ldc->ldap, res);
+
+ /* Grab the dn, copy it into the pool, and free it again */
+ dn = ldap_get_dn(ldc->ldap, entry);
+ *binddn = apr_pstrdup(r->pool, dn);
+ ldap_memfree(dn);
+
+ /*
+ * Get values for the provided attributes.
+ */
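+ /* As above, multiple values of one attribute are joined with "; ". */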
+ if (attrs) {
+ int k = 0;
+ int i = 0;
+ while (attrs[k++]);
+ vals = apr_pcalloc(r->pool, sizeof(char *) * (k+1));
+ numvals = k;
+ while (attrs[i]) {
+ char **values;
+ int j = 0;
+ char *str = NULL;
+ /* get values */
+ values = ldap_get_values(ldc->ldap, entry, attrs[i]);
+ while (values && values[j]) {
+ str = str ? apr_pstrcat(r->pool, str, "; ", values[j], NULL) : apr_pstrdup(r->pool, values[j]);
+ j++;
+ }
+ ldap_value_free(values);
+ vals[i] = str;
+ i++;
+ }
+ *retvals = vals;
+ }
+
+ /*
+ * Add the new username to the search cache.
+ */
+ if (curl) {
+ LDAP_CACHE_LOCK();
+ the_search_node.username = filter;
+ the_search_node.dn = *binddn;
+ the_search_node.bindpw = NULL;
+ the_search_node.lastbind = apr_time_now();
+ the_search_node.vals = vals;
+ the_search_node.numvals = numvals;
+
+ /* Search again to make sure that another thread didn't already insert this node
+ into the cache before we got here. If it does exist, just update the lastbind */
+ search_nodep = util_ald_cache_fetch(curl->search_cache, &the_search_node);
+ if ((search_nodep == NULL) ||
+ (strcmp(*binddn, search_nodep->dn) != 0)) {
+
+ /* Nothing in cache, insert new entry */
+ util_ald_cache_insert(curl->search_cache, &the_search_node);
+ }
+ /*
+ * Don't update lastbind on entries with bindpw because
+ * we haven't verified that password. It's OK to update
+ * the entry if there is no password in it.
+ */
+ else if (!search_nodep->bindpw) {
+ /* Cache entry is valid, update lastbind */
+ search_nodep->lastbind = the_search_node.lastbind;
+ }
+ LDAP_CACHE_UNLOCK();
+ }
+ ldap_msgfree(res);
+
+ ldc->reason = "Search successful";
+ return LDAP_SUCCESS;
+}
+
+/*
+ * Reports whether SSL support is enabled.
+ *
+ * 1 = enabled, 0 = not enabled
+ */
+LDAP_DECLARE(int) util_ldap_ssl_supported(request_rec *r)
+{
+ util_ldap_state_t *st = (util_ldap_state_t *)ap_get_module_config(
+ r->server->module_config, &ldap_module);
+
+ return(st->ssl_support);
+}
+
+
+/* ---------------------------------------- */
+/* config directives */
+
+
+static const char *util_ldap_set_cache_bytes(cmd_parms *cmd, void *dummy, const char *bytes)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+
+ st->cache_bytes = atol(bytes);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "[%" APR_PID_T_FMT "] ldap cache: Setting shared memory "
+ " cache size to %" APR_SIZE_T_FMT " bytes.",
+ getpid(), st->cache_bytes);
+
+ return NULL;
+}
+
+static const char *util_ldap_set_cache_file(cmd_parms *cmd, void *dummy, const char *file)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+
+ if (file) {
+ st->cache_file = ap_server_root_relative(st->pool, file);
+ }
+ else {
+ st->cache_file = NULL;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "LDAP cache: Setting shared memory cache file to %s bytes.",
+ st->cache_file);
+
+ return NULL;
+}
+
+static const char *util_ldap_set_cache_ttl(cmd_parms *cmd, void *dummy, const char *ttl)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+
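+ /* The directive takes seconds; store the TTL in microseconds (apr_time_t units). */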
+ st->search_cache_ttl = atol(ttl) * 1000000;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "[%d] ldap cache: Setting cache TTL to %ld microseconds.",
+ getpid(), st->search_cache_ttl);
+
+ return NULL;
+}
+
+static const char *util_ldap_set_cache_entries(cmd_parms *cmd, void *dummy, const char *size)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+
+
+ st->search_cache_size = atol(size);
+ if (st->search_cache_size < 0) {
+ st->search_cache_size = 0;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "[%d] ldap cache: Setting search cache size to %ld entries.",
+ getpid(), st->search_cache_size);
+
+ return NULL;
+}
+
+static const char *util_ldap_set_opcache_ttl(cmd_parms *cmd, void *dummy, const char *ttl)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+
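+ /* As with LDAPCacheTTL, convert the value from seconds to microseconds. */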
+ st->compare_cache_ttl = atol(ttl) * 1000000;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "[%d] ldap cache: Setting operation cache TTL to %ld microseconds.",
+ getpid(), st->compare_cache_ttl);
+
+ return NULL;
+}
+
+static const char *util_ldap_set_opcache_entries(cmd_parms *cmd, void *dummy, const char *size)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+
+ st->compare_cache_size = atol(size);
+ if (st->compare_cache_size < 0) {
+ st->compare_cache_size = 0;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "[%d] ldap cache: Setting operation cache size to %ld entries.",
+ getpid(), st->compare_cache_size);
+
+ return NULL;
+}
+
+static const char *util_ldap_set_cert_auth(cmd_parms *cmd, void *dummy, const char *file)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+ const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ apr_finfo_t finfo;
+ apr_status_t rv;
+
+ if (err != NULL) {
+ return err;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "LDAP: SSL trusted certificate authority file - %s",
+ file);
+
+ st->cert_auth_file = ap_server_root_relative(cmd->pool, file);
+
+ if (st->cert_auth_file &&
+ ((rv = apr_stat (&finfo, st->cert_auth_file, APR_FINFO_MIN, cmd->pool)) != APR_SUCCESS))
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, cmd->server,
+ "LDAP: Could not open SSL trusted certificate authority file - %s",
+ st->cert_auth_file == NULL ? file : st->cert_auth_file);
+ return "Invalid file path";
+ }
+
+ return(NULL);
+}
+
+
+static const char *util_ldap_set_cert_type(cmd_parms *cmd, void *dummy, const char *Type)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+ const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (err != NULL) {
+ return err;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "LDAP: SSL trusted certificate authority file type - %s",
+ Type);
+
+ if (0 == strcmp("DER_FILE", Type))
+ st->cert_file_type = LDAP_CA_TYPE_DER;
+
+ else if (0 == strcmp("BASE64_FILE", Type))
+ st->cert_file_type = LDAP_CA_TYPE_BASE64;
+
+ else if (0 == strcmp("CERT7_DB_PATH", Type))
+ st->cert_file_type = LDAP_CA_TYPE_CERT7_DB;
+
+ else
+ st->cert_file_type = LDAP_CA_TYPE_UNKNOWN;
+
+ return(NULL);
+}
+
+static const char *util_ldap_set_connection_timeout(cmd_parms *cmd, void *dummy, const char *ttl)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
+ &ldap_module);
+ const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+
+ if (err != NULL) {
+ return err;
+ }
+
+#ifdef LDAP_OPT_NETWORK_TIMEOUT
+ st->connectionTimeout = atol(ttl);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server,
+ "[%d] ldap connection: Setting connection timeout to %ld seconds.",
+ getpid(), st->connectionTimeout);
+#else
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, cmd->server,
+ "LDAP: Connection timout option not supported by the LDAP SDK in use." );
+#endif
+
+ return NULL;
+}
+
+void *util_ldap_create_config(apr_pool_t *p, server_rec *s)
+{
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)apr_pcalloc(p, sizeof(util_ldap_state_t));
+
+ /* Create a per vhost pool for mod_ldap to use, serialized with
+ * st->mutex (also one per vhost)
+ */
+ apr_pool_create(&st->pool, p);
+#if APR_HAS_THREADS
+ apr_thread_mutex_create(&st->mutex, APR_THREAD_MUTEX_DEFAULT, st->pool);
+#endif
+
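+ /* Defaults: 100KB shared cache, 1024-entry search and compare caches,
+ * 600 second (600000000 microsecond) TTLs and a 10 second connection timeout.
+ */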
+ st->cache_bytes = 100000;
+ st->search_cache_ttl = 600000000;
+ st->search_cache_size = 1024;
+ st->compare_cache_ttl = 600000000;
+ st->compare_cache_size = 1024;
+ st->connections = NULL;
+ st->cert_auth_file = NULL;
+ st->cert_file_type = LDAP_CA_TYPE_UNKNOWN;
+ st->ssl_support = 0;
+ st->connectionTimeout = 10;
+
+ return st;
+}
+
+static apr_status_t util_ldap_cleanup_module(void *data)
+{
+#if APR_HAS_LDAP_SSL && APR_HAS_NOVELL_LDAPSDK
+ server_rec *s = data;
+ util_ldap_state_t *st = (util_ldap_state_t *)ap_get_module_config(
+ s->module_config, &ldap_module);
+
+ if (st->ssl_support)
+ ldapssl_client_deinit();
+
+#endif
+ return APR_SUCCESS;
+}
+
+static int util_ldap_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ int rc = LDAP_SUCCESS;
+ apr_status_t result;
+ char buf[MAX_STRING_LEN];
+ server_rec *s_vhost;
+ util_ldap_state_t *st_vhost;
+
+ util_ldap_state_t *st =
+ (util_ldap_state_t *)ap_get_module_config(s->module_config, &ldap_module);
+
+ void *data;
+ const char *userdata_key = "util_ldap_init";
+
+ /* util_ldap_post_config() will be called twice. Don't bother
+ * going through all of the initialization on the first call
+ * because it will just be thrown away.*/
+ apr_pool_userdata_get(&data, userdata_key, s->process->pool);
+ if (!data) {
+ apr_pool_userdata_set((const void *)1, userdata_key,
+ apr_pool_cleanup_null, s->process->pool);
+
+#if APR_HAS_SHARED_MEMORY
+ /* If the cache file already exists then delete it. Otherwise we are
+ * going to run into problems creating the shared memory. */
+ if (st->cache_file) {
+ char *lck_file = apr_pstrcat (ptemp, st->cache_file, ".lck", NULL);
+ apr_file_remove(st->cache_file, ptemp);
+ apr_file_remove(lck_file, ptemp);
+ }
+#endif
+ return OK;
+ }
+
+#if APR_HAS_SHARED_MEMORY
+ /* Initialize the cache if the shared memory size is not zero and we don't already have a shm address */
+ if (!st->cache_shm && st->cache_bytes > 0) {
+#endif
+ result = util_ldap_cache_init(p, st);
+ if (result != APR_SUCCESS) {
+ apr_strerror(result, buf, sizeof(buf));
+ ap_log_error(APLOG_MARK, APLOG_ERR, result, s,
+ "LDAP cache: error while creating a shared memory segment: %s", buf);
+ }
+
+
+#if APR_HAS_SHARED_MEMORY
+ if (st->cache_file) {
+ st->lock_file = apr_pstrcat (st->pool, st->cache_file, ".lck", NULL);
+ }
+ else
+#endif
+ st->lock_file = ap_server_root_relative(st->pool, tmpnam(NULL));
+
+ result = apr_global_mutex_create(&st->util_ldap_cache_lock, st->lock_file, APR_LOCK_DEFAULT, st->pool);
+ if (result != APR_SUCCESS) {
+ return result;
+ }
+
+#ifdef UTIL_LDAP_SET_MUTEX_PERMS
+ result = unixd_set_global_mutex_perms(st->util_ldap_cache_lock);
+ if (result != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, result, s,
+ "LDAP cache: failed to set mutex permissions");
+ return result;
+ }
+#endif
+
+ /* merge config into all vhosts */
+ s_vhost = s->next;
+ while (s_vhost) {
+ st_vhost = (util_ldap_state_t *)ap_get_module_config(s_vhost->module_config, &ldap_module);
+
+#if APR_HAS_SHARED_MEMORY
+ st_vhost->cache_shm = st->cache_shm;
+ st_vhost->cache_rmm = st->cache_rmm;
+ st_vhost->cache_file = st->cache_file;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, result, s,
+ "LDAP merging Shared Cache conf: shm=0x%pp rmm=0x%pp for VHOST: %s",
+ st->cache_shm, st->cache_rmm, s_vhost->server_hostname);
+#endif
+ st_vhost->lock_file = st->lock_file;
+ s_vhost = s_vhost->next;
+ }
+#if APR_HAS_SHARED_MEMORY
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "LDAP cache: LDAPSharedCacheSize is zero, disabling shared memory cache");
+ }
+#endif
+
+ /* log the LDAP SDK used
+ */
+ #if APR_HAS_NETSCAPE_LDAPSDK
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: Built with Netscape LDAP SDK" );
+
+ #elif APR_HAS_NOVELL_LDAPSDK
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: Built with Novell LDAP SDK" );
+
+ #elif APR_HAS_OPENLDAP_LDAPSDK
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: Built with OpenLDAP LDAP SDK" );
+
+ #elif APR_HAS_MICROSOFT_LDAPSDK
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: Built with Microsoft LDAP SDK" );
+ #else
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: Built with unknown LDAP SDK" );
+
+ #endif /* APR_HAS_NETSCAPE_LDAPSDK */
+
+
+
+ apr_pool_cleanup_register(p, s, util_ldap_cleanup_module,
+ util_ldap_cleanup_module);
+
+ /* initialize SSL support if requested
+ */
+ if (st->cert_auth_file)
+ {
+ #if APR_HAS_LDAP_SSL /* compiled with ssl support */
+
+ #if APR_HAS_NETSCAPE_LDAPSDK
+
+ /* The Netscape SDK only supports a cert7.db file
+ */
+ if (st->cert_file_type == LDAP_CA_TYPE_CERT7_DB)
+ {
+ rc = ldapssl_client_init(st->cert_auth_file, NULL);
+ }
+ else
+ {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
+ "LDAP: Invalid LDAPTrustedCAType directive - "
+ "CERT7_DB_PATH type required");
+ rc = -1;
+ }
+
+ #elif APR_HAS_NOVELL_LDAPSDK
+
+ /* Novell SDK supports DER or BASE64 files
+ */
+ if (st->cert_file_type == LDAP_CA_TYPE_DER ||
+ st->cert_file_type == LDAP_CA_TYPE_BASE64 )
+ {
+ rc = ldapssl_client_init(NULL, NULL);
+ if (LDAP_SUCCESS == rc)
+ {
+ if (st->cert_file_type == LDAP_CA_TYPE_BASE64)
+ rc = ldapssl_add_trusted_cert(st->cert_auth_file,
+ LDAPSSL_CERT_FILETYPE_B64);
+ else
+ rc = ldapssl_add_trusted_cert(st->cert_auth_file,
+ LDAPSSL_CERT_FILETYPE_DER);
+
+ if (LDAP_SUCCESS != rc)
+ ldapssl_client_deinit();
+ }
+ }
+ else
+ {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
+ "LDAP: Invalid LDAPTrustedCAType directive - "
+ "DER_FILE or BASE64_FILE type required");
+ rc = -1;
+ }
+
+ #elif APR_HAS_OPENLDAP_LDAPSDK
+
+ /* OpenLDAP SDK supports BASE64 files
+ */
+ if (st->cert_file_type == LDAP_CA_TYPE_BASE64)
+ {
+ rc = ldap_set_option(NULL, LDAP_OPT_X_TLS_CACERTFILE, st->cert_auth_file);
+ }
+ else
+ {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
+ "LDAP: Invalid LDAPTrustedCAType directive - "
+ "BASE64_FILE type required");
+ rc = -1;
+ }
+
+
+ #elif APR_HAS_MICROSOFT_LDAPSDK
+
+ /* The Microsoft SDK uses the registry certificate store - always
+ * assume support is available
+ */
+ rc = LDAP_SUCCESS;
+
+ #else
+ rc = -1;
+ #endif /* APR_HAS_NETSCAPE_LDAPSDK */
+
+ #else /* not compiled with SSL Support */
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: Not built with SSL support." );
+ rc = -1;
+
+ #endif /* APR_HAS_LDAP_SSL */
+
+ if (LDAP_SUCCESS == rc)
+ {
+ st->ssl_support = 1;
+ }
+ else
+ {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "LDAP: SSL initialization failed");
+ st->ssl_support = 0;
+ }
+ }
+
+ /* The Microsoft SDK uses the registry certificate store -
+ * always assume support is available
+ */
+ #if APR_HAS_MICROSOFT_LDAPSDK
+ st->ssl_support = 1;
+ #endif
+
+
+ /* log SSL status - If SSL isn't available it isn't necessarily
+ * an error because the modules asking for LDAP connections
+ * may not ask for SSL support
+ */
+ if (st->ssl_support)
+ {
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: SSL support available" );
+ }
+ else
+ {
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "LDAP: SSL support unavailable" );
+ }
+
+ return(OK);
+}
+
+static void util_ldap_child_init(apr_pool_t *p, server_rec *s)
+{
+ apr_status_t sts;
+ util_ldap_state_t *st = ap_get_module_config(s->module_config, &ldap_module);
+
+ if (!st->util_ldap_cache_lock) return;
+
+ sts = apr_global_mutex_child_init(&st->util_ldap_cache_lock, st->lock_file, p);
+ if (sts != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, sts, s,
+ "Failed to initialise global mutex %s in child process %"
+ APR_PID_T_FMT
+ ".",
+ st->lock_file, getpid());
+ return;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, s,
+ "Initialisation of global mutex %s in child process %"
+ APR_PID_T_FMT
+ " successful.",
+ st->lock_file, getpid());
+ }
+}
+
+command_rec util_ldap_cmds[] = {
+ AP_INIT_TAKE1("LDAPSharedCacheSize", util_ldap_set_cache_bytes, NULL, RSRC_CONF,
+ "Sets the size of the shared memory cache in bytes. "
+ "Zero means disable the shared memory cache. Defaults to 100KB."),
+
+ AP_INIT_TAKE1("LDAPSharedCacheFile", util_ldap_set_cache_file, NULL, RSRC_CONF,
+ "Sets the file of the shared memory cache."
+ "Nothing means disable the shared memory cache."),
+
+ AP_INIT_TAKE1("LDAPCacheEntries", util_ldap_set_cache_entries, NULL, RSRC_CONF,
+ "Sets the maximum number of entries that are possible in the LDAP "
+ "search cache. "
+ "Zero means no limit; -1 disables the cache. Defaults to 1024 entries."),
+
+ AP_INIT_TAKE1("LDAPCacheTTL", util_ldap_set_cache_ttl, NULL, RSRC_CONF,
+ "Sets the maximum time (in seconds) that an item can be cached in the LDAP "
+ "search cache. Zero means no limit. Defaults to 600 seconds (10 minutes)."),
+
+ AP_INIT_TAKE1("LDAPOpCacheEntries", util_ldap_set_opcache_entries, NULL, RSRC_CONF,
+ "Sets the maximum number of entries that are possible in the LDAP "
+ "compare cache. "
+ "Zero means no limit; -1 disables the cache. Defaults to 1024 entries."),
+
+ AP_INIT_TAKE1("LDAPOpCacheTTL", util_ldap_set_opcache_ttl, NULL, RSRC_CONF,
+ "Sets the maximum time (in seconds) that an item is cached in the LDAP "
+ "operation cache. Zero means no limit. Defaults to 600 seconds (10 minutes)."),
+
+ AP_INIT_TAKE1("LDAPTrustedCA", util_ldap_set_cert_auth, NULL, RSRC_CONF,
+ "Sets the file containing the trusted Certificate Authority certificate. "
+ "Used to validate the LDAP server certificate for SSL connections."),
+
+ AP_INIT_TAKE1("LDAPTrustedCAType", util_ldap_set_cert_type, NULL, RSRC_CONF,
+ "Specifies the type of the Certificate Authority file. "
+ "The following types are supported: "
+ " DER_FILE - file in binary DER format "
+ " BASE64_FILE - file in Base64 format "
+ " CERT7_DB_PATH - Netscape certificate database file "),
+
+ AP_INIT_TAKE1("LDAPConnectionTimeout", util_ldap_set_connection_timeout, NULL, RSRC_CONF,
+ "Specifies the LDAP socket connection timeout in seconds. "
+ "Default is 10 seconds. "),
+
+ {NULL}
+};
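+
+/*
+ * Hypothetical example of these directives in a server-wide configuration
+ * (values for illustration only; they match the documented defaults except
+ * for the certificate settings, which are made up):
+ *
+ *   LDAPSharedCacheSize   100000
+ *   LDAPCacheEntries      1024
+ *   LDAPCacheTTL          600
+ *   LDAPOpCacheEntries    1024
+ *   LDAPOpCacheTTL        600
+ *   LDAPTrustedCA         conf/ldap-ca.pem
+ *   LDAPTrustedCAType     BASE64_FILE
+ *   LDAPConnectionTimeout 10
+ */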
+
+static void util_ldap_register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(util_ldap_post_config,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_handler(util_ldap_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_child_init(util_ldap_child_init, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module ldap_module = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* dir config creater */
+ NULL, /* dir merger --- default is to override */
+ util_ldap_create_config, /* server config */
+ NULL, /* merge server config */
+ util_ldap_cmds, /* command table */
+ util_ldap_register_hooks, /* set up request processing hooks */
+};
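+
+/*
+ * Hypothetical usage sketch (not part of this module): a module that
+ * authenticates against LDAP would typically obtain a connection with
+ * util_ldap_connection_find() and then verify the supplied credentials
+ * roughly like this:
+ *
+ *   const char *binddn;
+ *   const char **vals;
+ *   int rc = util_ldap_cache_checkuserid(r, ldc, url, basedn,
+ *                                        LDAP_SCOPE_SUBTREE, attrs,
+ *                                        filter, sent_password,
+ *                                        &binddn, &vals);
+ *   if (rc != LDAP_SUCCESS) {
+ *       return HTTP_UNAUTHORIZED;
+ *   }
+ *
+ * Here url, basedn, attrs, filter and sent_password are assumed to come from
+ * the calling module's own configuration; they are not defined here.
+ */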
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.def b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.def
new file mode 100644
index 00000000..f3ca3264
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.def
@@ -0,0 +1,7 @@
+EXPORT ldap_module
+EXPORT util_ldap_connection_find
+EXPORT util_ldap_connection_close
+EXPORT util_ldap_cache_checkuserid
+EXPORT util_ldap_cache_getuserdn
+EXPORT util_ldap_cache_compare
+EXPORT util_ldap_cache_comparedn
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.dsp b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.dsp
new file mode 100644
index 00000000..ad33d824
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap.dsp
@@ -0,0 +1,140 @@
+# Microsoft Developer Studio Project File - Name="util_ldap" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=util_ldap - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "util_ldap.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "util_ldap.mak" CFG="util_ldap - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "util_ldap - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "util_ldap - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "util_ldap - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "LDAP_DECLARE_EXPORT" /Fd"Release\util_ldap_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/util_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,util_ldap.so
+# ADD LINK32 kernel32.lib wldap32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/util_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,util_ldap.so /opt:ref
+
+!ELSEIF "$(CFG)" == "util_ldap - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "LDAP_DECLARE_EXPORT" /Fd"Debug\util_ldap_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/util_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,util_ldap.so
+# ADD LINK32 kernel32.lib wldap32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/util_ldap.so" /base:@..\..\os\win32\BaseAddr.ref,util_ldap.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "util_ldap - Win32 Release"
+# Name "util_ldap - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\util_ldap.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\util_ldap.rc
+# End Source File
+# Begin Source File
+
+SOURCE=.\util_ldap_cache.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\util_ldap_cache.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\util_ldap_cache_mgr.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "util_ldap - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\util_ldap.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk util_ldap.so "LDAP Utility Module for Apache" ../../include/ap_release.h > .\util_ldap.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "util_ldap - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\util_ldap.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk util_ldap.so "LDAP Utility Module for Apache" ../../include/ap_release.h > .\util_ldap.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.c b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.c
new file mode 100644
index 00000000..8f6062bb
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.c
@@ -0,0 +1,450 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * util_ldap_cache.c: LDAP cache things
+ *
+ * Original code from auth_ldap module for Apache v1.3:
+ * Copyright 1998, 1999 Enbridge Pipelines Inc.
+ * Copyright 1999-2001 Dave Carrigan
+ */
+
+#include <apr_ldap.h>
+#include <apr_strings.h>
+#include "util_ldap.h"
+#include "util_ldap_cache.h"
+
+#ifdef APU_HAS_LDAP
+
+#if APR_HAS_SHARED_MEMORY
+#define MODLDAP_SHMEM_CACHE "/tmp/mod_ldap_cache"
+#endif
+
+/* ------------------------------------------------------------------ */
+
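+/* Each cache node type below (URL, search, compare and DN compare) supplies
+ * the five callbacks expected by util_ald_create_cache(): hash, compare,
+ * copy, free and display.
+ */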
+unsigned long util_ldap_url_node_hash(void *n)
+{
+ util_url_node_t *node = (util_url_node_t *)n;
+ return util_ald_hash_string(1, node->url);
+}
+
+int util_ldap_url_node_compare(void *a, void *b)
+{
+ util_url_node_t *na = (util_url_node_t *)a;
+ util_url_node_t *nb = (util_url_node_t *)b;
+
+ return(strcmp(na->url, nb->url) == 0);
+}
+
+void *util_ldap_url_node_copy(util_ald_cache_t *cache, void *c)
+{
+ util_url_node_t *n = (util_url_node_t *)c;
+ util_url_node_t *node = (util_url_node_t *)util_ald_alloc(cache, sizeof(util_url_node_t));
+
+ if (node) {
+ if (!(node->url = util_ald_strdup(cache, n->url))) {
+ util_ald_free(cache, node); /* strdup failed; free the node itself, url is NULL */
+ return NULL;
+ }
+ node->search_cache = n->search_cache;
+ node->compare_cache = n->compare_cache;
+ node->dn_compare_cache = n->dn_compare_cache;
+ return node;
+ }
+ else {
+ return NULL;
+ }
+}
+
+void util_ldap_url_node_free(util_ald_cache_t *cache, void *n)
+{
+ util_url_node_t *node = (util_url_node_t *)n;
+
+ util_ald_free(cache, node->url);
+ util_ald_destroy_cache(node->search_cache);
+ util_ald_destroy_cache(node->compare_cache);
+ util_ald_destroy_cache(node->dn_compare_cache);
+ util_ald_free(cache, node);
+}
+
+void util_ldap_url_node_display(request_rec *r, util_ald_cache_t *cache, void *n)
+{
+ util_url_node_t *node = (util_url_node_t *)n;
+ char date_str[APR_CTIME_LEN+1];
+ char *buf;
+ const char *type_str;
+ util_ald_cache_t *cache_node;
+ int x;
+
+ for (x=0;x<3;x++) {
+ switch (x) {
+ case 0:
+ cache_node = node->search_cache;
+ type_str = "Searches";
+ break;
+ case 1:
+ cache_node = node->compare_cache;
+ type_str = "Compares";
+ break;
+ case 2:
+ cache_node = node->dn_compare_cache;
+ type_str = "DN Compares";
+ break;
+ }
+
+ if (cache_node->marktime) {
+ apr_ctime(date_str, cache_node->marktime);
+ }
+ else
+ date_str[0] = 0;
+
+ buf = apr_psprintf(r->pool,
+ "<tr valign='top'>"
+ "<td nowrap>%s (%s)</td>"
+ "<td nowrap>%ld</td>"
+ "<td nowrap>%ld</td>"
+ "<td nowrap>%ld</td>"
+ "<td nowrap>%ld</td>"
+ "<td nowrap>%s</td>"
+ "<tr>",
+ node->url,
+ type_str,
+ cache_node->size,
+ cache_node->maxentries,
+ cache_node->numentries,
+ cache_node->fullmark,
+ date_str);
+
+ ap_rputs(buf, r);
+ }
+
+}
+
+/* ------------------------------------------------------------------ */
+
+/* Cache functions for search nodes */
+unsigned long util_ldap_search_node_hash(void *n)
+{
+ util_search_node_t *node = (util_search_node_t *)n;
+ return util_ald_hash_string(1, ((util_search_node_t *)(node))->username);
+}
+
+int util_ldap_search_node_compare(void *a, void *b)
+{
+ return(strcmp(((util_search_node_t *)a)->username,
+ ((util_search_node_t *)b)->username) == 0);
+}
+
+void *util_ldap_search_node_copy(util_ald_cache_t *cache, void *c)
+{
+ util_search_node_t *node = (util_search_node_t *)c;
+ util_search_node_t *newnode = util_ald_alloc(cache, sizeof(util_search_node_t));
+
+ /* safety check */
+ if (newnode) {
+
+ /* copy vals */
+ if (node->vals) {
+ int k = node->numvals;
+ int i = 0;
+ if (!(newnode->vals = util_ald_alloc(cache, sizeof(char *) * (k+1)))) {
+ util_ldap_search_node_free(cache, newnode);
+ return NULL;
+ }
+ newnode->numvals = node->numvals;
+ for (;k;k--) {
+ if (node->vals[i]) {
+ if (!(newnode->vals[i] = util_ald_strdup(cache, node->vals[i]))) {
+ util_ldap_search_node_free(cache, newnode);
+ return NULL;
+ }
+ }
+ else
+ newnode->vals[i] = NULL;
+ i++;
+ }
+ }
+ else {
+ newnode->vals = NULL;
+ }
+ if (!(newnode->username = util_ald_strdup(cache, node->username)) ||
+ !(newnode->dn = util_ald_strdup(cache, node->dn)) ) {
+ util_ldap_search_node_free(cache, newnode);
+ return NULL;
+ }
+ if(node->bindpw) {
+ if(!(newnode->bindpw = util_ald_strdup(cache, node->bindpw))) {
+ util_ldap_search_node_free(cache, newnode);
+ return NULL;
+ }
+ } else {
+ newnode->bindpw = NULL;
+ }
+ newnode->lastbind = node->lastbind;
+
+ }
+ return (void *)newnode;
+}
+
+void util_ldap_search_node_free(util_ald_cache_t *cache, void *n)
+{
+ int i = 0;
+ util_search_node_t *node = (util_search_node_t *)n;
+ int k = node->numvals;
+
+ if (node->vals) {
+ for (;k;k--,i++) {
+ if (node->vals[i]) {
+ util_ald_free(cache, node->vals[i]);
+ }
+ }
+ util_ald_free(cache, node->vals);
+ }
+ util_ald_free(cache, node->username);
+ util_ald_free(cache, node->dn);
+ util_ald_free(cache, node->bindpw);
+ util_ald_free(cache, node);
+}
+
+void util_ldap_search_node_display(request_rec *r, util_ald_cache_t *cache, void *n)
+{
+ util_search_node_t *node = (util_search_node_t *)n;
+ char date_str[APR_CTIME_LEN+1];
+ char *buf;
+
+ apr_ctime(date_str, node->lastbind);
+
+ buf = apr_psprintf(r->pool,
+ "<tr valign='top'>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<tr>",
+ node->username,
+ node->dn,
+ date_str);
+
+ ap_rputs(buf, r);
+}
+
+/* ------------------------------------------------------------------ */
+
+unsigned long util_ldap_compare_node_hash(void *n)
+{
+ util_compare_node_t *node = (util_compare_node_t *)n;
+ return util_ald_hash_string(3, node->dn, node->attrib, node->value);
+}
+
+int util_ldap_compare_node_compare(void *a, void *b)
+{
+ util_compare_node_t *na = (util_compare_node_t *)a;
+ util_compare_node_t *nb = (util_compare_node_t *)b;
+ return (strcmp(na->dn, nb->dn) == 0 &&
+ strcmp(na->attrib, nb->attrib) == 0 &&
+ strcmp(na->value, nb->value) == 0);
+}
+
+void *util_ldap_compare_node_copy(util_ald_cache_t *cache, void *c)
+{
+ util_compare_node_t *n = (util_compare_node_t *)c;
+ util_compare_node_t *node = (util_compare_node_t *)util_ald_alloc(cache, sizeof(util_compare_node_t));
+
+ if (node) {
+ if (!(node->dn = util_ald_strdup(cache, n->dn)) ||
+ !(node->attrib = util_ald_strdup(cache, n->attrib)) ||
+ !(node->value = util_ald_strdup(cache, n->value))) {
+ util_ldap_compare_node_free(cache, node);
+ return NULL;
+ }
+ node->lastcompare = n->lastcompare;
+ node->result = n->result;
+ return node;
+ }
+ else {
+ return NULL;
+ }
+}
+
+void util_ldap_compare_node_free(util_ald_cache_t *cache, void *n)
+{
+ util_compare_node_t *node = (util_compare_node_t *)n;
+ util_ald_free(cache, node->dn);
+ util_ald_free(cache, node->attrib);
+ util_ald_free(cache, node->value);
+ util_ald_free(cache, node);
+}
+
+void util_ldap_compare_node_display(request_rec *r, util_ald_cache_t *cache, void *n)
+{
+ util_compare_node_t *node = (util_compare_node_t *)n;
+ char date_str[APR_CTIME_LEN+1];
+ char *buf, *cmp_result;
+
+ apr_ctime(date_str, node->lastcompare);
+
+ if (node->result == LDAP_COMPARE_TRUE) {
+ cmp_result = "LDAP_COMPARE_TRUE";
+ }
+ else if (node->result == LDAP_COMPARE_FALSE) {
+ cmp_result = "LDAP_COMPARE_FALSE";
+ }
+ else {
+ cmp_result = apr_itoa(r->pool, node->result);
+ }
+
+ buf = apr_psprintf(r->pool,
+ "<tr valign='top'>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<tr>",
+ node->dn,
+ node->attrib,
+ node->value,
+ date_str,
+ cmp_result);
+
+ ap_rputs(buf, r);
+}
+
+/* ------------------------------------------------------------------ */
+
+unsigned long util_ldap_dn_compare_node_hash(void *n)
+{
+ return util_ald_hash_string(1, ((util_dn_compare_node_t *)n)->reqdn);
+}
+
+int util_ldap_dn_compare_node_compare(void *a, void *b)
+{
+ return (strcmp(((util_dn_compare_node_t *)a)->reqdn,
+ ((util_dn_compare_node_t *)b)->reqdn) == 0);
+}
+
+void *util_ldap_dn_compare_node_copy(util_ald_cache_t *cache, void *c)
+{
+ util_dn_compare_node_t *n = (util_dn_compare_node_t *)c;
+ util_dn_compare_node_t *node = (util_dn_compare_node_t *)util_ald_alloc(cache, sizeof(util_dn_compare_node_t));
+ if (node) {
+ if (!(node->reqdn = util_ald_strdup(cache, n->reqdn)) ||
+ !(node->dn = util_ald_strdup(cache, n->dn))) {
+ util_ldap_dn_compare_node_free(cache, node);
+ return NULL;
+ }
+ return node;
+ }
+ else {
+ return NULL;
+ }
+}
+
+void util_ldap_dn_compare_node_free(util_ald_cache_t *cache, void *n)
+{
+ util_dn_compare_node_t *node = (util_dn_compare_node_t *)n;
+ util_ald_free(cache, node->reqdn);
+ util_ald_free(cache, node->dn);
+ util_ald_free(cache, node);
+}
+
+void util_ldap_dn_compare_node_display(request_rec *r, util_ald_cache_t *cache, void *n)
+{
+ util_dn_compare_node_t *node = (util_dn_compare_node_t *)n;
+ char *buf;
+
+ buf = apr_psprintf(r->pool,
+ "<tr valign='top'>"
+ "<td nowrap>%s</td>"
+ "<td nowrap>%s</td>"
+ "<tr>",
+ node->reqdn,
+ node->dn);
+
+ ap_rputs(buf, r);
+}
+
+
+/* ------------------------------------------------------------------ */
+apr_status_t util_ldap_cache_child_kill(void *data);
+apr_status_t util_ldap_cache_module_kill(void *data);
+
+apr_status_t util_ldap_cache_module_kill(void *data)
+{
+ util_ldap_state_t *st = (util_ldap_state_t *)data;
+
+ util_ald_destroy_cache(st->util_ldap_cache);
+#if APR_HAS_SHARED_MEMORY
+ if (st->cache_rmm != NULL) {
+ apr_rmm_destroy (st->cache_rmm);
+ st->cache_rmm = NULL;
+ }
+ if (st->cache_shm != NULL) {
+ apr_status_t result = apr_shm_destroy(st->cache_shm);
+ st->cache_shm = NULL;
+ apr_file_remove(st->cache_file, st->pool);
+ return result;
+ }
+#endif
+ return APR_SUCCESS;
+}
+
+apr_status_t util_ldap_cache_init(apr_pool_t *pool, util_ldap_state_t *st)
+{
+#if APR_HAS_SHARED_MEMORY
+ apr_status_t result;
+ apr_size_t size;
+
+ size = APR_ALIGN_DEFAULT(st->cache_bytes);
+
+ result = apr_shm_create(&st->cache_shm, size, st->cache_file, st->pool);
+ if (result == APR_EEXIST) {
+ /*
+ * The cache could have already been created (i.e. we may be a child process). See
+ * if we can attach to the existing shared memory
+ */
+ result = apr_shm_attach(&st->cache_shm, st->cache_file, st->pool);
+ }
+ if (result != APR_SUCCESS) {
+ return result;
+ }
+
+ /* Determine the usable size of the shm segment. */
+ size = apr_shm_size_get(st->cache_shm);
+
+ /* This will create a rmm "handler" to get into the shared memory area */
+ result = apr_rmm_init(&st->cache_rmm, NULL,
+ apr_shm_baseaddr_get(st->cache_shm), size,
+ st->pool);
+ if (result != APR_SUCCESS) {
+ return result;
+ }
+
+#endif
+
+ apr_pool_cleanup_register(st->pool, st , util_ldap_cache_module_kill, apr_pool_cleanup_null);
+
+ st->util_ldap_cache =
+ util_ald_create_cache(st,
+ util_ldap_url_node_hash,
+ util_ldap_url_node_compare,
+ util_ldap_url_node_copy,
+ util_ldap_url_node_free,
+ util_ldap_url_node_display);
+ return APR_SUCCESS;
+}
+
+
+#endif /* APU_HAS_LDAP */
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.h b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.h
new file mode 100644
index 00000000..2c1c09c1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache.h
@@ -0,0 +1,193 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef APU_LDAP_CACHE_H
+#define APU_LDAP_CACHE_H
+
+/*
+ * This switches LDAP support on or off.
+ */
+
+/* this whole thing disappears if LDAP is not enabled */
+#ifdef APU_HAS_LDAP
+
+
+/*
+ * LDAP Cache Manager
+ */
+
+#if APR_HAS_SHARED_MEMORY
+#include <apr_shm.h>
+#include <apr_rmm.h> /* EDD */
+#endif
+
+typedef struct util_cache_node_t {
+ void *payload; /* Pointer to the payload */
+ apr_time_t add_time; /* Time node was added to cache */
+ struct util_cache_node_t *next;
+} util_cache_node_t;
+
+typedef struct util_ald_cache util_ald_cache_t;
+
+struct util_ald_cache {
+ unsigned long size; /* Size of cache array */
+ unsigned long maxentries; /* Maximum number of cache entries */
+ unsigned long numentries; /* Current number of cache entries */
+ unsigned long fullmark; /* Used to keep track of when cache becomes 3/4 full */
+ apr_time_t marktime; /* Time that the cache became 3/4 full */
+ unsigned long (*hash)(void *); /* Func to hash the payload */
+ int (*compare)(void *, void *); /* Func to compare two payloads */
+ void * (*copy)(util_ald_cache_t *cache, void *); /* Func to alloc mem and copy payload to new mem */
+ void (*free)(util_ald_cache_t *cache, void *); /* Func to free mem used by the payload */
+ void (*display)(request_rec *r, util_ald_cache_t *cache, void *); /* Func to display the payload contents */
+ util_cache_node_t **nodes;
+
+ unsigned long numpurges; /* No. of times the cache has been purged */
+ double avg_purgetime; /* Average time to purge the cache */
+ apr_time_t last_purge; /* Time of the last purge */
+ unsigned long npurged; /* Number of elements purged in last purge. This is not
+ obvious: it won't be 3/4 the size of the cache if
+ there were a lot of expired entries. */
+
+ unsigned long fetches; /* Number of fetches */
+ unsigned long hits; /* Number of cache hits */
+ unsigned long inserts; /* Number of inserts */
+ unsigned long removes; /* Number of removes */
+
+#if APR_HAS_SHARED_MEMORY
+ apr_shm_t *shm_addr;
+ apr_rmm_t *rmm_addr;
+#endif
+
+};
+
+#ifndef WIN32
+#define ALD_MM_FILE_MODE ( S_IRUSR|S_IWUSR )
+#else
+#define ALD_MM_FILE_MODE ( _S_IREAD|_S_IWRITE )
+#endif
+
+
+/*
+ * LDAP Cache
+ */
+
+/*
+ * Maintain a cache of LDAP URLs that the server handles. Each node in
+ * the cache contains the search cache for that URL, and a compare cache
+ * for the URL. The compare cache is populated when doing require group
+ * compares.
+ */
+typedef struct util_url_node_t {
+ const char *url;
+ util_ald_cache_t *search_cache;
+ util_ald_cache_t *compare_cache;
+ util_ald_cache_t *dn_compare_cache;
+} util_url_node_t;
+
+/*
+ * We cache every successful search and bind operation, using the username
+ * as the key. Each node in the cache contains the returned DN, plus the
+ * password used to bind.
+ */
+typedef struct util_search_node_t {
+ const char *username; /* Cache key */
+ const char *dn; /* DN returned from search */
+ const char *bindpw; /* The most recently used bind password;
+ NULL if the bind failed */
+ apr_time_t lastbind; /* Time of last successful bind */
+ const char **vals; /* Values of queried attributes */
+ int numvals; /* Number of queried attributes */
+} util_search_node_t;
+
+/*
+ * We cache every successful compare operation, using the DN, attrib, and
+ * value as the key.
+ */
+typedef struct util_compare_node_t {
+ const char *dn; /* DN, attrib and value combine to be the key */
+ const char *attrib;
+ const char *value;
+ apr_time_t lastcompare;
+ int result;
+} util_compare_node_t;
+
+/*
+ * We cache every successful compare dn operation, using the dn in the require
+ * statement and the dn fetched based on the client-provided username.
+ */
+typedef struct util_dn_compare_node_t {
+ const char *reqdn; /* The DN in the require dn statement */
+ const char *dn; /* The DN found in the search */
+} util_dn_compare_node_t;
+
+
+/*
+ * Function prototypes for LDAP cache
+ */
+
+/* util_ldap_cache.c */
+unsigned long util_ldap_url_node_hash(void *n);
+int util_ldap_url_node_compare(void *a, void *b);
+void *util_ldap_url_node_copy(util_ald_cache_t *cache, void *c);
+void util_ldap_url_node_free(util_ald_cache_t *cache, void *n);
+void util_ldap_url_node_display(request_rec *r, util_ald_cache_t *cache, void *n);
+
+unsigned long util_ldap_search_node_hash(void *n);
+int util_ldap_search_node_compare(void *a, void *b);
+void *util_ldap_search_node_copy(util_ald_cache_t *cache, void *c);
+void util_ldap_search_node_free(util_ald_cache_t *cache, void *n);
+void util_ldap_search_node_display(request_rec *r, util_ald_cache_t *cache, void *n);
+
+unsigned long util_ldap_compare_node_hash(void *n);
+int util_ldap_compare_node_compare(void *a, void *b);
+void *util_ldap_compare_node_copy(util_ald_cache_t *cache, void *c);
+void util_ldap_compare_node_free(util_ald_cache_t *cache, void *n);
+void util_ldap_compare_node_display(request_rec *r, util_ald_cache_t *cache, void *n);
+
+unsigned long util_ldap_dn_compare_node_hash(void *n);
+int util_ldap_dn_compare_node_compare(void *a, void *b);
+void *util_ldap_dn_compare_node_copy(util_ald_cache_t *cache, void *c);
+void util_ldap_dn_compare_node_free(util_ald_cache_t *cache, void *n);
+void util_ldap_dn_compare_node_display(request_rec *r, util_ald_cache_t *cache, void *n);
+
+
+/* util_ldap_cache_mgr.c */
+
+/* Cache alloc and free functions; they use shared memory (shm) when it is available */
+void util_ald_free(util_ald_cache_t *cache, const void *ptr);
+void *util_ald_alloc(util_ald_cache_t *cache, unsigned long size);
+const char *util_ald_strdup(util_ald_cache_t *cache, const char *s);
+
+/* Cache management functions */
+unsigned long util_ald_hash_string(int nstr, ...);
+void util_ald_cache_purge(util_ald_cache_t *cache);
+util_url_node_t *util_ald_create_caches(util_ldap_state_t *s, const char *url);
+util_ald_cache_t *util_ald_create_cache(util_ldap_state_t *st,
+ unsigned long (*hashfunc)(void *),
+ int (*comparefunc)(void *, void *),
+ void * (*copyfunc)(util_ald_cache_t *cache, void *),
+ void (*freefunc)(util_ald_cache_t *cache, void *),
+ void (*displayfunc)(request_rec *r, util_ald_cache_t *cache, void *));
+
+void util_ald_destroy_cache(util_ald_cache_t *cache);
+void *util_ald_cache_fetch(util_ald_cache_t *cache, void *payload);
+void *util_ald_cache_insert(util_ald_cache_t *cache, void *payload);
+void util_ald_cache_remove(util_ald_cache_t *cache, void *payload);
+char *util_ald_cache_display_stats(request_rec *r, util_ald_cache_t *cache, char *name, char *id);
+
+#endif /* APU_HAS_LDAP */
+#endif /* APU_LDAP_CACHE_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache_mgr.c b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache_mgr.c
new file mode 100644
index 00000000..178ac185
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/experimental/util_ldap_cache_mgr.c
@@ -0,0 +1,762 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * util_ldap_cache_mgr.c: LDAP cache manager routines
+ *
+ * Original code from auth_ldap module for Apache v1.3:
+ * Copyright 1998, 1999 Enbridge Pipelines Inc.
+ * Copyright 1999-2001 Dave Carrigan
+ */
+
+#include <apr_ldap.h>
+#include "util_ldap.h"
+#include "util_ldap_cache.h"
+#include <apr_strings.h>
+
+#ifdef APU_HAS_LDAP
+
+/* only here until strdup is gone */
+#include <string.h>
+
+/* here till malloc is gone */
+#include <stdlib.h>
+
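+/* Candidate hash-table sizes: util_ald_create_cache() below rounds the
+ * configured cache size up to one of these primes so entries spread evenly
+ * across the buckets; the trailing zero terminates the list. */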
+static const unsigned long primes[] =
+{
+ 11,
+ 19,
+ 37,
+ 73,
+ 109,
+ 163,
+ 251,
+ 367,
+ 557,
+ 823,
+ 1237,
+ 1861,
+ 2777,
+ 4177,
+ 6247,
+ 9371,
+ 14057,
+ 21089,
+ 31627,
+ 47431,
+ 71143,
+ 106721,
+ 160073,
+ 240101,
+ 360163,
+ 540217,
+ 810343,
+ 1215497,
+ 1823231,
+ 2734867,
+ 4102283,
+ 6153409,
+ 9230113,
+ 13845163,
+ 0
+};
+
+void util_ald_free(util_ald_cache_t *cache, const void *ptr)
+{
+#if APR_HAS_SHARED_MEMORY
+ if (cache->rmm_addr) {
+ if (ptr)
+ /* Free in shared memory */
+ apr_rmm_free(cache->rmm_addr, apr_rmm_offset_get(cache->rmm_addr, (void *)ptr));
+ }
+ else {
+ if (ptr)
+ /* Cache shm is not used */
+ free((void *)ptr);
+ }
+#else
+ if (ptr)
+ free((void *)ptr);
+#endif
+}
+
+void *util_ald_alloc(util_ald_cache_t *cache, unsigned long size)
+{
+ if (0 == size)
+ return NULL;
+#if APR_HAS_SHARED_MEMORY
+ if (cache->rmm_addr) {
+ /* allocate from shared memory */
+ apr_rmm_off_t block = apr_rmm_calloc(cache->rmm_addr, size);
+ return block ? (void *)apr_rmm_addr_get(cache->rmm_addr, block) : NULL;
+ }
+ else {
+ /* Cache shm is not used */
+ return (void *)calloc(sizeof(char), size);
+ }
+#else
+ return (void *)calloc(sizeof(char), size);
+#endif
+}
+
+const char *util_ald_strdup(util_ald_cache_t *cache, const char *s)
+{
+#if APR_HAS_SHARED_MEMORY
+ if (cache->rmm_addr) {
+ /* allocate from shared memory */
+ apr_rmm_off_t block = apr_rmm_calloc(cache->rmm_addr, strlen(s)+1);
+ char *buf = block ? (char *)apr_rmm_addr_get(cache->rmm_addr, block) : NULL;
+ if (buf) {
+ strcpy(buf, s);
+ return buf;
+ }
+ else {
+ return NULL;
+ }
+ } else {
+ /* Cache shm is not used */
+ return strdup(s);
+ }
+#else
+ return strdup(s);
+#endif
+}
+
+
+/*
+ * Computes the hash on a set of strings. The first argument is the number
+ * of strings to hash; the remaining arguments are the strings themselves.
+ * Algorithm taken from glibc.
+ */
+unsigned long util_ald_hash_string(int nstr, ...)
+{
+ int i;
+ va_list args;
+ unsigned long h=0, g;
+ char *str, *p;
+
+ va_start(args, nstr);
+ for (i=0; i < nstr; ++i) {
+ str = va_arg(args, char *);
+ for (p = str; *p; ++p) {
+ h = ( h << 4 ) + *p;
+ if ( ( g = h & 0xf0000000 ) ) {
+ h = h ^ (g >> 24);
+ h = h ^ g;
+ }
+ }
+ }
+ va_end(args);
+
+ return h;
+}
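+
+/* Illustrative use (not a call made in this file): a compare-cache key could
+ * be hashed as util_ald_hash_string(3, dn, attrib, value), so that all three
+ * strings contribute to the bucket index. */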
+
+
+/*
+ Purges a cache that has gotten full. We keep track of the time that we
+ added the entry that made the cache 3/4 full, then delete all entries
+ that were added before that time. It's pretty simplistic, but time to
+ purge is only O(n), which is more important.
+*/
+void util_ald_cache_purge(util_ald_cache_t *cache)
+{
+ unsigned long i;
+ util_cache_node_t *p, *q, **pp;
+ apr_time_t t;
+
+ if (!cache)
+ return;
+
+ cache->last_purge = apr_time_now();
+ cache->npurged = 0;
+ cache->numpurges++;
+
+ for (i=0; i < cache->size; ++i) {
+ pp = cache->nodes + i;
+ p = *pp;
+ while (p != NULL) {
+ if (p->add_time < cache->marktime) {
+ q = p->next;
+ (*cache->free)(cache, p->payload);
+ util_ald_free(cache, p);
+ cache->numentries--;
+ cache->npurged++;
+ p = *pp = q;
+ }
+ else {
+ pp = &(p->next);
+ p = *pp;
+ }
+ }
+ }
+
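+    /* Fold the time spent in this purge into a running average over all
+     * purges performed so far. */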
+ t = apr_time_now();
+ cache->avg_purgetime =
+ ((t - cache->last_purge) + (cache->avg_purgetime * (cache->numpurges-1))) /
+ cache->numpurges;
+}
+
+
+/*
+ * create caches
+ */
+util_url_node_t *util_ald_create_caches(util_ldap_state_t *st, const char *url)
+{
+ util_url_node_t curl, *newcurl = NULL;
+ util_ald_cache_t *search_cache;
+ util_ald_cache_t *compare_cache;
+ util_ald_cache_t *dn_compare_cache;
+
+ /* create the three caches */
+ search_cache = util_ald_create_cache(st,
+ util_ldap_search_node_hash,
+ util_ldap_search_node_compare,
+ util_ldap_search_node_copy,
+ util_ldap_search_node_free,
+ util_ldap_search_node_display);
+ compare_cache = util_ald_create_cache(st,
+ util_ldap_compare_node_hash,
+ util_ldap_compare_node_compare,
+ util_ldap_compare_node_copy,
+ util_ldap_compare_node_free,
+ util_ldap_compare_node_display);
+ dn_compare_cache = util_ald_create_cache(st,
+ util_ldap_dn_compare_node_hash,
+ util_ldap_dn_compare_node_compare,
+ util_ldap_dn_compare_node_copy,
+ util_ldap_dn_compare_node_free,
+ util_ldap_dn_compare_node_display);
+
+ /* check that all the caches initialised successfully */
+ if (search_cache && compare_cache && dn_compare_cache) {
+
+ /* The contents of this structure will be duplicated in shared
+ memory during the insert. So use stack memory rather than
+ pool memory to avoid a memory leak. */
+ memset (&curl, 0, sizeof(util_url_node_t));
+ curl.url = url;
+ curl.search_cache = search_cache;
+ curl.compare_cache = compare_cache;
+ curl.dn_compare_cache = dn_compare_cache;
+
+ newcurl = util_ald_cache_insert(st->util_ldap_cache, &curl);
+
+ }
+
+ return newcurl;
+}
+
+
+util_ald_cache_t *util_ald_create_cache(util_ldap_state_t *st,
+ unsigned long (*hashfunc)(void *),
+ int (*comparefunc)(void *, void *),
+ void * (*copyfunc)(util_ald_cache_t *cache, void *),
+ void (*freefunc)(util_ald_cache_t *cache, void *),
+ void (*displayfunc)(request_rec *r, util_ald_cache_t *cache, void *))
+{
+ util_ald_cache_t *cache;
+ unsigned long i;
+
+ if (st->search_cache_size <= 0)
+ return NULL;
+
+#if APR_HAS_SHARED_MEMORY
+ if (!st->cache_rmm) {
+ return NULL;
+ }
+ else {
+ apr_rmm_off_t block = apr_rmm_calloc(st->cache_rmm, sizeof(util_ald_cache_t));
+ cache = block ? (util_ald_cache_t *)apr_rmm_addr_get(st->cache_rmm, block) : NULL;
+ }
+#else
+ cache = (util_ald_cache_t *)calloc(sizeof(util_ald_cache_t), 1);
+#endif
+ if (!cache)
+ return NULL;
+
+#if APR_HAS_SHARED_MEMORY
+ cache->rmm_addr = st->cache_rmm;
+ cache->shm_addr = st->cache_shm;
+#endif
+ cache->maxentries = st->search_cache_size;
+ cache->numentries = 0;
+ cache->size = st->search_cache_size / 3;
+ if (cache->size < 64) cache->size = 64;
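+    /* Round the bucket count up to the next prime in the table above (or use
+     * the largest available prime) for a better hash distribution. */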
+ for (i = 0; primes[i] && primes[i] < cache->size; ++i) ;
+ cache->size = primes[i]? primes[i] : primes[i-1];
+
+ cache->nodes = (util_cache_node_t **)util_ald_alloc(cache, cache->size * sizeof(util_cache_node_t *));
+ if (!cache->nodes) {
+ util_ald_free(cache, cache);
+ return NULL;
+ }
+
+ for (i=0; i < cache->size; ++i)
+ cache->nodes[i] = NULL;
+
+ cache->hash = hashfunc;
+ cache->compare = comparefunc;
+ cache->copy = copyfunc;
+ cache->free = freefunc;
+ cache->display = displayfunc;
+
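+    /* The full mark is 3/4 of capacity; when an insert reaches it, marktime
+     * is recorded and later used by util_ald_cache_purge() as its cut-off. */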
+ cache->fullmark = cache->maxentries / 4 * 3;
+ cache->marktime = 0;
+ cache->avg_purgetime = 0.0;
+ cache->numpurges = 0;
+ cache->last_purge = 0;
+ cache->npurged = 0;
+
+ cache->fetches = 0;
+ cache->hits = 0;
+ cache->inserts = 0;
+ cache->removes = 0;
+
+ return cache;
+}
+
+void util_ald_destroy_cache(util_ald_cache_t *cache)
+{
+ unsigned long i;
+ util_cache_node_t *p, *q;
+
+ if (cache == NULL)
+ return;
+
+ for (i = 0; i < cache->size; ++i) {
+ p = cache->nodes[i];
+ q = NULL;
+ while (p != NULL) {
+ q = p->next;
+ (*cache->free)(cache, p->payload);
+ util_ald_free(cache, p);
+ p = q;
+ }
+ }
+ util_ald_free(cache, cache->nodes);
+ util_ald_free(cache, cache);
+}
+
+void *util_ald_cache_fetch(util_ald_cache_t *cache, void *payload)
+{
+ int hashval;
+ util_cache_node_t *p;
+
+ if (cache == NULL)
+ return NULL;
+
+ cache->fetches++;
+
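+    /* Hash the payload to a bucket, then walk the chain until the compare
+     * callback reports a match (or the chain ends). */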
+ hashval = (*cache->hash)(payload) % cache->size;
+ for (p = cache->nodes[hashval];
+ p && !(*cache->compare)(p->payload, payload);
+ p = p->next) ;
+
+ if (p != NULL) {
+ cache->hits++;
+ return p->payload;
+ }
+ else {
+ return NULL;
+ }
+}
+
+/*
+ * Insert an item into the cache.
+ * *** Does not catch duplicates!!! ***
+ */
+void *util_ald_cache_insert(util_ald_cache_t *cache, void *payload)
+{
+ int hashval;
+ util_cache_node_t *node;
+
+ /* sanity check */
+ if (cache == NULL || payload == NULL) {
+ return NULL;
+ }
+
+ /* check if we are full - if so, try purge */
+ if (cache->numentries >= cache->maxentries) {
+ util_ald_cache_purge(cache);
+ if (cache->numentries >= cache->maxentries) {
+ /* if the purge was not effective, we leave now to avoid an overflow */
+ return NULL;
+ }
+ }
+
+ /* should be safe to add an entry */
+ if ((node = (util_cache_node_t *)util_ald_alloc(cache, sizeof(util_cache_node_t))) == NULL) {
+ return NULL;
+ }
+
+    /* Take a copy of the payload before proceeding. */
+ payload = (*cache->copy)(cache, payload);
+ if (!payload) {
+ util_ald_free(cache, node);
+ return NULL;
+ }
+
+ /* populate the entry */
+ cache->inserts++;
+ hashval = (*cache->hash)(payload) % cache->size;
+ node->add_time = apr_time_now();
+ node->payload = payload;
+ node->next = cache->nodes[hashval];
+ cache->nodes[hashval] = node;
+
+ /* if we reach the full mark, note the time we did so
+ * for the benefit of the purge function
+ */
+ if (++cache->numentries == cache->fullmark) {
+ cache->marktime=apr_time_now();
+ }
+
+ return node->payload;
+}
+
+void util_ald_cache_remove(util_ald_cache_t *cache, void *payload)
+{
+ int hashval;
+ util_cache_node_t *p, *q;
+
+ if (cache == NULL)
+ return;
+
+ cache->removes++;
+ hashval = (*cache->hash)(payload) % cache->size;
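+    /* Walk the chain, keeping q pointing at the predecessor of p so the
+     * matching node can be unlinked below. */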
+ for (p = cache->nodes[hashval], q=NULL;
+ p && !(*cache->compare)(p->payload, payload);
+ p = p->next) {
+ q = p;
+ }
+
+ /* If p is null, it means that we couldn't find the node, so just return */
+ if (p == NULL)
+ return;
+
+ if (q == NULL) {
+ /* We found the node, and it's the first in the list */
+ cache->nodes[hashval] = p->next;
+ }
+ else {
+ /* We found the node and it's not the first in the list */
+ q->next = p->next;
+ }
+ (*cache->free)(cache, p->payload);
+ util_ald_free(cache, p);
+ cache->numentries--;
+}
+
+char *util_ald_cache_display_stats(request_rec *r, util_ald_cache_t *cache, char *name, char *id)
+{
+ unsigned long i;
+ int totchainlen = 0;
+ int nchains = 0;
+ double chainlen;
+ util_cache_node_t *n;
+ char *buf, *buf2;
+ apr_pool_t *p = r->pool;
+
+ if (cache == NULL) {
+ return "";
+ }
+
+ for (i=0; i < cache->size; ++i) {
+ if (cache->nodes[i] != NULL) {
+ nchains++;
+ for (n = cache->nodes[i];
+ n != NULL && n != n->next;
+ n = n->next) {
+ totchainlen++;
+ }
+ }
+ }
+ chainlen = nchains? (double)totchainlen / (double)nchains : 0;
+
+ if (id) {
+ buf2 = apr_psprintf(p,
+ "<a href=\"%s?%s\">%s</a>",
+ r->uri,
+ id,
+ name);
+ }
+ else {
+ buf2 = name;
+ }
+
+ buf = apr_psprintf(p,
+ "<tr valign='top'>"
+ "<td nowrap>%s</td>"
+ "<td align='right' nowrap>%lu (%.0f%% full)</td>"
+ "<td align='right'>%.1f</td>"
+ "<td align='right'>%lu/%lu</td>"
+ "<td align='right'>%.0f%%</td>"
+ "<td align='right'>%lu/%lu</td>",
+ buf2,
+ cache->numentries,
+ (double)cache->numentries / (double)cache->maxentries * 100.0,
+ chainlen,
+ cache->hits,
+ cache->fetches,
+ (cache->fetches > 0 ? (double)(cache->hits) / (double)(cache->fetches) * 100.0 : 100.0),
+ cache->inserts,
+ cache->removes);
+
+ if (cache->numpurges) {
+ char str_ctime[APR_CTIME_LEN];
+
+ apr_ctime(str_ctime, cache->last_purge);
+ buf = apr_psprintf(p,
+ "%s"
+ "<td align='right'>%lu</td>\n"
+ "<td align='right' nowrap>%s</td>\n",
+ buf,
+ cache->numpurges,
+ str_ctime);
+ }
+ else {
+ buf = apr_psprintf(p,
+ "%s<td colspan='2' align='center'>(none)</td>\n",
+ buf);
+ }
+
+ buf = apr_psprintf(p, "%s<td align='right'>%.2g</td>\n</tr>", buf, cache->avg_purgetime);
+
+ return buf;
+}
+
+char *util_ald_cache_display(request_rec *r, util_ldap_state_t *st)
+{
+ unsigned long i,j;
+ char *buf, *t1, *t2, *t3;
+ char *id1, *id2, *id3;
+ char *argfmt = "cache=%s&id=%d&off=%d";
+ char *scanfmt = "cache=%4s&id=%u&off=%u%1s";
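+    /* argfmt builds the query string used in drill-down links; scanfmt parses
+     * it back out of r->args (cache type, bucket index, chain offset). */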
+ apr_pool_t *pool = r->pool;
+ util_cache_node_t *p = NULL;
+ util_url_node_t *n = NULL;
+
+ util_ald_cache_t *util_ldap_cache = st->util_ldap_cache;
+
+
+ if (!util_ldap_cache) {
+ return "<tr valign='top'><td nowrap colspan=7>Cache has not been enabled/initialised.</td></tr>";
+ }
+
+ if (r->args && strlen(r->args)) {
+ char cachetype[5], lint[2];
+ unsigned int id, off;
+ char date_str[APR_CTIME_LEN+1];
+
+ if ((3 == sscanf(r->args, scanfmt, cachetype, &id, &off, lint)) &&
+ (id < util_ldap_cache->size)) {
+
+ if ((p = util_ldap_cache->nodes[id]) != NULL) {
+ n = (util_url_node_t *)p->payload;
+ buf = (char*)n->url;
+ }
+ else {
+ buf = "";
+ }
+
+ ap_rputs(apr_psprintf(r->pool,
+ "<p>\n"
+ "<table border='0'>\n"
+ "<tr>\n"
+ "<td bgcolor='#000000'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Cache Name:</b></font></td>"
+ "<td bgcolor='#ffffff'><font size='-1' face='Arial,Helvetica' color='#000000'><b>%s (%s)</b></font></td>"
+ "</tr>\n"
+ "</table>\n</p>\n",
+ buf,
+ cachetype[0] == 'm'? "Main" :
+ (cachetype[0] == 's' ? "Search" :
+ (cachetype[0] == 'c' ? "Compares" : "DNCompares"))), r);
+
+ switch (cachetype[0]) {
+ case 'm':
+ if (util_ldap_cache->marktime) {
+ apr_ctime(date_str, util_ldap_cache->marktime);
+ }
+ else
+ date_str[0] = 0;
+
+ ap_rputs(apr_psprintf(r->pool,
+ "<p>\n"
+ "<table border='0'>\n"
+ "<tr>\n"
+ "<td bgcolor='#000000'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Size:</b></font></td>"
+ "<td bgcolor='#ffffff'><font size='-1' face='Arial,Helvetica' color='#000000'><b>%ld</b></font></td>"
+ "</tr>\n"
+ "<tr>\n"
+ "<td bgcolor='#000000'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Max Entries:</b></font></td>"
+ "<td bgcolor='#ffffff'><font size='-1' face='Arial,Helvetica' color='#000000'><b>%ld</b></font></td>"
+ "</tr>\n"
+ "<tr>\n"
+ "<td bgcolor='#000000'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b># Entries:</b></font></td>"
+ "<td bgcolor='#ffffff'><font size='-1' face='Arial,Helvetica' color='#000000'><b>%ld</b></font></td>"
+ "</tr>\n"
+ "<tr>\n"
+ "<td bgcolor='#000000'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Full Mark:</b></font></td>"
+ "<td bgcolor='#ffffff'><font size='-1' face='Arial,Helvetica' color='#000000'><b>%ld</b></font></td>"
+ "</tr>\n"
+ "<tr>\n"
+ "<td bgcolor='#000000'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Full Mark Time:</b></font></td>"
+ "<td bgcolor='#ffffff'><font size='-1' face='Arial,Helvetica' color='#000000'><b>%s</b></font></td>"
+ "</tr>\n"
+ "</table>\n</p>\n",
+ util_ldap_cache->size,
+ util_ldap_cache->maxentries,
+ util_ldap_cache->numentries,
+ util_ldap_cache->fullmark,
+ date_str), r);
+
+ ap_rputs("<p>\n"
+ "<table border='0'>\n"
+ "<tr bgcolor='#000000'>\n"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>LDAP URL</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Size</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Max Entries</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b># Entries</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Full Mark</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Full Mark Time</b></font></td>"
+ "</tr>\n", r
+ );
+ for (i=0; i < util_ldap_cache->size; ++i) {
+ for (p = util_ldap_cache->nodes[i]; p != NULL; p = p->next) {
+
+ (*util_ldap_cache->display)(r, util_ldap_cache, p->payload);
+ }
+ }
+ ap_rputs("</table>\n</p>\n", r);
+
+
+ break;
+ case 's':
+ ap_rputs("<p>\n"
+ "<table border='0'>\n"
+ "<tr bgcolor='#000000'>\n"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>LDAP Filter</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>User Name</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Last Bind</b></font></td>"
+ "</tr>\n", r
+ );
+ if (n) {
+ for (i=0; i < n->search_cache->size; ++i) {
+ for (p = n->search_cache->nodes[i]; p != NULL; p = p->next) {
+
+ (*n->search_cache->display)(r, n->search_cache, p->payload);
+ }
+ }
+ }
+ ap_rputs("</table>\n</p>\n", r);
+ break;
+ case 'c':
+ ap_rputs("<p>\n"
+ "<table border='0'>\n"
+ "<tr bgcolor='#000000'>\n"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>DN</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Attribute</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Value</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Last Compare</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Result</b></font></td>"
+ "</tr>\n", r
+ );
+ if (n) {
+ for (i=0; i < n->compare_cache->size; ++i) {
+ for (p = n->compare_cache->nodes[i]; p != NULL; p = p->next) {
+
+ (*n->compare_cache->display)(r, n->compare_cache, p->payload);
+ }
+ }
+ }
+ ap_rputs("</table>\n</p>\n", r);
+ break;
+ case 'd':
+ ap_rputs("<p>\n"
+ "<table border='0'>\n"
+ "<tr bgcolor='#000000'>\n"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Require DN</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Actual DN</b></font></td>"
+ "</tr>\n", r
+ );
+ if (n) {
+ for (i=0; i < n->dn_compare_cache->size; ++i) {
+ for (p = n->dn_compare_cache->nodes[i]; p != NULL; p = p->next) {
+
+ (*n->dn_compare_cache->display)(r, n->dn_compare_cache, p->payload);
+ }
+ }
+ }
+ ap_rputs("</table>\n</p>\n", r);
+ break;
+ default:
+ break;
+ }
+
+ }
+ else {
+ buf = "";
+ }
+ }
+ else {
+ ap_rputs("<p>\n"
+ "<table border='0'>\n"
+ "<tr bgcolor='#000000'>\n"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Cache Name</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Entries</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Avg. Chain Len.</b></font></td>"
+ "<td colspan='2'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Hits</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Ins/Rem</b></font></td>"
+ "<td colspan='2'><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Purges</b></font></td>"
+ "<td><font size='-1' face='Arial,Helvetica' color='#ffffff'><b>Avg Purge Time</b></font></td>"
+ "</tr>\n", r
+ );
+
+
+ id1 = apr_psprintf(pool, argfmt, "main", 0, 0);
+ buf = util_ald_cache_display_stats(r, st->util_ldap_cache, "LDAP URL Cache", id1);
+
+ for (i=0; i < util_ldap_cache->size; ++i) {
+ for (p = util_ldap_cache->nodes[i],j=0; p != NULL; p = p->next,j++) {
+
+ n = (util_url_node_t *)p->payload;
+
+ t1 = apr_psprintf(pool, "%s (Searches)", n->url);
+ t2 = apr_psprintf(pool, "%s (Compares)", n->url);
+ t3 = apr_psprintf(pool, "%s (DNCompares)", n->url);
+ id1 = apr_psprintf(pool, argfmt, "srch", i, j);
+ id2 = apr_psprintf(pool, argfmt, "cmpr", i, j);
+ id3 = apr_psprintf(pool, argfmt, "dncp", i, j);
+
+ buf = apr_psprintf(pool, "%s\n\n"
+ "%s\n\n"
+ "%s\n\n"
+ "%s\n\n",
+ buf,
+ util_ald_cache_display_stats(r, n->search_cache, t1, id1),
+ util_ald_cache_display_stats(r, n->compare_cache, t2, id2),
+ util_ald_cache_display_stats(r, n->dn_compare_cache, t3, id3)
+ );
+ }
+ }
+ ap_rputs(buf, r);
+ ap_rputs("</table>\n</p>\n", r);
+ }
+
+ return buf;
+}
+
+#endif /* APU_HAS_LDAP */
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/.deps b/rubbos/app/httpd-2.0.64/modules/filters/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/.indent.pro b/rubbos/app/httpd-2.0.64/modules/filters/.indent.pro
new file mode 100644
index 00000000..a9fbe9f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/.libs/mod_include.a b/rubbos/app/httpd-2.0.64/modules/filters/.libs/mod_include.a
new file mode 100644
index 00000000..02aa56ff
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/.libs/mod_include.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/.libs/mod_include.la b/rubbos/app/httpd-2.0.64/modules/filters/.libs/mod_include.la
new file mode 100644
index 00000000..602915a6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/.libs/mod_include.la
@@ -0,0 +1,35 @@
+# mod_include.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_include.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_include.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/.libs/mod_include.o b/rubbos/app/httpd-2.0.64/modules/filters/.libs/mod_include.o
new file mode 100644
index 00000000..d08f85f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/.libs/mod_include.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/Makefile b/rubbos/app/httpd-2.0.64/modules/filters/Makefile
new file mode 100644
index 00000000..918a8cbb
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/filters
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/filters
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/filters
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/Makefile.in b/rubbos/app/httpd-2.0.64/modules/filters/Makefile.in
new file mode 100644
index 00000000..167b343d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/Makefile.in
@@ -0,0 +1,3 @@
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/NWGNUdeflate b/rubbos/app/httpd-2.0.64/modules/filters/NWGNUdeflate
new file mode 100644
index 00000000..4d9cbfa3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/NWGNUdeflate
@@ -0,0 +1,278 @@
+#
+# The MOD_DEFLATE module requires the ZLib source, which
+# can be downloaded from http://www.gzip.org/zlib/
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = deflate
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Deflate Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Deflate Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/deflate.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_deflate.o \
+ $(OBJDIR)/adler32.o \
+ $(OBJDIR)/crc32.o \
+ $(OBJDIR)/deflate.o \
+ $(OBJDIR)/inflate.o \
+ $(OBJDIR)/inffast.o \
+ $(OBJDIR)/inftrees.o \
+ $(OBJDIR)/trees.o \
+ $(OBJDIR)/zutil.o \
+ $(EOLIST)
+
+ifeq "$(wildcard $(ZLIBSDK)/infblock.c)" "$(ZLIBSDK)/infblock.c"
+FILES_nlm_objs += \
+ $(OBJDIR)/infblock.o \
+ $(OBJDIR)/infcodes.o \
+ $(OBJDIR)/infutil.o \
+ $(EOLIST)
+endif
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ deflate_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+vpath %.c $(ZLIBSDK)
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/NWGNUextfiltr b/rubbos/app/httpd-2.0.64/modules/filters/NWGNUextfiltr
new file mode 100644
index 00000000..8aa5b208
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/NWGNUextfiltr
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = extfiltr
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) External Filter Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = ExtFilter Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/extfiltr.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_ext_filter.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ ext_filter_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/NWGNUmakefile b/rubbos/app/httpd-2.0.64/modules/filters/NWGNUmakefile
new file mode 100644
index 00000000..c8509428
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/NWGNUmakefile
@@ -0,0 +1,255 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME =
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION =
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME =
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE =
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/extfiltr.nlm \
+ $(EOLIST)
+
+# If the zlib library source exists, then build the mod_deflate module
+ifneq "$(ZLIBSDK)" ""
+ifeq "$(wildcard $(ZLIBSDK))" "$(ZLIBSDK)"
+TARGET_nlm += $(OBJDIR)/deflate.nlm \
+ $(EOLIST)
+endif
+else
+TARGET_nlm += $(OBJDIR)/extfiltr.nlm \
+ $(EOLIST)
+endif
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/config.m4 b/rubbos/app/httpd-2.0.64/modules/filters/config.m4
new file mode 100644
index 00000000..726dfe29
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/config.m4
@@ -0,0 +1,65 @@
+dnl modules enabled in this directory by default
+
+dnl APACHE_MODULE(name, helptext[, objects[, structname[, default[, config]]]])
+
+APACHE_MODPATH_INIT(filters)
+
+APACHE_MODULE(ext_filter, external filter module, , , most)
+APACHE_MODULE(include, Server Side Includes, , , yes)
+
+APR_ADDTO(LT_LDFLAGS,-export-dynamic)
+
+APACHE_MODULE(deflate, Deflate transfer encoding support, , , no, [
+ AC_ARG_WITH(z, APACHE_HELP_STRING(--with-z=DIR,use a specific zlib library),
+ [
+ if test "x$withval" != "xyes" && test "x$withval" != "x"; then
+ ap_zlib_base="$withval"
+ fi
+ ])
+ if test "x$ap_zlib_base" = "x"; then
+ AC_MSG_CHECKING([for zlib location])
+ AC_CACHE_VAL(ap_cv_zlib,[
+ for dir in /usr/local /usr ; do
+ if test -d $dir && test -f $dir/include/zlib.h; then
+ ap_cv_zlib=$dir
+ break
+ fi
+ done
+ ])
+ ap_zlib_base=$ap_cv_zlib
+ if test "x$ap_zlib_base" = "x"; then
+ enable_deflate=no
+ AC_MSG_RESULT([not found])
+ else
+ AC_MSG_RESULT([$ap_zlib_base])
+ fi
+ fi
+ if test "$enable_deflate" != "no"; then
+ ap_save_includes=$INCLUDE
+ ap_save_ldflags=$LDFLAGS
+ ap_save_libs=$LIBS
+ ap_save_cppflags=$CPPFLAGS
+ if test "$ap_zlib_base" != "/usr"; then
+ APR_ADDTO(INCLUDES, [-I${ap_zlib_base}/include])
+ dnl put in CPPFLAGS temporarily so that AC_TRY_LINK below will work
+ CPPFLAGS="$CPPFLAGS $INCLUDES"
+ APR_ADDTO(LDFLAGS, [-L${ap_zlib_base}/lib])
+ if test "x$ap_platform_runtime_link_flag" != "x"; then
+ APR_ADDTO(LDFLAGS, [$ap_platform_runtime_link_flag${ap_zlib_base}/lib])
+ fi
+ fi
+ APR_ADDTO(LIBS, [-lz])
+ AC_MSG_CHECKING([for zlib library])
+ AC_TRY_LINK([#include <zlib.h>], [int i = Z_OK;],
+ [AC_MSG_RESULT(found)
+ AC_CHECK_HEADERS(zutil.h)],
+ [AC_MSG_RESULT(not found)
+ enable_deflate=no
+ INCLUDES=$ap_save_includes
+ LDFLAGS=$ap_save_ldflags
+ LIBS=$ap_save_libs])
+ CPPFLAGS=$ap_save_cppflags
+ fi
+])
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/mod_deflate.c b/rubbos/app/httpd-2.0.64/modules/filters/mod_deflate.c
new file mode 100644
index 00000000..610be52d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/mod_deflate.c
@@ -0,0 +1,875 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Portions of this software are based upon public domain software
+ * (zlib functions gz_open and gzwrite)
+ */
+
+/*
+ * mod_deflate.c: Perform deflate transfer-encoding on the fly
+ *
+ * Written by Ian Holsman
+ *
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "apr_strings.h"
+#include "apr_general.h"
+#include "util_filter.h"
+#include "apr_buckets.h"
+#include "http_request.h"
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "zlib.h"
+
+#ifdef HAVE_ZUTIL_H
+#include "zutil.h"
+#else
+/* As part of the encoding process, we must send what our OS_CODE is
+ * (or so it seems based on what I can tell of how gzip encoding works).
+ *
+ * zutil.h is not always included with zlib distributions (it is a private
+ * header), so this is straight from zlib 1.1.3's zutil.h.
+ */
+#ifdef OS2
+#define OS_CODE 0x06
+#endif
+
+#ifdef WIN32    /* Windows 95 & Windows NT */
+#define OS_CODE 0x0b
+#endif
+
+#if defined(VAXC) || defined(VMS)
+#define OS_CODE 0x02
+#endif
+
+#ifdef AMIGA
+#define OS_CODE 0x01
+#endif
+
+#if defined(ATARI) || defined(atarist)
+#define OS_CODE 0x05
+#endif
+
+#if defined(MACOS) || defined(TARGET_OS_MAC)
+#define OS_CODE 0x07
+#endif
+
+#ifdef __50SERIES /* Prime/PRIMOS */
+#define OS_CODE 0x0F
+#endif
+
+#ifdef TOPS20
+#define OS_CODE 0x0a
+#endif
+
+#ifndef OS_CODE
+#define OS_CODE 0x03 /* assume Unix */
+#endif
+#endif
+
+static const char deflateFilterName[] = "DEFLATE";
+module AP_MODULE_DECLARE_DATA deflate_module;
+
+typedef struct deflate_filter_config_t
+{
+ int windowSize;
+ int memlevel;
+ int compressionlevel;
+ apr_size_t bufferSize;
+ char *note_ratio_name;
+ char *note_input_name;
+ char *note_output_name;
+} deflate_filter_config;
+
+/* windowsize is negative to suppress Zlib header */
+#define DEFAULT_COMPRESSION Z_DEFAULT_COMPRESSION
+#define DEFAULT_WINDOWSIZE -15
+#define DEFAULT_MEMLEVEL 9
+#define DEFAULT_BUFFERSIZE 8096
+
+/* Outputs a long in LSB order to the given buffer;
+ * only the bottom 4 bytes are required for the deflate file format.
+ */
+static void putLong(unsigned char *string, unsigned long x)
+{
+ string[0] = (unsigned char)(x & 0xff);
+ string[1] = (unsigned char)((x & 0xff00) >> 8);
+ string[2] = (unsigned char)((x & 0xff0000) >> 16);
+ string[3] = (unsigned char)((x & 0xff000000) >> 24);
+}
+
+/* Reads a long stored in LSB order from the given buffer.
+ */
+static unsigned long getLong(unsigned char *string)
+{
+ return ((unsigned long)string[0])
+ | (((unsigned long)string[1]) << 8)
+ | (((unsigned long)string[2]) << 16)
+ | (((unsigned long)string[3]) << 24);
+}
+
+static void *create_deflate_server_config(apr_pool_t *p, server_rec *s)
+{
+ deflate_filter_config *c = apr_pcalloc(p, sizeof *c);
+
+ c->memlevel = DEFAULT_MEMLEVEL;
+ c->windowSize = DEFAULT_WINDOWSIZE;
+ c->bufferSize = DEFAULT_BUFFERSIZE;
+ c->compressionlevel = DEFAULT_COMPRESSION;
+
+ return c;
+}
+
+static const char *deflate_set_window_size(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ deflate_filter_config *c = ap_get_module_config(cmd->server->module_config,
+ &deflate_module);
+ int i;
+
+ i = atoi(arg);
+
+ if (i < 1 || i > 15)
+ return "DeflateWindowSize must be between 1 and 15";
+
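+    /* Stored negated: a negative window size makes deflateInit2() produce a
+     * raw deflate stream, since this module writes its own gzip header. */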
+ c->windowSize = i * -1;
+
+ return NULL;
+}
+
+static const char *deflate_set_buffer_size(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ deflate_filter_config *c = ap_get_module_config(cmd->server->module_config,
+ &deflate_module);
+ int n = atoi(arg);
+
+ if (n <= 0) {
+ return "DeflateBufferSize should be positive";
+ }
+
+ c->bufferSize = (apr_size_t)n;
+
+ return NULL;
+}
+static const char *deflate_set_note(cmd_parms *cmd, void *dummy,
+ const char *arg1, const char *arg2)
+{
+ deflate_filter_config *c = ap_get_module_config(cmd->server->module_config,
+ &deflate_module);
+
+ if (arg2 == NULL) {
+ c->note_ratio_name = apr_pstrdup(cmd->pool, arg1);
+ }
+ else if (!strcasecmp(arg1, "ratio")) {
+ c->note_ratio_name = apr_pstrdup(cmd->pool, arg2);
+ }
+ else if (!strcasecmp(arg1, "input")) {
+ c->note_input_name = apr_pstrdup(cmd->pool, arg2);
+ }
+ else if (!strcasecmp(arg1, "output")) {
+ c->note_output_name = apr_pstrdup(cmd->pool, arg2);
+ }
+ else {
+ return apr_psprintf(cmd->pool, "Unknown note type %s", arg1);
+ }
+
+ return NULL;
+}
+
+static const char *deflate_set_memlevel(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ deflate_filter_config *c = ap_get_module_config(cmd->server->module_config,
+ &deflate_module);
+ int i;
+
+ i = atoi(arg);
+
+ if (i < 1 || i > 9)
+ return "DeflateMemLevel must be between 1 and 9";
+
+ c->memlevel = i;
+
+ return NULL;
+}
+
+static const char *deflate_set_compressionlevel(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ deflate_filter_config *c = ap_get_module_config(cmd->server->module_config,
+ &deflate_module);
+ int i;
+
+ i = atoi(arg);
+
+ if (i < 1 || i > 9)
+ return "Compression Level must be between 1 and 9";
+
+ c->compressionlevel = i;
+
+ return NULL;
+}
+
+/* gzip magic header bytes: 0x1f, 0x8b (RFC 1952) */
+static char deflate_magic[2] = { '\037', '\213' };
+
+typedef struct deflate_ctx_t
+{
+ z_stream stream;
+ unsigned char *buffer;
+ unsigned long crc;
+ apr_bucket_brigade *bb, *proc_bb;
+} deflate_ctx;
+
+static apr_status_t deflate_out_filter(ap_filter_t *f,
+ apr_bucket_brigade *bb)
+{
+ apr_bucket *e;
+ request_rec *r = f->r;
+ deflate_ctx *ctx = f->ctx;
+ int zRC;
+ deflate_filter_config *c = ap_get_module_config(r->server->module_config,
+ &deflate_module);
+
+ /* If we don't have a context, we need to ensure that it is okay to send
+ * the deflated content. If we have a context, that means we've done
+ * this before and we liked it.
+ * This could be not so nice if we always fail. But, if we succeed,
+ * we're in better shape.
+ */
+ if (!ctx) {
+ char *buf, *token;
+ const char *encoding, *accepts;
+
+ /* only work on main request/no subrequests */
+ if (r->main) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ /* some browsers might have problems, so set no-gzip
+ * (with browsermatch) for them
+ */
+ if (apr_table_get(r->subprocess_env, "no-gzip")) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ /* Some browsers might have problems with content types
+ * other than text/html, so set gzip-only-text/html
+ * (with browsermatch) for them
+ */
+ if (r->content_type == NULL
+ || strncmp(r->content_type, "text/html", 9)) {
+ const char *env_value = apr_table_get(r->subprocess_env,
+ "gzip-only-text/html");
+ if ( env_value && (strcmp(env_value,"1") == 0) ) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+ }
+
+ /* Let's see what our current Content-Encoding is.
+ * If it's already encoded, don't compress again.
+ * (We could, but let's not.)
+ */
+ encoding = apr_table_get(r->headers_out, "Content-Encoding");
+ if (encoding) {
+ const char *err_enc;
+
+ err_enc = apr_table_get(r->err_headers_out, "Content-Encoding");
+ if (err_enc) {
+ encoding = apr_pstrcat(r->pool, encoding, ",", err_enc, NULL);
+ }
+ }
+ else {
+ encoding = apr_table_get(r->err_headers_out, "Content-Encoding");
+ }
+
+ if (r->content_encoding) {
+ encoding = encoding ? apr_pstrcat(r->pool, encoding, ",",
+ r->content_encoding, NULL)
+ : r->content_encoding;
+ }
+
+ if (encoding) {
+ const char *tmp = encoding;
+
+ token = ap_get_token(r->pool, &tmp, 0);
+ while (token && *token) {
+ /* stolen from mod_negotiation: */
+ if (strcmp(token, "identity") && strcmp(token, "7bit") &&
+ strcmp(token, "8bit") && strcmp(token, "binary")) {
+
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ /* Otherwise, skip token */
+ if (*tmp) {
+ ++tmp;
+ }
+ token = (*tmp) ? ap_get_token(r->pool, &tmp, 0) : NULL;
+ }
+ }
+
+ /* Even if we don't accept this request based on it not having
+ * the Accept-Encoding, we need to note that we were looking
+ * for this header and downstream proxies should be aware of that.
+ */
+ apr_table_mergen(r->headers_out, "Vary", "Accept-Encoding");
+
+ /* if they don't have the line, then they can't play */
+ accepts = apr_table_get(r->headers_in, "Accept-Encoding");
+ if (accepts == NULL) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ token = ap_get_token(r->pool, &accepts, 0);
+ while (token && token[0] && strcasecmp(token, "gzip")) {
+ /* skip parameters, XXX: ;q=foo evaluation? */
+ while (*accepts == ';') {
+ ++accepts;
+ token = ap_get_token(r->pool, &accepts, 1);
+ }
+
+ /* retrieve next token */
+ if (*accepts == ',') {
+ ++accepts;
+ }
+ token = (*accepts) ? ap_get_token(r->pool, &accepts, 0) : NULL;
+ }
+
+ /* No acceptable token found. */
+ if (token == NULL || token[0] == '\0') {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ /* We're cool with filtering this. */
+ ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx));
+ ctx->bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
+ ctx->buffer = apr_palloc(r->pool, c->bufferSize);
+
+ zRC = deflateInit2(&ctx->stream, c->compressionlevel, Z_DEFLATED,
+ c->windowSize, c->memlevel,
+ Z_DEFAULT_STRATEGY);
+
+ if (zRC != Z_OK) {
+ f->ctx = NULL;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unable to init Zlib: "
+ "deflateInit2 returned %d: URL %s",
+ zRC, r->uri);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ /* RFC 1952 Section 2.3 dictates the gzip header:
+ *
+ * +---+---+---+---+---+---+---+---+---+---+
+ * |ID1|ID2|CM |FLG| MTIME |XFL|OS |
+ * +---+---+---+---+---+---+---+---+---+---+
+ *
+ * If we wish to populate in MTIME (as hinted in RFC 1952), do:
+ * putLong(date_array, apr_time_now() / APR_USEC_PER_SEC);
+ * where date_array is a char[4] and then print date_array in the
+ * MTIME position. WARNING: ENDIANNESS ISSUE HERE.
+ */
+ buf = apr_psprintf(r->pool, "%c%c%c%c%c%c%c%c%c%c", deflate_magic[0],
+ deflate_magic[1], Z_DEFLATED, 0 /* flags */,
+ 0, 0, 0, 0 /* 4 chars for mtime */,
+ 0 /* xflags */, OS_CODE);
+ e = apr_bucket_pool_create(buf, 10, r->pool, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, e);
+
+ /* If the entire Content-Encoding is "identity", we can replace it. */
+ if (!encoding || !strcasecmp(encoding, "identity")) {
+ apr_table_setn(r->headers_out, "Content-Encoding", "gzip");
+ }
+ else {
+ apr_table_mergen(r->headers_out, "Content-Encoding", "gzip");
+ }
+ apr_table_unset(r->headers_out, "Content-Length");
+
+ /* initialize deflate output buffer */
+ ctx->stream.next_out = ctx->buffer;
+ ctx->stream.avail_out = c->bufferSize;
+ }
+
+ while (!APR_BRIGADE_EMPTY(bb))
+ {
+ const char *data;
+ apr_bucket *b;
+ apr_size_t len;
+ int done = 0;
+
+ e = APR_BRIGADE_FIRST(bb);
+
+ if (APR_BUCKET_IS_EOS(e)) {
+ char *buf;
+ unsigned int deflate_len;
+
+ ctx->stream.avail_in = 0; /* should be zero already anyway */
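+            /* Drain the compressor: repeatedly hand the filled output buffer
+             * to a heap bucket and call deflate(Z_FINISH) until the stream
+             * reports completion. */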
+ for (;;) {
+ deflate_len = c->bufferSize - ctx->stream.avail_out;
+
+ if (deflate_len != 0) {
+ b = apr_bucket_heap_create((char *)ctx->buffer,
+ deflate_len, NULL,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
+ ctx->stream.next_out = ctx->buffer;
+ ctx->stream.avail_out = c->bufferSize;
+ }
+
+ if (done) {
+ break;
+ }
+
+ zRC = deflate(&ctx->stream, Z_FINISH);
+
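+                /* With Z_FINISH, Z_BUF_ERROR just means deflate() could
+                 * make no progress on this pass; when no output was
+                 * produced it is not fatal, so map it back to Z_OK.
+                 */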
+ if (deflate_len == 0 && zRC == Z_BUF_ERROR) {
+ zRC = Z_OK;
+ }
+
+ done = (ctx->stream.avail_out != 0 || zRC == Z_STREAM_END);
+
+ if (zRC != Z_OK && zRC != Z_STREAM_END) {
+ break;
+ }
+ }
+
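+            /* Append the 8-byte gzip trailer required by RFC 1952:
+             * the CRC-32 of the uncompressed data followed by ISIZE
+             * (the uncompressed length modulo 2^32).
+             */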
+ buf = apr_palloc(r->pool, 8);
+ putLong((unsigned char *)&buf[0], ctx->crc);
+ putLong((unsigned char *)&buf[4], ctx->stream.total_in);
+
+ b = apr_bucket_pool_create(buf, 8, r->pool, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "Zlib: Compressed %ld to %ld : URL %s",
+ ctx->stream.total_in, ctx->stream.total_out, r->uri);
+
+ /* leave notes for logging */
+ if (c->note_input_name) {
+ apr_table_setn(r->notes, c->note_input_name,
+ (ctx->stream.total_in > 0)
+ ? apr_off_t_toa(r->pool,
+ ctx->stream.total_in)
+ : "-");
+ }
+
+ if (c->note_output_name) {
+ apr_table_setn(r->notes, c->note_output_name,
+ (ctx->stream.total_in > 0)
+ ? apr_off_t_toa(r->pool,
+ ctx->stream.total_out)
+ : "-");
+ }
+
+ if (c->note_ratio_name) {
+ apr_table_setn(r->notes, c->note_ratio_name,
+ (ctx->stream.total_in > 0)
+ ? apr_itoa(r->pool,
+ (int)(ctx->stream.total_out
+ * 100
+ / ctx->stream.total_in))
+ : "-");
+ }
+
+ deflateEnd(&ctx->stream);
+
+ /* Remove EOS from the old list, and insert into the new. */
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, e);
+
+ /* Okay, we've seen the EOS.
+ * Time to pass it along down the chain.
+ */
+ return ap_pass_brigade(f->next, ctx->bb);
+ }
+
+ if (APR_BUCKET_IS_FLUSH(e)) {
+ apr_bucket *bkt;
+ apr_status_t rv;
+
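+            /* On a FLUSH bucket, push the compressed data already sitting
+             * in our output buffer downstream, followed by a flush bucket
+             * of our own, then continue with the rest of the brigade.
+             */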
+ apr_bucket_delete(e);
+
+ if (ctx->stream.avail_in > 0) {
+ zRC = deflate(&(ctx->stream), Z_SYNC_FLUSH);
+ if (zRC != Z_OK) {
+ return APR_EGENERAL;
+ }
+ }
+
+ ctx->stream.next_out = ctx->buffer;
+ len = c->bufferSize - ctx->stream.avail_out;
+
+ b = apr_bucket_heap_create((char *)ctx->buffer, len,
+ NULL, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
+ ctx->stream.avail_out = c->bufferSize;
+
+ bkt = apr_bucket_flush_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, bkt);
+ rv = ap_pass_brigade(f->next, ctx->bb);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ continue;
+ }
+
+ /* read */
+ apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
+
+ /* This crc32 function is from zlib. */
+ ctx->crc = crc32(ctx->crc, (const Bytef *)data, len);
+
+ /* write */
+ ctx->stream.next_in = (unsigned char *)data; /* We just lost const-ness,
+ * but we'll just have to
+ * trust zlib */
+ ctx->stream.avail_in = len;
+
+ while (ctx->stream.avail_in != 0) {
+ if (ctx->stream.avail_out == 0) {
+ apr_status_t rv;
+
+ ctx->stream.next_out = ctx->buffer;
+ len = c->bufferSize - ctx->stream.avail_out;
+
+ b = apr_bucket_heap_create((char *)ctx->buffer, len,
+ NULL, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
+ ctx->stream.avail_out = c->bufferSize;
+ /* Send what we have right now to the next filter. */
+ rv = ap_pass_brigade(f->next, ctx->bb);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+
+ zRC = deflate(&(ctx->stream), Z_NO_FLUSH);
+
+ if (zRC != Z_OK)
+ return APR_EGENERAL;
+ }
+
+ apr_bucket_delete(e);
+ }
+
+ apr_brigade_cleanup(bb);
+ return APR_SUCCESS;
+}
+
+/* This is the deflate input filter (inflates). */
+static apr_status_t deflate_in_filter(ap_filter_t *f,
+ apr_bucket_brigade *bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ apr_bucket *bkt;
+ request_rec *r = f->r;
+ deflate_ctx *ctx = f->ctx;
+ int zRC;
+ apr_status_t rv;
+ deflate_filter_config *c;
+
+ /* just get out of the way of things we don't want. */
+ if (mode != AP_MODE_READBYTES) {
+ return ap_get_brigade(f->next, bb, mode, block, readbytes);
+ }
+
+ c = ap_get_module_config(r->server->module_config, &deflate_module);
+
+ if (!ctx) {
+ int found = 0;
+ char *token, deflate_hdr[10];
+ const char *encoding;
+ apr_size_t len;
+
+ /* only work on main request/no subrequests */
+ if (r->main) {
+ ap_remove_input_filter(f);
+ return ap_get_brigade(f->next, bb, mode, block, readbytes);
+ }
+
+        /* Check the request's Content-Encoding.  We only inflate when
+         * gzip is listed there; otherwise this filter steps aside.
+         */
+ encoding = apr_table_get(r->headers_in, "Content-Encoding");
+ if (encoding) {
+ const char *tmp = encoding;
+
+ token = ap_get_token(r->pool, &tmp, 0);
+ while (token && token[0]) {
+ if (!strcasecmp(token, "gzip")) {
+ found = 1;
+ break;
+ }
+ /* Otherwise, skip token */
+ tmp++;
+ token = ap_get_token(r->pool, &tmp, 0);
+ }
+ }
+
+ if (found == 0) {
+ ap_remove_input_filter(f);
+ return ap_get_brigade(f->next, bb, mode, block, readbytes);
+ }
+
+ f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
+ ctx->bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
+ ctx->proc_bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
+ ctx->buffer = apr_palloc(r->pool, c->bufferSize);
+
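+        /* Read exactly the 10-byte fixed gzip header first so the magic
+         * bytes and the flag byte can be validated before inflating.
+         */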
+ rv = ap_get_brigade(f->next, ctx->bb, AP_MODE_READBYTES, block, 10);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ len = 10;
+ rv = apr_brigade_flatten(ctx->bb, deflate_hdr, &len);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ /* We didn't get the magic bytes. */
+ if (len != 10 ||
+ deflate_hdr[0] != deflate_magic[0] ||
+ deflate_hdr[1] != deflate_magic[1]) {
+ return APR_EGENERAL;
+ }
+
+ /* We can't handle flags for now. */
+ if (deflate_hdr[3] != 0) {
+ return APR_EGENERAL;
+ }
+
+ zRC = inflateInit2(&ctx->stream, c->windowSize);
+
+ if (zRC != Z_OK) {
+ f->ctx = NULL;
+ inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unable to init Zlib: "
+ "inflateInit2 returned %d: URL %s",
+ zRC, r->uri);
+ ap_remove_input_filter(f);
+ return ap_get_brigade(f->next, bb, mode, block, readbytes);
+ }
+
+        /* initialize inflate output buffer */
+ ctx->stream.next_out = ctx->buffer;
+ ctx->stream.avail_out = c->bufferSize;
+
+ apr_brigade_cleanup(ctx->bb);
+ }
+
+ if (APR_BRIGADE_EMPTY(ctx->proc_bb)) {
+ rv = ap_get_brigade(f->next, ctx->bb, mode, block, readbytes);
+
+ if (rv != APR_SUCCESS) {
+ /* What about APR_EAGAIN errors? */
+ inflateEnd(&ctx->stream);
+ return rv;
+ }
+
+ APR_BRIGADE_FOREACH(bkt, ctx->bb) {
+ const char *data;
+ apr_size_t len;
+
+ /* If we actually see the EOS, that means we screwed up! */
+ if (APR_BUCKET_IS_EOS(bkt)) {
+ inflateEnd(&ctx->stream);
+ return APR_EGENERAL;
+ }
+
+ if (APR_BUCKET_IS_FLUSH(bkt)) {
+ apr_bucket *tmp_heap;
+ zRC = inflate(&(ctx->stream), Z_SYNC_FLUSH);
+ if (zRC != Z_OK) {
+ inflateEnd(&ctx->stream);
+ return APR_EGENERAL;
+ }
+
+ ctx->stream.next_out = ctx->buffer;
+ len = c->bufferSize - ctx->stream.avail_out;
+
+ ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len);
+ tmp_heap = apr_bucket_heap_create((char *)ctx->buffer, len,
+ NULL, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, tmp_heap);
+ ctx->stream.avail_out = c->bufferSize;
+
+ /* Move everything to the returning brigade. */
+ APR_BUCKET_REMOVE(bkt);
+ APR_BRIGADE_CONCAT(bb, ctx->bb);
+ break;
+ }
+
+ /* read */
+ apr_bucket_read(bkt, &data, &len, APR_BLOCK_READ);
+
+ /* pass through zlib inflate. */
+ ctx->stream.next_in = (unsigned char *)data;
+ ctx->stream.avail_in = len;
+
+ zRC = Z_OK;
+
+ while (ctx->stream.avail_in != 0) {
+ if (ctx->stream.avail_out == 0) {
+ apr_bucket *tmp_heap;
+ ctx->stream.next_out = ctx->buffer;
+ len = c->bufferSize - ctx->stream.avail_out;
+
+ ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len);
+ tmp_heap = apr_bucket_heap_create((char *)ctx->buffer, len,
+ NULL, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, tmp_heap);
+ ctx->stream.avail_out = c->bufferSize;
+ }
+
+ zRC = inflate(&ctx->stream, Z_NO_FLUSH);
+
+ if (zRC == Z_STREAM_END) {
+ break;
+ }
+
+ if (zRC != Z_OK) {
+ inflateEnd(&ctx->stream);
+ return APR_EGENERAL;
+ }
+ }
+ if (zRC == Z_STREAM_END) {
+ apr_bucket *tmp_heap, *eos;
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "Zlib: Inflated %ld to %ld : URL %s",
+ ctx->stream.total_in, ctx->stream.total_out,
+ r->uri);
+
+ len = c->bufferSize - ctx->stream.avail_out;
+
+ ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len);
+ tmp_heap = apr_bucket_heap_create((char *)ctx->buffer, len,
+ NULL, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, tmp_heap);
+ ctx->stream.avail_out = c->bufferSize;
+
+                /* Are the trailing 8 bytes (CRC-32 + ISIZE) already in the input? */
+ if (ctx->stream.avail_in >= 8) {
+ unsigned long compCRC, compLen;
+ compCRC = getLong(ctx->stream.next_in);
+ if (ctx->crc != compCRC) {
+ inflateEnd(&ctx->stream);
+ return APR_EGENERAL;
+ }
+ ctx->stream.next_in += 4;
+ compLen = getLong(ctx->stream.next_in);
+ if (ctx->stream.total_out != compLen) {
+ inflateEnd(&ctx->stream);
+ return APR_EGENERAL;
+ }
+ }
+ else {
+ /* FIXME: We need to grab the 8 verification bytes
+ * from the wire! */
+ inflateEnd(&ctx->stream);
+ return APR_EGENERAL;
+ }
+
+ inflateEnd(&ctx->stream);
+
+ eos = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, eos);
+ break;
+ }
+
+ }
+ apr_brigade_cleanup(ctx->bb);
+ }
+
+ /* If we are about to return nothing for a 'blocking' read and we have
+ * some data in our zlib buffer, flush it out so we can return something.
+ */
+ if (block == APR_BLOCK_READ &&
+ APR_BRIGADE_EMPTY(ctx->proc_bb) &&
+ ctx->stream.avail_out < c->bufferSize) {
+ apr_bucket *tmp_heap;
+ apr_size_t len;
+ ctx->stream.next_out = ctx->buffer;
+ len = c->bufferSize - ctx->stream.avail_out;
+
+ ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len);
+ tmp_heap = apr_bucket_heap_create((char *)ctx->buffer, len,
+ NULL, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, tmp_heap);
+ ctx->stream.avail_out = c->bufferSize;
+ }
+
+ if (!APR_BRIGADE_EMPTY(ctx->proc_bb)) {
+ apr_bucket_brigade *newbb;
+
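+        /* Hand back at most readbytes bytes now; anything beyond that
+         * stays queued in ctx->proc_bb for the next call into this filter.
+         */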
+ /* May return APR_INCOMPLETE which is fine by us. */
+ apr_brigade_partition(ctx->proc_bb, readbytes, &bkt);
+
+ newbb = apr_brigade_split(ctx->proc_bb, bkt);
+ APR_BRIGADE_CONCAT(bb, ctx->proc_bb);
+ APR_BRIGADE_CONCAT(ctx->proc_bb, newbb);
+ }
+
+ return APR_SUCCESS;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_register_output_filter(deflateFilterName, deflate_out_filter, NULL,
+ AP_FTYPE_CONTENT_SET);
+ ap_register_input_filter(deflateFilterName, deflate_in_filter, NULL,
+ AP_FTYPE_CONTENT_SET);
+}
+
+static const command_rec deflate_filter_cmds[] = {
+ AP_INIT_TAKE12("DeflateFilterNote", deflate_set_note, NULL, RSRC_CONF,
+ "Set a note to report on compression ratio"),
+ AP_INIT_TAKE1("DeflateWindowSize", deflate_set_window_size, NULL,
+ RSRC_CONF, "Set the Deflate window size (1-15)"),
+ AP_INIT_TAKE1("DeflateBufferSize", deflate_set_buffer_size, NULL, RSRC_CONF,
+ "Set the Deflate Buffer Size"),
+ AP_INIT_TAKE1("DeflateMemLevel", deflate_set_memlevel, NULL, RSRC_CONF,
+ "Set the Deflate Memory Level (1-9)"),
+ AP_INIT_TAKE1("DeflateCompressionLevel", deflate_set_compressionlevel, NULL, RSRC_CONF,
+ "Set the Deflate Compression Level (1-9)"),
+ {NULL}
+};
+
+module AP_MODULE_DECLARE_DATA deflate_module = {
+ STANDARD20_MODULE_STUFF,
+    NULL,                         /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ create_deflate_server_config, /* server config */
+ NULL, /* merge server config */
+ deflate_filter_cmds, /* command table */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/mod_deflate.dsp b/rubbos/app/httpd-2.0.64/modules/filters/mod_deflate.dsp
new file mode 100644
index 00000000..e26600ca
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/mod_deflate.dsp
@@ -0,0 +1,127 @@
+# Microsoft Developer Studio Project File - Name="mod_deflate" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_deflate - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_deflate.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_deflate.mak" CFG="mod_deflate - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_deflate - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_deflate - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_deflate - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "HAVE_ZUTIL_H" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/zlib" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_deflate_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:".\Release\mod_deflate.so" /base:@..\..\os\win32\BaseAddr.ref,mod_deflate.so
+# ADD LINK32 kernel32.lib zdll.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Release\mod_deflate.so" /libpath:"../../srclib/zlib" /base:@..\..\os\win32\BaseAddr.ref,mod_deflate.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_deflate - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/zlib" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "HAVE_ZUTIL_H" /Fd"Debug\mod_deflate_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_deflate.so" /base:@..\..\os\win32\BaseAddr.ref,mod_deflate.so
+# ADD LINK32 kernel32.lib zdll.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_deflate.so" /libpath:"../../srclib/zlib" /base:@..\..\os\win32\BaseAddr.ref,mod_deflate.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_deflate - Win32 Release"
+# Name "mod_deflate - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_deflate.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_deflate.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_deflate - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_deflate.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_deflate.so "deflate_module for Apache" ../../include/ap_release.h > .\mod_deflate.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_deflate - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_deflate.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_deflate.so "deflate_module for Apache" ../../include/ap_release.h > .\mod_deflate.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/mod_deflate.exp b/rubbos/app/httpd-2.0.64/modules/filters/mod_deflate.exp
new file mode 100644
index 00000000..9ec76883
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/mod_deflate.exp
@@ -0,0 +1 @@
+deflate_module
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/mod_ext_filter.c b/rubbos/app/httpd-2.0.64/modules/filters/mod_ext_filter.c
new file mode 100644
index 00000000..cc77b40a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/mod_ext_filter.c
@@ -0,0 +1,890 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_ext_filter allows Unix-style filters to filter http content.
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "http_protocol.h"
+#define CORE_PRIVATE
+#include "http_core.h"
+#include "apr_buckets.h"
+#include "util_filter.h"
+#include "util_script.h"
+#include "util_time.h"
+#include "apr_strings.h"
+#include "apr_hash.h"
+#include "apr_lib.h"
+#include "apr_poll.h"
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+typedef struct ef_server_t {
+ apr_pool_t *p;
+ apr_hash_t *h;
+} ef_server_t;
+
+typedef struct ef_filter_t {
+ const char *name;
+ enum {INPUT_FILTER=1, OUTPUT_FILTER} mode;
+ ap_filter_type ftype;
+ const char *command;
+ const char *enable_env;
+ const char *disable_env;
+ char **args;
+ const char *intype; /* list of IMTs we process (well, just one for now) */
+#define INTYPE_ALL (char *)1
+ const char *outtype; /* IMT of filtered output */
+#define OUTTYPE_UNCHANGED (char *)1
+ int preserves_content_length;
+} ef_filter_t;
+
+typedef struct ef_dir_t {
+ int debug;
+ int log_stderr;
+} ef_dir_t;
+
+typedef struct ef_ctx_t {
+ apr_pool_t *p;
+ apr_proc_t *proc;
+ apr_procattr_t *procattr;
+ ef_dir_t *dc;
+ ef_filter_t *filter;
+ int noop;
+#if APR_FILES_AS_SOCKETS
+ apr_pollfd_t *pollset;
+#endif
+} ef_ctx_t;
+
+module AP_MODULE_DECLARE_DATA ext_filter_module;
+static const server_rec *main_server;
+
+static apr_status_t ef_output_filter(ap_filter_t *, apr_bucket_brigade *);
+
+#define DBGLVL_SHOWOPTIONS 1
+#define DBGLVL_ERRORCHECK 2
+#define DBGLVL_GORY 9
+
+#define ERRFN_USERDATA_KEY "EXTFILTCHILDERRFN"
+
+static void *create_ef_dir_conf(apr_pool_t *p, char *dummy)
+{
+ ef_dir_t *dc = (ef_dir_t *)apr_pcalloc(p, sizeof(ef_dir_t));
+
+ dc->debug = -1;
+ dc->log_stderr = -1;
+
+ return dc;
+}
+
+static void *create_ef_server_conf(apr_pool_t *p, server_rec *s)
+{
+ ef_server_t *conf;
+
+ conf = (ef_server_t *)apr_pcalloc(p, sizeof(ef_server_t));
+ conf->p = p;
+ conf->h = apr_hash_make(conf->p);
+ return conf;
+}
+
+static void *merge_ef_dir_conf(apr_pool_t *p, void *basev, void *overridesv)
+{
+ ef_dir_t *a = (ef_dir_t *)apr_pcalloc (p, sizeof(ef_dir_t));
+ ef_dir_t *base = (ef_dir_t *)basev, *over = (ef_dir_t *)overridesv;
+
+ if (over->debug != -1) { /* if admin coded something... */
+ a->debug = over->debug;
+ }
+ else {
+ a->debug = base->debug;
+ }
+
+ if (over->log_stderr != -1) { /* if admin coded something... */
+ a->log_stderr = over->log_stderr;
+ }
+ else {
+ a->log_stderr = base->log_stderr;
+ }
+
+ return a;
+}
+
+static const char *add_options(cmd_parms *cmd, void *in_dc,
+ const char *arg)
+{
+ ef_dir_t *dc = in_dc;
+
+ if (!strncasecmp(arg, "DebugLevel=", 11)) {
+ dc->debug = atoi(arg + 11);
+ }
+ else if (!strcasecmp(arg, "LogStderr")) {
+ dc->log_stderr = 1;
+ }
+ else if (!strcasecmp(arg, "NoLogStderr")) {
+ dc->log_stderr = 0;
+ }
+ else {
+ return apr_pstrcat(cmd->temp_pool,
+ "Invalid ExtFilterOptions option: ",
+ arg,
+ NULL);
+ }
+
+ return NULL;
+}
+
+static const char *parse_cmd(apr_pool_t *p, const char **args, ef_filter_t *filter)
+{
+ if (**args == '"') {
+ const char *start = *args + 1;
+ char *parms;
+ int escaping = 0;
+ apr_status_t rv;
+
+ ++*args; /* move past leading " */
+ /* find true end of args string (accounting for escaped quotes) */
+ while (**args && (**args != '"' || (**args == '"' && escaping))) {
+ if (escaping) {
+ escaping = 0;
+ }
+ else if (**args == '\\') {
+ escaping = 1;
+ }
+ ++*args;
+ }
+ if (**args != '"') {
+ return "Expected cmd= delimiter";
+ }
+ /* copy *just* the arg string for parsing, */
+ parms = apr_pstrndup(p, start, *args - start);
+ ++*args; /* move past trailing " */
+
+ /* parse and tokenize the args. */
+ rv = apr_tokenize_to_argv(parms, &(filter->args), p);
+ if (rv != APR_SUCCESS) {
+ return "cmd= parse error";
+ }
+ }
+ else
+ {
+ /* simple path */
+ /* Allocate space for two argv pointers and parse the args. */
+ filter->args = (char **)apr_palloc(p, 2 * sizeof(char *));
+ filter->args[0] = ap_getword_white(p, args);
+ filter->args[1] = NULL; /* end of args */
+ }
+ if (!filter->args[0]) {
+ return "Invalid cmd= parameter";
+ }
+ filter->command = filter->args[0];
+
+ return NULL;
+}
+
+static const char *define_filter(cmd_parms *cmd, void *dummy, const char *args)
+{
+ ef_server_t *conf = ap_get_module_config(cmd->server->module_config,
+ &ext_filter_module);
+ const char *token;
+ const char *name;
+ ef_filter_t *filter;
+
+ name = ap_getword_white(cmd->pool, &args);
+ if (!name) {
+ return "Filter name not found";
+ }
+
+ if (apr_hash_get(conf->h, name, APR_HASH_KEY_STRING)) {
+ return apr_psprintf(cmd->pool, "ExtFilter %s is already defined",
+ name);
+ }
+
+ filter = (ef_filter_t *)apr_pcalloc(conf->p, sizeof(ef_filter_t));
+ filter->name = name;
+ filter->mode = OUTPUT_FILTER;
+ filter->ftype = AP_FTYPE_RESOURCE;
+ apr_hash_set(conf->h, name, APR_HASH_KEY_STRING, filter);
+
+ while (*args) {
+ while (apr_isspace(*args)) {
+ ++args;
+ }
+
+ /* Nasty parsing... I wish I could simply use ap_getword_white()
+ * here and then look at the token, but ap_getword_white() doesn't
+ * do the right thing when we have cmd="word word word"
+ */
+ if (!strncasecmp(args, "preservescontentlength", 22)) {
+ token = ap_getword_white(cmd->pool, &args);
+ if (!strcasecmp(token, "preservescontentlength")) {
+ filter->preserves_content_length = 1;
+ }
+ else {
+ return apr_psprintf(cmd->pool,
+ "mangled argument `%s'",
+ token);
+ }
+ continue;
+ }
+
+ if (!strncasecmp(args, "mode=", 5)) {
+ args += 5;
+ token = ap_getword_white(cmd->pool, &args);
+ if (!strcasecmp(token, "output")) {
+ filter->mode = OUTPUT_FILTER;
+ }
+ else if (!strcasecmp(token, "input")) {
+ filter->mode = INPUT_FILTER;
+ }
+ else {
+ return apr_psprintf(cmd->pool, "Invalid mode: `%s'",
+ token);
+ }
+ continue;
+ }
+
+ if (!strncasecmp(args, "ftype=", 6)) {
+ args += 6;
+ token = ap_getword_white(cmd->pool, &args);
+ filter->ftype = atoi(token);
+ continue;
+ }
+
+ if (!strncasecmp(args, "enableenv=", 10)) {
+ args += 10;
+ token = ap_getword_white(cmd->pool, &args);
+ filter->enable_env = token;
+ continue;
+ }
+
+ if (!strncasecmp(args, "disableenv=", 11)) {
+ args += 11;
+ token = ap_getword_white(cmd->pool, &args);
+ filter->disable_env = token;
+ continue;
+ }
+
+ if (!strncasecmp(args, "intype=", 7)) {
+ args += 7;
+ filter->intype = ap_getword_white(cmd->pool, &args);
+ continue;
+ }
+
+ if (!strncasecmp(args, "outtype=", 8)) {
+ args += 8;
+ filter->outtype = ap_getword_white(cmd->pool, &args);
+ continue;
+ }
+
+ if (!strncasecmp(args, "cmd=", 4)) {
+ args += 4;
+ if ((token = parse_cmd(cmd->pool, &args, filter))) {
+ return token;
+ }
+ continue;
+ }
+
+ return apr_psprintf(cmd->pool, "Unexpected parameter: `%s'",
+ args);
+ }
+
+ /* parsing is done... register the filter
+ */
+ if (filter->mode == OUTPUT_FILTER) {
+ /* XXX need a way to ensure uniqueness among all filters */
+ ap_register_output_filter(filter->name, ef_output_filter, NULL, filter->ftype);
+ }
+#if 0 /* no input filters yet */
+ else if (filter->mode == INPUT_FILTER) {
+ /* XXX need a way to ensure uniqueness among all filters */
+ ap_register_input_filter(filter->name, ef_input_filter, NULL, AP_FTYPE_RESOURCE);
+ }
+#endif
+ else {
+ ap_assert(1 != 1); /* we set the field wrong somehow */
+ }
+
+ return NULL;
+}
+
+static const command_rec cmds[] =
+{
+ AP_INIT_ITERATE("ExtFilterOptions",
+ add_options,
+ NULL,
+ ACCESS_CONF, /* same as SetInputFilter/SetOutputFilter */
+ "valid options: DebugLevel=n, LogStderr, NoLogStderr"),
+ AP_INIT_RAW_ARGS("ExtFilterDefine",
+ define_filter,
+ NULL,
+ RSRC_CONF,
+ "Define an external filter"),
+ {NULL}
+};
+
+static int ef_init(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *main_s)
+{
+ main_server = main_s;
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(ef_init, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+static apr_status_t set_resource_limits(request_rec *r,
+ apr_procattr_t *procattr)
+{
+#if defined(RLIMIT_CPU) || defined(RLIMIT_NPROC) || \
+ defined(RLIMIT_DATA) || defined(RLIMIT_VMEM) || defined (RLIMIT_AS)
+ core_dir_config *conf =
+ (core_dir_config *)ap_get_module_config(r->per_dir_config,
+ &core_module);
+ apr_status_t rv;
+
+#ifdef RLIMIT_CPU
+ rv = apr_procattr_limit_set(procattr, APR_LIMIT_CPU, conf->limit_cpu);
+ ap_assert(rv == APR_SUCCESS); /* otherwise, we're out of sync with APR */
+#endif
+#if defined(RLIMIT_DATA) || defined(RLIMIT_VMEM) || defined(RLIMIT_AS)
+ rv = apr_procattr_limit_set(procattr, APR_LIMIT_MEM, conf->limit_mem);
+ ap_assert(rv == APR_SUCCESS); /* otherwise, we're out of sync with APR */
+#endif
+#ifdef RLIMIT_NPROC
+ rv = apr_procattr_limit_set(procattr, APR_LIMIT_NPROC, conf->limit_nproc);
+ ap_assert(rv == APR_SUCCESS); /* otherwise, we're out of sync with APR */
+#endif
+
+#endif /* if at least one limit defined */
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t ef_close_file(void *vfile)
+{
+ return apr_file_close(vfile);
+}
+
+static void child_errfn(apr_pool_t *pool, apr_status_t err, const char *description)
+{
+ request_rec *r;
+ void *vr;
+ apr_file_t *stderr_log;
+ char errbuf[200];
+ char time_str[APR_CTIME_LEN];
+
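+    /* The request_rec was stashed in the pool userdata under
+     * ERRFN_USERDATA_KEY by init_ext_filter_process(); fetch it so the
+     * error can be logged against the right client.
+     */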
+ apr_pool_userdata_get(&vr, ERRFN_USERDATA_KEY, pool);
+ r = vr;
+ apr_file_open_stderr(&stderr_log, pool);
+ ap_recent_ctime(time_str, apr_time_now());
+ apr_file_printf(stderr_log,
+ "[%s] [client %s] mod_ext_filter (%d)%s: %s\n",
+ time_str,
+ r->connection->remote_ip,
+ err,
+ apr_strerror(err, errbuf, sizeof(errbuf)),
+ description);
+}
+
+/* init_ext_filter_process: get the external filter process going
+ * This is per-filter-instance (i.e., per-request) initialization.
+ */
+static apr_status_t init_ext_filter_process(ap_filter_t *f)
+{
+ ef_ctx_t *ctx = f->ctx;
+ apr_status_t rc;
+ ef_dir_t *dc = ctx->dc;
+ const char * const *env;
+
+ ctx->proc = apr_pcalloc(ctx->p, sizeof(*ctx->proc));
+
+ rc = apr_procattr_create(&ctx->procattr, ctx->p);
+ ap_assert(rc == APR_SUCCESS);
+
+ rc = apr_procattr_io_set(ctx->procattr,
+ APR_CHILD_BLOCK,
+ APR_CHILD_BLOCK,
+ APR_CHILD_BLOCK);
+ ap_assert(rc == APR_SUCCESS);
+
+ rc = set_resource_limits(f->r, ctx->procattr);
+ ap_assert(rc == APR_SUCCESS);
+
+ if (dc->log_stderr > 0) {
+ rc = apr_procattr_child_err_set(ctx->procattr,
+ f->r->server->error_log, /* stderr in child */
+ NULL);
+ ap_assert(rc == APR_SUCCESS);
+ }
+
+ rc = apr_procattr_child_errfn_set(ctx->procattr, child_errfn);
+ ap_assert(rc == APR_SUCCESS);
+ apr_pool_userdata_set(f->r, ERRFN_USERDATA_KEY, apr_pool_cleanup_null, ctx->p);
+
+ if (dc->debug >= DBGLVL_ERRORCHECK) {
+ rc = apr_procattr_error_check_set(ctx->procattr, 1);
+ ap_assert(rc == APR_SUCCESS);
+ }
+
+ /* add standard CGI variables as well as DOCUMENT_URI, DOCUMENT_PATH_INFO,
+ * and QUERY_STRING_UNESCAPED
+ */
+ ap_add_cgi_vars(f->r);
+ ap_add_common_vars(f->r);
+ apr_table_setn(f->r->subprocess_env, "DOCUMENT_URI", f->r->uri);
+ apr_table_setn(f->r->subprocess_env, "DOCUMENT_PATH_INFO", f->r->path_info);
+ if (f->r->args) {
+ /* QUERY_STRING is added by ap_add_cgi_vars */
+ char *arg_copy = apr_pstrdup(f->r->pool, f->r->args);
+ ap_unescape_url(arg_copy);
+ apr_table_setn(f->r->subprocess_env, "QUERY_STRING_UNESCAPED",
+ ap_escape_shell_cmd(f->r->pool, arg_copy));
+ }
+
+ env = (const char * const *) ap_create_environment(ctx->p,
+ f->r->subprocess_env);
+
+ rc = apr_proc_create(ctx->proc,
+ ctx->filter->command,
+ (const char * const *)ctx->filter->args,
+ env, /* environment */
+ ctx->procattr,
+ ctx->p);
+ if (rc != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, f->r,
+ "couldn't create child process to run `%s'",
+ ctx->filter->command);
+ return rc;
+ }
+
+ apr_pool_note_subprocess(ctx->p, ctx->proc, APR_KILL_AFTER_TIMEOUT);
+
+ /* We don't want the handle to the child's stdin inherited by any
+ * other processes created by httpd. Otherwise, when we close our
+ * handle, the child won't see EOF because another handle will still
+ * be open.
+ */
+
+ apr_pool_cleanup_register(ctx->p, ctx->proc->in,
+ apr_pool_cleanup_null, /* other mechanism */
+ ef_close_file);
+
+#if APR_FILES_AS_SOCKETS
+ {
+ apr_socket_t *newsock;
+
+ rc = apr_poll_setup(&ctx->pollset, 2, ctx->p);
+ ap_assert(rc == APR_SUCCESS);
+ rc = apr_socket_from_file(&newsock, ctx->proc->in);
+ ap_assert(rc == APR_SUCCESS);
+ rc = apr_poll_socket_add(ctx->pollset, newsock, APR_POLLOUT);
+ ap_assert(rc == APR_SUCCESS);
+ rc = apr_socket_from_file(&newsock, ctx->proc->out);
+ ap_assert(rc == APR_SUCCESS);
+ rc = apr_poll_socket_add(ctx->pollset, newsock, APR_POLLIN);
+ ap_assert(rc == APR_SUCCESS);
+ }
+#endif
+
+ return APR_SUCCESS;
+}
+
+static const char *get_cfg_string(ef_dir_t *dc, ef_filter_t *filter, apr_pool_t *p)
+{
+ const char *debug_str = dc->debug == -1 ?
+ "DebugLevel=0" : apr_psprintf(p, "DebugLevel=%d", dc->debug);
+ const char *log_stderr_str = dc->log_stderr < 1 ?
+ "NoLogStderr" : "LogStderr";
+ const char *preserve_content_length_str = filter->preserves_content_length ?
+ "PreservesContentLength" : "!PreserveContentLength";
+ const char *intype_str = !filter->intype ?
+ "*/*" : filter->intype;
+ const char *outtype_str = !filter->outtype ?
+ "(unchanged)" : filter->outtype;
+
+ return apr_psprintf(p,
+ "ExtFilterOptions %s %s %s ExtFilterInType %s "
+ "ExtFilterOuttype %s",
+ debug_str, log_stderr_str, preserve_content_length_str,
+ intype_str, outtype_str);
+}
+
+static ef_filter_t *find_filter_def(const server_rec *s, const char *fname)
+{
+ ef_server_t *sc;
+ ef_filter_t *f;
+
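+    /* Look the filter up in this (virtual) server's table first and fall
+     * back to the main server's definitions if it is not found there.
+     */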
+ sc = ap_get_module_config(s->module_config, &ext_filter_module);
+ f = apr_hash_get(sc->h, fname, APR_HASH_KEY_STRING);
+ if (!f && s != main_server) {
+ s = main_server;
+ sc = ap_get_module_config(s->module_config, &ext_filter_module);
+ f = apr_hash_get(sc->h, fname, APR_HASH_KEY_STRING);
+ }
+ return f;
+}
+
+static apr_status_t init_filter_instance(ap_filter_t *f)
+{
+ ef_ctx_t *ctx;
+ ef_dir_t *dc;
+ apr_status_t rv;
+
+ f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(ef_ctx_t));
+ dc = ap_get_module_config(f->r->per_dir_config,
+ &ext_filter_module);
+ ctx->dc = dc;
+ /* look for the user-defined filter */
+ ctx->filter = find_filter_def(f->r->server, f->frec->name);
+ if (!ctx->filter) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
+ "couldn't find definition of filter '%s'",
+ f->frec->name);
+ return APR_EINVAL;
+ }
+ ctx->p = f->r->pool;
+ if (ctx->filter->intype &&
+ ctx->filter->intype != INTYPE_ALL) {
+ if (!f->r->content_type) {
+ ctx->noop = 1;
+ }
+ else {
+ const char *ctypes = f->r->content_type;
+ const char *ctype = ap_getword(f->r->pool, &ctypes, ';');
+
+ if (strcasecmp(ctx->filter->intype, ctype)) {
+ /* wrong IMT for us; don't mess with the output */
+ ctx->noop = 1;
+ }
+ }
+ }
+ if (ctx->filter->enable_env &&
+ !apr_table_get(f->r->subprocess_env, ctx->filter->enable_env)) {
+ /* an environment variable that enables the filter isn't set; bail */
+ ctx->noop = 1;
+ }
+ if (ctx->filter->disable_env &&
+ apr_table_get(f->r->subprocess_env, ctx->filter->disable_env)) {
+ /* an environment variable that disables the filter is set; bail */
+ ctx->noop = 1;
+ }
+ if (!ctx->noop) {
+ rv = init_ext_filter_process(f);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ if (ctx->filter->outtype &&
+ ctx->filter->outtype != OUTTYPE_UNCHANGED) {
+ ap_set_content_type(f->r, ctx->filter->outtype);
+ }
+ if (ctx->filter->preserves_content_length != 1) {
+ /* nasty, but needed to avoid confusing the browser
+ */
+ apr_table_unset(f->r->headers_out, "Content-Length");
+ }
+ }
+
+ if (dc->debug >= DBGLVL_SHOWOPTIONS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
+ "%sfiltering `%s' of type `%s' through `%s', cfg %s",
+ ctx->noop ? "NOT " : "",
+ f->r->uri ? f->r->uri : f->r->filename,
+ f->r->content_type ? f->r->content_type : "(unspecified)",
+ ctx->filter->command,
+ get_cfg_string(dc, ctx->filter, f->r->pool));
+ }
+
+ return APR_SUCCESS;
+}
+
+/* drain_available_output():
+ *
+ * if any data is available from the filter, read it and pass it
+ * to the next filter
+ */
+static apr_status_t drain_available_output(ap_filter_t *f)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ ef_ctx_t *ctx = f->ctx;
+ ef_dir_t *dc = ctx->dc;
+ apr_size_t len;
+ char buf[4096];
+ apr_status_t rv;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+
+ while (1) {
+ len = sizeof(buf);
+ rv = apr_file_read(ctx->proc->out,
+ buf,
+ &len);
+ if ((rv && !APR_STATUS_IS_EAGAIN(rv)) ||
+ dc->debug >= DBGLVL_GORY) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
+ "apr_file_read(child output), len %" APR_SIZE_T_FMT,
+ !rv ? len : -1);
+ }
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ b = apr_bucket_transient_create(buf, len, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ if ((rv = ap_pass_brigade(f->next, bb)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "ap_pass_brigade()");
+ return rv;
+ }
+ }
+ /* we should never get here; if we do, a bogus error message would be
+ * the least of our problems
+ */
+ return APR_ANONYMOUS;
+}
+
+static apr_status_t pass_data_to_filter(ap_filter_t *f, const char *data,
+ apr_size_t len)
+{
+ ef_ctx_t *ctx = f->ctx;
+ ef_dir_t *dc = ctx->dc;
+ apr_status_t rv;
+ apr_size_t bytes_written = 0;
+ apr_size_t tmplen;
+
+ do {
+ tmplen = len - bytes_written;
+ rv = apr_file_write(ctx->proc->in,
+ (const char *)data + bytes_written,
+ &tmplen);
+ bytes_written += tmplen;
+ if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r,
+ "apr_file_write(child input), len %" APR_SIZE_T_FMT,
+ tmplen);
+ return rv;
+ }
+ if (APR_STATUS_IS_EAGAIN(rv)) {
+ /* XXX handle blocking conditions here... if we block, we need
+ * to read data from the child process and pass it down to the
+ * next filter!
+ */
+ rv = drain_available_output(f);
+ if (APR_STATUS_IS_EAGAIN(rv)) {
+#if APR_FILES_AS_SOCKETS
+ int num_events;
+
+ rv = apr_poll(ctx->pollset, 2,
+ &num_events, f->r->server->timeout);
+ if (rv || dc->debug >= DBGLVL_GORY) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG,
+ rv, f->r, "apr_poll()");
+ }
+ if (rv != APR_SUCCESS && !APR_STATUS_IS_EINTR(rv)) {
+ /* some error such as APR_TIMEUP */
+ return rv;
+ }
+#else /* APR_FILES_AS_SOCKETS */
+ /* Yuck... I'd really like to wait until I can read
+ * or write, but instead I have to sleep and try again
+ */
+ apr_sleep(100000); /* 100 milliseconds */
+ if (dc->debug >= DBGLVL_GORY) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG,
+ 0, f->r, "apr_sleep()");
+ }
+#endif /* APR_FILES_AS_SOCKETS */
+ }
+ else if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+ } while (bytes_written < len);
+ return rv;
+}
+
+static apr_status_t ef_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ ef_ctx_t *ctx = f->ctx;
+ apr_bucket *b;
+ ef_dir_t *dc;
+ apr_size_t len;
+ const char *data;
+ apr_status_t rv;
+ char buf[4096];
+ apr_bucket *eos = NULL;
+
+ if (!ctx) {
+ if ((rv = init_filter_instance(f)) != APR_SUCCESS) {
+ return rv;
+ }
+ ctx = f->ctx;
+ }
+ if (ctx->noop) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+ dc = ctx->dc;
+
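+    /* First feed every data bucket in this brigade to the child's stdin,
+     * stopping at EOS, then read back whatever output the child has
+     * produced so far and pass it down the filter chain.
+     */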
+ APR_BRIGADE_FOREACH(b, bb) {
+
+ if (APR_BUCKET_IS_EOS(b)) {
+ eos = b;
+ break;
+ }
+
+ rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "apr_bucket_read()");
+ return rv;
+ }
+
+ /* Good cast, we just tested len isn't negative */
+ if (len > 0 &&
+ (rv = pass_data_to_filter(f, data, (apr_size_t)len))
+ != APR_SUCCESS) {
+ return rv;
+ }
+ }
+
+ apr_brigade_destroy(bb);
+
+ /* XXX What we *really* need to do once we've hit eos is create a pipe bucket
+ * from the child output pipe and pass down the pipe bucket + eos.
+ */
+ if (eos) {
+ /* close the child's stdin to signal that no more data is coming;
+ * that will cause the child to finish generating output
+ */
+ if ((rv = apr_file_close(ctx->proc->in)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "apr_file_close(child input)");
+ return rv;
+ }
+ /* since we've seen eos and closed the child's stdin, set the proper pipe
+ * timeout; we don't care if we don't return from apr_file_read() for a while...
+ */
+ rv = apr_file_pipe_timeout_set(ctx->proc->out,
+ r->server->timeout);
+ if (rv) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "apr_file_pipe_timeout_set(child output)");
+ return rv;
+ }
+ }
+
+ do {
+ len = sizeof(buf);
+ rv = apr_file_read(ctx->proc->out,
+ buf,
+ &len);
+ if ((rv && !APR_STATUS_IS_EOF(rv) && !APR_STATUS_IS_EAGAIN(rv)) ||
+ dc->debug >= DBGLVL_GORY) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
+ "apr_file_read(child output), len %" APR_SIZE_T_FMT,
+ !rv ? len : -1);
+ }
+ if (APR_STATUS_IS_EAGAIN(rv)) {
+ if (eos) {
+ /* should not occur, because we have an APR timeout in place */
+ AP_DEBUG_ASSERT(1 != 1);
+ }
+ return APR_SUCCESS;
+ }
+
+ if (rv == APR_SUCCESS) {
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ b = apr_bucket_transient_create(buf, len, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ if ((rv = ap_pass_brigade(f->next, bb)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "ap_pass_brigade(filtered buffer) failed");
+ return rv;
+ }
+ }
+ } while (rv == APR_SUCCESS);
+
+ if (!APR_STATUS_IS_EOF(rv)) {
+ return rv;
+ }
+
+ if (eos) {
+ /* pass down eos */
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ if ((rv = ap_pass_brigade(f->next, bb)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "ap_pass_brigade(eos) failed");
+ return rv;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+#if 0
+static int ef_input_filter(ap_filter_t *f, apr_bucket_brigade *bb,
+ ap_input_mode_t mode, apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ apr_status_t rv;
+ apr_bucket *b;
+ char *buf;
+ apr_ssize_t len;
+ char *zero;
+
+ rv = ap_get_brigade(f->next, bb, mode, block, readbytes);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ APR_BRIGADE_FOREACH(b, bb) {
+ if (!APR_BUCKET_IS_EOS(b)) {
+ if ((rv = apr_bucket_read(b, (const char **)&buf, &len, APR_BLOCK_READ)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r, "apr_bucket_read() failed");
+ return rv;
+ }
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL, "apr_bucket_read -> %d bytes",
+ len);
+ while ((zero = memchr(buf, '0', len))) {
+ *zero = 'a';
+ }
+ }
+ else
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL, "got eos bucket");
+ }
+
+ return rv;
+}
+#endif
+
+module AP_MODULE_DECLARE_DATA ext_filter_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_ef_dir_conf,
+ merge_ef_dir_conf,
+ create_ef_server_conf,
+ NULL,
+ cmds,
+ register_hooks
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/mod_ext_filter.dsp b/rubbos/app/httpd-2.0.64/modules/filters/mod_ext_filter.dsp
new file mode 100644
index 00000000..fccf6e8b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/mod_ext_filter.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_ext_filter" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_ext_filter - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_ext_filter.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_ext_filter.mak" CFG="mod_ext_filter - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_ext_filter - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_ext_filter - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_ext_filter - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_ext_filter_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_ext_filter.so" /base:@..\..\os\win32\BaseAddr.ref,mod_ext_filter.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_ext_filter.so" /base:@..\..\os\win32\BaseAddr.ref,mod_ext_filter.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_ext_filter - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_ext_filter_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_ext_filter.so" /base:@..\..\os\win32\BaseAddr.ref,mod_ext_filter.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_ext_filter.so" /base:@..\..\os\win32\BaseAddr.ref,mod_ext_filter.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_ext_filter - Win32 Release"
+# Name "mod_ext_filter - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_ext_filter.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_ext_filter.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_ext_filter - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_ext_filter.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_ext_filter.so "ext_filter_module for Apache" ../../include/ap_release.h > .\mod_ext_filter.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_ext_filter - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_ext_filter.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_ext_filter.so "ext_filter_module for Apache" ../../include/ap_release.h > .\mod_ext_filter.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/mod_ext_filter.exp b/rubbos/app/httpd-2.0.64/modules/filters/mod_ext_filter.exp
new file mode 100644
index 00000000..ed3b8fc6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/mod_ext_filter.exp
@@ -0,0 +1 @@
+ext_filter_module
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/mod_include.c b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.c
new file mode 100644
index 00000000..38dc3213
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.c
@@ -0,0 +1,3751 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_include.c: Handles the server-parsed HTML documents
+ *
+ * Original by Rob McCool; substantial fixups by David Robinson;
+ * incorporated into the Apache module framework by rst.
+ *
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_thread_proc.h"
+#include "apr_hash.h"
+#include "apr_user.h"
+#include "apr_lib.h"
+#include "apr_optional.h"
+
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#define CORE_PRIVATE
+
+#include "ap_config.h"
+#include "util_filter.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_request.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "util_script.h"
+#include "http_core.h"
+
+#define MOD_INCLUDE_REDESIGN
+#include "mod_include.h"
+#include "util_ebcdic.h"
+
+module AP_MODULE_DECLARE_DATA include_module;
+static apr_hash_t *include_hash;
+static APR_OPTIONAL_FN_TYPE(ap_register_include_handler) *ssi_pfn_register;
+
+/*****************************************************************
+ *
+ * XBITHACK. Sigh... NB it's configurable per-directory; the compile-time
+ * option only changes the default.
+ */
+
+enum xbithack {
+ xbithack_off, xbithack_on, xbithack_full
+};
+
+struct bndm_t {
+ unsigned int T[256];
+ unsigned int x;
+} ;
+
+typedef struct {
+ char *default_error_msg;
+ char *default_time_fmt;
+ enum xbithack *xbithack;
+} include_dir_config;
+
+typedef struct {
+ char *default_start_tag;
+ char *default_end_tag;
+ int start_tag_len;
+ bndm_t start_seq_pat;
+ char *undefinedEcho;
+ int undefinedEchoLen;
+} include_server_config;
+
+/* main parser states */
+typedef enum {
+ PARSE_PRE_HEAD,
+ PARSE_HEAD,
+ PARSE_DIRECTIVE,
+ PARSE_DIRECTIVE_POSTNAME,
+ PARSE_DIRECTIVE_TAIL,
+ PARSE_DIRECTIVE_POSTTAIL,
+ PARSE_PRE_ARG,
+ PARSE_ARG,
+ PARSE_ARG_NAME,
+ PARSE_ARG_POSTNAME,
+ PARSE_ARG_EQ,
+ PARSE_ARG_PREVAL,
+ PARSE_ARG_VAL,
+ PARSE_ARG_VAL_ESC,
+ PARSE_ARG_POSTVAL,
+ PARSE_TAIL,
+ PARSE_TAIL_SEQ,
+ PARSE_EXECUTE
+} parse_state_t;
+
+typedef struct ssi_arg_item {
+ struct ssi_arg_item *next;
+ char *name;
+ apr_size_t name_len;
+ char *value;
+ apr_size_t value_len;
+} ssi_arg_item_t;
+
+typedef struct {
+ parse_state_t state;
+ int seen_eos;
+ int error;
+ char quote; /* quote character value (or \0) */
+
+ apr_bucket_brigade *tmp_bb;
+
+ apr_size_t end_seq_len;
+ char *directive; /* name of the current directive */
+
+ unsigned argc; /* argument counter (of the current
+ * directive)
+ */
+ ssi_arg_item_t *argv; /* all arguments */
+ ssi_arg_item_t *current_arg; /* currently parsed argument */
+ request_rec *r;
+ include_ctx_t *ctx; /* public part of the context structure */
+
+ apr_pool_t *dpool;
+} ssi_ctx_t;
+
+#ifdef XBITHACK
+#define DEFAULT_XBITHACK xbithack_full
+#else
+#define DEFAULT_XBITHACK xbithack_off
+#endif
+
+#define BYTE_COUNT_THRESHOLD AP_MIN_BYTES_TO_WRITE
+
+#define SSI_CREATE_ERROR_BUCKET(ctx, f, bb) APR_BRIGADE_INSERT_TAIL((bb), \
+ apr_bucket_pool_create(apr_pstrdup((ctx)->pool, (ctx)->error_str), \
+ strlen((ctx)->error_str), (ctx)->pool, \
+ (f)->c->bucket_alloc))
+
+/* ------------------------ Environment function -------------------------- */
+
+/* Sentinel value to store in subprocess_env for items that
+ * shouldn't be evaluated until/unless they're actually used
+ */
+static const char lazy_eval_sentinel;
+#define LAZY_VALUE (&lazy_eval_sentinel)
+
+static void add_include_vars(request_rec *r, char *timefmt)
+{
+ apr_table_t *e = r->subprocess_env;
+ char *t;
+
+ apr_table_setn(e, "DATE_LOCAL", LAZY_VALUE);
+ apr_table_setn(e, "DATE_GMT", LAZY_VALUE);
+ apr_table_setn(e, "LAST_MODIFIED", LAZY_VALUE);
+ apr_table_setn(e, "DOCUMENT_URI", r->uri);
+ if (r->path_info && *r->path_info) {
+ apr_table_setn(e, "DOCUMENT_PATH_INFO", r->path_info);
+ }
+ apr_table_setn(e, "USER_NAME", LAZY_VALUE);
+ if (r->filename && (t = strrchr(r->filename, '/'))) {
+ apr_table_setn(e, "DOCUMENT_NAME", ++t);
+ }
+ else {
+ apr_table_setn(e, "DOCUMENT_NAME", r->uri);
+ }
+ if (r->args) {
+ char *arg_copy = apr_pstrdup(r->pool, r->args);
+
+ ap_unescape_url(arg_copy);
+ apr_table_setn(e, "QUERY_STRING_UNESCAPED",
+ ap_escape_shell_cmd(r->pool, arg_copy));
+ }
+}
+
+static const char *add_include_vars_lazy(request_rec *r, const char *var)
+{
+ char *val;
+ if (!strcasecmp(var, "DATE_LOCAL")) {
+ include_dir_config *conf =
+ (include_dir_config *)ap_get_module_config(r->per_dir_config,
+ &include_module);
+ val = ap_ht_time(r->pool, r->request_time, conf->default_time_fmt, 0);
+ }
+ else if (!strcasecmp(var, "DATE_GMT")) {
+ include_dir_config *conf =
+ (include_dir_config *)ap_get_module_config(r->per_dir_config,
+ &include_module);
+ val = ap_ht_time(r->pool, r->request_time, conf->default_time_fmt, 1);
+ }
+ else if (!strcasecmp(var, "LAST_MODIFIED")) {
+ include_dir_config *conf =
+ (include_dir_config *)ap_get_module_config(r->per_dir_config,
+ &include_module);
+ val = ap_ht_time(r->pool, r->finfo.mtime, conf->default_time_fmt, 0);
+ }
+ else if (!strcasecmp(var, "USER_NAME")) {
+ if (apr_get_username(&val, r->finfo.user, r->pool) != APR_SUCCESS) {
+ val = "<unknown>";
+ }
+ }
+ else {
+ val = NULL;
+ }
+
+ if (val) {
+ apr_table_setn(r->subprocess_env, var, val);
+ }
+ return val;
+}
+
+static const char *get_include_var(request_rec *r, include_ctx_t *ctx,
+ const char *var)
+{
+ const char *val;
+ if (apr_isdigit(*var) && !var[1]) {
+ /* Handle $0 .. $9 from the last regex evaluated.
+ * The choice of returning NULL strings on not-found,
+         * vs. empty strings on an empty match is deliberate.
+ */
+ if (!ctx->re_result || !ctx->re_string) {
+ return NULL;
+ }
+ else {
+ int idx = atoi(var);
+ apr_size_t len = (*ctx->re_result)[idx].rm_eo
+ - (*ctx->re_result)[idx].rm_so;
+ if ( (*ctx->re_result)[idx].rm_so < 0
+ || (*ctx->re_result)[idx].rm_eo < 0) {
+ return NULL;
+ }
+ val = apr_pstrmemdup(r->pool, ctx->re_string
+ + (*ctx->re_result)[idx].rm_so, len);
+ }
+ }
+ else {
+ val = apr_table_get(r->subprocess_env, var);
+
+ if (val == LAZY_VALUE)
+ val = add_include_vars_lazy(r, var);
+ }
+ return val;
+}
+
+/* --------------------------- Parser functions --------------------------- */
+
+/* This is an implementation of the BNDM search algorithm.
+ *
+ * Fast and Flexible String Matching by Combining Bit-parallelism and
+ * Suffix Automata (2001)
+ * Gonzalo Navarro, Mathieu Raffinot
+ *
+ * http://www-igm.univ-mlv.fr/~raffinot/ftp/jea2001.ps.gz
+ *
+ * Initial code submitted by Sascha Schumann.
+ */
+
+/* Precompile the bndm_t data structure. */
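+/* T[c] gets bit j set when pattern byte j equals c; x ends up as an
+ * nl-bit mask, (1 << nl) - 1, bounding the shift-and state used by bndm().
+ */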
+static void bndm_compile(bndm_t *t, const char *n, apr_size_t nl)
+{
+ unsigned int x;
+ const char *ne = n + nl;
+
+ memset(t->T, 0, sizeof(unsigned int) * 256);
+
+ for (x = 1; n < ne; x <<= 1)
+ t->T[(unsigned char) *n++] |= x;
+
+ t->x = x - 1;
+}
+
+/* Implements the BNDM search algorithm (as described above).
+ *
+ * n - the pattern to search for
+ * nl - length of the pattern to search for
+ * h - the string to look in
+ * hl - length of the string to look for
+ * t - precompiled bndm structure against the pattern
+ *
+ * Returns the 0-based offset of the first character of the first match,
+ * or hl if no match is found.
+ */
+static apr_size_t bndm(const char *n, apr_size_t nl, const char *h,
+ apr_size_t hl, bndm_t *t)
+{
+ const char *skip;
+ const char *he, *p, *pi;
+ unsigned int *T, x, d;
+
+ he = h + hl;
+
+ T = t->T;
+ x = t->x;
+
+ pi = h - 1; /* pi: p initial */
+ p = pi + nl; /* compare window right to left. point to the first char */
+
+ while (p < he) {
+ skip = p;
+ d = x;
+ do {
+ d &= T[(unsigned char) *p--];
+ if (!d) {
+ break;
+ }
+ if ((d & 1)) {
+ if (p != pi)
+ skip = p;
+ else
+ return p - h + 1;
+ }
+ d >>= 1;
+ } while (d);
+
+ pi = skip;
+ p = pi + nl;
+ }
+
+ return hl;
+}
+
+/*
+ * decodes a string containing html entities or numeric character references.
+ * 's' is overwritten with the decoded string.
+ * If 's' is syntactically incorrect, then the following fixups will be made:
+ * unknown entities will be left undecoded;
+ * references to unused numeric characters will be deleted.
+ * In particular, &#00; will not be decoded, but will be deleted.
+ *
+ * drtr
+ */
+
+/* maximum length of any ISO-LATIN-1 HTML entity name. */
+#define MAXENTLEN (6)
+
+/* The following is a shrinking transformation, therefore safe. */
+
+static void decodehtml(char *s)
+{
+ int val, i, j;
+ char *p;
+ const char *ents;
+ static const char * const entlist[MAXENTLEN + 1] =
+ {
+ NULL, /* 0 */
+ NULL, /* 1 */
+ "lt\074gt\076", /* 2 */
+ "amp\046ETH\320eth\360", /* 3 */
+ "quot\042Auml\304Euml\313Iuml\317Ouml\326Uuml\334auml\344euml\353\
+iuml\357ouml\366uuml\374yuml\377", /* 4 */
+ "Acirc\302Aring\305AElig\306Ecirc\312Icirc\316Ocirc\324Ucirc\333\
+THORN\336szlig\337acirc\342aring\345aelig\346ecirc\352icirc\356ocirc\364\
+ucirc\373thorn\376", /* 5 */
+ "Agrave\300Aacute\301Atilde\303Ccedil\307Egrave\310Eacute\311\
+Igrave\314Iacute\315Ntilde\321Ograve\322Oacute\323Otilde\325Oslash\330\
+Ugrave\331Uacute\332Yacute\335agrave\340aacute\341atilde\343ccedil\347\
+egrave\350eacute\351igrave\354iacute\355ntilde\361ograve\362oacute\363\
+otilde\365oslash\370ugrave\371uacute\372yacute\375" /* 6 */
+ };
+
+ /* Do a fast scan through the string until we find anything
+ * that needs more complicated handling
+ */
+ for (; *s != '&'; s++) {
+ if (*s == '\0') {
+ return;
+ }
+ }
+
+ for (p = s; *s != '\0'; s++, p++) {
+ if (*s != '&') {
+ *p = *s;
+ continue;
+ }
+ /* find end of entity */
+ for (i = 1; s[i] != ';' && s[i] != '\0'; i++) {
+ continue;
+ }
+
+ if (s[i] == '\0') { /* treat as normal data */
+ *p = *s;
+ continue;
+ }
+
+ /* is it numeric ? */
+ if (s[1] == '#') {
+ for (j = 2, val = 0; j < i && apr_isdigit(s[j]); j++) {
+ val = val * 10 + s[j] - '0';
+ }
+ s += i;
+ if (j < i || val <= 8 || (val >= 11 && val <= 31) ||
+ (val >= 127 && val <= 160) || val >= 256) {
+ p--; /* no data to output */
+ }
+ else {
+ *p = RAW_ASCII_CHAR(val);
+ }
+ }
+ else {
+ j = i - 1;
+ if (j > MAXENTLEN || entlist[j] == NULL) {
+ /* wrong length */
+ *p = '&';
+ continue; /* skip it */
+ }
+ for (ents = entlist[j]; *ents != '\0'; ents += i) {
+ if (strncmp(s + 1, ents, j) == 0) {
+ break;
+ }
+ }
+
+ if (*ents == '\0') {
+ *p = '&'; /* unknown */
+ }
+ else {
+ *p = RAW_ASCII_CHAR(((const unsigned char *) ents)[j]);
+ s += i;
+ }
+ }
+ }
+
+ *p = '\0';
+}
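+
+/* Example behaviour (illustrative): decodehtml() rewrites
+ *     "&lt;b&gt; &amp; &#65;"   into   "<b> & A"
+ * in place; an unknown entity such as "&bogus;" is left untouched, and a
+ * reference to an unused code point such as "&#7;" is simply removed.
+ */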
+
+/*
+ * Extract the next tag name and value.
+ * If there are no more tags, set the tag name to NULL.
+ * The tag value is html decoded if dodecode is non-zero.
+ * The tag value may be NULL if there is no tag value.
+ * format:
+ * [WS]<Tag>[WS]=[WS]['|"|`]<Value>[['|"|`|]|WS]
+ */
+
+#define SKIP_TAG_WHITESPACE(ptr) while ((*ptr != '\0') && (apr_isspace (*ptr))) ptr++
+
+static void ap_ssi_get_tag_and_value(include_ctx_t *ctx, char **tag,
+ char **tag_val, int dodecode)
+{
+ *tag_val = NULL;
+ if (ctx->curr_tag_pos >= ctx->combined_tag + ctx->tag_length) {
+ *tag = NULL;
+ return;
+ }
+
+ *tag = ctx->curr_tag_pos;
+ if (!**tag) {
+ *tag = NULL;
+ /* finitio */
+ ctx->curr_tag_pos = ctx->combined_tag + ctx->tag_length;
+ return;
+ }
+
+ *tag_val = ap_strchr(*tag, '=');
+ if (!*tag_val) {
+ ctx->curr_tag_pos = ctx->combined_tag + ctx->tag_length;
+ return;
+ }
+
+ /* if it starts with '=' there was no tag name, just a value */
+ if (*tag_val == *tag) {
+ *tag = NULL;
+ }
+
+ *(*tag_val)++ = '\0';
+ ctx->curr_tag_pos = *tag_val + strlen(*tag_val) + 1; /* skip \0 byte */
+
+ if (dodecode) {
+ decodehtml(*tag_val);
+ }
+
+ return;
+}
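+
+/* Example (illustrative): for a directive such as
+ *     <!--#include virtual="/footer.html" -->
+ * the first call returns tag "virtual" and tag_val "/footer.html"
+ * (html-decoded when dodecode is non-zero); a subsequent call returns
+ * tag == NULL once the combined tag buffer is exhausted.
+ */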
+
+/* initial buffer size for power-of-two allocator in ap_ssi_parse_string */
+#define PARSE_STRING_INITIAL_SIZE 64
+
+/*
+ * Do variable substitution on strings
+ * (Note: If out==NULL, this function allocs a buffer for the resulting
+ * string from r->pool. The return value is the parsed string)
+ */
+static char *ap_ssi_parse_string(request_rec *r, include_ctx_t *ctx,
+ const char *in, char *out,
+ apr_size_t length, int leave_name)
+{
+ char ch;
+ char *next;
+ char *end_out;
+ apr_size_t out_size;
+
+ /* allocate an output buffer if needed */
+ if (!out) {
+ out_size = PARSE_STRING_INITIAL_SIZE;
+ if (out_size > length) {
+ out_size = length;
+ }
+ out = apr_palloc(r->pool, out_size);
+ }
+ else {
+ out_size = length;
+ }
+
+ /* leave room for nul terminator */
+ end_out = out + out_size - 1;
+
+ next = out;
+ while ((ch = *in++) != '\0') {
+ switch (ch) {
+ case '\\':
+ if (next == end_out) {
+ if (out_size < length) {
+ /* double the buffer size */
+ apr_size_t new_out_size = out_size * 2;
+ apr_size_t current_length = next - out;
+ char *new_out;
+ if (new_out_size > length) {
+ new_out_size = length;
+ }
+ new_out = apr_palloc(r->pool, new_out_size);
+ memcpy(new_out, out, current_length);
+ out = new_out;
+ out_size = new_out_size;
+ end_out = out + out_size - 1;
+ next = out + current_length;
+ }
+ else {
+ /* truncated */
+ *next = '\0';
+ return out;
+ }
+ }
+ if (*in == '$') {
+ *next++ = *in++;
+ }
+ else {
+ *next++ = ch;
+ }
+ break;
+ case '$':
+ {
+ const char *start_of_var_name;
+ char *end_of_var_name; /* end of var name + 1 */
+ const char *expansion, *temp_end, *val;
+ char tmp_store;
+ apr_size_t l;
+
+ /* guess that the expansion won't happen */
+ expansion = in - 1;
+ if (*in == '{') {
+ ++in;
+ start_of_var_name = in;
+ in = ap_strchr_c(in, '}');
+ if (in == NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR,
+ 0, r, "Missing '}' on variable \"%s\"",
+ expansion);
+ *next = '\0';
+ return out;
+ }
+ temp_end = in;
+ end_of_var_name = (char *)temp_end;
+ ++in;
+ }
+ else {
+ start_of_var_name = in;
+ while (apr_isalnum(*in) || *in == '_') {
+ ++in;
+ }
+ temp_end = in;
+ end_of_var_name = (char *)temp_end;
+ }
+ /* what a pain, too bad there's no table_getn where you can
+ * pass a non-nul terminated string */
+ l = end_of_var_name - start_of_var_name;
+ if (l != 0) {
+ tmp_store = *end_of_var_name;
+ *end_of_var_name = '\0';
+ val = get_include_var(r, ctx, start_of_var_name);
+ *end_of_var_name = tmp_store;
+
+ if (val) {
+ expansion = val;
+ l = strlen(expansion);
+ }
+ else if (leave_name) {
+ l = in - expansion;
+ }
+ else {
+ /* no expansion to be done */
+ break;
+ }
+ }
+ else {
+ /* zero-length variable name causes just the $ to be
+ * copied */
+ l = 1;
+ }
+ if ((next + l > end_out) && (out_size < length)) {
+ /* increase the buffer size to accommodate l more chars */
+ apr_size_t new_out_size = out_size;
+ apr_size_t current_length = next - out;
+ char *new_out;
+ do {
+ new_out_size *= 2;
+ } while (new_out_size < current_length + l + 1); /* +1 for NUL */
+ if (new_out_size > length) {
+ new_out_size = length;
+ }
+ new_out = apr_palloc(r->pool, new_out_size);
+ memcpy(new_out, out, current_length);
+ out = new_out;
+ out_size = new_out_size;
+ end_out = out + out_size - 1;
+ next = out + current_length;
+ }
+ l = ((int)l > end_out - next) ? (end_out - next) : l;
+ memcpy(next, expansion, l);
+ next += l;
+ break;
+ }
+ default:
+ if (next == end_out) {
+ if (out_size < length) {
+ /* double the buffer size */
+ apr_size_t new_out_size = out_size * 2;
+ apr_size_t current_length = next - out;
+ char *new_out;
+ if (new_out_size > length) {
+ new_out_size = length;
+ }
+ new_out = apr_palloc(r->pool, new_out_size);
+ memcpy(new_out, out, current_length);
+ out = new_out;
+ out_size = new_out_size;
+ end_out = out + out_size - 1;
+ next = out + current_length;
+ }
+ else {
+ /* truncated */
+ *next = '\0';
+ return out;
+ }
+ }
+ *next++ = ch;
+ break;
+ }
+ }
+ *next = '\0';
+ return out;
+}
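+
+/* Example (illustrative): for a tag value of
+ *     doc=${DOCUMENT_URI}, cost \$5
+ * with DOCUMENT_URI set to "/index.shtml", the parsed result is
+ *     doc=/index.shtml, cost $5
+ * An undefined variable expands to nothing unless leave_name is non-zero,
+ * in which case the literal "$NAME" text is kept.
+ */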
+
+/* --------------------------- Action handlers ---------------------------- */
+
+/* ensure that path is relative, and does not contain ".." elements
+ * essentially, ensure that it does not match the regex:
+ * (^/|(^|/)\.\.(/|$))
+ * XXX: Simply replace with apr_filepath_merge
+ */
+static int is_only_below(const char *path)
+{
+#ifdef HAVE_DRIVE_LETTERS
+ if (path[1] == ':')
+ return 0;
+#endif
+#ifdef NETWARE
+ if (ap_strchr_c(path, ':'))
+ return 0;
+#endif
+ if (path[0] == '/') {
+ return 0;
+ }
+ while (*path) {
+ int dots = 0;
+ while (path[dots] == '.')
+ ++dots;
+#if defined(WIN32)
+ /* If the name is canonical this is redundant
+ * but in security, redundancy is worthwhile.
+ * Does OS2 belong here (accepts ... for ..)?
+ */
+ if (dots > 1 && (!path[dots] || path[dots] == '/'))
+ return 0;
+#else
+ if (dots == 2 && (!path[dots] || path[dots] == '/'))
+ return 0;
+#endif
+ path += dots;
+ /* Advance to either the null byte at the end of the
+ * string or the character right after the next slash,
+ * whichever comes first
+ */
+ while (*path && (*path++ != '/')) {
+ continue;
+ }
+ }
+ return 1;
+}
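+
+/* Examples (illustrative):
+ *   is_only_below("docs/a.html")  -> 1  (relative, no "..")
+ *   is_only_below("/etc/passwd")  -> 0  (absolute path)
+ *   is_only_below("../secret")    -> 0  (leading "..")
+ *   is_only_below("a/../../b")    -> 0  (".." component)
+ */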
+
+static int handle_include(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ request_rec *r, ap_filter_t *f, apr_bucket *head_ptr,
+ apr_bucket **inserted_head)
+{
+ char *tag = NULL;
+ char *tag_val = NULL;
+ apr_bucket *tmp_buck;
+ char *parsed_string;
+ int loglevel = APLOG_ERR;
+
+ *inserted_head = NULL;
+ if (ctx->flags & FLAG_PRINTING) {
+ while (1) {
+ ap_ssi_get_tag_and_value(ctx, &tag, &tag_val, 1);
+ if (tag_val == NULL) {
+ if (tag == NULL) {
+ return (0);
+ }
+ else {
+ return (1);
+ }
+ }
+ if (!strcmp(tag, "virtual") || !strcmp(tag, "file")) {
+ request_rec *rr = NULL;
+ char *error_fmt = NULL;
+ apr_status_t rc = APR_SUCCESS;
+
+ SPLIT_AND_PASS_PRETAG_BUCKETS(*bb, ctx, f->next, rc);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
+
+ parsed_string = ap_ssi_parse_string(r, ctx, tag_val, NULL,
+ MAX_STRING_LEN, 0);
+ if (tag[0] == 'f') {
+ /* XXX: Port to apr_filepath_merge
+ * be safe; only files in this directory or below allowed
+ */
+ if (!is_only_below(parsed_string)) {
+ error_fmt = "unable to include file \"%s\" "
+ "in parsed file %s";
+ }
+ else {
+ rr = ap_sub_req_lookup_uri(parsed_string, r, f->next);
+ }
+ }
+ else {
+ rr = ap_sub_req_lookup_uri(parsed_string, r, f->next);
+ }
+
+ if (!error_fmt && rr->status != HTTP_OK) {
+ error_fmt = "unable to include \"%s\" in parsed file %s";
+ }
+
+ if (!error_fmt && (ctx->flags & FLAG_NO_EXEC) &&
+ rr->content_type &&
+ (strncmp(rr->content_type, "text/", 5))) {
+ error_fmt = "unable to include potential exec \"%s\" "
+ "in parsed file %s";
+ }
+
+ /* See the Kludge in send_parsed_file for why: basically, it puts a
+ * bread crumb in here, then looks for the crumb later to see if
+ * it's been here. */
+ if (rr)
+ ap_set_module_config(rr->request_config,
+ &include_module, r);
+
+ if (!error_fmt && ap_run_sub_req(rr)) {
+ error_fmt = "unable to include \"%s\" in parsed file %s";
+ }
+ if (error_fmt) {
+ ap_log_rerror(APLOG_MARK, loglevel,
+ 0, r, error_fmt, tag_val, r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr,
+ *inserted_head);
+ }
+
+ /* Do *not* destroy the subrequest here; it may have allocated
+ * variables in this r->subprocess_env in the subrequest's
+ * r->pool, so that pool must survive as long as this request.
+ * Yes, this is a memory leak. */
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unknown parameter \"%s\" to tag include in %s",
+ tag, r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
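+
+/* Typical usage (illustrative):
+ *   <!--#include virtual="/footer.shtml" -->   runs a URI subrequest
+ *   <!--#include file="notes.txt" -->          additionally restricted
+ *                                              by is_only_below()
+ */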
+
+
+static int handle_echo(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ request_rec *r, ap_filter_t *f, apr_bucket *head_ptr,
+ apr_bucket **inserted_head)
+{
+ char *tag = NULL;
+ char *tag_val = NULL;
+ const char *echo_text = NULL;
+ apr_bucket *tmp_buck;
+ apr_size_t e_len;
+ enum {E_NONE, E_URL, E_ENTITY} encode;
+
+ encode = E_ENTITY;
+
+ *inserted_head = NULL;
+ if (ctx->flags & FLAG_PRINTING) {
+ while (1) {
+ ap_ssi_get_tag_and_value(ctx, &tag, &tag_val, 1);
+ if (tag_val == NULL) {
+ if (tag != NULL) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+ }
+ if (!strcmp(tag, "var")) {
+ conn_rec *c = r->connection;
+ const char *val =
+ get_include_var(r, ctx,
+ ap_ssi_parse_string(r, ctx, tag_val, NULL,
+ MAX_STRING_LEN, 0));
+ if (val) {
+ switch(encode) {
+ case E_NONE:
+ echo_text = val;
+ break;
+ case E_URL:
+ echo_text = ap_escape_uri(r->pool, val);
+ break;
+ case E_ENTITY:
+ echo_text = ap_escape_html(r->pool, val);
+ break;
+ }
+
+ e_len = strlen(echo_text);
+ tmp_buck = apr_bucket_pool_create(echo_text, e_len,
+ r->pool, c->bucket_alloc);
+ }
+ else {
+ include_server_config *sconf=
+ ap_get_module_config(r->server->module_config,
+ &include_module);
+ tmp_buck = apr_bucket_pool_create(sconf->undefinedEcho,
+ sconf->undefinedEchoLen,
+ r->pool, c->bucket_alloc);
+ }
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp_buck);
+ if (*inserted_head == NULL) {
+ *inserted_head = tmp_buck;
+ }
+ }
+ else if (!strcmp(tag, "encoding")) {
+ if (!strcasecmp(tag_val, "none")) encode = E_NONE;
+ else if (!strcasecmp(tag_val, "url")) encode = E_URL;
+ else if (!strcasecmp(tag_val, "entity")) encode = E_ENTITY;
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unknown value \"%s\" to parameter \"encoding\" of "
+ "tag echo in %s", tag_val, r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr,
+ *inserted_head);
+ return 1;
+ }
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unknown parameter \"%s\" in tag echo of %s",
+ tag, r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ return 1;
+ }
+
+ }
+ }
+ return 0;
+}
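+
+/* Typical usage (illustrative):
+ *   <!--#echo var="DATE_LOCAL" -->                    entity-encoded (default)
+ *   <!--#echo encoding="url" var="DOCUMENT_URI" -->   URL-escaped
+ *   <!--#echo encoding="none" var="DOCUMENT_NAME" --> raw value
+ * Note that an encoding tag only affects the var tags that follow it.
+ */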
+
+/* ctx->error_str_override and ctx->time_str_override must point to buffers
+ * with room for at least MAX_STRING_LEN characters
+ */
+static int handle_config(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ request_rec *r, ap_filter_t *f, apr_bucket *head_ptr,
+ apr_bucket **inserted_head)
+{
+ char *tag = NULL;
+ char *tag_val = NULL;
+ char *parsed_string;
+ apr_table_t *env = r->subprocess_env;
+
+ *inserted_head = NULL;
+ if (ctx->flags & FLAG_PRINTING) {
+ while (1) {
+ ap_ssi_get_tag_and_value(ctx, &tag, &tag_val, 0);
+ if (tag_val == NULL) {
+ if (tag == NULL) {
+ return 0; /* Reached the end of the string. */
+ }
+ else {
+ return 1; /* tags must have values. */
+ }
+ }
+ if (!strcmp(tag, "errmsg")) {
+ if (ctx->error_str_override == NULL) {
+ ctx->error_str_override = (char *)apr_palloc(ctx->pool,
+ MAX_STRING_LEN);
+ ctx->error_str = ctx->error_str_override;
+ }
+ ap_ssi_parse_string(r, ctx, tag_val, ctx->error_str_override,
+ MAX_STRING_LEN, 0);
+ }
+ else if (!strcmp(tag, "timefmt")) {
+ apr_time_t date = r->request_time;
+ if (ctx->time_str_override == NULL) {
+ ctx->time_str_override = (char *)apr_palloc(ctx->pool,
+ MAX_STRING_LEN);
+ ctx->time_str = ctx->time_str_override;
+ }
+ ap_ssi_parse_string(r, ctx, tag_val, ctx->time_str_override,
+ MAX_STRING_LEN, 0);
+ apr_table_setn(env, "DATE_LOCAL", ap_ht_time(r->pool, date,
+ ctx->time_str, 0));
+ apr_table_setn(env, "DATE_GMT", ap_ht_time(r->pool, date,
+ ctx->time_str, 1));
+ apr_table_setn(env, "LAST_MODIFIED",
+ ap_ht_time(r->pool, r->finfo.mtime,
+ ctx->time_str, 0));
+ }
+ else if (!strcmp(tag, "sizefmt")) {
+ parsed_string = ap_ssi_parse_string(r, ctx, tag_val, NULL,
+ MAX_STRING_LEN, 0);
+ decodehtml(parsed_string);
+ if (!strcmp(parsed_string, "bytes")) {
+ ctx->flags |= FLAG_SIZE_IN_BYTES;
+ }
+ else if (!strcmp(parsed_string, "abbrev")) {
+ ctx->flags &= FLAG_SIZE_ABBREV;
+ }
+ }
+ else {
+ apr_bucket *tmp_buck;
+
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unknown parameter \"%s\" to tag config in %s",
+ tag, r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
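+
+/* Typical usage (illustrative):
+ *   <!--#config errmsg="[oops]" timefmt="%Y-%m-%d %H:%M" sizefmt="abbrev" -->
+ * errmsg replaces the SSI error string, timefmt re-renders
+ * DATE_LOCAL/DATE_GMT/LAST_MODIFIED, and sizefmt selects "bytes" or
+ * "abbrev" output for the fsize directive.
+ */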
+
+
+static int find_file(request_rec *r, const char *directive, const char *tag,
+ char *tag_val, apr_finfo_t *finfo)
+{
+ char *to_send = tag_val;
+ request_rec *rr = NULL;
+ int ret=0;
+ char *error_fmt = NULL;
+ apr_status_t rv = APR_SUCCESS;
+
+ if (!strcmp(tag, "file")) {
+ /* XXX: Port to apr_filepath_merge
+ * be safe; only files in this directory or below allowed
+ */
+ if (!is_only_below(tag_val)) {
+ error_fmt = "unable to access file \"%s\" "
+ "in parsed file %s";
+ }
+ else {
+ ap_getparents(tag_val); /* get rid of any nasties */
+
+ /* note: it is okay to pass NULL for the "next filter" since
+ we never attempt to "run" this sub request. */
+ rr = ap_sub_req_lookup_file(tag_val, r, NULL);
+
+ if (rr->status == HTTP_OK && rr->finfo.filetype != 0) {
+ to_send = rr->filename;
+ if ((rv = apr_stat(finfo, to_send,
+ APR_FINFO_GPROT | APR_FINFO_MIN, rr->pool)) != APR_SUCCESS
+ && rv != APR_INCOMPLETE) {
+ error_fmt = "unable to get information about \"%s\" "
+ "in parsed file %s";
+ }
+ }
+ else {
+ error_fmt = "unable to lookup information about \"%s\" "
+ "in parsed file %s";
+ }
+ }
+
+ if (error_fmt) {
+ ret = -1;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR,
+ rv, r, error_fmt, to_send, r->filename);
+ }
+
+ if (rr) ap_destroy_sub_req(rr);
+
+ return ret;
+ }
+ else if (!strcmp(tag, "virtual")) {
+ /* note: it is okay to pass NULL for the "next filter" since
+ we never attempt to "run" this sub request. */
+ rr = ap_sub_req_lookup_uri(tag_val, r, NULL);
+
+ if (rr->status == HTTP_OK && rr->finfo.filetype != 0) {
+ memcpy((char *) finfo, (const char *) &rr->finfo,
+ sizeof(rr->finfo));
+ ap_destroy_sub_req(rr);
+ return 0;
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unable to get information about \"%s\" "
+ "in parsed file %s",
+ tag_val, r->filename);
+ ap_destroy_sub_req(rr);
+ return -1;
+ }
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unknown parameter \"%s\" to tag %s in %s",
+ tag, directive, r->filename);
+ return -1;
+ }
+}
+
+static int handle_fsize(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ request_rec *r, ap_filter_t *f, apr_bucket *head_ptr,
+ apr_bucket **inserted_head)
+{
+ char *tag = NULL;
+ char *tag_val = NULL;
+ apr_finfo_t finfo;
+ apr_size_t s_len;
+ apr_bucket *tmp_buck;
+ char *parsed_string;
+
+ *inserted_head = NULL;
+ if (ctx->flags & FLAG_PRINTING) {
+ while (1) {
+ ap_ssi_get_tag_and_value(ctx, &tag, &tag_val, 1);
+ if (tag_val == NULL) {
+ if (tag == NULL) {
+ return 0;
+ }
+ else {
+ return 1;
+ }
+ }
+ else {
+ parsed_string = ap_ssi_parse_string(r, ctx, tag_val, NULL,
+ MAX_STRING_LEN, 0);
+ if (!find_file(r, "fsize", tag, parsed_string, &finfo)) {
+ /* XXX: if we *know* we're going to have to copy the
+ * thing off of the stack anyway, why not palloc buff
+ * instead of sticking it on the stack; then we can just
+ * use a pool bucket and skip the copy
+ */
+ char buff[50];
+
+ if (!(ctx->flags & FLAG_SIZE_IN_BYTES)) {
+ apr_strfsize(finfo.size, buff);
+ s_len = strlen (buff);
+ }
+ else {
+ int l, x, pos = 0;
+ char tmp_buff[50];
+
+ apr_snprintf(tmp_buff, sizeof(tmp_buff),
+ "%" APR_OFF_T_FMT, finfo.size);
+ l = strlen(tmp_buff); /* grrr */
+ for (x = 0; x < l; x++) {
+ if (x && (!((l - x) % 3))) {
+ buff[pos++] = ',';
+ }
+ buff[pos++] = tmp_buff[x];
+ }
+ buff[pos] = '\0';
+ s_len = pos;
+ }
+
+ tmp_buck = apr_bucket_heap_create(buff, s_len, NULL,
+ r->connection->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp_buck);
+ if (*inserted_head == NULL) {
+ *inserted_head = tmp_buck;
+ }
+ }
+ else {
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr,
+ *inserted_head);
+ return 1;
+ }
+ }
+ }
+ }
+ return 0;
+}
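+
+/* Illustrative output: for a 1234567 byte file,
+ *     <!--#fsize file="download.tar.gz" -->
+ * prints roughly "1.2M" under sizefmt="abbrev" (via apr_strfsize) and
+ * "1,234,567" under sizefmt="bytes" (comma-grouped digits, as built above).
+ */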
+
+static int handle_flastmod(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ request_rec *r, ap_filter_t *f,
+ apr_bucket *head_ptr, apr_bucket **inserted_head)
+{
+ char *tag = NULL;
+ char *tag_val = NULL;
+ apr_finfo_t finfo;
+ apr_size_t t_len;
+ apr_bucket *tmp_buck;
+ char *parsed_string;
+
+ *inserted_head = NULL;
+ if (ctx->flags & FLAG_PRINTING) {
+ while (1) {
+ ap_ssi_get_tag_and_value(ctx, &tag, &tag_val, 1);
+ if (tag_val == NULL) {
+ if (tag == NULL) {
+ return 0;
+ }
+ else {
+ return 1;
+ }
+ }
+ else {
+ parsed_string = ap_ssi_parse_string(r, ctx, tag_val, NULL,
+ MAX_STRING_LEN, 0);
+ if (!find_file(r, "flastmod", tag, parsed_string, &finfo)) {
+ char *t_val;
+
+ t_val = ap_ht_time(r->pool, finfo.mtime, ctx->time_str, 0);
+ t_len = strlen(t_val);
+
+ tmp_buck = apr_bucket_pool_create(t_val, t_len, r->pool,
+ r->connection->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp_buck);
+ if (*inserted_head == NULL) {
+ *inserted_head = tmp_buck;
+ }
+ }
+ else {
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr,
+ *inserted_head);
+ return 1;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static int re_check(request_rec *r, include_ctx_t *ctx,
+ char *string, char *rexp)
+{
+ regex_t *compiled;
+ const apr_size_t nres = sizeof(*ctx->re_result) / sizeof(regmatch_t);
+ int regex_error;
+
+ compiled = ap_pregcomp(r->pool, rexp, REG_EXTENDED | REG_NOSUB);
+ if (compiled == NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unable to compile pattern \"%s\"", rexp);
+ return -1;
+ }
+ if (!ctx->re_result) {
+ ctx->re_result = apr_pcalloc(r->pool, sizeof(*ctx->re_result));
+ }
+ ctx->re_string = string;
+ regex_error = ap_regexec(compiled, string, nres, *ctx->re_result, 0);
+ ap_pregfree(r->pool, compiled);
+ return (!regex_error);
+}
+
+enum token_type {
+ token_string, token_re,
+ token_and, token_or, token_not, token_eq, token_ne,
+ token_rbrace, token_lbrace, token_group,
+ token_ge, token_le, token_gt, token_lt
+};
+struct token {
+ enum token_type type;
+ char* value;
+};
+
+static const char *get_ptoken(request_rec *r, const char *string,
+ struct token *token, int *unmatched)
+{
+ char ch;
+ int next = 0;
+ char qs = 0;
+ int tkn_fnd = 0;
+
+ token->value = NULL;
+
+ /* Skip leading white space */
+ if (string == (char *) NULL) {
+ return (char *) NULL;
+ }
+ while ((ch = *string++)) {
+ if (!apr_isspace(ch)) {
+ break;
+ }
+ }
+ if (ch == '\0') {
+ return (char *) NULL;
+ }
+
+ token->type = token_string; /* the default type */
+ switch (ch) {
+ case '(':
+ token->type = token_lbrace;
+ return (string);
+ case ')':
+ token->type = token_rbrace;
+ return (string);
+ case '=':
+ token->type = token_eq;
+ return (string);
+ case '!':
+ if (*string == '=') {
+ token->type = token_ne;
+ return (string + 1);
+ }
+ else {
+ token->type = token_not;
+ return (string);
+ }
+ case '\'':
+ /* already token->type == token_string */
+ qs = '\'';
+ break;
+ case '/':
+ token->type = token_re;
+ qs = '/';
+ break;
+ case '|':
+ if (*string == '|') {
+ token->type = token_or;
+ return (string + 1);
+ }
+ break;
+ case '&':
+ if (*string == '&') {
+ token->type = token_and;
+ return (string + 1);
+ }
+ break;
+ case '>':
+ if (*string == '=') {
+ token->type = token_ge;
+ return (string + 1);
+ }
+ else {
+ token->type = token_gt;
+ return (string);
+ }
+ case '<':
+ if (*string == '=') {
+ token->type = token_le;
+ return (string + 1);
+ }
+ else {
+ token->type = token_lt;
+ return (string);
+ }
+ default:
+ /* already token->type == token_string */
+ break;
+ }
+ /* We should only be here if we are in a string */
+ token->value = apr_palloc(r->pool, strlen(string) + 2); /* 2 for ch plus
+ trailing null */
+ if (!qs) {
+ --string;
+ }
+
+ /*
+ * I used the ++string throughout this section so that string
+ * ends up pointing to the next token and I can just return it
+ */
+ for (ch = *string; ((ch != '\0') && (!tkn_fnd)); ch = *++string) {
+ if (ch == '\\') {
+ if ((ch = *++string) == '\0') {
+ tkn_fnd = 1;
+ }
+ else {
+ token->value[next++] = ch;
+ }
+ }
+ else {
+ if (!qs) {
+ if (apr_isspace(ch)) {
+ tkn_fnd = 1;
+ }
+ else {
+ switch (ch) {
+ case '(':
+ case ')':
+ case '=':
+ case '!':
+ case '<':
+ case '>':
+ tkn_fnd = 1;
+ break;
+ case '|':
+ if (*(string + 1) == '|') {
+ tkn_fnd = 1;
+ }
+ break;
+ case '&':
+ if (*(string + 1) == '&') {
+ tkn_fnd = 1;
+ }
+ break;
+ }
+ if (!tkn_fnd) {
+ token->value[next++] = ch;
+ }
+ }
+ }
+ else {
+ if (ch == qs) {
+ qs = 0;
+ tkn_fnd = 1;
+ string++;
+ }
+ else {
+ token->value[next++] = ch;
+ }
+ }
+ }
+ if (tkn_fnd) {
+ break;
+ }
+ }
+
+ /* If qs is still set, we have an unmatched quote */
+ if (qs) {
+ *unmatched = 1;
+ next = 0;
+ }
+ token->value[next] = '\0';
+
+ return (string);
+}
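+
+/* Example tokenization (illustrative): the expression
+ *     $a = /^x.*/ && (!$b || $c)
+ * is returned by successive get_ptoken() calls as
+ *     string("$a"), eq, re("^x.*"), and, lbrace, not, string("$b"),
+ *     or, string("$c"), rbrace
+ * Quoting with ' or / collects whitespace and operators into a single
+ * string/regex token, and a backslash escapes the following character.
+ */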
+
+
+/* there is an implicit assumption here that expr is at most MAX_STRING_LEN-1
+ * characters long...
+ */
+static int parse_expr(request_rec *r, include_ctx_t *ctx, const char *expr,
+ int *was_error, int *was_unmatched, char *debug)
+{
+ struct parse_node {
+ struct parse_node *left, *right, *parent;
+ struct token token;
+ int value, done;
+ } *root, *current, *new;
+ const char *parse;
+ char* buffer;
+ int retval = 0;
+ apr_size_t debug_pos = 0;
+
+ debug[debug_pos] = '\0';
+ *was_error = 0;
+ *was_unmatched = 0;
+ if ((parse = expr) == (char *) NULL) {
+ return (0);
+ }
+ root = current = (struct parse_node *) NULL;
+
+ /* Create Parse Tree */
+ while (1) {
+ new = (struct parse_node *) apr_palloc(r->pool,
+ sizeof(struct parse_node));
+ new->parent = new->left = new->right = (struct parse_node *) NULL;
+ new->done = 0;
+ if ((parse = get_ptoken(r, parse, &new->token, was_unmatched)) ==
+ (char *) NULL) {
+ break;
+ }
+ switch (new->token.type) {
+
+ case token_string:
+#ifdef DEBUG_INCLUDE
+ debug_pos += sprintf (&debug[debug_pos],
+ " Token: string (%s)\n",
+ new->token.value);
+#endif
+ if (current == (struct parse_node *) NULL) {
+ root = current = new;
+ break;
+ }
+ switch (current->token.type) {
+ case token_string:
+ current->token.value = apr_pstrcat(r->pool,
+ current->token.value,
+ current->token.value[0] ? " " : "",
+ new->token.value,
+ NULL);
+
+ break;
+ case token_eq:
+ case token_ne:
+ case token_and:
+ case token_or:
+ case token_lbrace:
+ case token_not:
+ case token_ge:
+ case token_gt:
+ case token_le:
+ case token_lt:
+ new->parent = current;
+ current = current->right = new;
+ break;
+ default:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid expression \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+ }
+ break;
+
+ case token_re:
+#ifdef DEBUG_INCLUDE
+ debug_pos += sprintf (&debug[debug_pos],
+ " Token: regex (%s)\n",
+ new->token.value);
+#endif
+ if (current == (struct parse_node *) NULL) {
+ root = current = new;
+ break;
+ }
+ switch (current->token.type) {
+ case token_eq:
+ case token_ne:
+ case token_and:
+ case token_or:
+ case token_lbrace:
+ case token_not:
+ new->parent = current;
+ current = current->right = new;
+ break;
+ default:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid expression \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+ }
+ break;
+
+ case token_and:
+ case token_or:
+#ifdef DEBUG_INCLUDE
+ memcpy (&debug[debug_pos], " Token: and/or\n",
+ sizeof (" Token: and/or\n"));
+ debug_pos += sizeof (" Token: and/or\n");
+#endif
+ if (current == (struct parse_node *) NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid expression \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+ }
+ /* Percolate upwards */
+ while (current != (struct parse_node *) NULL) {
+ switch (current->token.type) {
+ case token_string:
+ case token_re:
+ case token_group:
+ case token_not:
+ case token_eq:
+ case token_ne:
+ case token_and:
+ case token_or:
+ case token_ge:
+ case token_gt:
+ case token_le:
+ case token_lt:
+ current = current->parent;
+ continue;
+ case token_lbrace:
+ break;
+ default:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid expression \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+ }
+ break;
+ }
+ if (current == (struct parse_node *) NULL) {
+ new->left = root;
+ new->left->parent = new;
+ new->parent = (struct parse_node *) NULL;
+ root = new;
+ }
+ else {
+ new->left = current->right;
+ new->left->parent = new;
+ current->right = new;
+ new->parent = current;
+ }
+ current = new;
+ break;
+
+ case token_not:
+#ifdef DEBUG_INCLUDE
+ memcpy(&debug[debug_pos], " Token: not\n",
+ sizeof(" Token: not\n"));
+ debug_pos += sizeof(" Token: not\n");
+#endif
+ if (current == (struct parse_node *) NULL) {
+ root = current = new;
+ break;
+ }
+ /* Percolate upwards */
+ if (current != (struct parse_node *) NULL) {
+ switch (current->token.type) {
+ case token_not:
+ case token_eq:
+ case token_ne:
+ case token_and:
+ case token_or:
+ case token_lbrace:
+ case token_ge:
+ case token_gt:
+ case token_le:
+ case token_lt:
+ break;
+ default:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid expression \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+ }
+ }
+ if (current == (struct parse_node *) NULL) {
+ new->left = root;
+ new->left->parent = new;
+ new->parent = (struct parse_node *) NULL;
+ root = new;
+ }
+ else {
+ new->left = current->right;
+ current->right = new;
+ new->parent = current;
+ }
+ current = new;
+ break;
+
+ case token_eq:
+ case token_ne:
+ case token_ge:
+ case token_gt:
+ case token_le:
+ case token_lt:
+#ifdef DEBUG_INCLUDE
+ memcpy(&debug[debug_pos], " Token: eq/ne/ge/gt/le/lt\n",
+ sizeof(" Token: eq/ne/ge/gt/le/lt\n"));
+ debug_pos += sizeof(" Token: eq/ne/ge/gt/le/lt\n");
+#endif
+ if (current == (struct parse_node *) NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid expression \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+ }
+ /* Percolate upwards */
+ while (current != (struct parse_node *) NULL) {
+ switch (current->token.type) {
+ case token_string:
+ case token_re:
+ case token_group:
+ current = current->parent;
+ continue;
+ case token_lbrace:
+ case token_and:
+ case token_or:
+ break;
+ case token_not:
+ case token_eq:
+ case token_ne:
+ case token_ge:
+ case token_gt:
+ case token_le:
+ case token_lt:
+ default:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid expression \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+ }
+ break;
+ }
+ if (current == (struct parse_node *) NULL) {
+ new->left = root;
+ new->left->parent = new;
+ new->parent = (struct parse_node *) NULL;
+ root = new;
+ }
+ else {
+ new->left = current->right;
+ new->left->parent = new;
+ current->right = new;
+ new->parent = current;
+ }
+ current = new;
+ break;
+
+ case token_rbrace:
+#ifdef DEBUG_INCLUDE
+ memcpy (&debug[debug_pos], " Token: rbrace\n",
+ sizeof (" Token: rbrace\n"));
+ debug_pos += sizeof (" Token: rbrace\n");
+#endif
+ while (current != (struct parse_node *) NULL) {
+ if (current->token.type == token_lbrace) {
+ current->token.type = token_group;
+ break;
+ }
+ current = current->parent;
+ }
+ if (current == (struct parse_node *) NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Unmatched ')' in \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+ }
+ break;
+
+ case token_lbrace:
+#ifdef DEBUG_INCLUDE
+ memcpy (&debug[debug_pos], " Token: lbrace\n",
+ sizeof (" Token: lbrace\n"));
+ debug_pos += sizeof (" Token: lbrace\n");
+#endif
+ if (current == (struct parse_node *) NULL) {
+ root = current = new;
+ break;
+ }
+ /* Percolate upwards */
+ if (current != (struct parse_node *) NULL) {
+ switch (current->token.type) {
+ case token_not:
+ case token_eq:
+ case token_ne:
+ case token_and:
+ case token_or:
+ case token_lbrace:
+ case token_ge:
+ case token_gt:
+ case token_le:
+ case token_lt:
+ break;
+ case token_string:
+ case token_re:
+ case token_group:
+ default:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid expression \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+ }
+ }
+ if (current == (struct parse_node *) NULL) {
+ new->left = root;
+ new->left->parent = new;
+ new->parent = (struct parse_node *) NULL;
+ root = new;
+ }
+ else {
+ new->left = current->right;
+ current->right = new;
+ new->parent = current;
+ }
+ current = new;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Evaluate Parse Tree */
+ current = root;
+ while (current != (struct parse_node *) NULL) {
+ switch (current->token.type) {
+ case token_string:
+#ifdef DEBUG_INCLUDE
+ memcpy (&debug[debug_pos], " Evaluate string\n",
+ sizeof (" Evaluate string\n"));
+ debug_pos += sizeof (" Evaluate string\n");
+#endif
+ buffer = ap_ssi_parse_string(r, ctx, current->token.value, NULL,
+ MAX_STRING_LEN, 0);
+ current->token.value = buffer;
+ current->value = (current->token.value[0] != '\0');
+ current->done = 1;
+ current = current->parent;
+ break;
+
+ case token_re:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "No operator before regex of expr \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+
+ case token_and:
+ case token_or:
+#ifdef DEBUG_INCLUDE
+ memcpy(&debug[debug_pos], " Evaluate and/or\n",
+ sizeof(" Evaluate and/or\n"));
+ debug_pos += sizeof(" Evaluate and/or\n");
+#endif
+ if (current->left == (struct parse_node *) NULL ||
+ current->right == (struct parse_node *) NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid expression \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+ }
+ if (!current->left->done) {
+ switch (current->left->token.type) {
+ case token_string:
+ buffer = ap_ssi_parse_string(r, ctx, current->left->token.value,
+ NULL, MAX_STRING_LEN, 0);
+ current->left->token.value = buffer;
+ current->left->value =
+ (current->left->token.value[0] != '\0');
+ current->left->done = 1;
+ break;
+ default:
+ current = current->left;
+ continue;
+ }
+ }
+ if (!current->right->done) {
+ switch (current->right->token.type) {
+ case token_string:
+ buffer = ap_ssi_parse_string(r, ctx, current->right->token.value,
+ NULL, MAX_STRING_LEN, 0);
+ current->right->token.value = buffer;
+ current->right->value =
+ (current->right->token.value[0] != '\0');
+ current->right->done = 1;
+ break;
+ default:
+ current = current->right;
+ continue;
+ }
+ }
+#ifdef DEBUG_INCLUDE
+ debug_pos += sprintf (&debug[debug_pos], " Left: %c\n",
+ current->left->value ? '1' : '0');
+ debug_pos += sprintf (&debug[debug_pos], " Right: %c\n",
+ current->right->value ? '1' : '0');
+#endif
+ if (current->token.type == token_and) {
+ current->value = current->left->value && current->right->value;
+ }
+ else {
+ current->value = current->left->value || current->right->value;
+ }
+#ifdef DEBUG_INCLUDE
+ debug_pos += sprintf (&debug[debug_pos], " Returning %c\n",
+ current->value ? '1' : '0');
+#endif
+ current->done = 1;
+ current = current->parent;
+ break;
+
+ case token_eq:
+ case token_ne:
+#ifdef DEBUG_INCLUDE
+ memcpy (&debug[debug_pos], " Evaluate eq/ne\n",
+ sizeof (" Evaluate eq/ne\n"));
+ debug_pos += sizeof (" Evaluate eq/ne\n");
+#endif
+ if ((current->left == (struct parse_node *) NULL) ||
+ (current->right == (struct parse_node *) NULL) ||
+ (current->left->token.type != token_string) ||
+ ((current->right->token.type != token_string) &&
+ (current->right->token.type != token_re))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid expression \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+ }
+ buffer = ap_ssi_parse_string(r, ctx, current->left->token.value,
+ NULL, MAX_STRING_LEN, 0);
+ current->left->token.value = buffer;
+ buffer = ap_ssi_parse_string(r, ctx, current->right->token.value,
+ NULL, MAX_STRING_LEN, 0);
+ current->right->token.value = buffer;
+ if (current->right->token.type == token_re) {
+#ifdef DEBUG_INCLUDE
+ debug_pos += sprintf (&debug[debug_pos],
+ " Re Compare (%s) with /%s/\n",
+ current->left->token.value,
+ current->right->token.value);
+#endif
+ current->value =
+ re_check(r, ctx, current->left->token.value,
+ current->right->token.value);
+ }
+ else {
+#ifdef DEBUG_INCLUDE
+ debug_pos += sprintf (&debug[debug_pos],
+ " Compare (%s) with (%s)\n",
+ current->left->token.value,
+ current->right->token.value);
+#endif
+ current->value =
+ (strcmp(current->left->token.value,
+ current->right->token.value) == 0);
+ }
+ if (current->token.type == token_ne) {
+ current->value = !current->value;
+ }
+#ifdef DEBUG_INCLUDE
+ debug_pos += sprintf (&debug[debug_pos], " Returning %c\n",
+ current->value ? '1' : '0');
+#endif
+ current->done = 1;
+ current = current->parent;
+ break;
+ case token_ge:
+ case token_gt:
+ case token_le:
+ case token_lt:
+#ifdef DEBUG_INCLUDE
+ memcpy (&debug[debug_pos], " Evaluate ge/gt/le/lt\n",
+ sizeof (" Evaluate ge/gt/le/lt\n"));
+ debug_pos += sizeof (" Evaluate ge/gt/le/lt\n");
+#endif
+ if ((current->left == (struct parse_node *) NULL) ||
+ (current->right == (struct parse_node *) NULL) ||
+ (current->left->token.type != token_string) ||
+ (current->right->token.type != token_string)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid expression \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+ }
+ buffer = ap_ssi_parse_string(r, ctx, current->left->token.value,
+ NULL, MAX_STRING_LEN, 0);
+ current->left->token.value = buffer;
+ buffer = ap_ssi_parse_string(r, ctx, current->right->token.value,
+ NULL, MAX_STRING_LEN, 0);
+ current->right->token.value = buffer;
+#ifdef DEBUG_INCLUDE
+ debug_pos += sprintf (&debug[debug_pos],
+ " Compare (%s) with (%s)\n",
+ current->left->token.value,
+ current->right->token.value);
+#endif
+ current->value =
+ strcmp(current->left->token.value,
+ current->right->token.value);
+ if (current->token.type == token_ge) {
+ current->value = current->value >= 0;
+ }
+ else if (current->token.type == token_gt) {
+ current->value = current->value > 0;
+ }
+ else if (current->token.type == token_le) {
+ current->value = current->value <= 0;
+ }
+ else if (current->token.type == token_lt) {
+ current->value = current->value < 0;
+ }
+ else {
+ current->value = 0; /* Don't return -1 if unknown token */
+ }
+#ifdef DEBUG_INCLUDE
+ debug_pos += sprintf (&debug[debug_pos], " Returning %c\n",
+ current->value ? '1' : '0');
+#endif
+ current->done = 1;
+ current = current->parent;
+ break;
+
+ case token_not:
+ if (current->right != (struct parse_node *) NULL) {
+ if (!current->right->done) {
+ current = current->right;
+ continue;
+ }
+ current->value = !current->right->value;
+ }
+ else {
+ current->value = 0;
+ }
+#ifdef DEBUG_INCLUDE
+ debug_pos += sprintf (&debug[debug_pos], " Evaluate !: %c\n",
+ current->value ? '1' : '0');
+#endif
+ current->done = 1;
+ current = current->parent;
+ break;
+
+ case token_group:
+ if (current->right != (struct parse_node *) NULL) {
+ if (!current->right->done) {
+ current = current->right;
+ continue;
+ }
+ current->value = current->right->value;
+ }
+ else {
+ current->value = 1;
+ }
+#ifdef DEBUG_INCLUDE
+ debug_pos += sprintf (&debug[debug_pos], " Evaluate (): %c\n",
+ current->value ? '1' : '0');
+#endif
+ current->done = 1;
+ current = current->parent;
+ break;
+
+ case token_lbrace:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Unmatched '(' in \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+
+ case token_rbrace:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Unmatched ')' in \"%s\" in file %s",
+ expr, r->filename);
+ *was_error = 1;
+ return retval;
+
+ default:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "bad token type");
+ *was_error = 1;
+ return retval;
+ }
+ }
+
+ retval = (root == (struct parse_node *) NULL) ? 0 : root->value;
+ return (retval);
+}
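+
+/* Example evaluation (illustrative): with QUERY_STRING set to "id=42",
+ * the expression
+ *     ${QUERY_STRING} = /^id=/ && $admin
+ * builds a tree with '&&' at the root, the regex comparison as its left
+ * child (true here) and the bare string "$admin" as its right child,
+ * which is true only if the (hypothetical) variable "admin" expands to a
+ * non-empty string.
+ */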
+
+/*-------------------------------------------------------------------------*/
+#ifdef DEBUG_INCLUDE
+
+#define MAX_DEBUG_SIZE MAX_STRING_LEN
+#define LOG_COND_STATUS(cntx, t_buck, h_ptr, ins_head, tag_text) \
+{ \
+ char cond_txt[] = "**** X conditional_status=\"0\"\n"; \
+ \
+ if (cntx->flags & FLAG_COND_TRUE) { \
+ cond_txt[31] = '1'; \
+ } \
+ memcpy(&cond_txt[5], tag_text, sizeof(tag_text)-1); \
+ t_buck = apr_bucket_heap_create(cond_txt, sizeof(cond_txt)-1, \
+ NULL, h_ptr->list); \
+ APR_BUCKET_INSERT_BEFORE(h_ptr, t_buck); \
+ \
+ if (ins_head == NULL) { \
+ ins_head = t_buck; \
+ } \
+}
+#define DUMP_PARSE_EXPR_DEBUG(t_buck, h_ptr, d_buf, ins_head) \
+{ \
+ if (d_buf[0] != '\0') { \
+ t_buck = apr_bucket_heap_create(d_buf, strlen(d_buf), \
+ NULL, h_ptr->list); \
+ APR_BUCKET_INSERT_BEFORE(h_ptr, t_buck); \
+ \
+ if (ins_head == NULL) { \
+ ins_head = t_buck; \
+ } \
+ } \
+}
+#else
+
+#define MAX_DEBUG_SIZE 10
+#define LOG_COND_STATUS(cntx, t_buck, h_ptr, ins_head, tag_text)
+#define DUMP_PARSE_EXPR_DEBUG(t_buck, h_ptr, d_buf, ins_head)
+
+#endif
+/*-------------------------------------------------------------------------*/
+
+/* pjr - These seem to allow expr="fred" expr="joe" where joe overwrites fred. */
+static int handle_if(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ request_rec *r, ap_filter_t *f, apr_bucket *head_ptr,
+ apr_bucket **inserted_head)
+{
+ char *tag = NULL;
+ char *tag_val = NULL;
+ char *expr = NULL;
+ int expr_ret, was_error, was_unmatched;
+ apr_bucket *tmp_buck;
+ char debug_buf[MAX_DEBUG_SIZE];
+
+ *inserted_head = NULL;
+ if (!(ctx->flags & FLAG_PRINTING)) {
+ ctx->if_nesting_level++;
+ }
+ else {
+ while (1) {
+ ap_ssi_get_tag_and_value(ctx, &tag, &tag_val, 0);
+ if (tag == NULL) {
+ if (expr == NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "missing expr in if statement: %s",
+ r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr,
+ *inserted_head);
+ return 1;
+ }
+ expr_ret = parse_expr(r, ctx, expr, &was_error,
+ &was_unmatched, debug_buf);
+ if (was_error) {
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr,
+ *inserted_head);
+ return 1;
+ }
+ if (was_unmatched) {
+ DUMP_PARSE_EXPR_DEBUG(tmp_buck, head_ptr,
+ "\nUnmatched '\n", *inserted_head);
+ }
+ DUMP_PARSE_EXPR_DEBUG(tmp_buck, head_ptr, debug_buf,
+ *inserted_head);
+
+ if (expr_ret) {
+ ctx->flags |= (FLAG_PRINTING | FLAG_COND_TRUE);
+ }
+ else {
+ ctx->flags &= FLAG_CLEAR_PRINT_COND;
+ }
+ LOG_COND_STATUS(ctx, tmp_buck, head_ptr, *inserted_head,
+ " if");
+ ctx->if_nesting_level = 0;
+ return 0;
+ }
+ else if (!strcmp(tag, "expr")) {
+ expr = tag_val;
+#ifdef DEBUG_INCLUDE
+ if (1) {
+ apr_size_t d_len = 0;
+ d_len = sprintf(debug_buf, "**** if expr=\"%s\"\n", expr);
+ tmp_buck = apr_bucket_heap_create(debug_buf, d_len, NULL,
+ r->connection->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp_buck);
+
+ if (*inserted_head == NULL) {
+ *inserted_head = tmp_buck;
+ }
+ }
+#endif
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unknown parameter \"%s\" to tag if in %s", tag,
+ r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ return 1;
+ }
+
+ }
+ }
+ return 0;
+}
+
+static int handle_elif(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ request_rec *r, ap_filter_t *f, apr_bucket *head_ptr,
+ apr_bucket **inserted_head)
+{
+ char *tag = NULL;
+ char *tag_val = NULL;
+ char *expr = NULL;
+ int expr_ret, was_error, was_unmatched;
+ apr_bucket *tmp_buck;
+ char debug_buf[MAX_DEBUG_SIZE];
+
+ *inserted_head = NULL;
+ if (!ctx->if_nesting_level) {
+ while (1) {
+ ap_ssi_get_tag_and_value(ctx, &tag, &tag_val, 0);
+ if (tag == NULL) {
+ LOG_COND_STATUS(ctx, tmp_buck, head_ptr, *inserted_head,
+ " elif");
+
+ if (ctx->flags & FLAG_COND_TRUE) {
+ ctx->flags &= FLAG_CLEAR_PRINTING;
+ return (0);
+ }
+ if (expr == NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "missing expr in elif statement: %s",
+ r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr,
+ *inserted_head);
+ return 1;
+ }
+ expr_ret = parse_expr(r, ctx, expr, &was_error,
+ &was_unmatched, debug_buf);
+ if (was_error) {
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr,
+ *inserted_head);
+ return 1;
+ }
+ if (was_unmatched) {
+ DUMP_PARSE_EXPR_DEBUG(tmp_buck, head_ptr,
+ "\nUnmatched '\n", *inserted_head);
+ }
+ DUMP_PARSE_EXPR_DEBUG(tmp_buck, head_ptr, debug_buf,
+ *inserted_head);
+
+ if (expr_ret) {
+ ctx->flags |= (FLAG_PRINTING | FLAG_COND_TRUE);
+ }
+ else {
+ ctx->flags &= FLAG_CLEAR_PRINT_COND;
+ }
+ LOG_COND_STATUS(ctx, tmp_buck, head_ptr, *inserted_head,
+ " elif");
+ return (0);
+ }
+ else if (!strcmp(tag, "expr")) {
+ expr = tag_val;
+#ifdef DEBUG_INCLUDE
+ if (1) {
+ apr_size_t d_len = 0;
+ d_len = sprintf(debug_buf, "**** elif expr=\"%s\"\n", expr);
+ tmp_buck = apr_bucket_heap_create(debug_buf, d_len, NULL,
+ r->connection->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp_buck);
+
+ if (*inserted_head == NULL) {
+ *inserted_head = tmp_buck;
+ }
+ }
+#endif
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unknown parameter \"%s\" to tag if in %s", tag,
+ r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static int handle_else(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ request_rec *r, ap_filter_t *f, apr_bucket *head_ptr,
+ apr_bucket **inserted_head)
+{
+ char *tag = NULL;
+ char *tag_val = NULL;
+ apr_bucket *tmp_buck;
+
+ *inserted_head = NULL;
+ if (!ctx->if_nesting_level) {
+ ap_ssi_get_tag_and_value(ctx, &tag, &tag_val, 1);
+ if ((tag != NULL) || (tag_val != NULL)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "else directive does not take tags in %s", r->filename);
+ if (ctx->flags & FLAG_PRINTING) {
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ }
+ return -1;
+ }
+ else {
+ LOG_COND_STATUS(ctx, tmp_buck, head_ptr, *inserted_head, " else");
+
+ if (ctx->flags & FLAG_COND_TRUE) {
+ ctx->flags &= FLAG_CLEAR_PRINTING;
+ }
+ else {
+ ctx->flags |= (FLAG_PRINTING | FLAG_COND_TRUE);
+ }
+ return 0;
+ }
+ }
+ return 0;
+}
+
+static int handle_endif(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ request_rec *r, ap_filter_t *f, apr_bucket *head_ptr,
+ apr_bucket **inserted_head)
+{
+ char *tag = NULL;
+ char *tag_val = NULL;
+ apr_bucket *tmp_buck;
+
+ *inserted_head = NULL;
+ if (!ctx->if_nesting_level) {
+ ap_ssi_get_tag_and_value(ctx, &tag, &tag_val, 1);
+ if ((tag != NULL) || (tag_val != NULL)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "endif directive does not take tags in %s", r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ return -1;
+ }
+ else {
+ LOG_COND_STATUS(ctx, tmp_buck, head_ptr, *inserted_head, "endif");
+ ctx->flags |= (FLAG_PRINTING | FLAG_COND_TRUE);
+ return 0;
+ }
+ }
+ else {
+ ctx->if_nesting_level--;
+ return 0;
+ }
+}
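+
+/* The four handlers above implement the usual SSI conditional block;
+ * e.g. (with A being some previously #set variable):
+ *   <!--#if expr="$A = 1" -->   one
+ *   <!--#elif expr="$A = 2" --> two
+ *   <!--#else -->               something else
+ *   <!--#endif -->
+ * FLAG_COND_TRUE records that an earlier branch already matched, and
+ * if_nesting_level suppresses branches nested inside a false block.
+ */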
+
+static int handle_set(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ request_rec *r, ap_filter_t *f, apr_bucket *head_ptr,
+ apr_bucket **inserted_head)
+{
+ char *tag = NULL;
+ char *tag_val = NULL;
+ char *var = NULL;
+ apr_bucket *tmp_buck;
+ char *parsed_string;
+ request_rec *sub = r->main;
+ apr_pool_t *p = r->pool;
+
+ /* we need to use the topmost ('main') request's pool here, since
+ * variables set in r->subprocess_env must live as long as the main request
+ */
+ while (sub) {
+ p = sub->pool;
+ sub = sub->main;
+ }
+
+ *inserted_head = NULL;
+ if (ctx->flags & FLAG_PRINTING) {
+ while (1) {
+ ap_ssi_get_tag_and_value(ctx, &tag, &tag_val, 1);
+ if ((tag == NULL) && (tag_val == NULL)) {
+ return 0;
+ }
+ else if (tag_val == NULL) {
+ return 1;
+ }
+ else if (!strcmp(tag, "var")) {
+ var = ap_ssi_parse_string(r, ctx, tag_val, NULL,
+ MAX_STRING_LEN, 0);
+ }
+ else if (!strcmp(tag, "value")) {
+ if (var == (char *) NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "variable must precede value in set directive in %s",
+ r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr,
+ *inserted_head);
+ return (-1);
+ }
+ parsed_string = ap_ssi_parse_string(r, ctx, tag_val, NULL,
+ MAX_STRING_LEN, 0);
+ apr_table_setn(r->subprocess_env, apr_pstrdup(p, var),
+ apr_pstrdup(p, parsed_string));
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid tag for set directive in %s", r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
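+
+/* Typical usage (illustrative):
+ *     <!--#set var="category" value="help" -->
+ * after which ${category} expands to "help" in later directives; both the
+ * var and value tags are run through ap_ssi_parse_string() first, so
+ *     <!--#set var="copy" value="${DOCUMENT_URI}" -->
+ * works as well.
+ */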
+
+static int handle_printenv(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ request_rec *r, ap_filter_t *f,
+ apr_bucket *head_ptr, apr_bucket **inserted_head)
+{
+ char *tag = NULL;
+ char *tag_val = NULL;
+ apr_bucket *tmp_buck;
+
+ if (ctx->flags & FLAG_PRINTING) {
+ ap_ssi_get_tag_and_value(ctx, &tag, &tag_val, 1);
+ if ((tag == NULL) && (tag_val == NULL)) {
+ const apr_array_header_t *arr = apr_table_elts(r->subprocess_env);
+ const apr_table_entry_t *elts = (const apr_table_entry_t *)arr->elts;
+ int i;
+ const char *key_text, *val_text;
+ char *key_val, *next;
+ apr_size_t k_len, v_len, kv_length;
+
+ *inserted_head = NULL;
+ for (i = 0; i < arr->nelts; ++i) {
+ key_text = ap_escape_html(r->pool, elts[i].key);
+ val_text = elts[i].val;
+ if (val_text == LAZY_VALUE) {
+ val_text = add_include_vars_lazy(r, elts[i].key);
+ }
+ val_text = ap_escape_html(r->pool, val_text);
+ k_len = strlen(key_text);
+ v_len = strlen(val_text);
+ kv_length = k_len + v_len + sizeof("=\n");
+ key_val = apr_palloc(r->pool, kv_length);
+ next = key_val;
+ memcpy(next, key_text, k_len);
+ next += k_len;
+ *next++ = '=';
+ memcpy(next, val_text, v_len);
+ next += v_len;
+ *next++ = '\n';
+ *next = 0;
+ tmp_buck = apr_bucket_pool_create(key_val, kv_length - 1,
+ r->pool,
+ r->connection->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp_buck);
+ if (*inserted_head == NULL) {
+ *inserted_head = tmp_buck;
+ }
+ }
+ return 0;
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "printenv directive does not take tags in %s",
+ r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ return -1;
+ }
+ }
+ return 0;
+}
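+
+/* <!--#printenv --> emits one HTML-escaped "NAME=value\n" line per
+ * subprocess_env entry, e.g. (illustrative):
+ *     DOCUMENT_URI=/index.shtml
+ *     DATE_LOCAL=Saturday, 01-Jan-2005 12:00:00 EST
+ */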
+
+/* -------------------------- The main function --------------------------- */
+
+/*
+ * returns the index of the first byte of start_seq (or the length of the
+ * buffer if there is no match)
+ */
+static apr_size_t find_start_sequence(ssi_ctx_t *ctx, const char *data,
+ apr_size_t len)
+{
+ apr_size_t slen = ctx->ctx->start_seq_len;
+ apr_size_t index;
+ const char *p, *ep;
+
+ if (len < slen) {
+ p = data; /* try partial match at the end of the buffer (below) */
+ }
+ else {
+ /* try fast bndm search over the buffer
+ * (hopefully the whole start sequence can be found in this buffer)
+ */
+ index = bndm(ctx->ctx->start_seq, ctx->ctx->start_seq_len, data, len,
+ ctx->ctx->start_seq_pat);
+
+ /* wow, found it. ready. */
+ if (index < len) {
+ ctx->state = PARSE_DIRECTIVE;
+ return index;
+ }
+ else {
+ /* ok, the pattern can't be found as whole in the buffer,
+ * check the end for a partial match
+ */
+ p = data + len - slen + 1;
+ }
+ }
+
+ ep = data + len;
+ do {
+ while (p < ep && *p != *ctx->ctx->start_seq) {
+ ++p;
+ }
+
+ index = p - data;
+
+ /* found a possible start_seq start */
+ if (p < ep) {
+ apr_size_t pos = 1;
+
+ ++p;
+ while (p < ep && *p == ctx->ctx->start_seq[pos]) {
+ ++p;
+ ++pos;
+ }
+
+ /* partial match found. Store the info for the next round */
+ if (p == ep) {
+ ctx->state = PARSE_HEAD;
+ ctx->ctx->parse_pos = pos;
+ return index;
+ }
+ }
+
+ /* we must try all combinations; consider (e.g.) SSIStartTag "--->"
+ * and input data of "--.-" at the end of the buffer
+ */
+ p = data + index + 1;
+ } while (p < ep);
+
+ /* no match */
+ return len;
+}
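+
+/* Example (illustrative), assuming the default start sequence "<!--#":
+ * if the buffer ends in "...text<!-" there is no full match, so the loop
+ * above records a partial one: the state becomes PARSE_HEAD, parse_pos is
+ * set to 3, and the returned index points at the '<' so that the text
+ * before it can be flushed.
+ */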
+
+/*
+ * returns the first byte *after* the partial (or final) match.
+ *
+ * If only part of the start_seq was matched at the end of the previous
+ * buffer, 'release' is set to the number of start_seq chars which turned
+ * out not to be part of a full tag and may have to be passed down the
+ * filter chain.
+ */
+static apr_size_t find_partial_start_sequence(ssi_ctx_t *ctx,
+ const char *data,
+ apr_size_t len,
+ apr_size_t *release)
+{
+ apr_size_t pos, spos = 0;
+ apr_size_t slen = ctx->ctx->start_seq_len;
+ const char *p, *ep;
+
+ pos = ctx->ctx->parse_pos;
+ ep = data + len;
+ *release = 0;
+
+ do {
+ p = data;
+
+ while (p < ep && pos < slen && *p == ctx->ctx->start_seq[pos]) {
+ ++p;
+ ++pos;
+ }
+
+ /* full match */
+ if (pos == slen) {
+ ctx->state = PARSE_DIRECTIVE;
+ return (p - data);
+ }
+
+ /* the whole buffer is a partial match */
+ if (p == ep) {
+ ctx->ctx->parse_pos = pos;
+ return (p - data);
+ }
+
+ /* No match so far, but again:
+ * We must try all combinations, since the start_seq is an arbitrary,
+ * user-supplied string
+ *
+ * So: check whether the first char of start_seq appears somewhere within
+ * the current partial match. If it does, try to start a match that
+ * begins with this offset. (This can happen if an unusual
+ * start_seq like "---->" spans buffers)
+ */
+ if (spos < ctx->ctx->parse_pos) {
+ do {
+ ++spos;
+ ++*release;
+ p = ctx->ctx->start_seq + spos;
+ pos = ctx->ctx->parse_pos - spos;
+
+ while (pos && *p != *ctx->ctx->start_seq) {
+ ++p;
+ ++spos;
+ ++*release;
+ --pos;
+ }
+
+ /* if a matching beginning char was found, try to match the
+ * remainder of the old buffer.
+ */
+ if (pos > 1) {
+ apr_size_t t = 1;
+
+ ++p;
+ while (t < pos && *p == ctx->ctx->start_seq[t]) {
+ ++p;
+ ++t;
+ }
+
+ if (t == pos) {
+ /* yeah, another partial match found in the *old*
+ * buffer, now test the *current* buffer for
+ * continuing match
+ */
+ break;
+ }
+ }
+ } while (pos > 1);
+
+ if (pos) {
+ continue;
+ }
+ }
+
+ break;
+ } while (1); /* work hard to find a match ;-) */
+
+ /* no match at all, release all (wrongly) matched chars so far */
+ *release = ctx->ctx->parse_pos;
+ ctx->state = PARSE_PRE_HEAD;
+ return 0;
+}
+
+/*
+ * returns the position after the directive
+ */
+static apr_size_t find_directive(ssi_ctx_t *ctx, const char *data,
+ apr_size_t len, char ***store,
+ apr_size_t **store_len)
+{
+ const char *p = data;
+ const char *ep = data + len;
+ apr_size_t pos;
+
+ switch (ctx->state) {
+ case PARSE_DIRECTIVE:
+ while (p < ep && !apr_isspace(*p)) {
+ /* we have to consider the case of missing space between directive
+ * and end_seq (be somewhat lenient), e.g. <!--#printenv-->
+ */
+ if (*p == *ctx->ctx->end_seq) {
+ ctx->state = PARSE_DIRECTIVE_TAIL;
+ ctx->ctx->parse_pos = 1;
+ ++p;
+ return (p - data);
+ }
+ ++p;
+ }
+
+ if (p < ep) { /* found delimiter whitespace */
+ ctx->state = PARSE_DIRECTIVE_POSTNAME;
+ *store = &ctx->directive;
+ *store_len = &ctx->ctx->directive_length;
+ }
+
+ break;
+
+ case PARSE_DIRECTIVE_TAIL:
+ pos = ctx->ctx->parse_pos;
+
+ while (p < ep && pos < ctx->end_seq_len &&
+ *p == ctx->ctx->end_seq[pos]) {
+ ++p;
+ ++pos;
+ }
+
+ /* full match, we're done */
+ if (pos == ctx->end_seq_len) {
+ ctx->state = PARSE_DIRECTIVE_POSTTAIL;
+ *store = &ctx->directive;
+ *store_len = &ctx->ctx->directive_length;
+ break;
+ }
+
+ /* partial match, the buffer is too small to match fully */
+ if (p == ep) {
+ ctx->ctx->parse_pos = pos;
+ break;
+ }
+
+ /* no match. continue normal parsing */
+ ctx->state = PARSE_DIRECTIVE;
+ return 0;
+
+ case PARSE_DIRECTIVE_POSTTAIL:
+ ctx->state = PARSE_EXECUTE;
+ ctx->ctx->directive_length -= ctx->end_seq_len;
+ /* continue immediately with the next state */
+
+ case PARSE_DIRECTIVE_POSTNAME:
+ if (PARSE_DIRECTIVE_POSTNAME == ctx->state) {
+ ctx->state = PARSE_PRE_ARG;
+ }
+ ctx->argc = 0;
+ ctx->argv = NULL;
+
+ if (!ctx->ctx->directive_length) {
+ ctx->error = 1;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, "missing directive "
+ "name in parsed document %s", ctx->r->filename);
+ }
+ else {
+ char *sp = ctx->directive;
+ char *sep = ctx->directive + ctx->ctx->directive_length;
+
+ /* normalize directive name */
+ for (; sp < sep; ++sp) {
+ *sp = apr_tolower(*sp);
+ }
+ }
+
+ return 0;
+
+ default:
+ /* get rid of a gcc warning about unhandled enumerations */
+ break;
+ }
+
+ return (p - data);
+}
+
+/*
+ * find out whether the next token is (a possible) end_seq or an argument
+ */
+static apr_size_t find_arg_or_tail(ssi_ctx_t *ctx, const char *data,
+ apr_size_t len)
+{
+ const char *p = data;
+ const char *ep = data + len;
+
+ /* skip leading WS */
+ while (p < ep && apr_isspace(*p)) {
+ ++p;
+ }
+
+ /* buffer doesn't consist of whitespaces only */
+ if (p < ep) {
+ ctx->state = (*p == *ctx->ctx->end_seq) ? PARSE_TAIL : PARSE_ARG;
+ }
+
+ return (p - data);
+}
+
+/*
+ * test the stream for end_seq. If it doesn't match at all, it must be an
+ * argument
+ */
+static apr_size_t find_tail(ssi_ctx_t *ctx, const char *data,
+ apr_size_t len)
+{
+ const char *p = data;
+ const char *ep = data + len;
+ apr_size_t pos = ctx->ctx->parse_pos;
+
+ if (PARSE_TAIL == ctx->state) {
+ ctx->state = PARSE_TAIL_SEQ;
+ pos = ctx->ctx->parse_pos = 0;
+ }
+
+ while (p < ep && pos < ctx->end_seq_len && *p == ctx->ctx->end_seq[pos]) {
+ ++p;
+ ++pos;
+ }
+
+ /* bingo, full match */
+ if (pos == ctx->end_seq_len) {
+ ctx->state = PARSE_EXECUTE;
+ return (p - data);
+ }
+
+ /* partial match, the buffer is too small to match fully */
+ if (p == ep) {
+ ctx->ctx->parse_pos = pos;
+ return (p - data);
+ }
+
+ /* no match. It must be an argument string then */
+ ctx->state = PARSE_ARG;
+ return 0;
+}
+
+/*
+ * extract name=value from the buffer
+ * A pcre-pattern could look (similar to):
+ * name\s*(?:=\s*(["'`]?)value\1(?>\s*))?
+ */
+static apr_size_t find_argument(ssi_ctx_t *ctx, const char *data,
+ apr_size_t len, char ***store,
+ apr_size_t **store_len)
+{
+ const char *p = data;
+ const char *ep = data + len;
+
+ switch (ctx->state) {
+ case PARSE_ARG:
+ /*
+ * create argument structure and append it to the current list
+ */
+ ctx->current_arg = apr_palloc(ctx->dpool,
+ sizeof(*ctx->current_arg));
+ ctx->current_arg->next = NULL;
+
+ ++(ctx->argc);
+ if (!ctx->argv) {
+ ctx->argv = ctx->current_arg;
+ }
+ else {
+ ssi_arg_item_t *newarg = ctx->argv;
+
+ while (newarg->next) {
+ newarg = newarg->next;
+ }
+ newarg->next = ctx->current_arg;
+ }
+
+ /* check whether it's a valid one. If it begins with a quote, we
+ * can safely assume someone forgot the name of the argument
+ */
+ switch (*p) {
+ case '"': case '\'': case '`':
+ *store = NULL;
+
+ ctx->state = PARSE_ARG_VAL;
+ ctx->quote = *p++;
+ ctx->current_arg->name = NULL;
+ ctx->current_arg->name_len = 0;
+ ctx->error = 1;
+
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, "missing argument "
+ "name for value to tag %s in %s",
+ apr_pstrmemdup(ctx->r->pool, ctx->directive,
+ ctx->ctx->directive_length),
+ ctx->r->filename);
+
+ return (p - data);
+
+ default:
+ ctx->state = PARSE_ARG_NAME;
+ }
+ /* continue immediately with next state */
+
+ case PARSE_ARG_NAME:
+ while (p < ep && !apr_isspace(*p) && *p != '=') {
+ ++p;
+ }
+
+ if (p < ep) {
+ ctx->state = PARSE_ARG_POSTNAME;
+ *store = &ctx->current_arg->name;
+ *store_len = &ctx->current_arg->name_len;
+ return (p - data);
+ }
+ break;
+
+ case PARSE_ARG_POSTNAME:
+ ctx->current_arg->name = apr_pstrmemdup(ctx->dpool,
+ ctx->current_arg->name,
+ ctx->current_arg->name_len);
+ if (!ctx->current_arg->name_len) {
+ ctx->error = 1;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, "missing argument "
+ "name for value to tag %s in %s",
+ apr_pstrmemdup(ctx->r->pool, ctx->directive,
+ ctx->ctx->directive_length),
+ ctx->r->filename);
+ }
+ else {
+ char *sp = ctx->current_arg->name;
+
+ /* normalize the name */
+ while (*sp) {
+ *sp = apr_tolower(*sp);
+ ++sp;
+ }
+ }
+
+ ctx->state = PARSE_ARG_EQ;
+ /* continue with next state immediately */
+
+ case PARSE_ARG_EQ:
+ *store = NULL;
+
+ while (p < ep && apr_isspace(*p)) {
+ ++p;
+ }
+
+ if (p < ep) {
+ if (*p == '=') {
+ ctx->state = PARSE_ARG_PREVAL;
+ ++p;
+ }
+ else { /* no value */
+ ctx->current_arg->value = NULL;
+ ctx->state = PARSE_PRE_ARG;
+ }
+
+ return (p - data);
+ }
+ break;
+
+ case PARSE_ARG_PREVAL:
+ *store = NULL;
+
+ while (p < ep && apr_isspace(*p)) {
+ ++p;
+ }
+
+ /* buffer doesn't consist of whitespaces only */
+ if (p < ep) {
+ ctx->state = PARSE_ARG_VAL;
+ switch (*p) {
+ case '"': case '\'': case '`':
+ ctx->quote = *p++;
+ break;
+ default:
+ ctx->quote = '\0';
+ break;
+ }
+
+ return (p - data);
+ }
+ break;
+
+ case PARSE_ARG_VAL_ESC:
+ if (*p == ctx->quote) {
+ ++p;
+ }
+ ctx->state = PARSE_ARG_VAL;
+ /* continue with next state immediately */
+
+ case PARSE_ARG_VAL:
+ for (; p < ep; ++p) {
+ if (ctx->quote && *p == '\\') {
+ ++p;
+ if (p == ep) {
+ ctx->state = PARSE_ARG_VAL_ESC;
+ break;
+ }
+
+ if (*p != ctx->quote) {
+ --p;
+ }
+ }
+ else if (ctx->quote && *p == ctx->quote) {
+ ++p;
+ *store = &ctx->current_arg->value;
+ *store_len = &ctx->current_arg->value_len;
+ ctx->state = PARSE_ARG_POSTVAL;
+ break;
+ }
+ else if (!ctx->quote && apr_isspace(*p)) {
+ ++p;
+ *store = &ctx->current_arg->value;
+ *store_len = &ctx->current_arg->value_len;
+ ctx->state = PARSE_ARG_POSTVAL;
+ break;
+ }
+ }
+
+ return (p - data);
+
+ case PARSE_ARG_POSTVAL:
+ /*
+ * The value is still the raw input string. Finally clean it up.
+ */
+ --(ctx->current_arg->value_len);
+
+ /* strip quote escaping \ from the string */
+ if (ctx->quote) {
+ apr_size_t shift = 0;
+ char *sp;
+
+ sp = ctx->current_arg->value;
+ ep = ctx->current_arg->value + ctx->current_arg->value_len;
+ while (sp < ep && *sp != '\\') {
+ ++sp;
+ }
+ for (; sp < ep; ++sp) {
+ if (*sp == '\\' && sp[1] == ctx->quote) {
+ ++sp;
+ ++shift;
+ }
+ if (shift) {
+ *(sp-shift) = *sp;
+ }
+ }
+
+ ctx->current_arg->value_len -= shift;
+ }
+
+ ctx->current_arg->value[ctx->current_arg->value_len] = '\0';
+ ctx->state = PARSE_PRE_ARG;
+
+ return 0;
+
+ default:
+ /* get rid of a gcc warning about unhandled enumerations */
+ break;
+ }
+
+ return len; /* partial match of something */
+}
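+
+/* Examples of argument forms the states above accept (illustrative only,
+ * not exhaustive):
+ *
+ *   attr=value       unquoted; PARSE_ARG_VAL ends at the next whitespace
+ *   attr="va\"lue"   quoted; the backslash escaping the quote is stripped
+ *                    again in PARSE_ARG_POSTVAL
+ *   attr             name only; PARSE_ARG_EQ leaves the value as NULL
+ *   "value"          quote with no name; logged as an error above
+ */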
+
+/*
+ * This is the main loop over the current bucket brigade.
+ */
+static apr_status_t send_parsed_content(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ ssi_ctx_t *ctx = f->ctx;
+ request_rec *r = f->r;
+ apr_bucket *b = APR_BRIGADE_FIRST(bb);
+ apr_bucket_brigade *pass_bb;
+ apr_status_t rv = APR_SUCCESS;
+ char *magic; /* magic pointer for sentinel use */
+
+ /* fast exit */
+ if (APR_BRIGADE_EMPTY(bb)) {
+ return APR_SUCCESS;
+ }
+
+ /* we may crash, since we have already cleaned up; hand the
+ * responsibility over to the next filter ;-)
+ */
+ if (ctx->seen_eos) {
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ /* Everything passed along has to be put into this brigade */
+ pass_bb = apr_brigade_create(ctx->ctx->pool, f->c->bucket_alloc);
+ ctx->ctx->bytes_parsed = 0;
+ ctx->ctx->output_now = 0;
+ ctx->error = 0;
+
+ /* loop over the current bucket brigade */
+ while (b != APR_BRIGADE_SENTINEL(bb)) {
+ const char *data = NULL;
+ apr_size_t len, index, release;
+ apr_bucket *newb = NULL;
+ char **store = &magic;
+ apr_size_t *store_len;
+
+ /* handle meta buckets before reading any data */
+ if (APR_BUCKET_IS_METADATA(b)) {
+ newb = APR_BUCKET_NEXT(b);
+
+ APR_BUCKET_REMOVE(b);
+
+ if (APR_BUCKET_IS_EOS(b)) {
+ ctx->seen_eos = 1;
+
+ /* Hit end of stream, time for cleanup ... But wait!
+ * Perhaps we're not ready yet. We may have to loop one or
+ * two times again to finish our work. In that case, we
+ * just re-insert the EOS bucket to allow for an extra loop.
+ *
+ * PARSE_EXECUTE means we've hit a directive just before the
+ * EOS, which is now waiting for execution.
+ *
+ * PARSE_DIRECTIVE_POSTTAIL means we've hit a directive with
+ * no argument and no space between the directive and end_seq
+ * just before the EOS (consider <!--#printenv--> as the last
+ * or only string within the stream). This state, however,
+ * just cleans up and turns itself into PARSE_EXECUTE, which
+ * will be passed through within the next (and actually
+ * last) round.
+ */
+ if (PARSE_EXECUTE == ctx->state ||
+ PARSE_DIRECTIVE_POSTTAIL == ctx->state) {
+ APR_BUCKET_INSERT_BEFORE(newb, b);
+ }
+ else {
+ break; /* END OF STREAM */
+ }
+ }
+ else {
+ APR_BRIGADE_INSERT_TAIL(pass_bb, b);
+
+ if (APR_BUCKET_IS_FLUSH(b)) {
+ ctx->ctx->output_now = 1;
+ }
+
+ b = newb;
+ continue;
+ }
+ }
+
+ /* enough is enough ... */
+ if (ctx->ctx->output_now ||
+ ctx->ctx->bytes_parsed > AP_MIN_BYTES_TO_WRITE) {
+
+ if (!APR_BRIGADE_EMPTY(pass_bb)) {
+ rv = ap_pass_brigade(f->next, pass_bb);
+ if (!APR_STATUS_IS_SUCCESS(rv)) {
+ apr_brigade_destroy(pass_bb);
+ return rv;
+ }
+ }
+
+ ctx->ctx->output_now = 0;
+ ctx->ctx->bytes_parsed = 0;
+ }
+
+ /* read the current bucket data */
+ len = 0;
+ if (!ctx->seen_eos) {
+ if (ctx->ctx->bytes_parsed > 0) {
+ rv = apr_bucket_read(b, &data, &len, APR_NONBLOCK_READ);
+ if (APR_STATUS_IS_EAGAIN(rv)) {
+ ctx->ctx->output_now = 1;
+ continue;
+ }
+ }
+
+ if (!len || !APR_STATUS_IS_SUCCESS(rv)) {
+ rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ }
+
+ if (!APR_STATUS_IS_SUCCESS(rv)) {
+ apr_brigade_destroy(pass_bb);
+ return rv;
+ }
+
+ ctx->ctx->bytes_parsed += len;
+ }
+
+ /* zero length bucket, fetch next one */
+ if (!len && !ctx->seen_eos) {
+ b = APR_BUCKET_NEXT(b);
+ continue;
+ }
+
+ /*
+ * it's actually a bucket containing data; start/continue parsing
+ */
+
+ switch (ctx->state) {
+ /* no current tag; search for start sequence */
+ case PARSE_PRE_HEAD:
+ index = find_start_sequence(ctx, data, len);
+
+ if (index < len) {
+ apr_bucket_split(b, index);
+ }
+
+ newb = APR_BUCKET_NEXT(b);
+ if (ctx->ctx->flags & FLAG_PRINTING) {
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(pass_bb, b);
+ }
+ else {
+ apr_bucket_delete(b);
+ }
+
+ if (index < len) {
+ /* now delete the start_seq stuff from the remaining bucket */
+ if (PARSE_DIRECTIVE == ctx->state) { /* full match */
+ apr_bucket_split(newb, ctx->ctx->start_seq_len);
+ ctx->ctx->output_now = 1; /* pass pre-tag stuff */
+ }
+
+ b = APR_BUCKET_NEXT(newb);
+ apr_bucket_delete(newb);
+ }
+ else {
+ b = newb;
+ }
+
+ break;
+
+ /* we're currently looking for the end of the start sequence */
+ case PARSE_HEAD:
+ index = find_partial_start_sequence(ctx, data, len, &release);
+
+ /* check if we mismatched earlier and have to release some chars */
+ if (release && (ctx->ctx->flags & FLAG_PRINTING)) {
+ char *to_release = apr_palloc(ctx->ctx->pool, release);
+
+ memcpy(to_release, ctx->ctx->start_seq, release);
+ newb = apr_bucket_pool_create(to_release, release,
+ ctx->ctx->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(pass_bb, newb);
+ }
+
+ if (index) { /* any match */
+ /* now delete the start_seq stuff from the remaining bucket */
+ if (PARSE_DIRECTIVE == ctx->state) { /* final match */
+ apr_bucket_split(b, index);
+ ctx->ctx->output_now = 1; /* pass pre-tag stuff */
+ }
+ newb = APR_BUCKET_NEXT(b);
+ apr_bucket_delete(b);
+ b = newb;
+ }
+
+ break;
+
+ /* we're currently grabbing the directive name */
+ case PARSE_DIRECTIVE:
+ case PARSE_DIRECTIVE_POSTNAME:
+ case PARSE_DIRECTIVE_TAIL:
+ case PARSE_DIRECTIVE_POSTTAIL:
+ index = find_directive(ctx, data, len, &store, &store_len);
+
+ if (index) {
+ apr_bucket_split(b, index);
+ newb = APR_BUCKET_NEXT(b);
+ }
+
+ if (store) {
+ if (index) {
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(ctx->tmp_bb, b);
+ b = newb;
+ }
+
+ /* time for cleanup? */
+ if (store != &magic) {
+ apr_brigade_pflatten(ctx->tmp_bb, store, store_len,
+ ctx->dpool);
+ apr_brigade_cleanup(ctx->tmp_bb);
+ }
+ }
+ else if (index) {
+ apr_bucket_delete(b);
+ b = newb;
+ }
+
+ break;
+
+ /* skip WS and find out what comes next (arg or end_seq) */
+ case PARSE_PRE_ARG:
+ index = find_arg_or_tail(ctx, data, len);
+
+ if (index) { /* skipped whitespaces */
+ if (index < len) {
+ apr_bucket_split(b, index);
+ }
+ newb = APR_BUCKET_NEXT(b);
+ apr_bucket_delete(b);
+ b = newb;
+ }
+
+ break;
+
+ /* currently parsing name[=val] */
+ case PARSE_ARG:
+ case PARSE_ARG_NAME:
+ case PARSE_ARG_POSTNAME:
+ case PARSE_ARG_EQ:
+ case PARSE_ARG_PREVAL:
+ case PARSE_ARG_VAL:
+ case PARSE_ARG_VAL_ESC:
+ case PARSE_ARG_POSTVAL:
+ index = find_argument(ctx, data, len, &store, &store_len);
+
+ if (index) {
+ apr_bucket_split(b, index);
+ newb = APR_BUCKET_NEXT(b);
+ }
+
+ if (store) {
+ if (index) {
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(ctx->tmp_bb, b);
+ b = newb;
+ }
+
+ /* time for cleanup? */
+ if (store != &magic) {
+ apr_brigade_pflatten(ctx->tmp_bb, store, store_len,
+ ctx->dpool);
+ apr_brigade_cleanup(ctx->tmp_bb);
+ }
+ }
+ else if (index) {
+ apr_bucket_delete(b);
+ b = newb;
+ }
+
+ break;
+
+ /* try to match end_seq at current pos. */
+ case PARSE_TAIL:
+ case PARSE_TAIL_SEQ:
+ index = find_tail(ctx, data, len);
+
+ switch (ctx->state) {
+ case PARSE_EXECUTE: /* full match */
+ apr_bucket_split(b, index);
+ newb = APR_BUCKET_NEXT(b);
+ apr_bucket_delete(b);
+ b = newb;
+ break;
+
+ case PARSE_ARG: /* no match */
+ /* PARSE_ARG must reparse at the beginning */
+ APR_BRIGADE_PREPEND(bb, ctx->tmp_bb);
+ b = APR_BRIGADE_FIRST(bb);
+ break;
+
+ default: /* partial match */
+ newb = APR_BUCKET_NEXT(b);
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(ctx->tmp_bb, b);
+ b = newb;
+ break;
+ }
+
+ break;
+
+ /* now execute the parsed directive, clean up the space and
+ * start again with PARSE_PRE_HEAD
+ */
+ case PARSE_EXECUTE:
+ /* if there was an error, it was already logged; just stop here */
+ if (ctx->error) {
+ if (ctx->ctx->flags & FLAG_PRINTING) {
+ SSI_CREATE_ERROR_BUCKET(ctx->ctx, f, pass_bb);
+ ctx->error = 0;
+ }
+ }
+ else {
+ include_handler_fn_t *handle_func;
+
+ handle_func =
+ (include_handler_fn_t *) apr_hash_get(include_hash,
+ ctx->directive,
+ ctx->ctx->directive_length);
+ if (handle_func) {
+ apr_bucket *dummy;
+ char *tag;
+ apr_size_t tag_len = 0;
+ ssi_arg_item_t *carg = ctx->argv;
+
+ /* legacy wrapper code */
+ while (carg) {
+ /* +1 \0 byte (either after tag or value)
+ * +1 = byte (before value)
+ */
+ tag_len += (carg->name ? carg->name_len : 0) +
+ (carg->value ? carg->value_len + 1 : 0) + 1;
+ carg = carg->next;
+ }
+
+ tag = ctx->ctx->combined_tag = ctx->ctx->curr_tag_pos =
+ apr_palloc(ctx->dpool, tag_len);
+
+ carg = ctx->argv;
+ while (carg) {
+ if (carg->name) {
+ memcpy(tag, carg->name, carg->name_len);
+ tag += carg->name_len;
+ }
+ if (carg->value) {
+ *tag++ = '=';
+ memcpy(tag, carg->value, carg->value_len);
+ tag += carg->value_len;
+ }
+ *tag++ = '\0';
+ carg = carg->next;
+ }
+ ctx->ctx->tag_length = tag_len;
+
+ /* create dummy buckets for backwards compat */
+ ctx->ctx->head_start_bucket =
+ apr_bucket_pool_create(apr_pmemdup(ctx->ctx->pool,
+ ctx->ctx->start_seq,
+ ctx->ctx->start_seq_len),
+ ctx->ctx->start_seq_len,
+ ctx->ctx->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->ctx->ssi_tag_brigade,
+ ctx->ctx->head_start_bucket);
+ ctx->ctx->tag_start_bucket =
+ apr_bucket_pool_create(apr_pmemdup(ctx->ctx->pool,
+ ctx->ctx->combined_tag,
+ ctx->ctx->tag_length),
+ ctx->ctx->tag_length,
+ ctx->ctx->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->ctx->ssi_tag_brigade,
+ ctx->ctx->tag_start_bucket);
+ ctx->ctx->tail_start_bucket =
+ apr_bucket_pool_create(apr_pmemdup(ctx->ctx->pool,
+ ctx->ctx->end_seq,
+ ctx->end_seq_len),
+ ctx->end_seq_len,
+ ctx->ctx->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->ctx->ssi_tag_brigade,
+ ctx->ctx->tail_start_bucket);
+
+ rv = handle_func(ctx->ctx, &bb, r, f, b, &dummy);
+
+ apr_brigade_cleanup(ctx->ctx->ssi_tag_brigade);
+
+ if (rv != 0 && rv != 1 && rv != -1) {
+ apr_brigade_destroy(pass_bb);
+ return rv;
+ }
+
+ if (dummy) {
+ apr_bucket_brigade *remain;
+
+ remain = apr_brigade_split(bb, b);
+ APR_BRIGADE_CONCAT(pass_bb, bb);
+ bb = remain;
+ }
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unknown directive \"%s\" in parsed doc %s",
+ apr_pstrmemdup(r->pool, ctx->directive,
+ ctx->ctx->directive_length),
+ r->filename);
+ if (ctx->ctx->flags & FLAG_PRINTING) {
+ SSI_CREATE_ERROR_BUCKET(ctx->ctx, f, pass_bb);
+ }
+ }
+ }
+
+ /* cleanup */
+ apr_pool_clear(ctx->dpool);
+ apr_brigade_cleanup(ctx->tmp_bb);
+
+ /* Oooof. Done here, start next round */
+ ctx->state = PARSE_PRE_HEAD;
+ break;
+ }
+
+ } /* while (brigade) */
+
+ /* End of stream. Final cleanup */
+ if (ctx->seen_eos) {
+ if (PARSE_HEAD == ctx->state) {
+ if (ctx->ctx->flags & FLAG_PRINTING) {
+ char *to_release = apr_palloc(ctx->ctx->pool,
+ ctx->ctx->parse_pos);
+
+ memcpy(to_release, ctx->ctx->start_seq, ctx->ctx->parse_pos);
+ APR_BRIGADE_INSERT_TAIL(pass_bb,
+ apr_bucket_pool_create(to_release,
+ ctx->ctx->parse_pos, ctx->ctx->pool,
+ f->c->bucket_alloc));
+ }
+ }
+ else if (PARSE_PRE_HEAD != ctx->state) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "SSI directive was not properly finished at the end "
+ "of parsed document %s", r->filename);
+ if (ctx->ctx->flags & FLAG_PRINTING) {
+ SSI_CREATE_ERROR_BUCKET(ctx->ctx, f, pass_bb);
+ }
+ }
+
+ if (!(ctx->ctx->flags & FLAG_PRINTING)) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "missing closing endif directive in parsed document"
+ " %s", r->filename);
+ }
+
+ /* clean up our temporary memory */
+ apr_brigade_destroy(ctx->tmp_bb);
+ apr_pool_destroy(ctx->dpool);
+
+ /* don't forget to finally insert the EOS bucket */
+ APR_BRIGADE_INSERT_TAIL(pass_bb, b);
+ }
+
+ /* if something's left over, pass it along */
+ if (!APR_BRIGADE_EMPTY(pass_bb)) {
+ rv = ap_pass_brigade(f->next, pass_bb);
+ }
+ else {
+ rv = APR_SUCCESS;
+ }
+
+ apr_brigade_destroy(pass_bb);
+ return rv;
+}
+
+static void *create_includes_dir_config(apr_pool_t *p, char *dummy)
+{
+ include_dir_config *result =
+ (include_dir_config *)apr_palloc(p, sizeof(include_dir_config));
+ enum xbithack *xbh = (enum xbithack *) apr_palloc(p, sizeof(enum xbithack));
+ *xbh = DEFAULT_XBITHACK;
+ result->default_error_msg = DEFAULT_ERROR_MSG;
+ result->default_time_fmt = DEFAULT_TIME_FORMAT;
+ result->xbithack = xbh;
+ return result;
+}
+
+static void *create_includes_server_config(apr_pool_t*p, server_rec *server)
+{
+ include_server_config *result =
+ (include_server_config *)apr_palloc(p, sizeof(include_server_config));
+ result->default_end_tag = ENDING_SEQUENCE;
+ result->default_start_tag = STARTING_SEQUENCE;
+ result->start_tag_len = sizeof(STARTING_SEQUENCE)-1;
+ /* compile the pattern used by find_start_sequence */
+ bndm_compile(&result->start_seq_pat, result->default_start_tag,
+ result->start_tag_len);
+
+ result->undefinedEcho = apr_pstrdup(p, "(none)");
+ result->undefinedEchoLen = strlen(result->undefinedEcho);
+ return result;
+}
+static const char *set_xbithack(cmd_parms *cmd, void *xbp, const char *arg)
+{
+ include_dir_config *conf = (include_dir_config *)xbp;
+
+ if (!strcasecmp(arg, "off")) {
+ *conf->xbithack = xbithack_off;
+ }
+ else if (!strcasecmp(arg, "on")) {
+ *conf->xbithack = xbithack_on;
+ }
+ else if (!strcasecmp(arg, "full")) {
+ *conf->xbithack = xbithack_full;
+ }
+ else {
+ return "XBitHack must be set to Off, On, or Full";
+ }
+
+ return NULL;
+}
+
+static int includes_setup(ap_filter_t *f)
+{
+ include_dir_config *conf =
+ (include_dir_config *)ap_get_module_config(f->r->per_dir_config,
+ &include_module);
+
+ /* When our xbithack value isn't set to full or our platform isn't
+ * providing group-level protection bits or our group-level bits do not
+ * have group-execute on, we will set the no_local_copy value to 1 so
+ * that we will not send 304s.
+ */
+ if ((*conf->xbithack != xbithack_full)
+ || !(f->r->finfo.valid & APR_FINFO_GPROT)
+ || !(f->r->finfo.protection & APR_GEXECUTE)) {
+ f->r->no_local_copy = 1;
+ }
+
+ /* Don't allow ETag headers to be generated - see RFC2616 - 13.3.4.
+ * We don't know if we are going to be including a file or executing
+ * a program - in either case a strong ETag header will likely be invalid.
+ */
+ apr_table_setn(f->r->notes, "no-etag", "");
+
+ return OK;
+}
+
+static apr_status_t includes_filter(ap_filter_t *f, apr_bucket_brigade *b)
+{
+ request_rec *r = f->r;
+ ssi_ctx_t *ctx = f->ctx;
+ request_rec *parent;
+ include_dir_config *conf =
+ (include_dir_config *)ap_get_module_config(r->per_dir_config,
+ &include_module);
+
+ include_server_config *sconf = ap_get_module_config(r->server->module_config,
+ &include_module);
+
+ if (!(ap_allow_options(r) & OPT_INCLUDES)) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "mod_include: Options +Includes (or IncludesNoExec) "
+ "wasn't set, INCLUDES filter removed");
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, b);
+ }
+
+ if (!f->ctx) {
+ /* create context for this filter */
+ f->ctx = ctx = apr_palloc(f->c->pool, sizeof(*ctx));
+ ctx->ctx = apr_pcalloc(f->c->pool, sizeof(*ctx->ctx));
+ ctx->ctx->pool = f->r->pool;
+ apr_pool_create(&ctx->dpool, ctx->ctx->pool);
+
+ /* configuration data */
+ ctx->end_seq_len = strlen(sconf->default_end_tag);
+ ctx->r = f->r;
+
+ /* runtime data */
+ ctx->tmp_bb = apr_brigade_create(ctx->ctx->pool, f->c->bucket_alloc);
+ ctx->seen_eos = 0;
+ ctx->state = PARSE_PRE_HEAD;
+ ctx->ctx->flags = (FLAG_PRINTING | FLAG_COND_TRUE);
+ if (ap_allow_options(f->r) & OPT_INCNOEXEC) {
+ ctx->ctx->flags |= FLAG_NO_EXEC;
+ }
+ ctx->ctx->if_nesting_level = 0;
+ ctx->ctx->re_string = NULL;
+ ctx->ctx->error_str_override = NULL;
+ ctx->ctx->time_str_override = NULL;
+
+ ctx->ctx->error_str = conf->default_error_msg;
+ ctx->ctx->time_str = conf->default_time_fmt;
+ ctx->ctx->start_seq_pat = &sconf->start_seq_pat;
+ ctx->ctx->start_seq = sconf->default_start_tag;
+ ctx->ctx->start_seq_len = sconf->start_tag_len;
+ ctx->ctx->end_seq = sconf->default_end_tag;
+
+ /* legacy compat stuff */
+ ctx->ctx->state = PARSED; /* dummy */
+ ctx->ctx->ssi_tag_brigade = apr_brigade_create(f->c->pool,
+ f->c->bucket_alloc);
+ ctx->ctx->status = APR_SUCCESS;
+ ctx->ctx->head_start_index = 0;
+ ctx->ctx->tag_start_index = 0;
+ ctx->ctx->tail_start_index = 0;
+ }
+ else {
+ ctx->ctx->bytes_parsed = 0;
+ }
+
+ if ((parent = ap_get_module_config(r->request_config, &include_module))) {
+ /* Kludge --- for nested includes, we want to keep the subprocess
+ * environment of the base document (for compatibility); that means
+ * torquing our own last_modified date as well so that the
+ * LAST_MODIFIED variable gets reset to the proper value if the
+ * nested document resets <!--#config timefmt -->.
+ */
+ r->subprocess_env = r->main->subprocess_env;
+ apr_pool_join(r->main->pool, r->pool);
+ r->finfo.mtime = r->main->finfo.mtime;
+ }
+ else {
+ /* we're not a nested include, so we create an initial
+ * environment */
+ ap_add_common_vars(r);
+ ap_add_cgi_vars(r);
+ add_include_vars(r, conf->default_time_fmt);
+ }
+ /* Always unset the content-length. There is no way to know if
+ * the content will be modified at some point by send_parsed_content.
+ * It is very possible for us to not find any content in the first
+ * 9k of the file, but still have to modify the content of the file.
+ * If we are going to pass the file through send_parsed_content, then
+ * the content-length should just be unset.
+ */
+ apr_table_unset(f->r->headers_out, "Content-Length");
+
+ /* Always unset the Last-Modified field - see RFC2616 - 13.3.4.
+ * We don't know if we are going to be including a file or executing
+ * a program which may change the Last-Modified header or make the
+ * content completely dynamic. Therefore, we can't support these
+ * headers.
+ * Exception: XBitHack full means we *should* set the Last-Modified field.
+ */
+
+ /* Assure the platform supports Group protections */
+ if ((*conf->xbithack == xbithack_full)
+ && (r->finfo.valid & APR_FINFO_GPROT)
+ && (r->finfo.protection & APR_GEXECUTE)) {
+ ap_update_mtime(r, r->finfo.mtime);
+ ap_set_last_modified(r);
+ }
+ else {
+ apr_table_unset(f->r->headers_out, "Last-Modified");
+ }
+
+ /* add QUERY stuff to the environment, since it isn't there yet */
+ if (r->args) {
+ char *arg_copy = apr_pstrdup(r->pool, r->args);
+
+ apr_table_setn(r->subprocess_env, "QUERY_STRING", r->args);
+ ap_unescape_url(arg_copy);
+ apr_table_setn(r->subprocess_env, "QUERY_STRING_UNESCAPED",
+ ap_escape_shell_cmd(r->pool, arg_copy));
+ }
+
+ return send_parsed_content(f, b);
+}
+
+static void ap_register_include_handler(char *tag, include_handler_fn_t *func)
+{
+ apr_hash_set(include_hash, tag, strlen(tag), (const void *)func);
+}
+
+static int include_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ include_hash = apr_hash_make(p);
+
+ ssi_pfn_register = APR_RETRIEVE_OPTIONAL_FN(ap_register_include_handler);
+
+ if(ssi_pfn_register) {
+ ssi_pfn_register("if", handle_if);
+ ssi_pfn_register("set", handle_set);
+ ssi_pfn_register("else", handle_else);
+ ssi_pfn_register("elif", handle_elif);
+ ssi_pfn_register("echo", handle_echo);
+ ssi_pfn_register("endif", handle_endif);
+ ssi_pfn_register("fsize", handle_fsize);
+ ssi_pfn_register("config", handle_config);
+ ssi_pfn_register("include", handle_include);
+ ssi_pfn_register("flastmod", handle_flastmod);
+ ssi_pfn_register("printenv", handle_printenv);
+ }
+ return OK;
+}
+
+static const char *set_default_error_msg(cmd_parms *cmd, void *mconfig, const char *msg)
+{
+ include_dir_config *conf = (include_dir_config *)mconfig;
+ conf->default_error_msg = apr_pstrdup(cmd->pool, msg);
+ return NULL;
+}
+
+static const char *set_default_start_tag(cmd_parms *cmd, void *mconfig, const char *msg)
+{
+ include_server_config *conf;
+ conf = ap_get_module_config(cmd->server->module_config, &include_module);
+ conf->default_start_tag = apr_pstrdup(cmd->pool, msg);
+ conf->start_tag_len = strlen(conf->default_start_tag);
+ bndm_compile(&conf->start_seq_pat, conf->default_start_tag,
+ conf->start_tag_len);
+
+ return NULL;
+}
+static const char *set_undefined_echo(cmd_parms *cmd, void *mconfig, const char *msg)
+{
+ include_server_config *conf;
+ conf = ap_get_module_config(cmd->server->module_config, &include_module);
+ conf->undefinedEcho = apr_pstrdup(cmd->pool, msg);
+ conf->undefinedEchoLen = strlen(msg);
+
+ return NULL;
+}
+
+
+static const char *set_default_end_tag(cmd_parms *cmd, void *mconfig, const char *msg)
+{
+ include_server_config *conf;
+ conf = ap_get_module_config(cmd->server->module_config, &include_module);
+ conf->default_end_tag = apr_pstrdup(cmd->pool, msg);
+
+ return NULL;
+}
+
+static const char *set_default_time_fmt(cmd_parms *cmd, void *mconfig, const char *fmt)
+{
+ include_dir_config *conf = (include_dir_config *)mconfig;
+ conf->default_time_fmt = apr_pstrdup(cmd->pool, fmt);
+ return NULL;
+}
+
+/*
+ * Module definition and configuration data structs...
+ */
+static const command_rec includes_cmds[] =
+{
+ AP_INIT_TAKE1("XBitHack", set_xbithack, NULL, OR_OPTIONS,
+ "Off, On, or Full"),
+ AP_INIT_TAKE1("SSIErrorMsg", set_default_error_msg, NULL, OR_ALL,
+ "a string"),
+ AP_INIT_TAKE1("SSITimeFormat", set_default_time_fmt, NULL, OR_ALL,
+ "a strftime(3) formatted string"),
+ AP_INIT_TAKE1("SSIStartTag", set_default_start_tag, NULL, RSRC_CONF,
+ "SSI Start String Tag"),
+ AP_INIT_TAKE1("SSIEndTag", set_default_end_tag, NULL, RSRC_CONF,
+ "SSI End String Tag"),
+ AP_INIT_TAKE1("SSIUndefinedEcho", set_undefined_echo, NULL, RSRC_CONF,
+ "SSI Start String Tag"),
+
+ {NULL}
+};
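+
+/* Illustrative httpd.conf usage of the directives above (values here are
+ * assumed, shown only as an example):
+ *
+ *   <Directory "/var/www/ssi">
+ *       Options +Includes
+ *       XBitHack Off
+ *       SSIErrorMsg "[ssi error]"
+ *       SSITimeFormat "%Y-%m-%d %H:%M"
+ *   </Directory>
+ *
+ * SSIStartTag, SSIEndTag and SSIUndefinedEcho are server-wide (RSRC_CONF)
+ * and therefore belong outside <Directory> sections.
+ */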
+
+static int include_fixup(request_rec *r)
+{
+ include_dir_config *conf;
+
+ conf = (include_dir_config *) ap_get_module_config(r->per_dir_config,
+ &include_module);
+
+ if (r->handler && (strcmp(r->handler, "server-parsed") == 0))
+ {
+ if (!r->content_type || !*r->content_type) {
+ ap_set_content_type(r, "text/html");
+ }
+ r->handler = "default-handler";
+ }
+ else
+#if defined(OS2) || defined(WIN32) || defined(NETWARE)
+ /* These OSes don't support xbithack. This is being worked on. */
+ {
+ return DECLINED;
+ }
+#else
+ {
+ if (*conf->xbithack == xbithack_off) {
+ return DECLINED;
+ }
+
+ if (!(r->finfo.protection & APR_UEXECUTE)) {
+ return DECLINED;
+ }
+
+ if (!r->content_type || strcmp(r->content_type, "text/html")) {
+ return DECLINED;
+ }
+ }
+#endif
+
+ /* We always return declined, because the default handler actually
+ * serves the file. All we have to do is add the filter.
+ */
+ ap_add_output_filter("INCLUDES", NULL, r, r->connection);
+ return DECLINED;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ APR_REGISTER_OPTIONAL_FN(ap_ssi_get_tag_and_value);
+ APR_REGISTER_OPTIONAL_FN(ap_ssi_parse_string);
+ APR_REGISTER_OPTIONAL_FN(ap_register_include_handler);
+ ap_hook_post_config(include_post_config, NULL, NULL, APR_HOOK_REALLY_FIRST);
+ ap_hook_fixups(include_fixup, NULL, NULL, APR_HOOK_LAST);
+ ap_register_output_filter("INCLUDES", includes_filter, includes_setup,
+ AP_FTYPE_RESOURCE);
+}
+
+module AP_MODULE_DECLARE_DATA include_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_includes_dir_config, /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ create_includes_server_config,/* server config */
+ NULL, /* merge server config */
+ includes_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/mod_include.dsp b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.dsp
new file mode 100644
index 00000000..52136219
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.dsp
@@ -0,0 +1,132 @@
+# Microsoft Developer Studio Project File - Name="mod_include" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_include - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_include.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_include.mak" CFG="mod_include - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_include - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_include - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_include - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_include_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_include.so" /base:@..\..\os\win32\BaseAddr.ref,mod_include.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_include.so" /base:@..\..\os\win32\BaseAddr.ref,mod_include.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_include - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_include_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_include.so" /base:@..\..\os\win32\BaseAddr.ref,mod_include.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_include.so" /base:@..\..\os\win32\BaseAddr.ref,mod_include.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_include - Win32 Release"
+# Name "mod_include - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_include.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_include.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_include.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_include - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_include.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_include.so "include_module for Apache" ../../include/ap_release.h > .\mod_include.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_include - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_include.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_include.so "include_module for Apache" ../../include/ap_release.h > .\mod_include.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/mod_include.exp b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.exp
new file mode 100644
index 00000000..112e1c4d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.exp
@@ -0,0 +1 @@
+include_module
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/mod_include.h b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.h
new file mode 100644
index 00000000..6264b888
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.h
@@ -0,0 +1,206 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _MOD_INCLUDE_H
+#define _MOD_INCLUDE_H 1
+
+#include "apr_pools.h"
+#include "apr_optional.h"
+
+#define STARTING_SEQUENCE "<!--#"
+#define ENDING_SEQUENCE "-->"
+
+#define DEFAULT_ERROR_MSG "[an error occurred while processing this directive]"
+#define DEFAULT_TIME_FORMAT "%A, %d-%b-%Y %H:%M:%S %Z"
+#define SIZEFMT_BYTES 0
+#define SIZEFMT_KMG 1
+#define TMP_BUF_SIZE 1024
+#if APR_CHARSET_EBCDIC
+#define RAW_ASCII_CHAR(ch) apr_xlate_conv_byte(ap_hdrs_from_ascii, (unsigned char)ch)
+#else /*APR_CHARSET_EBCDIC*/
+#define RAW_ASCII_CHAR(ch) (ch)
+#endif /*APR_CHARSET_EBCDIC*/
+
+/****************************************************************************
+ * Used to keep context information during parsing of a request for SSI tags.
+ * This is especially useful if the tag stretches across multiple buckets or
+ * brigades. This keeps track of which buckets need to be replaced with the
+ * content generated by the SSI tag.
+ *
+ * state: PRE_HEAD - State prior to finding the first character of the
+ * STARTING_SEQUENCE. Next state is PARSE_HEAD.
+ * PARSE_HEAD - State entered once the first character of the
+ * STARTING_SEQUENCE is found and exited when the
+ * the full STARTING_SEQUENCE has been matched or
+ * a match failure occurs. Next state is PRE_HEAD
+ * or PARSE_TAG.
+ * PARSE_TAG - State entered once the STARTING sequence has been
+ * matched. It is exited when the first character in
+ * ENDING_SEQUENCE is found. Next state is PARSE_TAIL.
+ * PARSE_TAIL - State entered from PARSE_TAG state when the first
+ * character in ENDING_SEQUENCE is encountered. This
+ * state is exited when the ENDING_SEQUENCE has been
+ * completely matched, or when a match failure occurs.
+ * Next state is PARSE_TAG or PARSED.
+ * PARSED - State entered from PARSE_TAIL once the complete
+ * ENDING_SEQUENCE has been matched. The SSI tag is
+ * processed and the SSI buckets are replaced with the
+ * SSI content during this state.
+ * parse_pos: Current matched position within the STARTING_SEQUENCE or
+ * ENDING_SEQUENCE during the PARSE_HEAD and PARSE_TAIL states.
+ * This is especially useful when the sequence spans brigades.
+ * X_start_bucket: These point to the buckets containing the first character
+ * of the STARTING_SEQUENCE, the first non-whitespace
+ * character of the tag, and the first character in the
+ * ENDING_SEQUENCE (head_, tag_, and tail_ respectively).
+ * The buckets are kept intact until the PARSED state is
+ * reached, at which time the tag is consolidated and the
+ * buckets are released. The buckets that these point to
+ * have all been set aside in the ssi_tag_brigade (along
+ * with all of the intervening buckets).
+ * X_start_index: The index points within the specified bucket contents
+ * where the first character of the STARTING_SEQUENCE,
+ * the first non-whitespace character of the tag, and the
+ * first character in the ENDING_SEQUENCE can be found
+ * (head_, tag_, and tail_ respectively).
+ * combined_tag: Once the PARSED state is reached the tag is collected from
+ * the bucket(s) in the ssi_tag_brigade into this contiguous
+ * buffer. The buckets in the ssi_tag_brigade are released
+ * and the tag is processed.
+ * curr_tag_pos: Ptr to the combined_tag buffer indicating the current
+ * parse position.
+ * tag_length: The number of bytes in the actual tag (excluding the
+ * STARTING_SEQUENCE, leading and trailing whitespace,
+ * and ENDING_SEQUENCE). This length is computed as the
+ * buckets are parsed and set aside during the PARSE_TAG state.
+ * ssi_tag_brigade: The temporary brigade used by this filter to set aside
+ * the buckets containing parts of the ssi tag and headers.
+ */
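+
+/* Worked example for the (legacy) states described above, assuming the
+ * default sequences "<!--#" and "-->": for the input
+ *
+ *     x<!--#echo var="DATE_LOCAL" -->y
+ *
+ * the parser moves PRE_HEAD -> PARSE_HEAD at '<', PARSE_HEAD -> PARSE_TAG
+ * once "<!--#" is fully matched, PARSE_TAG -> PARSE_TAIL at the first '-'
+ * of "-->", and PARSE_TAIL -> PARSED once "-->" is complete; "x" is passed
+ * through, the echo output replaces the tag, and "y" follows.
+ */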
+
+/* This stuff is kept here for binary compatibility. It probably doesn't matter,
+ * but who knows ...?
+ */
+#ifdef MOD_INCLUDE_REDESIGN
+typedef enum {PRE_HEAD, BLOW_PARSE_HEAD, BLOW_PARSE_DIRECTIVE, PARSE_TAG,
+ BLOW_PARSE_TAIL, PARSED} states;
+#else
+typedef enum {PRE_HEAD, PARSE_HEAD, PARSE_DIRECTIVE, PARSE_TAG, PARSE_TAIL,
+ PARSED} states;
+#endif
+
+/** forward reference, as it needs to be held in the context */
+typedef struct bndm_t bndm_t;
+
+typedef struct include_filter_ctx {
+ states state;
+ long flags; /* See the FLAG_XXXXX definitions. */
+ int if_nesting_level;
+ apr_size_t parse_pos;
+ int bytes_parsed;
+ apr_status_t status;
+ int output_now;
+ int output_flush;
+
+ apr_bucket *head_start_bucket;
+ apr_size_t head_start_index;
+
+ apr_bucket *tag_start_bucket;
+ apr_size_t tag_start_index;
+
+ apr_bucket *tail_start_bucket;
+ apr_size_t tail_start_index;
+
+ char *combined_tag;
+ char *curr_tag_pos;
+ apr_size_t directive_length;
+ apr_size_t tag_length;
+
+ char *error_str;
+ char *error_str_override;
+ char *time_str;
+ char *time_str_override;
+ apr_pool_t *pool;
+
+ apr_bucket_brigade *ssi_tag_brigade;
+ bndm_t *start_seq_pat;
+ char *start_seq;
+ int start_seq_len;
+ char *end_seq;
+ char *re_string;
+ regmatch_t (*re_result)[10];
+} include_ctx_t;
+
+/* These flags are used to set flag bits. */
+#define FLAG_PRINTING 0x00000001 /* Printing conditional lines. */
+#define FLAG_COND_TRUE 0x00000002 /* Conditional eval'd to true. */
+#define FLAG_SIZE_IN_BYTES 0x00000004 /* Sizes displayed in bytes. */
+#define FLAG_NO_EXEC 0x00000008 /* No Exec in current context. */
+
+/* These flags are used to clear flag bits. */
+#define FLAG_SIZE_ABBREV 0xFFFFFFFB /* Reset SIZE_IN_BYTES bit. */
+#define FLAG_CLEAR_PRINT_COND 0xFFFFFFFC /* Reset PRINTING and COND_TRUE*/
+#define FLAG_CLEAR_PRINTING 0xFFFFFFFE /* Reset just PRINTING bit. */
+
+#define CREATE_ERROR_BUCKET(cntx, t_buck, h_ptr, ins_head) \
+{ \
+ /* XXX: it'd probably be nice to use a pool bucket here */ \
+ t_buck = apr_bucket_heap_create(cntx->error_str, \
+ strlen(cntx->error_str), \
+ NULL, h_ptr->list); \
+ APR_BUCKET_INSERT_BEFORE(h_ptr, t_buck); \
+ \
+ if (ins_head == NULL) { \
+ ins_head = t_buck; \
+ } \
+}
+
+/* Make sure to check the return code rc. If it is anything other
+ * than APR_SUCCESS, then you should return this value up the
+ * call chain.
+ */
+#define SPLIT_AND_PASS_PRETAG_BUCKETS(brgd, cntxt, next, rc) \
+if ((APR_BRIGADE_EMPTY((cntxt)->ssi_tag_brigade)) && \
+ ((cntxt)->head_start_bucket != NULL)) { \
+ apr_bucket_brigade *tag_plus; \
+ \
+ tag_plus = apr_brigade_split((brgd), (cntxt)->head_start_bucket); \
+ if ((cntxt)->output_flush) { \
+ APR_BRIGADE_INSERT_TAIL((brgd), apr_bucket_flush_create((brgd)->bucket_alloc)); \
+ } \
+ (rc) = ap_pass_brigade((next), (brgd)); \
+ (cntxt)->bytes_parsed = 0; \
+ (brgd) = tag_plus; \
+}
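+
+/* Illustrative call pattern for the macro above, matching the note on the
+ * return code (the brigade and filter variables are assumed caller names):
+ *
+ *     apr_status_t rc = APR_SUCCESS;
+ *     SPLIT_AND_PASS_PRETAG_BUCKETS(*bb, ctx, f->next, rc);
+ *     if (rc != APR_SUCCESS) {
+ *         return rc;
+ *     }
+ */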
+
+
+typedef int (include_handler_fn_t)(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ request_rec *r, ap_filter_t *f, apr_bucket *head_ptr,
+ apr_bucket **inserted_head);
+
+APR_DECLARE_OPTIONAL_FN(void, ap_ssi_get_tag_and_value, (include_ctx_t *ctx,
+ char **tag,
+ char **tag_val,
+ int dodecode));
+APR_DECLARE_OPTIONAL_FN(char*, ap_ssi_parse_string, (request_rec *r,
+ include_ctx_t *ctx,
+ const char *in,
+ char *out,
+ apr_size_t length,
+ int leave_name));
+APR_DECLARE_OPTIONAL_FN(void, ap_register_include_handler,
+ (char *tag, include_handler_fn_t *func));
+
+#endif /* _MOD_INCLUDE_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/mod_include.la b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.la
new file mode 100644
index 00000000..602915a6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.la
@@ -0,0 +1,35 @@
+# mod_include.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_include.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_include.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/mod_include.lo b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.lo
new file mode 100644
index 00000000..9fdb766d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.lo
@@ -0,0 +1,12 @@
+# mod_include.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_include.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_include.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/mod_include.o b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.o
new file mode 100644
index 00000000..d08f85f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/mod_include.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/filters/modules.mk b/rubbos/app/httpd-2.0.64/modules/filters/modules.mk
new file mode 100644
index 00000000..cd07d4a0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/filters/modules.mk
@@ -0,0 +1,5 @@
+mod_include.la: mod_include.lo
+ $(MOD_LINK) mod_include.lo $(MOD_INCLUDE_LDADD)
+DISTCLEAN_TARGETS = modules.mk
+static = mod_include.la
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.deps b/rubbos/app/httpd-2.0.64/modules/generators/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.indent.pro b/rubbos/app/httpd-2.0.64/modules/generators/.indent.pro
new file mode 100644
index 00000000..a9fbe9f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_asis.a b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_asis.a
new file mode 100644
index 00000000..f7606c14
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_asis.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_asis.la b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_asis.la
new file mode 100644
index 00000000..f3cb430d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_asis.la
@@ -0,0 +1,35 @@
+# mod_asis.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_asis.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_asis.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_asis.o b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_asis.o
new file mode 100644
index 00000000..acca9866
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_asis.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_autoindex.a b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_autoindex.a
new file mode 100644
index 00000000..e4afe274
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_autoindex.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_autoindex.la b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_autoindex.la
new file mode 100644
index 00000000..139298d2
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_autoindex.la
@@ -0,0 +1,35 @@
+# mod_autoindex.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_autoindex.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_autoindex.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_autoindex.o b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_autoindex.o
new file mode 100644
index 00000000..08725d05
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_autoindex.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_cgid.a b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_cgid.a
new file mode 100644
index 00000000..80394ff9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_cgid.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_cgid.la b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_cgid.la
new file mode 100644
index 00000000..7528f2d9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_cgid.la
@@ -0,0 +1,35 @@
+# mod_cgid.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_cgid.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_cgid.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_cgid.o b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_cgid.o
new file mode 100644
index 00000000..ffbaef81
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_cgid.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_status.a b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_status.a
new file mode 100644
index 00000000..00fe8005
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_status.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_status.la b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_status.la
new file mode 100644
index 00000000..fc5f7173
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_status.la
@@ -0,0 +1,35 @@
+# mod_status.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_status.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_status.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_status.o b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_status.o
new file mode 100644
index 00000000..107ccfef
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/.libs/mod_status.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/Makefile b/rubbos/app/httpd-2.0.64/modules/generators/Makefile
new file mode 100644
index 00000000..16be1a0d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/generators
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/generators
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/generators
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/Makefile.in b/rubbos/app/httpd-2.0.64/modules/generators/Makefile.in
new file mode 100644
index 00000000..167b343d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/Makefile.in
@@ -0,0 +1,3 @@
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/NWGNUinfo b/rubbos/app/httpd-2.0.64/modules/generators/NWGNUinfo
new file mode 100644
index 00000000..eb76803f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/NWGNUinfo
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = info
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Info Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Info Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/info.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_info.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ info_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/NWGNUmakefile b/rubbos/app/httpd-2.0.64/modules/generators/NWGNUmakefile
new file mode 100644
index 00000000..7f7d343b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/NWGNUmakefile
@@ -0,0 +1,247 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME =
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION =
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME =
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE =
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/info.nlm \
+ $(OBJDIR)/status.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ $(EOLIST)
+
+#
+# Any exported symbols go here
+#
+FILES_nlm_exports = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/NWGNUstatus b/rubbos/app/httpd-2.0.64/modules/generators/NWGNUstatus
new file mode 100644
index 00000000..01cbef5a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/NWGNUstatus
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = status
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Status Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Status Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified, they will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/status.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_status.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any exported symbols go here
+#
+FILES_nlm_exports = \
+ status_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/config5.m4 b/rubbos/app/httpd-2.0.64/modules/generators/config5.m4
new file mode 100644
index 00000000..f4afb7f1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/config5.m4
@@ -0,0 +1,66 @@
+dnl modules enabled in this directory by default
+
+dnl APACHE_MODULE(name, helptext[, objects[, structname[, default[, config]]]])
+
+APACHE_MODPATH_INIT(generators)
+
+APACHE_MODULE(status, process/thread monitoring, , , yes)
+APACHE_MODULE(autoindex, directory listing, , , yes)
+APACHE_MODULE(asis, as-is filetypes, , , yes)
+APACHE_MODULE(info, server information, , , most)
+APACHE_MODULE(suexec, set uid and gid for spawned processes, , , no, [
+ other_targets=suexec ] )
+
+APR_ADDTO(LT_LDFLAGS,-export-dynamic)
+
+if test "$apache_cv_mpm" = "worker" -o "$apache_cv_mpm" = "perchild"; then
+# if we are using a threaded MPM, we will get better performance with
+# mod_cgid, so make it the default.
+ APACHE_MODULE(cgid, CGI scripts, , , yes, [
+ case $host in
+ *-solaris2*)
+ case `uname -r` in
+ 5.10)
+ dnl Does the system have the appropriate patches?
+ case `uname -p` in
+ i386)
+ patch_id="120665"
+ ;;
+ sparc)
+ patch_id="120664"
+ ;;
+ *)
+ AC_MSG_WARN([Unknown platform])
+ patch_id="120664"
+ ;;
+ esac
+ AC_MSG_CHECKING([for Solaris patch $patch_id])
+ showrev -p | grep "$patch_id" >/dev/null 2>&1
+ if test $? -eq 1; then
+ dnl Solaris 11 (next release) as of snv_19 doesn't have this problem.
+ dnl It may be possible to use /kernel/drv/tl from later releases.
+ AC_MSG_ERROR([Please apply either patch # 120664 (Sparc) or # 120665 (x86).
+Without these patches, mod_cgid is non-functional on Solaris 10 due to an OS
+bug with AF_UNIX sockets.
+If you can not apply these patches, you can do one of the following:
+ - run configure with --disable-cgid
+ - switch to the prefork MPM
+For more info: <http://issues.apache.org/bugzilla/show_bug.cgi?id=34264>])
+ else
+ AC_MSG_RESULT(yes)
+ fi
+ ;;
+ esac
+ ;;
+ esac
+ ])
+ APACHE_MODULE(cgi, CGI scripts, , , no)
+else
+# if we are using a non-threaded MPM, it makes little sense to use
+# mod_cgid, and it just opens up holes we don't need. Make mod_cgi the
+# default
+ APACHE_MODULE(cgi, CGI scripts, , , yes)
+ APACHE_MODULE(cgid, CGI scripts, , , no)
+fi
+
+APACHE_MODPATH_FINISH
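+
+dnl Usage sketch (an assumption about the stock httpd configure flow, not
+dnl taken from this file): each APACHE_MODULE(name, ...) entry above maps to
+dnl an --enable-name / --disable-name switch of ./configure (the mod_cgid
+dnl error text above already mentions one of them, --disable-cgid), so a
+dnl build might be driven with, for example:
+dnl
+dnl     ./configure --enable-info --disable-autoindex
+dnl
+dnl The fifth macro argument ("yes", "most", "no") only chooses the default
+dnl that applies when no explicit switch is given.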
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.c b/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.c
new file mode 100644
index 00000000..a5c65b47
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.c
@@ -0,0 +1,145 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_strings.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_protocol.h"
+#include "http_log.h"
+#include "util_script.h"
+#include "http_main.h"
+#include "http_request.h"
+
+#include "mod_core.h"
+
+#define ASIS_MAGIC_TYPE "httpd/send-as-is"
+
+static int asis_handler(request_rec *r)
+{
+ conn_rec *c = r->connection;
+ apr_file_t *f = NULL;
+ apr_status_t rv;
+ const char *location;
+
+ if(strcmp(r->handler,ASIS_MAGIC_TYPE) && strcmp(r->handler,"send-as-is"))
+ return DECLINED;
+
+ r->allowed |= (AP_METHOD_BIT << M_GET);
+ if (r->method_number != M_GET)
+ return DECLINED;
+ if (r->finfo.filetype == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "File does not exist: %s", r->filename);
+ return HTTP_NOT_FOUND;
+ }
+
+ if ((rv = apr_file_open(&f, r->filename, APR_READ,
+ APR_OS_DEFAULT, r->pool)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "file permissions deny server access: %s", r->filename);
+ return HTTP_FORBIDDEN;
+ }
+
+ ap_scan_script_header_err(r, f, NULL);
+ location = apr_table_get(r->headers_out, "Location");
+
+ if (location && location[0] == '/' &&
+ ((r->status == HTTP_OK) || ap_is_HTTP_REDIRECT(r->status))) {
+
+ apr_file_close(f);
+
+ /* Internal redirect -- fake-up a pseudo-request */
+ r->status = HTTP_OK;
+
+ /* This redirect needs to be a GET no matter what the original
+ * method was.
+ */
+ r->method = apr_pstrdup(r->pool, "GET");
+ r->method_number = M_GET;
+
+ ap_internal_redirect_handler(location, r);
+ return OK;
+ }
+
+ if (!r->header_only) {
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ apr_off_t pos = 0;
+
+ rv = apr_file_seek(f, APR_CUR, &pos);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "mod_asis: failed to find end-of-headers position "
+ "for %s", r->filename);
+ apr_file_close(f);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+#if APR_HAS_LARGE_FILES
+ if (r->finfo.size - pos > AP_MAX_SENDFILE) {
+ /* APR_HAS_LARGE_FILES issue; must split into multiple buckets,
+ * no greater than MAX(apr_size_t), and more granular than that
+ * in case the brigade code/filters attempt to read it directly.
+ */
+ apr_off_t fsize = r->finfo.size - pos;
+ b = apr_bucket_file_create(f, pos, AP_MAX_SENDFILE,
+ r->pool, c->bucket_alloc);
+ while (fsize > AP_MAX_SENDFILE) {
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ apr_bucket_copy(b, &b);
+ b->start += AP_MAX_SENDFILE;
+ fsize -= AP_MAX_SENDFILE;
+ }
+ b->length = (apr_size_t)fsize; /* Resize just the last bucket */
+ }
+ else
+#endif
+ b = apr_bucket_file_create(f, pos, (apr_size_t) (r->finfo.size - pos),
+ r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ rv = ap_pass_brigade(r->output_filters, bb);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "mod_asis: ap_pass_brigade failed for file %s", r->filename);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+ else {
+ apr_file_close(f);
+ }
+
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_handler(asis_handler,NULL,NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA asis_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ NULL, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
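+
+/*
+ * Usage sketch (assumed typical deployment, not taken from this file): the
+ * handler above is normally enabled in httpd.conf with
+ *
+ *     AddHandler send-as-is asis
+ *
+ * after which a file such as moved.asis supplies its own CGI-style header
+ * block, the part that ap_scan_script_header_err() parses, followed by a
+ * blank line and a body that is sent untouched:
+ *
+ *     Status: 301 Moved Permanently
+ *     Location: http://example.com/new/location.html
+ *     Content-type: text/html
+ *
+ *     <html><body>The resource has moved.</body></html>
+ *
+ * A Location value starting with '/' (for an OK or redirect status) instead
+ * takes the internal redirect branch in asis_handler() above.
+ */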
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.dsp b/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.dsp
new file mode 100644
index 00000000..77fdafd3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_asis" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_asis - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_asis.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_asis.mak" CFG="mod_asis - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_asis - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_asis - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_asis - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_asis_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_asis.so" /base:@..\..\os\win32\BaseAddr.ref,mod_asis.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_asis.so" /base:@..\..\os\win32\BaseAddr.ref,mod_asis.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_asis - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_asis_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_asis.so" /base:@..\..\os\win32\BaseAddr.ref,mod_asis.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_asis.so" /base:@..\..\os\win32\BaseAddr.ref,mod_asis.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_asis - Win32 Release"
+# Name "mod_asis - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_asis.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_asis.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_asis - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_asis.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_asis.so "asis_module for Apache" ../../include/ap_release.h > .\mod_asis.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_asis - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_asis.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_asis.so "asis_module for Apache" ../../include/ap_release.h > .\mod_asis.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.exp b/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.exp
new file mode 100644
index 00000000..4f347d92
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.exp
@@ -0,0 +1 @@
+asis_module
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.la b/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.la
new file mode 100644
index 00000000..f3cb430d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.la
@@ -0,0 +1,35 @@
+# mod_asis.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_asis.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_asis.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.lo b/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.lo
new file mode 100644
index 00000000..7eeba69a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.lo
@@ -0,0 +1,12 @@
+# mod_asis.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_asis.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_asis.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.o b/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.o
new file mode 100644
index 00000000..acca9866
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_asis.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.c b/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.c
new file mode 100644
index 00000000..f98f12f3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.c
@@ -0,0 +1,2252 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_autoindex.c: Handles the on-the-fly html index generation
+ *
+ * Rob McCool
+ * 3/23/93
+ *
+ * Adapted to Apache by rst.
+ *
+ * Version sort added by Martin Pool <mbp@humbug.org.au>.
+ */
+
+#include "apr_strings.h"
+#include "apr_fnmatch.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_request.h"
+#include "http_protocol.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "util_script.h"
+
+#include "mod_core.h"
+
+module AP_MODULE_DECLARE_DATA autoindex_module;
+
+/****************************************************************
+ *
+ * Handling configuration directives...
+ */
+
+#define NO_OPTIONS (1 << 0) /* Indexing options */
+#define ICONS_ARE_LINKS (1 << 1)
+#define SCAN_HTML_TITLES (1 << 2)
+#define SUPPRESS_ICON (1 << 3)
+#define SUPPRESS_LAST_MOD (1 << 4)
+#define SUPPRESS_SIZE (1 << 5)
+#define SUPPRESS_DESC (1 << 6)
+#define SUPPRESS_PREAMBLE (1 << 7)
+#define SUPPRESS_COLSORT (1 << 8)
+#define SUPPRESS_RULES (1 << 9)
+#define FOLDERS_FIRST (1 << 10)
+#define VERSION_SORT (1 << 11)
+#define TRACK_MODIFIED (1 << 12)
+#define FANCY_INDEXING (1 << 13)
+#define TABLE_INDEXING (1 << 14)
+#define IGNORE_CLIENT (1 << 15)
+#define IGNORE_CASE (1 << 16)
+#define EMIT_XHTML (1 << 17)
+
+#define K_NOADJUST 0
+#define K_ADJUST 1
+#define K_UNSET 2
+
+/*
+ * Define keys for sorting.
+ */
+#define K_NAME 'N' /* Sort by file name (default) */
+#define K_LAST_MOD 'M' /* Last modification date */
+#define K_SIZE 'S' /* Size (absolute, not as displayed) */
+#define K_DESC 'D' /* Description */
+#define K_VALID "NMSD" /* String containing _all_ valid K_ opts */
+
+#define D_ASCENDING 'A'
+#define D_DESCENDING 'D'
+#define D_VALID "AD" /* String containing _all_ valid D_ opts */
+
+/*
+ * These are the dimensions of the default icons supplied with Apache.
+ */
+#define DEFAULT_ICON_WIDTH 20
+#define DEFAULT_ICON_HEIGHT 22
+
+/*
+ * Other default dimensions.
+ */
+#define DEFAULT_NAME_WIDTH 23
+#define DEFAULT_DESC_WIDTH 23
+
+struct item {
+ char *type;
+ char *apply_to;
+ char *apply_path;
+ char *data;
+};
+
+typedef struct ai_desc_t {
+ char *pattern;
+ char *description;
+ int full_path;
+ int wildcards;
+} ai_desc_t;
+
+typedef struct autoindex_config_struct {
+
+ char *default_icon;
+ apr_int32_t opts;
+ apr_int32_t incremented_opts;
+ apr_int32_t decremented_opts;
+ int name_width;
+ int name_adjust;
+ int desc_width;
+ int desc_adjust;
+ int icon_width;
+ int icon_height;
+ char default_keyid;
+ char default_direction;
+
+ apr_array_header_t *icon_list;
+ apr_array_header_t *alt_list;
+ apr_array_header_t *desc_list;
+ apr_array_header_t *ign_list;
+ apr_array_header_t *hdr_list;
+ apr_array_header_t *rdme_list;
+
+ char *ctype;
+ char *charset;
+} autoindex_config_rec;
+
+static char c_by_encoding, c_by_type, c_by_path;
+
+#define BY_ENCODING &c_by_encoding
+#define BY_TYPE &c_by_type
+#define BY_PATH &c_by_path
+
+/*
+ * This routine puts the standard HTML header at the top of the index page.
+ * We include the DOCTYPE because we may be using features therefrom (i.e.,
+ * HEIGHT and WIDTH attributes on the icons if we're FancyIndexing).
+ */
+static void emit_preamble(request_rec *r, int xhtml, const char *title)
+{
+ ap_rvputs(r, xhtml ? DOCTYPE_XHTML_1_0T : DOCTYPE_HTML_3_2,
+ "<html>\n <head>\n <title>Index of ", title,
+ "</title>\n </head>\n <body>\n", NULL);
+}
+
+static void push_item(apr_array_header_t *arr, char *type, const char *to,
+ const char *path, const char *data)
+{
+ struct item *p = (struct item *) apr_array_push(arr);
+
+ if (!to) {
+ to = "";
+ }
+ if (!path) {
+ path = "";
+ }
+
+ p->type = type;
+ p->data = data ? apr_pstrdup(arr->pool, data) : NULL;
+ p->apply_path = apr_pstrcat(arr->pool, path, "*", NULL);
+
+ if ((type == BY_PATH) && (!ap_is_matchexp(to))) {
+ p->apply_to = apr_pstrcat(arr->pool, "*", to, NULL);
+ }
+ else if (to) {
+ p->apply_to = apr_pstrdup(arr->pool, to);
+ }
+ else {
+ p->apply_to = NULL;
+ }
+}
+
+static const char *add_alt(cmd_parms *cmd, void *d, const char *alt,
+ const char *to)
+{
+ if (cmd->info == BY_PATH) {
+ if (!strcmp(to, "**DIRECTORY**")) {
+ to = "^^DIRECTORY^^";
+ }
+ }
+ if (cmd->info == BY_ENCODING) {
+ char *tmp = apr_pstrdup(cmd->pool, to);
+ ap_str_tolower(tmp);
+ to = tmp;
+ }
+
+ push_item(((autoindex_config_rec *) d)->alt_list, cmd->info, to,
+ cmd->path, alt);
+ return NULL;
+}
+
+static const char *add_icon(cmd_parms *cmd, void *d, const char *icon,
+ const char *to)
+{
+ char *iconbak = apr_pstrdup(cmd->pool, icon);
+
+ if (icon[0] == '(') {
+ char *alt;
+ char *cl = strchr(iconbak, ')');
+
+ if (cl == NULL) {
+ return "missing closing paren";
+ }
+ alt = ap_getword_nc(cmd->pool, &iconbak, ',');
+ *cl = '\0'; /* Lose closing paren */
+ add_alt(cmd, d, &alt[1], to);
+ }
+ if (cmd->info == BY_PATH) {
+ if (!strcmp(to, "**DIRECTORY**")) {
+ to = "^^DIRECTORY^^";
+ }
+ }
+ if (cmd->info == BY_ENCODING) {
+ char *tmp = apr_pstrdup(cmd->pool, to);
+ ap_str_tolower(tmp);
+ to = tmp;
+ }
+
+ push_item(((autoindex_config_rec *) d)->icon_list, cmd->info, to,
+ cmd->path, iconbak);
+ return NULL;
+}
+
+/*
+ * Add description text for a filename pattern. If the pattern has
+ * wildcards already (or we need to add them), add leading and
+ * trailing wildcards to it to ensure substring processing. If the
+ * pattern contains a '/' anywhere, force wildcard matching mode,
+ * add a slash to the prefix so that "bar/bletch" won't be matched
+ * by "foobar/bletch", and make a note that there's a delimiter;
+ * the matching routine simplifies to just the actual filename
+ * whenever it can. This allows definitions in parent directories
+ * to be made for files in subordinate ones using relative paths.
+ */
+
+/*
+ * Absent a strcasestr() function, we have to force wildcards on
+ * systems for which "AAA" and "aaa" mean the same file.
+ */
+#ifdef CASE_BLIND_FILESYSTEM
+#define WILDCARDS_REQUIRED 1
+#else
+#define WILDCARDS_REQUIRED 0
+#endif
+
+static const char *add_desc(cmd_parms *cmd, void *d, const char *desc,
+ const char *to)
+{
+ autoindex_config_rec *dcfg = (autoindex_config_rec *) d;
+ ai_desc_t *desc_entry;
+ char *prefix = "";
+
+ desc_entry = (ai_desc_t *) apr_array_push(dcfg->desc_list);
+ desc_entry->full_path = (ap_strchr_c(to, '/') == NULL) ? 0 : 1;
+ desc_entry->wildcards = (WILDCARDS_REQUIRED
+ || desc_entry->full_path
+ || apr_fnmatch_test(to));
+ if (desc_entry->wildcards) {
+ prefix = desc_entry->full_path ? "*/" : "*";
+ desc_entry->pattern = apr_pstrcat(dcfg->desc_list->pool,
+ prefix, to, "*", NULL);
+ }
+ else {
+ desc_entry->pattern = apr_pstrdup(dcfg->desc_list->pool, to);
+ }
+ desc_entry->description = apr_pstrdup(dcfg->desc_list->pool, desc);
+ return NULL;
+}
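+
+/*
+ * Illustrative example (hypothetical configuration, not from this source):
+ *
+ *     AddDescription "GZIP compressed tar archive" .tgz
+ *
+ * On a case-sensitive filesystem a pattern like ".tgz" contains no '/' and
+ * no wildcard characters, so it is stored verbatim and find_desc() below
+ * matches it as a plain substring of the file name; a pattern containing
+ * '/' or wildcards is wrapped in leading and trailing '*' here and matched
+ * with apr_fnmatch() instead.
+ */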
+
+static const char *add_ignore(cmd_parms *cmd, void *d, const char *ext)
+{
+ push_item(((autoindex_config_rec *) d)->ign_list, 0, ext, cmd->path, NULL);
+ return NULL;
+}
+
+static const char *add_header(cmd_parms *cmd, void *d, const char *name)
+{
+ push_item(((autoindex_config_rec *) d)->hdr_list, 0, NULL, cmd->path,
+ name);
+ return NULL;
+}
+
+static const char *add_readme(cmd_parms *cmd, void *d, const char *name)
+{
+ push_item(((autoindex_config_rec *) d)->rdme_list, 0, NULL, cmd->path,
+ name);
+ return NULL;
+}
+
+static const char *add_opts(cmd_parms *cmd, void *d, const char *optstr)
+{
+ char *w;
+ apr_int32_t opts;
+ apr_int32_t opts_add;
+ apr_int32_t opts_remove;
+ char action;
+ autoindex_config_rec *d_cfg = (autoindex_config_rec *) d;
+
+ opts = d_cfg->opts;
+ opts_add = d_cfg->incremented_opts;
+ opts_remove = d_cfg->decremented_opts;
+ while (optstr[0]) {
+ int option = 0;
+
+ w = ap_getword_conf(cmd->pool, &optstr);
+ if ((*w == '+') || (*w == '-')) {
+ action = *(w++);
+ }
+ else {
+ action = '\0';
+ }
+ if (!strcasecmp(w, "FancyIndexing")) {
+ option = FANCY_INDEXING;
+ }
+ else if (!strcasecmp(w, "FoldersFirst")) {
+ option = FOLDERS_FIRST;
+ }
+ else if (!strcasecmp(w, "HTMLTable")) {
+ option = TABLE_INDEXING;
+ }
+ else if (!strcasecmp(w, "IconsAreLinks")) {
+ option = ICONS_ARE_LINKS;
+ }
+ else if (!strcasecmp(w, "IgnoreCase")) {
+ option = IGNORE_CASE;
+ }
+ else if (!strcasecmp(w, "IgnoreClient")) {
+ option = IGNORE_CLIENT;
+ }
+ else if (!strcasecmp(w, "ScanHTMLTitles")) {
+ option = SCAN_HTML_TITLES;
+ }
+ else if (!strcasecmp(w, "SuppressColumnSorting")) {
+ option = SUPPRESS_COLSORT;
+ }
+ else if (!strcasecmp(w, "SuppressDescription")) {
+ option = SUPPRESS_DESC;
+ }
+ else if (!strcasecmp(w, "SuppressHTMLPreamble")) {
+ option = SUPPRESS_PREAMBLE;
+ }
+ else if (!strcasecmp(w, "SuppressIcon")) {
+ option = SUPPRESS_ICON;
+ }
+ else if (!strcasecmp(w, "SuppressLastModified")) {
+ option = SUPPRESS_LAST_MOD;
+ }
+ else if (!strcasecmp(w, "SuppressSize")) {
+ option = SUPPRESS_SIZE;
+ }
+ else if (!strcasecmp(w, "SuppressRules")) {
+ option = SUPPRESS_RULES;
+ }
+ else if (!strcasecmp(w, "TrackModified")) {
+ option = TRACK_MODIFIED;
+ }
+ else if (!strcasecmp(w, "VersionSort")) {
+ option = VERSION_SORT;
+ }
+ else if (!strcasecmp(w, "XHTML")) {
+ option = EMIT_XHTML;
+ }
+ else if (!strcasecmp(w, "None")) {
+ if (action != '\0') {
+ return "Cannot combine '+' or '-' with 'None' keyword";
+ }
+ opts = NO_OPTIONS;
+ opts_add = 0;
+ opts_remove = 0;
+ }
+ else if (!strcasecmp(w, "IconWidth")) {
+ if (action != '-') {
+ d_cfg->icon_width = DEFAULT_ICON_WIDTH;
+ }
+ else {
+ d_cfg->icon_width = 0;
+ }
+ }
+ else if (!strncasecmp(w, "IconWidth=", 10)) {
+ if (action == '-') {
+ return "Cannot combine '-' with IconWidth=n";
+ }
+ d_cfg->icon_width = atoi(&w[10]);
+ }
+ else if (!strcasecmp(w, "IconHeight")) {
+ if (action != '-') {
+ d_cfg->icon_height = DEFAULT_ICON_HEIGHT;
+ }
+ else {
+ d_cfg->icon_height = 0;
+ }
+ }
+ else if (!strncasecmp(w, "IconHeight=", 11)) {
+ if (action == '-') {
+ return "Cannot combine '-' with IconHeight=n";
+ }
+ d_cfg->icon_height = atoi(&w[11]);
+ }
+ else if (!strcasecmp(w, "NameWidth")) {
+ if (action != '-') {
+ return "NameWidth with no value may only appear as "
+ "'-NameWidth'";
+ }
+ d_cfg->name_width = DEFAULT_NAME_WIDTH;
+ d_cfg->name_adjust = K_NOADJUST;
+ }
+ else if (!strncasecmp(w, "NameWidth=", 10)) {
+ if (action == '-') {
+ return "Cannot combine '-' with NameWidth=n";
+ }
+ if (w[10] == '*') {
+ d_cfg->name_adjust = K_ADJUST;
+ }
+ else {
+ int width = atoi(&w[10]);
+
+ if (width && (width < 5)) {
+ return "NameWidth value must be greater than 5";
+ }
+ d_cfg->name_width = width;
+ d_cfg->name_adjust = K_NOADJUST;
+ }
+ }
+ else if (!strcasecmp(w, "DescriptionWidth")) {
+ if (action != '-') {
+ return "DescriptionWidth with no value may only appear as "
+ "'-DescriptionWidth'";
+ }
+ d_cfg->desc_width = DEFAULT_DESC_WIDTH;
+ d_cfg->desc_adjust = K_NOADJUST;
+ }
+ else if (!strncasecmp(w, "DescriptionWidth=", 17)) {
+ if (action == '-') {
+ return "Cannot combine '-' with DescriptionWidth=n";
+ }
+ if (w[17] == '*') {
+ d_cfg->desc_adjust = K_ADJUST;
+ }
+ else {
+ int width = atoi(&w[17]);
+
+ if (width && (width < 12)) {
+ return "DescriptionWidth value must be greater than 12";
+ }
+ d_cfg->desc_width = width;
+ d_cfg->desc_adjust = K_NOADJUST;
+ }
+ }
+ else if (!strncasecmp(w, "Type=", 5)) {
+ d_cfg->ctype = apr_pstrdup(cmd->pool, &w[5]);
+ }
+ else if (!strncasecmp(w, "Charset=", 8)) {
+ d_cfg->charset = apr_pstrdup(cmd->pool, &w[8]);
+ }
+ else {
+ return "Invalid directory indexing option";
+ }
+ if (action == '\0') {
+ opts |= option;
+ opts_add = 0;
+ opts_remove = 0;
+ }
+ else if (action == '+') {
+ opts_add |= option;
+ opts_remove &= ~option;
+ }
+ else {
+ opts_remove |= option;
+ opts_add &= ~option;
+ }
+ }
+ if ((opts & NO_OPTIONS) && (opts & ~NO_OPTIONS)) {
+ return "Cannot combine other IndexOptions keywords with 'None'";
+ }
+ d_cfg->incremented_opts = opts_add;
+ d_cfg->decremented_opts = opts_remove;
+ d_cfg->opts = opts;
+ return NULL;
+}
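+
+/*
+ * A hypothetical directive this parser accepts, shown only to illustrate
+ * the keyword grammar handled above:
+ *
+ *     IndexOptions +FancyIndexing +VersionSort NameWidth=* IconHeight=22
+ *
+ * Bare keywords reset the incremental (+/-) lists, prefixed keywords
+ * accumulate, and "None" may not be combined with anything else.
+ */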
+
+static const char *set_default_order(cmd_parms *cmd, void *m,
+ const char *direction, const char *key)
+{
+ autoindex_config_rec *d_cfg = (autoindex_config_rec *) m;
+
+ if (!strcasecmp(direction, "Ascending")) {
+ d_cfg->default_direction = D_ASCENDING;
+ }
+ else if (!strcasecmp(direction, "Descending")) {
+ d_cfg->default_direction = D_DESCENDING;
+ }
+ else {
+ return "First keyword must be 'Ascending' or 'Descending'";
+ }
+
+ if (!strcasecmp(key, "Name")) {
+ d_cfg->default_keyid = K_NAME;
+ }
+ else if (!strcasecmp(key, "Date")) {
+ d_cfg->default_keyid = K_LAST_MOD;
+ }
+ else if (!strcasecmp(key, "Size")) {
+ d_cfg->default_keyid = K_SIZE;
+ }
+ else if (!strcasecmp(key, "Description")) {
+ d_cfg->default_keyid = K_DESC;
+ }
+ else {
+ return "Second keyword must be 'Name', 'Date', 'Size', or "
+ "'Description'";
+ }
+
+ return NULL;
+}
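+
+/*
+ * Example (hypothetical): "IndexOrderDefault Descending Date" sets
+ * default_direction to D_DESCENDING and default_keyid to K_LAST_MOD.
+ */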
+
+#define DIR_CMD_PERMS OR_INDEXES
+
+static const command_rec autoindex_cmds[] =
+{
+ AP_INIT_ITERATE2("AddIcon", add_icon, BY_PATH, DIR_CMD_PERMS,
+ "an icon URL followed by one or more filenames"),
+ AP_INIT_ITERATE2("AddIconByType", add_icon, BY_TYPE, DIR_CMD_PERMS,
+ "an icon URL followed by one or more MIME types"),
+ AP_INIT_ITERATE2("AddIconByEncoding", add_icon, BY_ENCODING, DIR_CMD_PERMS,
+ "an icon URL followed by one or more content encodings"),
+ AP_INIT_ITERATE2("AddAlt", add_alt, BY_PATH, DIR_CMD_PERMS,
+ "alternate descriptive text followed by one or more "
+ "filenames"),
+ AP_INIT_ITERATE2("AddAltByType", add_alt, BY_TYPE, DIR_CMD_PERMS,
+ "alternate descriptive text followed by one or more MIME "
+ "types"),
+ AP_INIT_ITERATE2("AddAltByEncoding", add_alt, BY_ENCODING, DIR_CMD_PERMS,
+ "alternate descriptive text followed by one or more "
+ "content encodings"),
+ AP_INIT_RAW_ARGS("IndexOptions", add_opts, NULL, DIR_CMD_PERMS,
+ "one or more index options [+|-][]"),
+ AP_INIT_TAKE2("IndexOrderDefault", set_default_order, NULL, DIR_CMD_PERMS,
+ "{Ascending,Descending} {Name,Size,Description,Date}"),
+ AP_INIT_ITERATE("IndexIgnore", add_ignore, NULL, DIR_CMD_PERMS,
+ "one or more file extensions"),
+ AP_INIT_ITERATE2("AddDescription", add_desc, BY_PATH, DIR_CMD_PERMS,
+ "Descriptive text followed by one or more filenames"),
+ AP_INIT_TAKE1("HeaderName", add_header, NULL, DIR_CMD_PERMS,
+ "a filename"),
+ AP_INIT_TAKE1("ReadmeName", add_readme, NULL, DIR_CMD_PERMS,
+ "a filename"),
+ AP_INIT_RAW_ARGS("FancyIndexing", ap_set_deprecated, NULL, OR_ALL,
+ "The FancyIndexing directive is no longer supported. "
+ "Use IndexOptions FancyIndexing."),
+ AP_INIT_TAKE1("DefaultIcon", ap_set_string_slot,
+ (void *)APR_OFFSETOF(autoindex_config_rec, default_icon),
+ DIR_CMD_PERMS, "an icon URL"),
+ {NULL}
+};
+
+static void *create_autoindex_config(apr_pool_t *p, char *dummy)
+{
+ autoindex_config_rec *new =
+ (autoindex_config_rec *) apr_pcalloc(p, sizeof(autoindex_config_rec));
+
+ new->icon_width = 0;
+ new->icon_height = 0;
+ new->name_width = DEFAULT_NAME_WIDTH;
+ new->name_adjust = K_UNSET;
+ new->desc_width = DEFAULT_DESC_WIDTH;
+ new->desc_adjust = K_UNSET;
+ new->icon_list = apr_array_make(p, 4, sizeof(struct item));
+ new->alt_list = apr_array_make(p, 4, sizeof(struct item));
+ new->desc_list = apr_array_make(p, 4, sizeof(ai_desc_t));
+ new->ign_list = apr_array_make(p, 4, sizeof(struct item));
+ new->hdr_list = apr_array_make(p, 4, sizeof(struct item));
+ new->rdme_list = apr_array_make(p, 4, sizeof(struct item));
+ new->opts = 0;
+ new->incremented_opts = 0;
+ new->decremented_opts = 0;
+ new->default_keyid = '\0';
+ new->default_direction = '\0';
+
+ return (void *) new;
+}
+
+static void *merge_autoindex_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ autoindex_config_rec *new;
+ autoindex_config_rec *base = (autoindex_config_rec *) basev;
+ autoindex_config_rec *add = (autoindex_config_rec *) addv;
+
+ new = (autoindex_config_rec *) apr_pcalloc(p, sizeof(autoindex_config_rec));
+ new->default_icon = add->default_icon ? add->default_icon
+ : base->default_icon;
+ new->icon_height = add->icon_height ? add->icon_height : base->icon_height;
+ new->icon_width = add->icon_width ? add->icon_width : base->icon_width;
+
+ new->ctype = add->ctype ? add->ctype : base->ctype;
+ new->charset = add->charset ? add->charset : base->charset;
+
+ new->alt_list = apr_array_append(p, add->alt_list, base->alt_list);
+ new->ign_list = apr_array_append(p, add->ign_list, base->ign_list);
+ new->hdr_list = apr_array_append(p, add->hdr_list, base->hdr_list);
+ new->desc_list = apr_array_append(p, add->desc_list, base->desc_list);
+ new->icon_list = apr_array_append(p, add->icon_list, base->icon_list);
+ new->rdme_list = apr_array_append(p, add->rdme_list, base->rdme_list);
+ if (add->opts & NO_OPTIONS) {
+ /*
+ * If the current directory says 'no options' then we also
+ * clear any incremental mods from being inheritable further down.
+ */
+ new->opts = NO_OPTIONS;
+ new->incremented_opts = 0;
+ new->decremented_opts = 0;
+ }
+ else {
+ /*
+ * If there were any nonincremental options selected for
+ * this directory, they dominate and we don't inherit *anything.*
+ * Contrariwise, we *do* inherit if the only settings here are
+ * incremental ones.
+ */
+ if (add->opts == 0) {
+ new->incremented_opts = (base->incremented_opts
+ | add->incremented_opts)
+ & ~add->decremented_opts;
+ new->decremented_opts = (base->decremented_opts
+ | add->decremented_opts);
+ /*
+ * We may have incremental settings, so make sure we don't
+ * inadvertently inherit an IndexOptions None from above.
+ */
+ new->opts = (base->opts & ~NO_OPTIONS);
+ }
+ else {
+ /*
+ * There are local nonincremental settings, which clear
+ * all inheritance from above. They *are* the new base settings.
+ */
+ new->opts = add->opts;
+ }
+ /*
+ * We're guaranteed that there'll be no overlap between
+ * the add-options and the remove-options.
+ */
+ new->opts |= new->incremented_opts;
+ new->opts &= ~new->decremented_opts;
+ }
+ /*
+ * Inherit the NameWidth settings if there aren't any specific to
+ * the new location; otherwise we'll end up using the defaults set in the
+ * config-rec creation routine.
+ */
+ if (add->name_adjust == K_UNSET) {
+ new->name_width = base->name_width;
+ new->name_adjust = base->name_adjust;
+ }
+ else {
+ new->name_width = add->name_width;
+ new->name_adjust = add->name_adjust;
+ }
+
+ /*
+ * Likewise for DescriptionWidth.
+ */
+ if (add->desc_adjust == K_UNSET) {
+ new->desc_width = base->desc_width;
+ new->desc_adjust = base->desc_adjust;
+ }
+ else {
+ new->desc_width = add->desc_width;
+ new->desc_adjust = add->desc_adjust;
+ }
+
+ new->default_keyid = add->default_keyid ? add->default_keyid
+ : base->default_keyid;
+ new->default_direction = add->default_direction ? add->default_direction
+ : base->default_direction;
+ return new;
+}
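+
+/*
+ * Worked example of the merge rules above (hypothetical configuration): a
+ * parent directory with "IndexOptions FancyIndexing SuppressSize" and a
+ * subdirectory with only "IndexOptions +IconsAreLinks" (purely incremental,
+ * so add->opts is 0) ends up with all three options set; had the
+ * subdirectory said "IndexOptions IconsAreLinks" instead, that
+ * nonincremental setting would replace the inherited ones entirely.
+ */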
+
+/****************************************************************
+ *
+ * Looking things up in config entries...
+ */
+
+/* Structure used to hold entries when we're actually building an index */
+
+struct ent {
+ char *name;
+ char *icon;
+ char *alt;
+ char *desc;
+ apr_off_t size;
+ apr_time_t lm;
+ struct ent *next;
+ int ascending, ignore_case, version_sort;
+ char key;
+ int isdir;
+};
+
+static char *find_item(request_rec *r, apr_array_header_t *list, int path_only)
+{
+ const char *content_type = ap_field_noparam(r->pool, r->content_type);
+ const char *content_encoding = r->content_encoding;
+ char *path = r->filename;
+
+ struct item *items = (struct item *) list->elts;
+ int i;
+
+ for (i = 0; i < list->nelts; ++i) {
+ struct item *p = &items[i];
+
+ /* Special cased for ^^DIRECTORY^^ and ^^BLANKICON^^ */
+ if ((path[0] == '^') || (!ap_strcmp_match(path, p->apply_path))) {
+ if (!*(p->apply_to)) {
+ return p->data;
+ }
+ else if (p->type == BY_PATH || path[0] == '^') {
+ if (!ap_strcmp_match(path, p->apply_to)) {
+ return p->data;
+ }
+ }
+ else if (!path_only) {
+ if (!content_encoding) {
+ if (p->type == BY_TYPE) {
+ if (content_type
+ && !ap_strcasecmp_match(content_type,
+ p->apply_to)) {
+ return p->data;
+ }
+ }
+ }
+ else {
+ if (p->type == BY_ENCODING) {
+ if (!ap_strcasecmp_match(content_encoding,
+ p->apply_to)) {
+ return p->data;
+ }
+ }
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+#define find_icon(d,p,t) find_item(p,d->icon_list,t)
+#define find_alt(d,p,t) find_item(p,d->alt_list,t)
+#define find_header(d,p) find_item(p,d->hdr_list,0)
+#define find_readme(d,p) find_item(p,d->rdme_list,0)
+
+static char *find_default_item(char *bogus_name, apr_array_header_t *list)
+{
+ request_rec r;
+ /* Bleah. I tried to clean up find_item, and it led to this bit
+ * of ugliness. Note that the fields initialized are precisely
+ * those that find_item looks at...
+ */
+ r.filename = bogus_name;
+ r.content_type = r.content_encoding = NULL;
+ return find_item(&r, list, 1);
+}
+
+#define find_default_icon(d,n) find_default_item(n, d->icon_list)
+#define find_default_alt(d,n) find_default_item(n, d->alt_list)
+
+/*
+ * Look through the list of pattern/description pairs and return the first one
+ * if any) that matches the filename in the request. If multiple patterns
+ * match, only the first one is used; since the order in the array is the
+ * same as the order in which directives were processed, earlier matching
+ * directives will dominate.
+ */
+
+#ifdef CASE_BLIND_FILESYSTEM
+#define MATCH_FLAGS FNM_CASE_BLIND
+#else
+#define MATCH_FLAGS 0
+#endif
+
+static char *find_desc(autoindex_config_rec *dcfg, const char *filename_full)
+{
+ int i;
+ ai_desc_t *list = (ai_desc_t *) dcfg->desc_list->elts;
+ const char *filename_only;
+ const char *filename;
+
+ /*
+ * If the filename includes a path, extract just the name itself
+ * for the simple matches.
+ */
+ if ((filename_only = ap_strrchr_c(filename_full, '/')) == NULL) {
+ filename_only = filename_full;
+ }
+ else {
+ filename_only++;
+ }
+ for (i = 0; i < dcfg->desc_list->nelts; ++i) {
+ ai_desc_t *tuple = &list[i];
+ int found;
+
+ /*
+ * Only use the full-path filename if the pattern contains '/'s.
+ */
+ filename = (tuple->full_path) ? filename_full : filename_only;
+ /*
+ * Make the comparison using the cheapest method; only do
+ * wildcard checking if we must.
+ */
+ if (tuple->wildcards) {
+ found = (apr_fnmatch(tuple->pattern, filename, MATCH_FLAGS) == 0);
+ }
+ else {
+ found = (ap_strstr_c(filename, tuple->pattern) != NULL);
+ }
+ if (found) {
+ return tuple->description;
+ }
+ }
+ return NULL;
+}
+
+static int ignore_entry(autoindex_config_rec *d, char *path)
+{
+ apr_array_header_t *list = d->ign_list;
+ struct item *items = (struct item *) list->elts;
+ char *tt;
+ int i;
+
+ if ((tt = strrchr(path, '/')) == NULL) {
+ tt = path;
+ }
+ else {
+ tt++;
+ }
+
+ for (i = 0; i < list->nelts; ++i) {
+ struct item *p = &items[i];
+ char *ap;
+
+ if ((ap = strrchr(p->apply_to, '/')) == NULL) {
+ ap = p->apply_to;
+ }
+ else {
+ ap++;
+ }
+
+#ifndef CASE_BLIND_FILESYSTEM
+ if (!ap_strcmp_match(path, p->apply_path)
+ && !ap_strcmp_match(tt, ap)) {
+ return 1;
+ }
+#else /* !CASE_BLIND_FILESYSTEM */
+ /*
+ * On some platforms, the match must be case-blind. This is really
+ * a factor of the filesystem involved, but we can't detect that
+ * reliably - so we have to granularise at the OS level.
+ */
+ if (!ap_strcasecmp_match(path, p->apply_path)
+ && !ap_strcasecmp_match(tt, ap)) {
+ return 1;
+ }
+#endif /* !CASE_BLIND_FILESYSTEM */
+ }
+ return 0;
+}
+
+/*****************************************************************
+ *
+ * Actually generating output
+ */
+
+/*
+ * Elements of the emitted document:
+ * Preamble
+ * Emitted unless SUPPRESS_PREAMBLE is set AND ap_run_sub_req
+ * succeeds for the (content_type == text/html) header file.
+ * Header file
+ * Emitted if found (and able).
+ * H1 tag line
+ * Emitted if a header file is NOT emitted.
+ * Directory stuff
+ * Always emitted.
+ * HR
+ * Emitted if FANCY_INDEXING is set.
+ * Readme file
+ * Emitted if found (and able).
+ * ServerSig
+ * Emitted if ServerSignature is not Off AND a readme file
+ * is NOT emitted.
+ * Postamble
+ * Emitted unless SUPPRESS_PREAMBLE is set AND ap_run_sub_req
+ * succeeds for the (content_type == text/html) readme file.
+ */
+
+
+/*
+ * emit a plain text file
+ */
+static void do_emit_plain(request_rec *r, apr_file_t *f)
+{
+ char buf[AP_IOBUFSIZE + 1];
+ int ch;
+ apr_size_t i, c, n;
+ apr_status_t rv;
+
+ ap_rputs("<pre>\n", r);
+ while (!apr_file_eof(f)) {
+ do {
+ n = sizeof(char) * AP_IOBUFSIZE;
+ rv = apr_file_read(f, buf, &n);
+ } while (APR_STATUS_IS_EINTR(rv));
+ if (n == 0 || rv != APR_SUCCESS) {
+ /* ###: better error here? */
+ break;
+ }
+ buf[n] = '\0';
+ c = 0;
+ while (c < n) {
+ for (i = c; i < n; i++) {
+ if (buf[i] == '<' || buf[i] == '>' || buf[i] == '&') {
+ break;
+ }
+ }
+ ch = buf[i];
+ buf[i] = '\0';
+ ap_rputs(&buf[c], r);
+ if (ch == '<') {
+ ap_rputs("&lt;", r);
+ }
+ else if (ch == '>') {
+ ap_rputs("&gt;", r);
+ }
+ else if (ch == '&') {
+ ap_rputs("&amp;", r);
+ }
+ c = i + 1;
+ }
+ }
+ ap_rputs("</pre>\n", r);
+}
+
+/*
+ * Handle the preamble through the H1 tag line, inclusive. Locate
+ * the file with a subrequest. Process text/html documents by actually
+ * running the subrequest; text/xxx documents get copied verbatim,
+ * and any other content type is ignored. This means that a non-text
+ * document (such as HEADER.gif) might get multiviewed as the result
+ * instead of a text document, meaning nothing will be displayed, but
+ * oh well.
+ */
+static void emit_head(request_rec *r, char *header_fname, int suppress_amble,
+ int emit_xhtml, char *title)
+{
+ apr_table_t *hdrs = r->headers_in;
+ apr_file_t *f = NULL;
+ request_rec *rr = NULL;
+ int emit_amble = 1;
+ int emit_H1 = 1;
+ const char *r_accept;
+ const char *r_accept_enc;
+
+ /*
+ * If there's a header file, send a subrequest to look for it. If it's
+ * found and is text/html, run the subrequest; otherwise handle it here.
+ */
+ r_accept = apr_table_get(hdrs, "Accept");
+ r_accept_enc = apr_table_get(hdrs, "Accept-Encoding");
+ apr_table_setn(hdrs, "Accept", "text/html, text/plain");
+ apr_table_unset(hdrs, "Accept-Encoding");
+
+
+ if ((header_fname != NULL) && r->args) {
+ header_fname = apr_pstrcat(r->pool, header_fname, "?", r->args, NULL);
+ }
+
+ if ((header_fname != NULL)
+ && (rr = ap_sub_req_lookup_uri(header_fname, r, r->output_filters))
+ && (rr->status == HTTP_OK)
+ && (rr->filename != NULL)
+ && (rr->finfo.filetype == APR_REG)) {
+ /*
+ * Check for the two specific cases we allow: text/html and
+ * text/anything-else. The former is allowed to be processed for
+ * SSIs.
+ */
+ if (rr->content_type != NULL) {
+ if (!strcasecmp(ap_field_noparam(r->pool, rr->content_type),
+ "text/html")) {
+ ap_filter_t *f;
+ /* Hope everything will work... */
+ emit_amble = 0;
+ emit_H1 = 0;
+
+ if (! suppress_amble) {
+ emit_preamble(r, emit_xhtml, title);
+ }
+ /* This is a hack, but I can't find any better way to do this.
+ * The problem is that we have already created the sub-request,
+ * but we just inserted the OLD_WRITE filter, and the
+ * sub-request needs to pass its data through the OLD_WRITE
+ * filter, or things go horribly wrong (missing data, data in
+ * the wrong order, etc). To fix it, if you create a
+ * sub-request and then insert the OLD_WRITE filter before you
+ * run the request, you need to make sure that the sub-request
+ * data goes through the OLD_WRITE filter. Just steal this
+ * code. The long-term solution is to remove the ap_r*
+ * functions.
+ */
+ for (f=rr->output_filters;
+ f->frec != ap_subreq_core_filter_handle; f = f->next);
+ f->next = r->output_filters;
+
+ /*
+ * If there's a problem running the subrequest, display the
+ * preamble if we didn't do it before -- the header file
+ * didn't get displayed.
+ */
+ if (ap_run_sub_req(rr) != OK) {
+ /* It didn't work */
+ emit_amble = suppress_amble;
+ emit_H1 = 1;
+ }
+ }
+ else if (!strncasecmp("text/", rr->content_type, 5)) {
+ /*
+ * If we can open the file, prefix it with the preamble
+ * regardless; since we'll be sending a <pre> block around
+ * the file's contents, any HTML header it had won't end up
+ * where it belongs.
+ */
+ if (apr_file_open(&f, rr->filename, APR_READ,
+ APR_OS_DEFAULT, r->pool) == APR_SUCCESS) {
+ emit_preamble(r, emit_xhtml, title);
+ emit_amble = 0;
+ do_emit_plain(r, f);
+ apr_file_close(f);
+ emit_H1 = 0;
+ }
+ }
+ }
+ }
+
+ if (r_accept) {
+ apr_table_setn(hdrs, "Accept", r_accept);
+ }
+ else {
+ apr_table_unset(hdrs, "Accept");
+ }
+
+ if (r_accept_enc) {
+ apr_table_setn(hdrs, "Accept-Encoding", r_accept_enc);
+ }
+
+ if (emit_amble) {
+ emit_preamble(r, emit_xhtml, title);
+ }
+ if (emit_H1) {
+ ap_rvputs(r, "<h1>Index of ", title, "</h1>\n", NULL);
+ }
+ if (rr != NULL) {
+ ap_destroy_sub_req(rr);
+ }
+}
+
+
+/*
+ * Handle the Readme file through the postamble, inclusive. Locate
+ * the file with a subrequest. Process text/html documents by actually
+ * running the subrequest; text/xxx documents get copied verbatim,
+ * and any other content type is ignored. This means that a non-text
+ * document (such as FOOTER.gif) might get multiviewed as the result
+ * instead of a text document, meaning nothing will be displayed, but
+ * oh well.
+ */
+static void emit_tail(request_rec *r, char *readme_fname, int suppress_amble)
+{
+ apr_file_t *f = NULL;
+ request_rec *rr = NULL;
+ int suppress_post = 0;
+ int suppress_sig = 0;
+
+ /*
+ * If there's a readme file, send a subrequest to look for it. If it's
+ * found and a text file, handle it -- otherwise fall through and
+ * pretend there's nothing there.
+ */
+ if ((readme_fname != NULL)
+ && (rr = ap_sub_req_lookup_uri(readme_fname, r, r->output_filters))
+ && (rr->status == HTTP_OK)
+ && (rr->filename != NULL)
+ && rr->finfo.filetype == APR_REG) {
+ /*
+ * Check for the two specific cases we allow: text/html and
+ * text/anything-else. The former is allowed to be processed for
+ * SSIs.
+ */
+ if (rr->content_type != NULL) {
+ if (!strcasecmp(ap_field_noparam(r->pool, rr->content_type),
+ "text/html")) {
+ ap_filter_t *f;
+ for (f=rr->output_filters;
+ f->frec != ap_subreq_core_filter_handle; f = f->next);
+ f->next = r->output_filters;
+
+
+ if (ap_run_sub_req(rr) == OK) {
+ /* worked... */
+ suppress_sig = 1;
+ suppress_post = suppress_amble;
+ }
+ }
+ else if (!strncasecmp("text/", rr->content_type, 5)) {
+ /*
+ * If we can open the file, suppress the signature.
+ */
+ if (apr_file_open(&f, rr->filename, APR_READ,
+ APR_OS_DEFAULT, r->pool) == APR_SUCCESS) {
+ do_emit_plain(r, f);
+ apr_file_close(f);
+ suppress_sig = 1;
+ }
+ }
+ }
+ }
+
+ if (!suppress_sig) {
+ ap_rputs(ap_psignature("", r), r);
+ }
+ if (!suppress_post) {
+ ap_rputs("</body></html>\n", r);
+ }
+ if (rr != NULL) {
+ ap_destroy_sub_req(rr);
+ }
+}
+
+
+static char *find_title(request_rec *r)
+{
+ char titlebuf[MAX_STRING_LEN], *find = "<title>";
+ apr_file_t *thefile = NULL;
+ int x, y, p;
+ apr_size_t n;
+
+ if (r->status != HTTP_OK) {
+ return NULL;
+ }
+ if ((r->content_type != NULL)
+ && (!strcasecmp(ap_field_noparam(r->pool, r->content_type),
+ "text/html")
+ || !strcmp(r->content_type, INCLUDES_MAGIC_TYPE))
+ && !r->content_encoding) {
+ if (apr_file_open(&thefile, r->filename, APR_READ,
+ APR_OS_DEFAULT, r->pool) != APR_SUCCESS) {
+ return NULL;
+ }
+ n = sizeof(char) * (MAX_STRING_LEN - 1);
+ apr_file_read(thefile, titlebuf, &n);
+ if (n <= 0) {
+ apr_file_close(thefile);
+ return NULL;
+ }
+ titlebuf[n] = '\0';
+ for (x = 0, p = 0; titlebuf[x]; x++) {
+ if (apr_tolower(titlebuf[x]) == find[p]) {
+ if (!find[++p]) {
+ if ((p = ap_ind(&titlebuf[++x], '<')) != -1) {
+ titlebuf[x + p] = '\0';
+ }
+ /* Scan for line breaks for Tanmoy's secretary */
+ for (y = x; titlebuf[y]; y++) {
+ if ((titlebuf[y] == CR) || (titlebuf[y] == LF)) {
+ if (y == x) {
+ x++;
+ }
+ else {
+ titlebuf[y] = ' ';
+ }
+ }
+ }
+ apr_file_close(thefile);
+ return apr_pstrdup(r->pool, &titlebuf[x]);
+ }
+ }
+ else {
+ p = 0;
+ }
+ }
+ apr_file_close(thefile);
+ }
+ return NULL;
+}
+
+static struct ent *make_parent_entry(apr_int32_t autoindex_opts,
+ autoindex_config_rec *d,
+ request_rec *r, char keyid,
+ char direction)
+{
+ struct ent *p = (struct ent *) apr_pcalloc(r->pool, sizeof(struct ent));
+ char *testpath;
+ /*
+ * p->name is now the true parent URI.
+ * testpath is a crafted lie, so that the syntax '/some/..'
+ * (or simply '..') can be used to describe 'up' from '/some/'
+ * when processing IndexIgnore, and Icon|Alt|Desc configs.
+ */
+
+ /* The output has always been to the parent. Don't make ourselves
+ * our own parent (worthless cyclical reference).
+ */
+ if (!(p->name = ap_make_full_path(r->pool, r->uri, "../"))) {
+ return (NULL);
+ }
+ ap_getparents(p->name);
+ if (!*p->name) {
+ return (NULL);
+ }
+
+ /* IndexIgnore has always compared "/thispath/.." */
+ testpath = ap_make_full_path(r->pool, r->filename, "..");
+ if (ignore_entry(d, testpath)) {
+ return (NULL);
+ }
+
+ p->size = -1;
+ p->lm = -1;
+ p->key = apr_toupper(keyid);
+ p->ascending = (apr_toupper(direction) == D_ASCENDING);
+ p->version_sort = autoindex_opts & VERSION_SORT;
+ if (autoindex_opts & FANCY_INDEXING) {
+ if (!(p->icon = find_default_icon(d, testpath))) {
+ p->icon = find_default_icon(d, "^^DIRECTORY^^");
+ }
+ if (!(p->alt = find_default_alt(d, testpath))) {
+ if (!(p->alt = find_default_alt(d, "^^DIRECTORY^^"))) {
+ p->alt = "DIR";
+ }
+ }
+ p->desc = find_desc(d, testpath);
+ }
+ return p;
+}
+
+static struct ent *make_autoindex_entry(const apr_finfo_t *dirent,
+ int autoindex_opts,
+ autoindex_config_rec *d,
+ request_rec *r, char keyid,
+ char direction,
+ const char *pattern)
+{
+ request_rec *rr;
+ struct ent *p;
+
+ /* Dot is ignored, Parent is handled by make_parent_entry() */
+ if ((dirent->name[0] == '.') && (!dirent->name[1]
+ || ((dirent->name[1] == '.') && !dirent->name[2])))
+ return (NULL);
+
+#ifndef CASE_BLIND_FILESYSTEM
+ if (pattern && (apr_fnmatch(pattern, dirent->name,
+ FNM_NOESCAPE | FNM_PERIOD)
+ != APR_SUCCESS))
+ return (NULL);
+#else /* !CASE_BLIND_FILESYSTEM */
+ /*
+ * On some platforms, the match must be case-blind. This is really
+ * a factor of the filesystem involved, but we can't detect that
+ * reliably - so we have to granularise at the OS level.
+ */
+ if (pattern && (apr_fnmatch(pattern, dirent->name,
+ FNM_NOESCAPE | FNM_PERIOD | FNM_CASE_BLIND)
+ != APR_SUCCESS))
+ return (NULL);
+#endif /* !CASE_BLIND_FILESYSTEM */
+
+ if (ignore_entry(d, ap_make_full_path(r->pool,
+ r->filename, dirent->name))) {
+ return (NULL);
+ }
+
+ if (!(rr = ap_sub_req_lookup_dirent(dirent, r, AP_SUBREQ_NO_ARGS, NULL))) {
+ return (NULL);
+ }
+
+ if ((rr->finfo.filetype != APR_DIR && rr->finfo.filetype != APR_REG)
+ || !(rr->status == OK || ap_is_HTTP_SUCCESS(rr->status)
+ || ap_is_HTTP_REDIRECT(rr->status))) {
+ ap_destroy_sub_req(rr);
+ return (NULL);
+ }
+
+ p = (struct ent *) apr_pcalloc(r->pool, sizeof(struct ent));
+ if (dirent->filetype == APR_DIR) {
+ p->name = apr_pstrcat(r->pool, dirent->name, "/", NULL);
+ }
+ else {
+ p->name = apr_pstrdup(r->pool, dirent->name);
+ }
+ p->size = -1;
+ p->icon = NULL;
+ p->alt = NULL;
+ p->desc = NULL;
+ p->lm = -1;
+ p->isdir = 0;
+ p->key = apr_toupper(keyid);
+ p->ascending = (apr_toupper(direction) == D_ASCENDING);
+ p->version_sort = !!(autoindex_opts & VERSION_SORT);
+ p->ignore_case = !!(autoindex_opts & IGNORE_CASE);
+
+ if (autoindex_opts & (FANCY_INDEXING | TABLE_INDEXING)) {
+ p->lm = rr->finfo.mtime;
+ if (dirent->filetype == APR_DIR) {
+ if (autoindex_opts & FOLDERS_FIRST) {
+ p->isdir = 1;
+ }
+ rr->filename = ap_make_dirstr_parent (rr->pool, rr->filename);
+
+ /* omit the trailing slash (1.3 compat) */
+ rr->filename[strlen(rr->filename) - 1] = '\0';
+
+ if (!(p->icon = find_icon(d, rr, 1))) {
+ p->icon = find_default_icon(d, "^^DIRECTORY^^");
+ }
+ if (!(p->alt = find_alt(d, rr, 1))) {
+ if (!(p->alt = find_default_alt(d, "^^DIRECTORY^^"))) {
+ p->alt = "DIR";
+ }
+ }
+ }
+ else {
+ p->icon = find_icon(d, rr, 0);
+ p->alt = find_alt(d, rr, 0);
+ p->size = rr->finfo.size;
+ }
+
+ p->desc = find_desc(d, rr->filename);
+
+ if ((!p->desc) && (autoindex_opts & SCAN_HTML_TITLES)) {
+ p->desc = apr_pstrdup(r->pool, find_title(rr));
+ }
+ }
+ ap_destroy_sub_req(rr);
+ /*
+ * We don't need to take any special action for the file size key.
+ * If we did, it would go here.
+ */
+ if (keyid == K_LAST_MOD) {
+ if (p->lm < 0) {
+ p->lm = 0;
+ }
+ }
+ return (p);
+}
+
+static char *terminate_description(autoindex_config_rec *d, char *desc,
+ apr_int32_t autoindex_opts, int desc_width)
+{
+ int maxsize = desc_width;
+ register int x;
+
+ /*
+ * If there's no DescriptionWidth in effect, default to the old
+ * behaviour of adjusting the description size depending upon
+ * what else is being displayed. Otherwise, stick with the
+ * setting.
+ */
+ if (d->desc_adjust == K_UNSET) {
+ if (autoindex_opts & SUPPRESS_ICON) {
+ maxsize += 6;
+ }
+ if (autoindex_opts & SUPPRESS_LAST_MOD) {
+ maxsize += 19;
+ }
+ if (autoindex_opts & SUPPRESS_SIZE) {
+ maxsize += 7;
+ }
+ }
+ for (x = 0; desc[x] && ((maxsize > 0) || (desc[x] == '<')); x++) {
+ if (desc[x] == '<') {
+ while (desc[x] != '>') {
+ if (!desc[x]) {
+ maxsize = 0;
+ break;
+ }
+ ++x;
+ }
+ }
+ else if (desc[x] == '&') {
+ /* entities like &auml; count as one character */
+ --maxsize;
+ for ( ; desc[x] != ';'; ++x) {
+ if (desc[x] == '\0') {
+ maxsize = 0;
+ break;
+ }
+ }
+ }
+ else {
+ --maxsize;
+ }
+ }
+ if (!maxsize && desc[x] != '\0') {
+ desc[x - 1] = '>'; /* Grump. */
+ desc[x] = '\0'; /* Double Grump! */
+ }
+ return desc;
+}
+
+/*
+ * Emit the anchor for the specified field. If a field is the key for the
+ * current request, the link changes its meaning to reverse the order when
+ * selected again. Non-active fields always start in ascending order.
+ */
+static void emit_link(request_rec *r, const char *anchor, char column,
+ char curkey, char curdirection,
+ const char *colargs, int nosort)
+{
+ if (!nosort) {
+ char qvalue[9];
+
+ qvalue[0] = '?';
+ qvalue[1] = 'C';
+ qvalue[2] = '=';
+ qvalue[3] = column;
+ qvalue[4] = ';';
+ qvalue[5] = 'O';
+ qvalue[6] = '=';
+ /* reverse? */
+ qvalue[7] = ((curkey == column) && (curdirection == D_ASCENDING))
+ ? D_DESCENDING : D_ASCENDING;
+ qvalue[8] = '\0';
+ ap_rvputs(r, "<a href=\"", qvalue, colargs ? colargs : "",
+ "\">", anchor, "</a>", NULL);
+ }
+ else {
+ ap_rputs(anchor, r);
+ }
+}
+
+static void output_directories(struct ent **ar, int n,
+ autoindex_config_rec *d, request_rec *r,
+ apr_int32_t autoindex_opts, char keyid,
+ char direction, const char *colargs)
+{
+ int x;
+ apr_size_t rv;
+ char *name = r->uri;
+ char *tp;
+ int static_columns = !!(autoindex_opts & SUPPRESS_COLSORT);
+ apr_pool_t *scratch;
+ int name_width;
+ int desc_width;
+ char *name_scratch;
+ char *pad_scratch;
+ char *breakrow = "";
+
+ apr_pool_create(&scratch, r->pool);
+ if (name[0] == '\0') {
+ name = "/";
+ }
+
+ name_width = d->name_width;
+ desc_width = d->desc_width;
+
+ if ((autoindex_opts & (FANCY_INDEXING | TABLE_INDEXING))
+ == FANCY_INDEXING) {
+ if (d->name_adjust == K_ADJUST) {
+ for (x = 0; x < n; x++) {
+ int t = strlen(ar[x]->name);
+ if (t > name_width) {
+ name_width = t;
+ }
+ }
+ }
+
+ if (d->desc_adjust == K_ADJUST) {
+ for (x = 0; x < n; x++) {
+ if (ar[x]->desc != NULL) {
+ int t = strlen(ar[x]->desc);
+ if (t > desc_width) {
+ desc_width = t;
+ }
+ }
+ }
+ }
+ }
+ name_scratch = apr_palloc(r->pool, name_width + 1);
+ pad_scratch = apr_palloc(r->pool, name_width + 1);
+ memset(pad_scratch, ' ', name_width);
+ pad_scratch[name_width] = '\0';
+
+ if (autoindex_opts & TABLE_INDEXING) {
+ int cols = 1;
+ ap_rputs("<table><tr>", r);
+ if (!(autoindex_opts & SUPPRESS_ICON)) {
+ ap_rputs("<th>", r);
+ if ((tp = find_default_icon(d, "^^BLANKICON^^"))) {
+ ap_rvputs(r, "<img src=\"", ap_escape_html(scratch, tp),
+ "\" alt=\"[ICO]\"", NULL);
+ if (d->icon_width) {
+ ap_rprintf(r, " width=\"%d\"", d->icon_width);
+ }
+ if (d->icon_height) {
+ ap_rprintf(r, " height=\"%d\"", d->icon_height);
+ }
+
+ if (autoindex_opts & EMIT_XHTML) {
+ ap_rputs(" /", r);
+ }
+ ap_rputs("></th>", r);
+ }
+ else {
+ ap_rputs("&nbsp;</th>", r);
+ }
+
+ ++cols;
+ }
+ ap_rputs("<th>", r);
+ emit_link(r, "Name", K_NAME, keyid, direction,
+ colargs, static_columns);
+ if (!(autoindex_opts & SUPPRESS_LAST_MOD)) {
+ ap_rputs("</th><th>", r);
+ emit_link(r, "Last modified", K_LAST_MOD, keyid, direction,
+ colargs, static_columns);
+ ++cols;
+ }
+ if (!(autoindex_opts & SUPPRESS_SIZE)) {
+ ap_rputs("</th><th>", r);
+ emit_link(r, "Size", K_SIZE, keyid, direction,
+ colargs, static_columns);
+ ++cols;
+ }
+ if (!(autoindex_opts & SUPPRESS_DESC)) {
+ ap_rputs("</th><th>", r);
+ emit_link(r, "Description", K_DESC, keyid, direction,
+ colargs, static_columns);
+ ++cols;
+ }
+ if (!(autoindex_opts & SUPPRESS_RULES)) {
+ breakrow = apr_psprintf(r->pool,
+ "<tr><th colspan=\"%d\">"
+ "<hr%s></th></tr>\n", cols,
+ (autoindex_opts & EMIT_XHTML) ? " /" : "");
+ }
+ ap_rvputs(r, "</th></tr>", breakrow, NULL);
+ }
+ else if (autoindex_opts & FANCY_INDEXING) {
+ ap_rputs("<pre>", r);
+ if (!(autoindex_opts & SUPPRESS_ICON)) {
+ if ((tp = find_default_icon(d, "^^BLANKICON^^"))) {
+ ap_rvputs(r, "<img src=\"", ap_escape_html(scratch, tp),
+ "\" alt=\"Icon \"", NULL);
+ if (d->icon_width) {
+ ap_rprintf(r, " width=\"%d\"", d->icon_width);
+ }
+ if (d->icon_height) {
+ ap_rprintf(r, " height=\"%d\"", d->icon_height);
+ }
+
+ if (autoindex_opts & EMIT_XHTML) {
+ ap_rputs(" /", r);
+ }
+ ap_rputs("> ", r);
+ }
+ else {
+ ap_rputs(" ", r);
+ }
+ }
+ emit_link(r, "Name", K_NAME, keyid, direction,
+ colargs, static_columns);
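+ /* "Name" already occupies the first four columns, so pad from
+ * offset 4 to line the header up with name_width.
+ */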
+ ap_rputs(pad_scratch + 4, r);
+ /*
+ * Emit the guaranteed-at-least-one-space-between-columns byte.
+ */
+ ap_rputs(" ", r);
+ if (!(autoindex_opts & SUPPRESS_LAST_MOD)) {
+ emit_link(r, "Last modified", K_LAST_MOD, keyid, direction,
+ colargs, static_columns);
+ ap_rputs(" ", r);
+ }
+ if (!(autoindex_opts & SUPPRESS_SIZE)) {
+ emit_link(r, "Size", K_SIZE, keyid, direction,
+ colargs, static_columns);
+ ap_rputs(" ", r);
+ }
+ if (!(autoindex_opts & SUPPRESS_DESC)) {
+ emit_link(r, "Description", K_DESC, keyid, direction,
+ colargs, static_columns);
+ }
+ if (!(autoindex_opts & SUPPRESS_RULES)) {
+ ap_rputs("<hr", r);
+ if (autoindex_opts & EMIT_XHTML) {
+ ap_rputs(" /", r);
+ }
+ ap_rputs(">", r);
+ }
+ else {
+ ap_rputc('\n', r);
+ }
+ }
+ else {
+ ap_rputs("<ul>", r);
+ }
+
+ for (x = 0; x < n; x++) {
+ char *anchor, *t, *t2;
+ int nwidth;
+
+ apr_pool_clear(scratch);
+
+ t = ar[x]->name;
+ anchor = ap_escape_html(scratch, ap_os_escape_path(scratch, t, 0));
+
+ if (!x && t[0] == '/') {
+ t2 = "Parent Directory";
+ }
+ else {
+ t2 = t;
+ }
+
+ if (autoindex_opts & TABLE_INDEXING) {
+ ap_rputs("<tr>", r);
+ if (!(autoindex_opts & SUPPRESS_ICON)) {
+ ap_rputs("<td valign=\"top\">", r);
+ if (autoindex_opts & ICONS_ARE_LINKS) {
+ ap_rvputs(r, "<a href=\"", anchor, "\">", NULL);
+ }
+ if ((ar[x]->icon) || d->default_icon) {
+ ap_rvputs(r, "<img src=\"",
+ ap_escape_html(scratch,
+ ar[x]->icon ? ar[x]->icon
+ : d->default_icon),
+ "\" alt=\"[", (ar[x]->alt ? ar[x]->alt : " "),
+ "]\"", NULL);
+ if (d->icon_width) {
+ ap_rprintf(r, " width=\"%d\"", d->icon_width);
+ }
+ if (d->icon_height) {
+ ap_rprintf(r, " height=\"%d\"", d->icon_height);
+ }
+
+ if (autoindex_opts & EMIT_XHTML) {
+ ap_rputs(" /", r);
+ }
+ ap_rputs(">", r);
+ }
+ else {
+ ap_rputs("&nbsp;", r);
+ }
+ if (autoindex_opts & ICONS_ARE_LINKS) {
+ ap_rputs("</a></td>", r);
+ }
+ else {
+ ap_rputs("</td>", r);
+ }
+ }
+ if (d->name_adjust == K_ADJUST) {
+ ap_rvputs(r, "<td><a href=\"", anchor, "\">",
+ ap_escape_html(scratch, t2), "</a>", NULL);
+ }
+ else {
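+ /* Fixed-width column: names longer than name_width are cut
+ * and the truncation is marked with a trailing "..>".
+ */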
+ nwidth = strlen(t2);
+ if (nwidth > name_width) {
+ memcpy(name_scratch, t2, name_width - 3);
+ name_scratch[name_width - 3] = '.';
+ name_scratch[name_width - 2] = '.';
+ name_scratch[name_width - 1] = '>';
+ name_scratch[name_width] = 0;
+ t2 = name_scratch;
+ nwidth = name_width;
+ }
+ ap_rvputs(r, "<td><a href=\"", anchor, "\">",
+ ap_escape_html(scratch, t2),
+ "</a>", pad_scratch + nwidth, NULL);
+ }
+ if (!(autoindex_opts & SUPPRESS_LAST_MOD)) {
+ if (ar[x]->lm != -1) {
+ char time_str[MAX_STRING_LEN];
+ apr_time_exp_t ts;
+ apr_time_exp_lt(&ts, ar[x]->lm);
+ apr_strftime(time_str, &rv, MAX_STRING_LEN,
+ "</td><td align=\"right\">%d-%b-%Y %H:%M ",
+ &ts);
+ ap_rputs(time_str, r);
+ }
+ else {
+ ap_rputs("</td><td>&nbsp;", r);
+ }
+ }
+ if (!(autoindex_opts & SUPPRESS_SIZE)) {
+ char buf[5];
+ ap_rvputs(r, "</td><td align=\"right\">",
+ apr_strfsize(ar[x]->size, buf), NULL);
+ }
+ if (!(autoindex_opts & SUPPRESS_DESC)) {
+ if (ar[x]->desc) {
+ if (d->desc_adjust == K_ADJUST) {
+ ap_rvputs(r, "</td><td>", ar[x]->desc, NULL);
+ }
+ else {
+ ap_rvputs(r, "</td><td>",
+ terminate_description(d, ar[x]->desc,
+ autoindex_opts,
+ desc_width), NULL);
+ }
+ }
+ }
+ else {
+ ap_rputs("</td><td>&nbsp;", r);
+ }
+ ap_rputs("</td></tr>\n", r);
+ }
+ else if (autoindex_opts & FANCY_INDEXING) {
+ if (!(autoindex_opts & SUPPRESS_ICON)) {
+ if (autoindex_opts & ICONS_ARE_LINKS) {
+ ap_rvputs(r, "<a href=\"", anchor, "\">", NULL);
+ }
+ if ((ar[x]->icon) || d->default_icon) {
+ ap_rvputs(r, "<img src=\"",
+ ap_escape_html(scratch,
+ ar[x]->icon ? ar[x]->icon
+ : d->default_icon),
+ "\" alt=\"[", (ar[x]->alt ? ar[x]->alt : " "),
+ "]\"", NULL);
+ if (d->icon_width) {
+ ap_rprintf(r, " width=\"%d\"", d->icon_width);
+ }
+ if (d->icon_height) {
+ ap_rprintf(r, " height=\"%d\"", d->icon_height);
+ }
+
+ if (autoindex_opts & EMIT_XHTML) {
+ ap_rputs(" /", r);
+ }
+ ap_rputs(">", r);
+ }
+ else {
+ ap_rputs(" ", r);
+ }
+ if (autoindex_opts & ICONS_ARE_LINKS) {
+ ap_rputs("</a> ", r);
+ }
+ else {
+ ap_rputc(' ', r);
+ }
+ }
+ nwidth = strlen(t2);
+ if (nwidth > name_width) {
+ memcpy(name_scratch, t2, name_width - 3);
+ name_scratch[name_width - 3] = '.';
+ name_scratch[name_width - 2] = '.';
+ name_scratch[name_width - 1] = '>';
+ name_scratch[name_width] = 0;
+ t2 = name_scratch;
+ nwidth = name_width;
+ }
+ ap_rvputs(r, "<a href=\"", anchor, "\">",
+ ap_escape_html(scratch, t2),
+ "</a>", pad_scratch + nwidth, NULL);
+ /*
+ * The blank before the storm.. er, before the next field.
+ */
+ ap_rputs(" ", r);
+ if (!(autoindex_opts & SUPPRESS_LAST_MOD)) {
+ if (ar[x]->lm != -1) {
+ char time_str[MAX_STRING_LEN];
+ apr_time_exp_t ts;
+ apr_time_exp_lt(&ts, ar[x]->lm);
+ apr_strftime(time_str, &rv, MAX_STRING_LEN,
+ "%d-%b-%Y %H:%M ", &ts);
+ ap_rputs(time_str, r);
+ }
+ else {
+ /*Length="22-Feb-1998 23:42 " (see 4 lines above) */
+ ap_rputs(" ", r);
+ }
+ }
+ if (!(autoindex_opts & SUPPRESS_SIZE)) {
+ char buf[5];
+ ap_rputs(apr_strfsize(ar[x]->size, buf), r);
+ ap_rputs(" ", r);
+ }
+ if (!(autoindex_opts & SUPPRESS_DESC)) {
+ if (ar[x]->desc) {
+ ap_rputs(terminate_description(d, ar[x]->desc,
+ autoindex_opts,
+ desc_width), r);
+ }
+ }
+ ap_rputc('\n', r);
+ }
+ else {
+ ap_rvputs(r, "<li><a href=\"", anchor, "\"> ", t2,
+ "</a></li>\n", NULL);
+ }
+ }
+ if (autoindex_opts & TABLE_INDEXING) {
+ ap_rvputs(r, breakrow, "</table>\n", NULL);
+ }
+ else if (autoindex_opts & FANCY_INDEXING) {
+ if (!(autoindex_opts & SUPPRESS_RULES)) {
+ ap_rputs("<hr", r);
+ if (autoindex_opts & EMIT_XHTML) {
+ ap_rputs(" /", r);
+ }
+ ap_rputs("></pre>\n", r);
+ }
+ else {
+ ap_rputs("</pre>\n", r);
+ }
+ }
+ else {
+ ap_rputs("</ul>\n", r);
+ }
+}
+
+/*
+ * Compare two file entries according to the sort criteria. The return
+ * is essentially a signum function value.
+ */
+
+static int dsortf(struct ent **e1, struct ent **e2)
+{
+ struct ent *c1;
+ struct ent *c2;
+ int result = 0;
+
+ /*
+ * First, see if either of the entries is for the parent directory.
+ * If so, that *always* sorts lower than anything else.
+ */
+ if ((*e1)->name[0] == '/') {
+ return -1;
+ }
+ if ((*e2)->name[0] == '/') {
+ return 1;
+ }
+ /*
+ * Now see if one's a directory and one isn't, if isdir was
+ * set because FOLDERS_FIRST is in effect.
+ */
+ if ((*e1)->isdir != (*e2)->isdir) {
+ return (*e1)->isdir ? -1 : 1;
+ }
+ /*
+ * All of our comparisons will be of the c1 entry against the c2 one,
+ * so assign them appropriately to take care of the ordering.
+ */
+ if ((*e1)->ascending) {
+ c1 = *e1;
+ c2 = *e2;
+ }
+ else {
+ c1 = *e2;
+ c2 = *e1;
+ }
+
+ switch (c1->key) {
+ case K_LAST_MOD:
+ if (c1->lm > c2->lm) {
+ return 1;
+ }
+ else if (c1->lm < c2->lm) {
+ return -1;
+ }
+ break;
+ case K_SIZE:
+ if (c1->size > c2->size) {
+ return 1;
+ }
+ else if (c1->size < c2->size) {
+ return -1;
+ }
+ break;
+ case K_DESC:
+ if (c1->version_sort) {
+ result = apr_strnatcmp(c1->desc ? c1->desc : "",
+ c2->desc ? c2->desc : "");
+ }
+ else {
+ result = strcmp(c1->desc ? c1->desc : "",
+ c2->desc ? c2->desc : "");
+ }
+ if (result) {
+ return result;
+ }
+ break;
+ }
+
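+ /* K_NAME, or a tie on the key compared above, falls through to
+ * the filename comparisons below.
+ */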
+ /* names may be identical when treated case-insensitively,
+ * so always fall back on strcmp() flavors to put entries
+ * in deterministic order. This means that 'ABC' and 'abc'
+ * will always appear in the same order, rather than
+ * variably between 'ABC abc' and 'abc ABC' order.
+ */
+
+ if (c1->version_sort) {
+ if (c1->ignore_case) {
+ result = apr_strnatcasecmp (c1->name, c2->name);
+ }
+ if (!result) {
+ result = apr_strnatcmp(c1->name, c2->name);
+ }
+ }
+
+ /* The names may be identical in respects other than
+ * filename case when strnatcmp is used above, so fall back
+ * to strcmp on conflicts so that fn1.01.zzz and fn1.1.zzz
+ * are also sorted in a deterministic order.
+ */
+
+ if (!result && c1->ignore_case) {
+ result = strcasecmp (c1->name, c2->name);
+ }
+ if (!result) {
+ result = strcmp (c1->name, c2->name);
+ }
+ return result;
+}
+
+
+static int index_directory(request_rec *r,
+ autoindex_config_rec *autoindex_conf)
+{
+ char *title_name = ap_escape_html(r->pool, r->uri);
+ char *title_endp;
+ char *name = r->filename;
+ char *pstring = NULL;
+ apr_finfo_t dirent;
+ apr_dir_t *thedir;
+ apr_status_t status;
+ int num_ent = 0, x;
+ struct ent *head, *p;
+ struct ent **ar = NULL;
+ const char *qstring;
+ apr_int32_t autoindex_opts = autoindex_conf->opts;
+ char keyid;
+ char direction;
+ char *colargs;
+ char *fullpath;
+ apr_size_t dirpathlen;
+ char *ctype = "text/html";
+ char *charset;
+
+ if ((status = apr_dir_open(&thedir, name, r->pool)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r,
+ "Can't open directory for index: %s", r->filename);
+ return HTTP_FORBIDDEN;
+ }
+
+ if (autoindex_conf->ctype) {
+ ctype = autoindex_conf->ctype;
+ }
+ if (autoindex_conf->charset) {
+ charset = autoindex_conf->charset;
+ }
+ else {
+#if APR_HAS_UNICODE_FS
+ charset = "UTF-8";
+#else
+ charset = "ISO-8859-1";
+#endif
+ }
+ if (*charset) {
+ ap_set_content_type(r, apr_pstrcat(r->pool, ctype, ";charset=",
+ charset, NULL));
+ }
+ else {
+ ap_set_content_type(r, ctype);
+ }
+
+ if (autoindex_opts & TRACK_MODIFIED) {
+ ap_update_mtime(r, r->finfo.mtime);
+ ap_set_last_modified(r);
+ ap_set_etag(r);
+ }
+ if (r->header_only) {
+ apr_dir_close(thedir);
+ return 0;
+ }
+
+ /*
+ * If there is no specific ordering defined for this directory,
+ * default to ascending by filename.
+ */
+ keyid = autoindex_conf->default_keyid
+ ? autoindex_conf->default_keyid : K_NAME;
+ direction = autoindex_conf->default_direction
+ ? autoindex_conf->default_direction : D_ASCENDING;
+
+ /*
+ * Figure out what sort of indexing (if any) we're supposed to use.
+ *
+ * If no QUERY_STRING was specified, or client query strings have been
+ * explicitly disabled, fall back on the defaults chosen above.
+ * If we are ignoring the client, suppress column sorting as well.
+ */
+ if (autoindex_opts & IGNORE_CLIENT) {
+ qstring = NULL;
+ autoindex_opts |= SUPPRESS_COLSORT;
+ colargs = "";
+ }
+ else {
+ char fval[5], vval[5], *ppre = "", *epattern = "";
+ fval[0] = '\0'; vval[0] = '\0';
+ qstring = r->args;
+
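+ /* Walk the query string for our arguments: C= selects the sort
+ * column, O= the order, F= the output format (0 plain, 1 fancy,
+ * 2 table), V= toggles version sorting, and P= supplies a
+ * filename pattern. Anything unrecognised ends the parse.
+ */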
+ while (qstring && *qstring) {
+ if (qstring[0] == 'C' && qstring[1] == '='
+ && qstring[2] && strchr(K_VALID, qstring[2])
+ && (qstring[3] == '&' || qstring[3] == ';'
+ || !qstring[3])) {
+ keyid = qstring[2];
+ qstring += qstring[3] ? 4 : 3;
+ }
+ else if (qstring[0] == 'O' && qstring[1] == '='
+ && ((qstring[2] == D_ASCENDING)
+ || (qstring[2] == D_DESCENDING))
+ && (qstring[3] == '&' || qstring[3] == ';'
+ || !qstring[3])) {
+ direction = qstring[2];
+ qstring += qstring[3] ? 4 : 3;
+ }
+ else if (qstring[0] == 'F' && qstring[1] == '='
+ && qstring[2] && strchr("012", qstring[2])
+ && (qstring[3] == '&' || qstring[3] == ';'
+ || !qstring[3])) {
+ if (qstring[2] == '0') {
+ autoindex_opts &= ~(FANCY_INDEXING | TABLE_INDEXING);
+ }
+ else if (qstring[2] == '1') {
+ autoindex_opts = (autoindex_opts | FANCY_INDEXING)
+ & ~TABLE_INDEXING;
+ }
+ else if (qstring[2] == '2') {
+ autoindex_opts |= FANCY_INDEXING | TABLE_INDEXING;
+ }
+ strcpy(fval, ";F= ");
+ fval[3] = qstring[2];
+ qstring += qstring[3] ? 4 : 3;
+ }
+ else if (qstring[0] == 'V' && qstring[1] == '='
+ && (qstring[2] == '0' || qstring[2] == '1')
+ && (qstring[3] == '&' || qstring[3] == ';'
+ || !qstring[3])) {
+ if (qstring[2] == '0') {
+ autoindex_opts &= ~VERSION_SORT;
+ }
+ else if (qstring[2] == '1') {
+ autoindex_opts |= VERSION_SORT;
+ }
+ strcpy(vval, ";V= ");
+ vval[3] = qstring[2];
+ qstring += qstring[3] ? 4 : 3;
+ }
+ else if (qstring[0] == 'P' && qstring[1] == '=') {
+ const char *eos = qstring += 2; /* for efficiency */
+
+ while (*eos && *eos != '&' && *eos != ';') {
+ ++eos;
+ }
+
+ if (eos == qstring) {
+ pstring = NULL;
+ }
+ else {
+ pstring = apr_pstrndup(r->pool, qstring, eos - qstring);
+ if (ap_unescape_url(pstring) != OK) {
+ /* ignore the pattern, if it's bad. */
+ pstring = NULL;
+ }
+ else {
+ ppre = ";P=";
+ /* be correct */
+ epattern = ap_escape_uri(r->pool, pstring);
+ }
+ }
+
+ if (*eos && *++eos) {
+ qstring = eos;
+ }
+ else {
+ qstring = NULL;
+ }
+ }
+ else { /* Syntax error? Ignore the remainder! */
+ qstring = NULL;
+ }
+ }
+ colargs = apr_pstrcat(r->pool, fval, vval, ppre, epattern, NULL);
+ }
+
+ /* Spew HTML preamble */
+ title_endp = title_name + strlen(title_name) - 1;
+
+ while (title_endp > title_name && *title_endp == '/') {
+ *title_endp-- = '\0';
+ }
+
+ emit_head(r, find_header(autoindex_conf, r),
+ autoindex_opts & SUPPRESS_PREAMBLE,
+ autoindex_opts & EMIT_XHTML, title_name);
+
+ /*
+ * Since we don't know how many dir. entries there are, put them into a
+ * linked list and then arrayificate them so qsort can use them.
+ */
+ head = NULL;
+ p = make_parent_entry(autoindex_opts, autoindex_conf, r, keyid, direction);
+ if (p != NULL) {
+ p->next = head;
+ head = p;
+ num_ent++;
+ }
+ fullpath = apr_palloc(r->pool, APR_PATH_MAX);
+ dirpathlen = strlen(name);
+ memcpy(fullpath, name, dirpathlen);
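+ /* fullpath keeps the directory path as a fixed prefix; each
+ * symlinked entry's name is copied in at dirpathlen so the
+ * target can be stat()ed below.
+ */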
+
+ do {
+ status = apr_dir_read(&dirent, APR_FINFO_MIN | APR_FINFO_NAME, thedir);
+ if (APR_STATUS_IS_INCOMPLETE(status)) {
+ continue; /* ignore un-stat()able files */
+ }
+ else if (status != APR_SUCCESS) {
+ break;
+ }
+
+ /* We want to explode symlinks here. */
+ if (dirent.filetype == APR_LNK) {
+ const char *savename;
+ apr_finfo_t fi;
+ /* We *must* have FNAME. */
+ savename = dirent.name;
+ apr_cpystrn(fullpath + dirpathlen, dirent.name,
+ APR_PATH_MAX - dirpathlen);
+ status = apr_stat(&fi, fullpath,
+ dirent.valid & ~(APR_FINFO_NAME), r->pool);
+ if (status != APR_SUCCESS) {
+ /* Something bad happened, skip this file. */
+ continue;
+ }
+ memcpy(&dirent, &fi, sizeof(fi));
+ dirent.name = savename;
+ dirent.valid |= APR_FINFO_NAME;
+ }
+ p = make_autoindex_entry(&dirent, autoindex_opts, autoindex_conf, r,
+ keyid, direction, pstring);
+ if (p != NULL) {
+ p->next = head;
+ head = p;
+ num_ent++;
+ }
+ } while (1);
+
+ if (num_ent > 0) {
+ ar = (struct ent **) apr_palloc(r->pool,
+ num_ent * sizeof(struct ent *));
+ p = head;
+ x = 0;
+ while (p) {
+ ar[x++] = p;
+ p = p->next;
+ }
+
+ qsort((void *) ar, num_ent, sizeof(struct ent *),
+ (int (*)(const void *, const void *)) dsortf);
+ }
+ output_directories(ar, num_ent, autoindex_conf, r, autoindex_opts,
+ keyid, direction, colargs);
+ apr_dir_close(thedir);
+
+ emit_tail(r, find_readme(autoindex_conf, r),
+ autoindex_opts & SUPPRESS_PREAMBLE);
+
+ return 0;
+}
+
+/* The formal handler... */
+
+static int handle_autoindex(request_rec *r)
+{
+ autoindex_config_rec *d;
+ int allow_opts;
+
+ if(strcmp(r->handler,DIR_MAGIC_TYPE)) {
+ return DECLINED;
+ }
+
+ allow_opts = ap_allow_options(r);
+
+ d = (autoindex_config_rec *) ap_get_module_config(r->per_dir_config,
+ &autoindex_module);
+
+ r->allowed |= (AP_METHOD_BIT << M_GET);
+ if (r->method_number != M_GET) {
+ return DECLINED;
+ }
+
+ /* OK, nothing easy. Trot out the heavy artillery... */
+
+ if (allow_opts & OPT_INDEXES) {
+ int errstatus;
+
+ if ((errstatus = ap_discard_request_body(r)) != OK) {
+ return errstatus;
+ }
+
+ /* KLUDGE --- make the sub_req lookups happen in the right directory.
+ * Fixing this in the sub_req_lookup functions themselves is difficult,
+ * and would probably break virtual includes...
+ */
+
+ if (r->filename[strlen(r->filename) - 1] != '/') {
+ r->filename = apr_pstrcat(r->pool, r->filename, "/", NULL);
+ }
+ return index_directory(r, d);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Directory index forbidden by rule: %s", r->filename);
+ return HTTP_FORBIDDEN;
+ }
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_handler(handle_autoindex,NULL,NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA autoindex_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_autoindex_config, /* dir config creater */
+ merge_autoindex_configs, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ autoindex_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.dsp b/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.dsp
new file mode 100644
index 00000000..da8a9f5c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_autoindex" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_autoindex - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_autoindex.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_autoindex.mak" CFG="mod_autoindex - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_autoindex - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_autoindex - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_autoindex - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_autoindex_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_autoindex.so" /base:@..\..\os\win32\BaseAddr.ref,mod_autoindex.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_autoindex.so" /base:@..\..\os\win32\BaseAddr.ref,mod_autoindex.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_autoindex - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_autoindex_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_autoindex.so" /base:@..\..\os\win32\BaseAddr.ref,mod_autoindex.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_autoindex.so" /base:@..\..\os\win32\BaseAddr.ref,mod_autoindex.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_autoindex - Win32 Release"
+# Name "mod_autoindex - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_autoindex.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_autoindex.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_autoindex - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_autoindex.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_autoindex.so "autoindex_module for Apache" ../../include/ap_release.h > .\mod_autoindex.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_autoindex - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_autoindex.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_autoindex.so "autoindex_module for Apache" ../../include/ap_release.h > .\mod_autoindex.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.exp b/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.exp
new file mode 100644
index 00000000..90f4057e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.exp
@@ -0,0 +1 @@
+autoindex_module
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.la b/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.la
new file mode 100644
index 00000000..139298d2
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.la
@@ -0,0 +1,35 @@
+# mod_autoindex.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_autoindex.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_autoindex.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
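+ /* The sort link takes the form "?C=<column>;O=<order>"; the
+ * order flips to the opposite direction when the currently
+ * active column is selected again (see qvalue[7] below).
+ */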
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.lo b/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.lo
new file mode 100644
index 00000000..6533eb63
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.lo
@@ -0,0 +1,12 @@
+# mod_autoindex.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_autoindex.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_autoindex.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.o b/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.o
new file mode 100644
index 00000000..08725d05
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_autoindex.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.c b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.c
new file mode 100644
index 00000000..32902c10
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.c
@@ -0,0 +1,1235 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_script: keeps all script-related ramblings together.
+ *
+ * Compliant with the CGI/1.1 spec
+ *
+ * Adapted by rst from original NCSA code by Rob McCool
+ *
+ * Apache adds some new env vars; REDIRECT_URL and REDIRECT_QUERY_STRING for
+ * custom error responses, and DOCUMENT_ROOT because we found it useful.
+ * It also adds SERVER_ADMIN - useful for scripts to know who to mail when
+ * they fail.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+#include "apr_optional.h"
+#include "apr_buckets.h"
+#include "apr_lib.h"
+#include "apr_poll.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#define CORE_PRIVATE
+
+#include "util_filter.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_log.h"
+#include "util_script.h"
+#include "ap_mpm.h"
+#include "mod_core.h"
+#include "mod_cgi.h"
+
+module AP_MODULE_DECLARE_DATA cgi_module;
+
+static APR_OPTIONAL_FN_TYPE(ap_register_include_handler) *cgi_pfn_reg_with_ssi;
+static APR_OPTIONAL_FN_TYPE(ap_ssi_get_tag_and_value) *cgi_pfn_gtv;
+static APR_OPTIONAL_FN_TYPE(ap_ssi_parse_string) *cgi_pfn_ps;
+static APR_OPTIONAL_FN_TYPE(ap_cgi_build_command) *cgi_build_command;
+
+/* Read and discard the data in the brigade produced by a CGI script */
+static void discard_script_output(apr_bucket_brigade *bb);
+
+/* KLUDGE --- for back-compatibility, we don't have to check ExecCGI
+ * in ScriptAliased directories, which means we need to know if this
+ * request came through ScriptAlias or not... so the Alias module
+ * leaves a note for us.
+ */
+
+static int is_scriptaliased(request_rec *r)
+{
+ const char *t = apr_table_get(r->notes, "alias-forced-type");
+ return t && (!strcasecmp(t, "cgi-script"));
+}
+
+/* Configuration stuff */
+
+#define DEFAULT_LOGBYTES 10385760
+#define DEFAULT_BUFBYTES 1024
+
+typedef struct {
+ const char *logname;
+ long logbytes;
+ apr_size_t bufbytes;
+} cgi_server_conf;
+
+static void *create_cgi_config(apr_pool_t *p, server_rec *s)
+{
+ cgi_server_conf *c =
+ (cgi_server_conf *) apr_pcalloc(p, sizeof(cgi_server_conf));
+
+ c->logname = NULL;
+ c->logbytes = DEFAULT_LOGBYTES;
+ c->bufbytes = DEFAULT_BUFBYTES;
+
+ return c;
+}
+
+static void *merge_cgi_config(apr_pool_t *p, void *basev, void *overridesv)
+{
+ cgi_server_conf *base = (cgi_server_conf *) basev,
+ *overrides = (cgi_server_conf *) overridesv;
+
+ return overrides->logname ? overrides : base;
+}
+
+static const char *set_scriptlog(cmd_parms *cmd, void *dummy, const char *arg)
+{
+ server_rec *s = cmd->server;
+ cgi_server_conf *conf = ap_get_module_config(s->module_config,
+ &cgi_module);
+
+ conf->logname = ap_server_root_relative(cmd->pool, arg);
+
+ if (!conf->logname) {
+ return apr_pstrcat(cmd->pool, "Invalid ScriptLog path ",
+ arg, NULL);
+ }
+
+ return NULL;
+}
+
+static const char *set_scriptlog_length(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ server_rec *s = cmd->server;
+ cgi_server_conf *conf = ap_get_module_config(s->module_config,
+ &cgi_module);
+
+ conf->logbytes = atol(arg);
+ return NULL;
+}
+
+static const char *set_scriptlog_buffer(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ server_rec *s = cmd->server;
+ cgi_server_conf *conf = ap_get_module_config(s->module_config,
+ &cgi_module);
+
+ conf->bufbytes = atoi(arg);
+ return NULL;
+}
+
+static const command_rec cgi_cmds[] =
+{
+AP_INIT_TAKE1("ScriptLog", set_scriptlog, NULL, RSRC_CONF,
+ "the name of a log for script debugging info"),
+AP_INIT_TAKE1("ScriptLogLength", set_scriptlog_length, NULL, RSRC_CONF,
+ "the maximum length (in bytes) of the script debug log"),
+AP_INIT_TAKE1("ScriptLogBuffer", set_scriptlog_buffer, NULL, RSRC_CONF,
+ "the maximum size (in bytes) to record of a POST request"),
+ {NULL}
+};
+
+static int log_scripterror(request_rec *r, cgi_server_conf * conf, int ret,
+ apr_status_t rv, char *error)
+{
+ apr_file_t *f = NULL;
+ apr_finfo_t finfo;
+ char time_str[APR_CTIME_LEN];
+ int log_flags = rv ? APLOG_ERR : APLOG_ERR;
+
+ ap_log_rerror(APLOG_MARK, log_flags, rv, r,
+ "%s: %s", error, r->filename);
+
+ /* XXX Very expensive mainline case! Open, then getfileinfo! */
+ if (!conf->logname ||
+ ((apr_stat(&finfo, conf->logname,
+ APR_FINFO_SIZE, r->pool) == APR_SUCCESS) &&
+ (finfo.size > conf->logbytes)) ||
+ (apr_file_open(&f, conf->logname,
+ APR_APPEND|APR_WRITE|APR_CREATE, APR_OS_DEFAULT,
+ r->pool) != APR_SUCCESS)) {
+ return ret;
+ }
+
+ /* "%% [Wed Jun 19 10:53:21 1996] GET /cgi-bin/printenv HTTP/1.0" */
+ apr_ctime(time_str, apr_time_now());
+ apr_file_printf(f, "%%%% [%s] %s %s%s%s %s\n", time_str, r->method, r->uri,
+ r->args ? "?" : "", r->args ? r->args : "", r->protocol);
+ /* "%% 500 /usr/local/apache/cgi-bin */
+ apr_file_printf(f, "%%%% %d %s\n", ret, r->filename);
+
+ apr_file_printf(f, "%%error\n%s\n", error);
+
+ apr_file_close(f);
+ return ret;
+}
+
+/* Soak up stderr from a script and redirect it to the error log.
+ */
+static apr_status_t log_script_err(request_rec *r, apr_file_t *script_err)
+{
+ char argsbuffer[HUGE_STRING_LEN];
+ char *newline;
+ apr_status_t rv;
+
+ while ((rv = apr_file_gets(argsbuffer, HUGE_STRING_LEN,
+ script_err)) == APR_SUCCESS) {
+ newline = strchr(argsbuffer, '\n');
+ if (newline) {
+ *newline = '\0';
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "%s", argsbuffer);
+ }
+
+ return rv;
+}
+
+static int log_script(request_rec *r, cgi_server_conf * conf, int ret,
+ char *dbuf, const char *sbuf, apr_bucket_brigade *bb,
+ apr_file_t *script_err)
+{
+ const apr_array_header_t *hdrs_arr = apr_table_elts(r->headers_in);
+ const apr_table_entry_t *hdrs = (const apr_table_entry_t *) hdrs_arr->elts;
+ char argsbuffer[HUGE_STRING_LEN];
+ apr_file_t *f = NULL;
+ apr_bucket *e;
+ const char *buf;
+ apr_size_t len;
+ apr_status_t rv;
+ int first;
+ int i;
+ apr_finfo_t finfo;
+ char time_str[APR_CTIME_LEN];
+
+ /* XXX Very expensive mainline case! Open, then getfileinfo! */
+ if (!conf->logname ||
+ ((apr_stat(&finfo, conf->logname,
+ APR_FINFO_SIZE, r->pool) == APR_SUCCESS) &&
+ (finfo.size > conf->logbytes)) ||
+ (apr_file_open(&f, conf->logname,
+ APR_APPEND|APR_WRITE|APR_CREATE, APR_OS_DEFAULT,
+ r->pool) != APR_SUCCESS)) {
+ /* Soak up script output */
+ discard_script_output(bb);
+ log_script_err(r, script_err);
+ return ret;
+ }
+
+ /* "%% [Wed Jun 19 10:53:21 1996] GET /cgi-bin/printenv HTTP/1.0" */
+ apr_ctime(time_str, apr_time_now());
+ apr_file_printf(f, "%%%% [%s] %s %s%s%s %s\n", time_str, r->method, r->uri,
+ r->args ? "?" : "", r->args ? r->args : "", r->protocol);
+ /* "%% 500 /usr/local/apache/cgi-bin" */
+ apr_file_printf(f, "%%%% %d %s\n", ret, r->filename);
+
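+ /* The log record continues with %request (headers plus any
+ * buffered POST/PUT body), %response, %stdout and %stderr blocks.
+ */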
+ apr_file_puts("%request\n", f);
+ for (i = 0; i < hdrs_arr->nelts; ++i) {
+ if (!hdrs[i].key)
+ continue;
+ apr_file_printf(f, "%s: %s\n", hdrs[i].key, hdrs[i].val);
+ }
+ if ((r->method_number == M_POST || r->method_number == M_PUT) &&
+ *dbuf) {
+ apr_file_printf(f, "\n%s\n", dbuf);
+ }
+
+ apr_file_puts("%response\n", f);
+ hdrs_arr = apr_table_elts(r->err_headers_out);
+ hdrs = (const apr_table_entry_t *) hdrs_arr->elts;
+
+ for (i = 0; i < hdrs_arr->nelts; ++i) {
+ if (!hdrs[i].key)
+ continue;
+ apr_file_printf(f, "%s: %s\n", hdrs[i].key, hdrs[i].val);
+ }
+
+ if (sbuf && *sbuf)
+ apr_file_printf(f, "%s\n", sbuf);
+
+ first = 1;
+ APR_BRIGADE_FOREACH(e, bb) {
+ if (APR_BUCKET_IS_EOS(e)) {
+ break;
+ }
+ rv = apr_bucket_read(e, &buf, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS || (len == 0)) {
+ break;
+ }
+ if (first) {
+ apr_file_puts("%stdout\n", f);
+ first = 0;
+ }
+ apr_file_write(f, buf, &len);
+ apr_file_puts("\n", f);
+ }
+
+ if (apr_file_gets(argsbuffer, HUGE_STRING_LEN, script_err) == APR_SUCCESS) {
+ apr_file_puts("%stderr\n", f);
+ apr_file_puts(argsbuffer, f);
+ while (apr_file_gets(argsbuffer, HUGE_STRING_LEN,
+ script_err) == APR_SUCCESS) {
+ apr_file_puts(argsbuffer, f);
+ }
+ apr_file_puts("\n", f);
+ }
+
+ apr_brigade_destroy(bb);
+ apr_file_close(script_err);
+
+ apr_file_close(f);
+ return ret;
+}
+
+
+/* This is the special environment used for running the "exec cmd="
+ * variety of SSI directives.
+ */
+static void add_ssi_vars(request_rec *r)
+{
+ apr_table_t *e = r->subprocess_env;
+
+ if (r->path_info && r->path_info[0] != '\0') {
+ request_rec *pa_req;
+
+ apr_table_setn(e, "PATH_INFO", ap_escape_shell_cmd(r->pool,
+ r->path_info));
+
+ pa_req = ap_sub_req_lookup_uri(ap_escape_uri(r->pool, r->path_info),
+ r, NULL);
+ if (pa_req->filename) {
+ apr_table_setn(e, "PATH_TRANSLATED",
+ apr_pstrcat(r->pool, pa_req->filename,
+ pa_req->path_info, NULL));
+ }
+ ap_destroy_sub_req(pa_req);
+ }
+
+ if (r->args) {
+ char *arg_copy = apr_pstrdup(r->pool, r->args);
+
+ apr_table_setn(e, "QUERY_STRING", r->args);
+ ap_unescape_url(arg_copy);
+ apr_table_setn(e, "QUERY_STRING_UNESCAPED",
+ ap_escape_shell_cmd(r->pool, arg_copy));
+ }
+}
+
+static void cgi_child_errfn(apr_pool_t *pool, apr_status_t err,
+ const char *description)
+{
+ apr_file_t *stderr_log;
+ char errbuf[200];
+
+ apr_file_open_stderr(&stderr_log, pool);
+ /* Escape the logged string because it may be something that
+ * came in over the network.
+ */
+ apr_file_printf(stderr_log,
+ "(%d)%s: %s\n",
+ err,
+ apr_strerror(err, errbuf, sizeof(errbuf)),
+#ifdef AP_UNSAFE_ERROR_LOG_UNESCAPED
+ description
+#else
+ ap_escape_logitem(pool, description)
+#endif
+ );
+}
+
+static apr_status_t run_cgi_child(apr_file_t **script_out,
+ apr_file_t **script_in,
+ apr_file_t **script_err,
+ const char *command,
+ const char * const argv[],
+ request_rec *r,
+ apr_pool_t *p,
+ cgi_exec_info_t *e_info)
+{
+ const char * const *env;
+ apr_procattr_t *procattr;
+ apr_proc_t *procnew;
+ apr_status_t rc = APR_SUCCESS;
+
+#if defined(RLIMIT_CPU) || defined(RLIMIT_NPROC) || \
+ defined(RLIMIT_DATA) || defined(RLIMIT_VMEM) || defined (RLIMIT_AS)
+
+ core_dir_config *conf = ap_get_module_config(r->per_dir_config,
+ &core_module);
+#endif
+
+#ifdef DEBUG_CGI
+#ifdef OS2
+ /* Under OS/2 need to use device con. */
+ FILE *dbg = fopen("con", "w");
+#else
+ FILE *dbg = fopen("/dev/tty", "w");
+#endif
+ int i;
+#endif
+
+ RAISE_SIGSTOP(CGI_CHILD);
+#ifdef DEBUG_CGI
+ fprintf(dbg, "Attempting to exec %s as CGI child (argv0 = %s)\n",
+ r->filename, argv[0]);
+#endif
+
+ env = (const char * const *)ap_create_environment(p, r->subprocess_env);
+
+#ifdef DEBUG_CGI
+ fprintf(dbg, "Environment: \n");
+ for (i = 0; env[i]; ++i)
+ fprintf(dbg, "'%s'\n", env[i]);
+#endif
+
+ /* Transmute ourselves into the script.
+ * NB only ISINDEX scripts get decoded arguments.
+ */
+ if (((rc = apr_procattr_create(&procattr, p)) != APR_SUCCESS) ||
+ ((rc = apr_procattr_io_set(procattr,
+ e_info->in_pipe,
+ e_info->out_pipe,
+ e_info->err_pipe)) != APR_SUCCESS) ||
+ ((rc = apr_procattr_dir_set(procattr,
+ ap_make_dirstr_parent(r->pool,
+ r->filename))) != APR_SUCCESS) ||
+#ifdef RLIMIT_CPU
+ ((rc = apr_procattr_limit_set(procattr, APR_LIMIT_CPU,
+ conf->limit_cpu)) != APR_SUCCESS) ||
+#endif
+#if defined(RLIMIT_DATA) || defined(RLIMIT_VMEM) || defined(RLIMIT_AS)
+ ((rc = apr_procattr_limit_set(procattr, APR_LIMIT_MEM,
+ conf->limit_mem)) != APR_SUCCESS) ||
+#endif
+#ifdef RLIMIT_NPROC
+ ((rc = apr_procattr_limit_set(procattr, APR_LIMIT_NPROC,
+ conf->limit_nproc)) != APR_SUCCESS) ||
+#endif
+ ((rc = apr_procattr_cmdtype_set(procattr,
+ e_info->cmd_type)) != APR_SUCCESS) ||
+
+ ((rc = apr_procattr_detach_set(procattr,
+ e_info->detached & AP_PROC_DETACHED)) != APR_SUCCESS) ||
+ ((rc = apr_procattr_addrspace_set(procattr,
+ (e_info->detached & AP_PROC_NEWADDRSPACE) ? 1 : 0)) != APR_SUCCESS) ||
+ ((rc = apr_procattr_child_errfn_set(procattr, cgi_child_errfn)) != APR_SUCCESS)) {
+ /* Something bad happened, tell the world. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r,
+ "couldn't set child process attributes: %s", r->filename);
+ }
+ else {
+ procnew = apr_pcalloc(p, sizeof(*procnew));
+ if (e_info->prog_type == RUN_AS_SSI) {
+ SPLIT_AND_PASS_PRETAG_BUCKETS(*(e_info->bb), e_info->ctx,
+ e_info->next, rc);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
+ }
+
+ rc = ap_os_create_privileged_process(r, procnew, command, argv, env,
+ procattr, p);
+
+ if (rc != APR_SUCCESS) {
+ /* Bad things happened. Everyone should have cleaned up. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR|APLOG_TOCLIENT, rc, r,
+ "couldn't create child process: %d: %s", rc,
+ apr_filename_of_pathname(r->filename));
+ }
+ else {
+ apr_pool_note_subprocess(p, procnew, APR_KILL_AFTER_TIMEOUT);
+
+ *script_in = procnew->out;
+ if (!*script_in)
+ return APR_EBADF;
+ apr_file_pipe_timeout_set(*script_in, r->server->timeout);
+
+ if (e_info->prog_type == RUN_AS_CGI) {
+ *script_out = procnew->in;
+ if (!*script_out)
+ return APR_EBADF;
+ apr_file_pipe_timeout_set(*script_out, r->server->timeout);
+
+ *script_err = procnew->err;
+ if (!*script_err)
+ return APR_EBADF;
+ apr_file_pipe_timeout_set(*script_err, r->server->timeout);
+ }
+ }
+ }
+#ifdef DEBUG_CGI
+ fclose(dbg);
+#endif
+ return (rc);
+}
+
+
+static apr_status_t default_build_command(const char **cmd, const char ***argv,
+ request_rec *r, apr_pool_t *p,
+ cgi_exec_info_t *e_info)
+{
+ int numwords, x, idx;
+ char *w;
+ const char *args = NULL;
+
+ if (e_info->process_cgi) {
+ *cmd = r->filename;
+ /* Do not process r->args if they contain an '=' assignment
+ */
+ if (r->args && r->args[0] && !ap_strchr_c(r->args, '=')) {
+ args = r->args;
+ }
+ }
+
+ if (!args) {
+ numwords = 1;
+ }
+ else {
+ /* count the number of keywords */
+ for (x = 0, numwords = 2; args[x]; x++) {
+ if (args[x] == '+') {
+ ++numwords;
+ }
+ }
+ }
+ /* Everything is - 1 to account for the first parameter
+ * which is the program name.
+ */
+ if (numwords > APACHE_ARG_MAX - 1) {
+ numwords = APACHE_ARG_MAX - 1; /* Truncate args to prevent overrun */
+ }
+ *argv = apr_palloc(p, (numwords + 2) * sizeof(char *));
+ (*argv)[0] = *cmd;
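+ /* ISINDEX-style query: each '+'-separated word becomes one argv
+ * element, URL-unescaped and then escaped for the shell.
+ */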
+ for (x = 1, idx = 1; x < numwords; x++) {
+ w = ap_getword_nulls(p, &args, '+');
+ ap_unescape_url(w);
+ (*argv)[idx++] = ap_escape_shell_cmd(p, w);
+ }
+ (*argv)[idx] = NULL;
+
+ return APR_SUCCESS;
+}
+
+static void discard_script_output(apr_bucket_brigade *bb)
+{
+ apr_bucket *e;
+ const char *buf;
+ apr_size_t len;
+ apr_status_t rv;
+ APR_BRIGADE_FOREACH(e, bb) {
+ if (APR_BUCKET_IS_EOS(e)) {
+ break;
+ }
+ rv = apr_bucket_read(e, &buf, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ break;
+ }
+ }
+}
+
+#if APR_FILES_AS_SOCKETS
+
+/* A CGI bucket type is needed to catch any output to stderr from the
+ * script; see PR 22030. */
+static const apr_bucket_type_t bucket_type_cgi;
+
+struct cgi_bucket_data {
+ apr_pollset_t *pollset;
+ request_rec *r;
+};
+
+/* Create a CGI bucket using pipes from script stdout 'out'
+ * and stderr 'err', for request 'r'. */
+static apr_bucket *cgi_bucket_create(request_rec *r,
+ apr_file_t *out, apr_file_t *err,
+ apr_bucket_alloc_t *list)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+ apr_status_t rv;
+ apr_pollfd_t fd;
+ struct cgi_bucket_data *data = apr_palloc(r->pool, sizeof *data);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ b->type = &bucket_type_cgi;
+ b->length = (apr_size_t)(-1);
+ b->start = -1;
+
+ /* Create the pollset */
+ rv = apr_pollset_create(&data->pollset, 2, r->pool, 0);
+ AP_DEBUG_ASSERT(rv == APR_SUCCESS);
+
+ fd.desc_type = APR_POLL_FILE;
+ fd.reqevents = APR_POLLIN;
+ fd.p = r->pool;
+ fd.desc.f = out; /* script's stdout */
+ fd.client_data = (void *)1;
+ rv = apr_pollset_add(data->pollset, &fd);
+ AP_DEBUG_ASSERT(rv == APR_SUCCESS);
+
+ fd.desc.f = err; /* script's stderr */
+ fd.client_data = (void *)2;
+ rv = apr_pollset_add(data->pollset, &fd);
+ AP_DEBUG_ASSERT(rv == APR_SUCCESS);
+
+ data->r = r;
+ b->data = data;
+ return b;
+}
+
+/* Create a duplicate CGI bucket using given bucket data */
+static apr_bucket *cgi_bucket_dup(struct cgi_bucket_data *data,
+ apr_bucket_alloc_t *list)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ b->type = &bucket_type_cgi;
+ b->length = (apr_size_t)(-1);
+ b->start = -1;
+ b->data = data;
+ return b;
+}
+
+/* Handle stdout from CGI child. Duplicate of logic from the _read
+ * method of the real APR pipe bucket implementation. */
+static apr_status_t cgi_read_stdout(apr_bucket *a, apr_file_t *out,
+ const char **str, apr_size_t *len)
+{
+ char *buf;
+ apr_status_t rv;
+
+ *str = NULL;
+ *len = APR_BUCKET_BUFF_SIZE;
+ buf = apr_bucket_alloc(*len, a->list); /* XXX: check for failure? */
+
+ rv = apr_file_read(out, buf, len);
+
+ if (rv != APR_SUCCESS && rv != APR_EOF) {
+ apr_bucket_free(buf);
+ return rv;
+ }
+
+ if (*len > 0) {
+ struct cgi_bucket_data *data = a->data;
+ apr_bucket_heap *h;
+
+ /* Change the current bucket to refer to what we read */
+ a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free);
+ h = a->data;
+ h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
+ *str = buf;
+ APR_BUCKET_INSERT_AFTER(a, cgi_bucket_dup(data, a->list));
+ }
+ else {
+ apr_bucket_free(buf);
+ a = apr_bucket_immortal_make(a, "", 0);
+ *str = a->data;
+ }
+ return rv;
+}
+
+/* Read method of CGI bucket: polls on stderr and stdout of the child,
+ * sending any stderr output immediately away to the error log. */
+static apr_status_t cgi_bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ struct cgi_bucket_data *data = b->data;
+ apr_interval_time_t timeout;
+ apr_status_t rv;
+ int gotdata = 0;
+
+ timeout = block == APR_NONBLOCK_READ ? 0 : data->r->server->timeout;
+
+ do {
+ const apr_pollfd_t *results;
+ apr_int32_t num;
+
+ rv = apr_pollset_poll(data->pollset, timeout, &num, &results);
+ if (APR_STATUS_IS_TIMEUP(rv)) {
+ return timeout == 0 ? APR_EAGAIN : rv;
+ }
+ else if (APR_STATUS_IS_EINTR(rv)) {
+ continue;
+ }
+ else if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, data->r,
+ "poll failed waiting for CGI child");
+ return rv;
+ }
+
+ for (; num; num--, results++) {
+ if (results[0].client_data == (void *)1) {
+ /* stdout */
+ rv = cgi_read_stdout(b, results[0].desc.f, str, len);
+ if (APR_STATUS_IS_EOF(rv)) {
+ rv = APR_SUCCESS;
+ }
+ gotdata = 1;
+ } else {
+ /* stderr */
+ apr_status_t rv2 = log_script_err(data->r, results[0].desc.f);
+ if (APR_STATUS_IS_EOF(rv2)) {
+ apr_pollset_remove(data->pollset, &results[0]);
+ }
+ }
+ }
+
+ } while (!gotdata);
+
+ return rv;
+}
+
+static const apr_bucket_type_t bucket_type_cgi = {
+ "CGI", 5, APR_BUCKET_DATA,
+ apr_bucket_destroy_noop,
+ cgi_bucket_read,
+ apr_bucket_setaside_notimpl,
+ apr_bucket_split_notimpl,
+ apr_bucket_copy_notimpl
+};
+
+#endif
+
+static int cgi_handler(request_rec *r)
+{
+ int nph;
+ apr_size_t dbpos = 0;
+ const char *argv0;
+ const char *command;
+ const char **argv;
+ char *dbuf = NULL;
+ apr_file_t *script_out = NULL, *script_in = NULL, *script_err = NULL;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ int is_included;
+ int seen_eos, child_stopped_reading;
+ apr_pool_t *p;
+ cgi_server_conf *conf;
+ apr_status_t rv;
+ cgi_exec_info_t e_info;
+ conn_rec *c = r->connection;
+
+ if(strcmp(r->handler, CGI_MAGIC_TYPE) && strcmp(r->handler, "cgi-script"))
+ return DECLINED;
+
+ is_included = !strcmp(r->protocol, "INCLUDED");
+
+ p = r->main ? r->main->pool : r->pool;
+
+ argv0 = apr_filename_of_pathname(r->filename);
+ nph = !(strncmp(argv0, "nph-", 4));
+ conf = ap_get_module_config(r->server->module_config, &cgi_module);
+
+ if (!(ap_allow_options(r) & OPT_EXECCGI) && !is_scriptaliased(r))
+ return log_scripterror(r, conf, HTTP_FORBIDDEN, 0,
+ "Options ExecCGI is off in this directory");
+ if (nph && is_included)
+ return log_scripterror(r, conf, HTTP_FORBIDDEN, 0,
+ "attempt to include NPH CGI script");
+
+ if (r->finfo.filetype == 0)
+ return log_scripterror(r, conf, HTTP_NOT_FOUND, 0,
+ "script not found or unable to stat");
+ if (r->finfo.filetype == APR_DIR)
+ return log_scripterror(r, conf, HTTP_FORBIDDEN, 0,
+ "attempt to invoke directory as script");
+
+ if ((r->used_path_info == AP_REQ_REJECT_PATH_INFO) &&
+ r->path_info && *r->path_info)
+ {
+ /* default to accept */
+ return log_scripterror(r, conf, HTTP_NOT_FOUND, 0,
+ "AcceptPathInfo off disallows user's path");
+ }
+/*
+ if (!ap_suexec_enabled) {
+ if (!ap_can_exec(&r->finfo))
+ return log_scripterror(r, conf, HTTP_FORBIDDEN, 0,
+ "file permissions deny server execution");
+ }
+
+*/
+ ap_add_common_vars(r);
+ ap_add_cgi_vars(r);
+
+ e_info.process_cgi = 1;
+ e_info.cmd_type = APR_PROGRAM;
+ e_info.detached = 0;
+ e_info.in_pipe = APR_CHILD_BLOCK;
+ e_info.out_pipe = APR_CHILD_BLOCK;
+ e_info.err_pipe = APR_CHILD_BLOCK;
+ e_info.prog_type = RUN_AS_CGI;
+ e_info.bb = NULL;
+ e_info.ctx = NULL;
+ e_info.next = NULL;
+
+ /* build the command line */
+ if ((rv = cgi_build_command(&command, &argv, r, p, &e_info)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "don't know how to spawn child process: %s",
+ r->filename);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /* run the script in its own process */
+ if ((rv = run_cgi_child(&script_out, &script_in, &script_err,
+ command, argv, r, p, &e_info)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "couldn't spawn child process: %s", r->filename);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /* Transfer any put/post args, CERN style...
+ * Note that we already ignore SIGPIPE in the core server.
+ */
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ seen_eos = 0;
+ child_stopped_reading = 0;
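+ /* With ScriptLog configured, keep up to ScriptLogBuffer bytes of
+ * the request body in dbuf so log_script() can record it later.
+ */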
+ if (conf->logname) {
+ dbuf = apr_palloc(r->pool, conf->bufbytes + 1);
+ dbpos = 0;
+ }
+ do {
+ apr_bucket *bucket;
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, HUGE_STRING_LEN);
+
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "Error reading request entity data");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ APR_BRIGADE_FOREACH(bucket, bb) {
+ const char *data;
+ apr_size_t len;
+
+ if (APR_BUCKET_IS_EOS(bucket)) {
+ seen_eos = 1;
+ break;
+ }
+
+ /* We can't do much with this. */
+ if (APR_BUCKET_IS_FLUSH(bucket)) {
+ continue;
+ }
+
+ /* If the child stopped, we still must read to EOS. */
+ if (child_stopped_reading) {
+ continue;
+ }
+
+ /* read */
+ apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
+
+ if (conf->logname && dbpos < conf->bufbytes) {
+ int cursize;
+
+ if ((dbpos + len) > conf->bufbytes) {
+ cursize = conf->bufbytes - dbpos;
+ }
+ else {
+ cursize = len;
+ }
+ memcpy(dbuf + dbpos, data, cursize);
+ dbpos += cursize;
+ }
+
+ /* Keep writing data to the child until done or too much time
+ * elapses with no progress or an error occurs.
+ */
+ rv = apr_file_write_full(script_out, data, len, NULL);
+
+ if (rv != APR_SUCCESS) {
+ /* silly script stopped reading, soak up remaining message */
+ child_stopped_reading = 1;
+ }
+ }
+ apr_brigade_cleanup(bb);
+ }
+ while (!seen_eos);
+
+ if (conf->logname) {
+ dbuf[dbpos] = '\0';
+ }
+ /* Is this flush really needed? */
+ apr_file_flush(script_out);
+ apr_file_close(script_out);
+
+ AP_DEBUG_ASSERT(script_in != NULL);
+
+ apr_brigade_cleanup(bb);
+
+#if APR_FILES_AS_SOCKETS
+ apr_file_pipe_timeout_set(script_in, 0);
+ apr_file_pipe_timeout_set(script_err, 0);
+
+ b = cgi_bucket_create(r, script_in, script_err, c->bucket_alloc);
+#else
+ b = apr_bucket_pipe_create(script_in, c->bucket_alloc);
+#endif
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+
+ /* Handle script return... */
+ if (!nph) {
+ const char *location;
+ char sbuf[MAX_STRING_LEN];
+ int ret;
+
+ if ((ret = ap_scan_script_header_err_brigade(r, bb, sbuf))) {
+ return log_script(r, conf, ret, dbuf, sbuf, bb, script_err);
+ }
+
+ location = apr_table_get(r->headers_out, "Location");
+
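+ /* A Location starting with '/' is served via an internal
+ * redirect; any other Location value is turned into an
+ * external 302 response.
+ */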
+ if (location && location[0] == '/' && r->status == 200) {
+ discard_script_output(bb);
+ apr_brigade_destroy(bb);
+ apr_file_pipe_timeout_set(script_err, r->server->timeout);
+ log_script_err(r, script_err);
+ /* This redirect needs to be a GET no matter what the original
+ * method was.
+ */
+ r->method = apr_pstrdup(r->pool, "GET");
+ r->method_number = M_GET;
+
+ /* We already read the message body (if any), so don't allow
+ * the redirected request to think it has one. We can ignore
+ * Transfer-Encoding, since we used REQUEST_CHUNKED_ERROR.
+ */
+ apr_table_unset(r->headers_in, "Content-Length");
+
+ ap_internal_redirect_handler(location, r);
+ return OK;
+ }
+ else if (location && r->status == 200) {
+ /* XX Note that if a script wants to produce its own Redirect
+ * body, it now has to explicitly *say* "Status: 302"
+ */
+ discard_script_output(bb);
+ apr_brigade_destroy(bb);
+ return HTTP_MOVED_TEMPORARILY;
+ }
+
+ rv = ap_pass_brigade(r->output_filters, bb);
+ }
+ else /* nph */ {
+ struct ap_filter_t *cur;
+
+ /* get rid of all filters up through protocol... since we
+ * haven't parsed off the headers, there is no way they can
+ * work
+ */
+
+ cur = r->proto_output_filters;
+ while (cur && cur->frec->ftype < AP_FTYPE_CONNECTION) {
+ cur = cur->next;
+ }
+ r->output_filters = r->proto_output_filters = cur;
+
+ rv = ap_pass_brigade(r->output_filters, bb);
+ }
+
+ /* don't soak up script output if errors occurred writing it
+ * out... otherwise, we prolong the life of the script when the
+ * connection drops or we stopped sending output for some other
+ * reason */
+ if (rv == APR_SUCCESS && !r->connection->aborted) {
+ apr_file_pipe_timeout_set(script_err, r->server->timeout);
+ log_script_err(r, script_err);
+ }
+
+ apr_file_close(script_err);
+
+ return OK; /* NOT r->status, even if it has changed. */
+}
+
+/*============================================================================
+ *============================================================================
+ * This is the beginning of the cgi filter code moved from mod_include. This
+ * is the code required to handle the "exec" SSI directive.
+ *============================================================================
+ *============================================================================*/
+static int include_cgi(char *s, request_rec *r, ap_filter_t *next,
+ apr_bucket *head_ptr, apr_bucket **inserted_head)
+{
+ request_rec *rr = ap_sub_req_lookup_uri(s, r, next);
+ int rr_status;
+ apr_bucket *tmp_buck, *tmp2_buck;
+
+ if (rr->status != HTTP_OK) {
+ ap_destroy_sub_req(rr);
+ return -1;
+ }
+
+ /* No hardwired path info or query allowed */
+
+ if ((rr->path_info && rr->path_info[0]) || rr->args) {
+ ap_destroy_sub_req(rr);
+ return -1;
+ }
+ if (rr->finfo.filetype != APR_REG) {
+ ap_destroy_sub_req(rr);
+ return -1;
+ }
+
+    /* Script gets parameters of the *document*, for backward compatibility */
+
+ rr->path_info = r->path_info; /* hard to get right; see mod_cgi.c */
+ rr->args = r->args;
+
+ /* Force sub_req to be treated as a CGI request, even if ordinary
+ * typing rules would have called it something else.
+ */
+
+ ap_set_content_type(rr, CGI_MAGIC_TYPE);
+
+ /* Run it. */
+
+ rr_status = ap_run_sub_req(rr);
+ if (ap_is_HTTP_REDIRECT(rr_status)) {
+ apr_size_t len_loc;
+ const char *location = apr_table_get(rr->headers_out, "Location");
+ conn_rec *c = r->connection;
+
+ location = ap_escape_html(rr->pool, location);
+ len_loc = strlen(location);
+
+ /* XXX: if most of this stuff is going to get copied anyway,
+ * it'd be more efficient to pstrcat it into a single pool buffer
+ * and a single pool bucket */
+
+ tmp_buck = apr_bucket_immortal_create("<A HREF=\"",
+ sizeof("<A HREF=\"") - 1,
+ c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp_buck);
+ tmp2_buck = apr_bucket_heap_create(location, len_loc, NULL,
+ c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp2_buck);
+ tmp2_buck = apr_bucket_immortal_create("\">", sizeof("\">") - 1,
+ c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp2_buck);
+ tmp2_buck = apr_bucket_heap_create(location, len_loc, NULL,
+ c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp2_buck);
+ tmp2_buck = apr_bucket_immortal_create("</A>", sizeof("</A>") - 1,
+ c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp2_buck);
+
+ if (*inserted_head == NULL) {
+ *inserted_head = tmp_buck;
+ }
+ }
+
+ ap_destroy_sub_req(rr);
+
+ return 0;
+}
+
+
+static int include_cmd(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ const char *command, request_rec *r, ap_filter_t *f)
+{
+ cgi_exec_info_t e_info;
+ const char **argv;
+ apr_file_t *script_out = NULL, *script_in = NULL, *script_err = NULL;
+ apr_bucket_brigade *bcgi;
+ apr_bucket *b;
+ apr_status_t rv;
+
+ add_ssi_vars(r);
+
+ e_info.process_cgi = 0;
+ e_info.cmd_type = APR_SHELLCMD;
+ e_info.detached = 0;
+ e_info.in_pipe = APR_NO_PIPE;
+ e_info.out_pipe = APR_FULL_BLOCK;
+ e_info.err_pipe = APR_NO_PIPE;
+ e_info.prog_type = RUN_AS_SSI;
+ e_info.bb = bb;
+ e_info.ctx = ctx;
+ e_info.next = f->next;
+
+ if ((rv = cgi_build_command(&command, &argv, r, r->pool, &e_info)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "don't know how to spawn cmd child process: %s",
+ r->filename);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /* run the script in its own process */
+ if ((rv = run_cgi_child(&script_out, &script_in, &script_err,
+ command, argv, r, r->pool,
+ &e_info)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "couldn't spawn child process: %s", r->filename);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ bcgi = apr_brigade_create(r->pool, f->c->bucket_alloc);
+ b = apr_bucket_pipe_create(script_in, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bcgi, b);
+ ap_pass_brigade(f->next, bcgi);
+
+ /* We can't close the pipe here, because we may return before the
+ * full CGI has been sent to the network. That's okay though,
+ * because we can rely on the pool to close the pipe for us.
+ */
+
+ return 0;
+}
+
+static int handle_exec(include_ctx_t *ctx, apr_bucket_brigade **bb,
+ request_rec *r, ap_filter_t *f, apr_bucket *head_ptr,
+ apr_bucket **inserted_head)
+{
+ char *tag = NULL;
+ char *tag_val = NULL;
+ char *file = r->filename;
+ apr_bucket *tmp_buck;
+ char parsed_string[MAX_STRING_LEN];
+
+ *inserted_head = NULL;
+ if (ctx->flags & FLAG_PRINTING) {
+ if (ctx->flags & FLAG_NO_EXEC) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "exec used but not allowed in %s", r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ }
+ else {
+ while (1) {
+ cgi_pfn_gtv(ctx, &tag, &tag_val, 1);
+ if (tag_val == NULL) {
+ if (tag == NULL) {
+ return 0;
+ }
+ else {
+ return 1;
+ }
+ }
+ if (!strcmp(tag, "cmd")) {
+ cgi_pfn_ps(r, ctx, tag_val, parsed_string,
+ sizeof(parsed_string), 1);
+ if (include_cmd(ctx, bb, parsed_string, r, f) == -1) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "execution failure for parameter \"%s\" "
+ "to tag exec in file %s", tag, r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr,
+ *inserted_head);
+ }
+ }
+ else if (!strcmp(tag, "cgi")) {
+ apr_status_t retval = APR_SUCCESS;
+
+ cgi_pfn_ps(r, ctx, tag_val, parsed_string,
+ sizeof(parsed_string), 0);
+
+ SPLIT_AND_PASS_PRETAG_BUCKETS(*bb, ctx, f->next, retval);
+ if (retval != APR_SUCCESS) {
+ return retval;
+ }
+
+ if (include_cgi(parsed_string, r, f->next, head_ptr,
+ inserted_head) == -1) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "invalid CGI ref \"%s\" in %s",
+ tag_val, file);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr,
+ *inserted_head);
+ }
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unknown parameter \"%s\" to tag exec in %s",
+ tag, file);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr,
+ *inserted_head);
+ }
+ }
+ }
+ }
+ return 0;
+}
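+
+/* A note on what feeds the handler above: when mod_include parses a page it
+ * hands "exec" elements here, so a hypothetical .shtml document containing
+ *
+ *     <!--#exec cmd="/bin/ls -l" -->
+ *     <!--#exec cgi="/cgi-bin/env.cgi" -->
+ *
+ * reaches include_cmd() (shell command, APR_SHELLCMD) and include_cgi()
+ * (sub-request on the given URI) respectively; any other parameter name is
+ * reported as "unknown parameter" and replaced with the error bucket.
+ */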
+
+
+/*============================================================================
+ *============================================================================
+ * This is the end of the cgi filter code moved from mod_include.
+ *============================================================================
+ *============================================================================*/
+
+
+static int cgi_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ cgi_pfn_reg_with_ssi = APR_RETRIEVE_OPTIONAL_FN(ap_register_include_handler);
+ cgi_pfn_gtv = APR_RETRIEVE_OPTIONAL_FN(ap_ssi_get_tag_and_value);
+ cgi_pfn_ps = APR_RETRIEVE_OPTIONAL_FN(ap_ssi_parse_string);
+
+ if ((cgi_pfn_reg_with_ssi) && (cgi_pfn_gtv) && (cgi_pfn_ps)) {
+ /* Required by mod_include filter. This is how mod_cgi registers
+ * with mod_include to provide processing of the exec directive.
+ */
+ cgi_pfn_reg_with_ssi("exec", handle_exec);
+ }
+
+    /* This is how unusual (non-unix) OSes can provide an alternate way to
+     * run a given command (e.g. shebang/registry parsing on Win32)
+     */
+ cgi_build_command = APR_RETRIEVE_OPTIONAL_FN(ap_cgi_build_command);
+ if (!cgi_build_command) {
+ cgi_build_command = default_build_command;
+ }
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ static const char * const aszPre[] = { "mod_include.c", NULL };
+ ap_hook_handler(cgi_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_config(cgi_post_config, aszPre, NULL, APR_HOOK_REALLY_FIRST);
+}
+
+module AP_MODULE_DECLARE_DATA cgi_module =
+{
+ STANDARD20_MODULE_STUFF,
+    NULL,                        /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ create_cgi_config, /* server config */
+ merge_cgi_config, /* merge server config */
+ cgi_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.dsp b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.dsp
new file mode 100644
index 00000000..02167c19
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.dsp
@@ -0,0 +1,132 @@
+# Microsoft Developer Studio Project File - Name="mod_cgi" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_cgi - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_cgi.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_cgi.mak" CFG="mod_cgi - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_cgi - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_cgi - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_cgi - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_cgi_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_cgi.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cgi.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_cgi.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cgi.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_cgi - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_cgi_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_cgi.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cgi.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_cgi.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cgi.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_cgi - Win32 Release"
+# Name "mod_cgi - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_cgi.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_cgi.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_cgi.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_cgi - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_cgi.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_cgi.so "cgi_module for Apache" ../../include/ap_release.h > .\mod_cgi.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_cgi - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_cgi.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_cgi.so "cgi_module for Apache" ../../include/ap_release.h > .\mod_cgi.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.exp b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.exp
new file mode 100644
index 00000000..96ea0c23
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.exp
@@ -0,0 +1 @@
+cgi_module
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.h b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.h
new file mode 100644
index 00000000..9a54895a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgi.h
@@ -0,0 +1,62 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _MOD_CGI_H
+#define _MOD_CGI_H 1
+
+#include "mod_include.h"
+
+#define AP_PROC_DETACHED 1
+#define AP_PROC_NEWADDRSPACE 2
+
+typedef enum {RUN_AS_SSI, RUN_AS_CGI} prog_types;
+
+typedef struct {
+ apr_int32_t in_pipe;
+ apr_int32_t out_pipe;
+ apr_int32_t err_pipe;
+ int process_cgi;
+ apr_cmdtype_e cmd_type;
+ apr_int32_t detached; /* used as a bitfield for detached_ & addrspace_set, */
+ /* when initializing apr_proc_attr structure */
+ prog_types prog_type;
+ apr_bucket_brigade **bb;
+ include_ctx_t *ctx;
+ ap_filter_t *next;
+} cgi_exec_info_t;
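+
+/* As an illustration of typical values (mirroring what include_cmd() in
+ * mod_cgi.c sets up for an SSI "exec cmd" element):
+ *
+ *     cgi_exec_info_t e_info;
+ *     e_info.process_cgi = 0;            -- not a full CGI request
+ *     e_info.cmd_type    = APR_SHELLCMD; -- run through the shell
+ *     e_info.detached    = 0;
+ *     e_info.in_pipe     = APR_NO_PIPE;  -- nothing is written to its stdin
+ *     e_info.out_pipe    = APR_FULL_BLOCK;
+ *     e_info.err_pipe    = APR_NO_PIPE;
+ *     e_info.prog_type   = RUN_AS_SSI;
+ *
+ * A plain CGI request instead uses RUN_AS_CGI with APR_PROGRAM and
+ * child-blocking pipes (compare the CGI_REQ branch in mod_cgid.c).
+ */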
+
+/**
+ * Registerable optional function to override CGI behavior;
+ * Reprocess the command and arguments to execute the given CGI script.
+ * @param cmd Pointer to the command to execute (may be overridden)
+ * @param argv Pointer to the arguments to pass (may be overridden)
+ * @param r The current request
+ * @param p The pool to allocate correct cmd/argv elements within.
+ * @param e_info The execution parameters, in and out.  On entry its
+ *               process_cgi field is true if r->filename and r->args are
+ *               being processed as a CGI invocation (otherwise false), its
+ *               cmd_type field is APR_SHELLCMD or APR_PROGRAM and may be
+ *               changed to invoke the program with alternate semantics, and
+ *               its detached field says whether the child should start in a
+ *               detached state (the default is no).
+ * @remark This callback may be registered by the os-specific module
+ * to correct the command and arguments for apr_proc_create invocation
+ * on a given os. mod_cgi will call the function if registered.
+ */
+APR_DECLARE_OPTIONAL_FN(apr_status_t, ap_cgi_build_command,
+ (const char **cmd, const char ***argv,
+ request_rec *r, apr_pool_t *p,
+ cgi_exec_info_t *e_info));
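+
+/* A minimal sketch of how the two sides of this optional function meet; the
+ * provider shown here is hypothetical (per the remark above, an os-specific
+ * module such as the Win32 support code is the usual provider, after it
+ * defines a function with this name and signature):
+ *
+ *     -- in the providing module's register_hooks():
+ *     APR_REGISTER_OPTIONAL_FN(ap_cgi_build_command);
+ *
+ *     -- in mod_cgi's cgi_post_config():
+ *     cgi_build_command = APR_RETRIEVE_OPTIONAL_FN(ap_cgi_build_command);
+ *     if (!cgi_build_command) {
+ *         cgi_build_command = default_build_command;
+ *     }
+ */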
+
+#endif /* _MOD_CGI_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.c b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.c
new file mode 100644
index 00000000..55ac3b04
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.c
@@ -0,0 +1,1744 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_script: keeps all script-related ramblings together.
+ *
+ * Compliant to cgi/1.1 spec
+ *
+ * Adapted by rst from original NCSA code by Rob McCool
+ *
+ * Apache adds some new env vars; REDIRECT_URL and REDIRECT_QUERY_STRING for
+ * custom error responses, and DOCUMENT_ROOT because we found it useful.
+ * It also adds SERVER_ADMIN - useful for scripts to know who to mail when
+ * they fail.
+ */
+
+#include "apr_lib.h"
+#include "apr_strings.h"
+#include "apr_general.h"
+#include "apr_file_io.h"
+#include "apr_portable.h"
+#include "apr_buckets.h"
+#include "apr_optional.h"
+#include "apr_signal.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#if APR_HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#if APR_HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#define CORE_PRIVATE
+
+#include "util_filter.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_log.h"
+#include "util_script.h"
+#include "ap_mpm.h"
+#include "unixd.h"
+#include "mod_suexec.h"
+#include "../filters/mod_include.h"
+
+#include "mod_core.h"
+
+
+/* ### should be tossed in favor of APR */
+#include <sys/stat.h>
+#include <sys/un.h> /* for sockaddr_un */
+
+
+module AP_MODULE_DECLARE_DATA cgid_module;
+
+static int cgid_start(apr_pool_t *p, server_rec *main_server, apr_proc_t *procnew);
+static int cgid_init(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *main_server);
+static int handle_exec(include_ctx_t *ctx, apr_bucket_brigade **bb, request_rec *r,
+ ap_filter_t *f, apr_bucket *head_ptr, apr_bucket **inserted_head);
+
+static APR_OPTIONAL_FN_TYPE(ap_register_include_handler) *cgid_pfn_reg_with_ssi;
+static APR_OPTIONAL_FN_TYPE(ap_ssi_get_tag_and_value) *cgid_pfn_gtv;
+static APR_OPTIONAL_FN_TYPE(ap_ssi_parse_string) *cgid_pfn_ps;
+
+static apr_pool_t *pcgi = NULL;
+static int total_modules = 0;
+static pid_t daemon_pid;
+static int daemon_should_exit = 0;
+static server_rec *root_server = NULL;
+static apr_pool_t *root_pool = NULL;
+static ap_unix_identity_t empty_ugid = { (uid_t)-1, (gid_t)-1, -1 };
+
+/* Read and discard the data in the brigade produced by a CGI script */
+static void discard_script_output(apr_bucket_brigade *bb);
+
+/* This doer will only ever be called when we are sure that we have
+ * a valid ugid.
+ */
+static ap_unix_identity_t *cgid_suexec_id_doer(const request_rec *r)
+{
+ return (ap_unix_identity_t *)
+ ap_get_module_config(r->request_config, &cgid_module);
+}
+
+/* KLUDGE --- for backward compatibility, we don't have to check ExecCGI
+ * in ScriptAliased directories, which means we need to know if this
+ * request came through ScriptAlias or not... so the Alias module
+ * leaves a note for us.
+ */
+
+static int is_scriptaliased(request_rec *r)
+{
+ const char *t = apr_table_get(r->notes, "alias-forced-type");
+ return t && (!strcasecmp(t, "cgi-script"));
+}
+
+/* Configuration stuff */
+
+#define DEFAULT_LOGBYTES 10385760
+#define DEFAULT_BUFBYTES 1024
+#define DEFAULT_SOCKET DEFAULT_REL_RUNTIMEDIR "/cgisock"
+
+#define CGI_REQ 1
+#define SSI_REQ 2
+#define GETPID_REQ 3 /* get the pid of script created for prior request */
+
+#define ERRFN_USERDATA_KEY "CGIDCHILDERRFN"
+
+/* DEFAULT_CGID_LISTENBACKLOG controls the max depth on the unix socket's
+ * pending connection queue. If a bunch of cgi requests arrive at about
+ * the same time, connections from httpd threads/processes will back up
+ * in the queue while the cgid process slowly forks off a child to process
+ * each connection on the unix socket. If the queue is too short, the
+ * httpd process will get ECONNREFUSED when trying to connect.
+ */
+#ifndef DEFAULT_CGID_LISTENBACKLOG
+#define DEFAULT_CGID_LISTENBACKLOG 100
+#endif
+
+/* DEFAULT_CONNECT_ATTEMPTS controls how many times we'll try to connect
+ * to the cgi daemon from the thread/process handling the cgi request.
+ * Generally we want to retry when we get ECONNREFUSED since it is
+ * probably because the listen queue is full. We need to try harder so
+ * the client doesn't see it as a 503 error.
+ *
+ * Set this to 0 to continually retry until the connect works or Apache
+ * terminates.
+ */
+#ifndef DEFAULT_CONNECT_ATTEMPTS
+#define DEFAULT_CONNECT_ATTEMPTS 15
+#endif
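+
+/* Both values above are only defaults: each is wrapped in #ifndef, so a
+ * build that needs something different can override them at compile time,
+ * e.g. (illustrative flags, not part of the stock build):
+ *
+ *     CFLAGS="-DDEFAULT_CGID_LISTENBACKLOG=511 -DDEFAULT_CONNECT_ATTEMPTS=0"
+ *
+ * where a connect-attempts value of 0 means "retry until the connect works
+ * or Apache terminates", as described above.
+ */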
+
+typedef struct {
+ const char *sockname;
+ const char *logname;
+ long logbytes;
+ int bufbytes;
+} cgid_server_conf;
+
+typedef struct {
+ int req_type; /* request type (CGI_REQ, SSI_REQ, etc.) */
+ unsigned long conn_id; /* connection id; daemon uses this as a hash value
+ * to find the script pid when it is time for that
+ * process to be cleaned up
+ */
+ int core_module_index;
+ int env_count;
+ ap_unix_identity_t ugid;
+ apr_size_t filename_len;
+ apr_size_t argv0_len;
+ apr_size_t uri_len;
+ apr_size_t args_len;
+ int loglevel; /* to stuff in server_rec */
+} cgid_req_t;
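+
+/* What travels over the unix socket for a request (see send_req() and
+ * get_req() below) is this fixed-size header followed by variable-length
+ * fields, in this order:
+ *
+ *     cgid_req_t header
+ *     filename   (filename_len bytes, no terminating NUL)
+ *     argv0      (argv0_len bytes)
+ *     uri        (uri_len bytes)
+ *     args       (args_len bytes, present only when args_len > 0)
+ *     env_count repetitions of: apr_size_t length, then that many bytes
+ *
+ * A GETPID_REQ consists of the header alone; the daemon answers it with the
+ * pid of the script started for that connection id.
+ */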
+
+/* This routine is called to create the argument list to be passed
+ * to the CGI script. When suexec is enabled, the suexec path, user, and
+ * group are the first three arguments to be passed; if not, all three
+ * must be NULL. The query info is split into separate arguments, where
+ * "+" is the separator between keyword arguments.
+ *
+ * Do not process the args if they contain an '=' assignment.
+ */
+static char **create_argv(apr_pool_t *p, char *path, char *user, char *group,
+ char *av0, const char *args)
+{
+ int x, numwords;
+ char **av;
+ char *w;
+ int idx = 0;
+
+ if (ap_strchr_c(args, '=')) {
+ numwords = 0;
+ }
+ else {
+ /* count the number of keywords */
+
+ for (x = 0, numwords = 1; args[x]; x++) {
+ if (args[x] == '+') {
+ ++numwords;
+ }
+ }
+ }
+
+ if (numwords > APACHE_ARG_MAX - 5) {
+ numwords = APACHE_ARG_MAX - 5; /* Truncate args to prevent overrun */
+ }
+ av = (char **) apr_pcalloc(p, (numwords + 5) * sizeof(char *));
+
+ if (path) {
+ av[idx++] = path;
+ }
+ if (user) {
+ av[idx++] = user;
+ }
+ if (group) {
+ av[idx++] = group;
+ }
+
+ av[idx++] = apr_pstrdup(p, av0);
+
+ for (x = 1; x <= numwords; x++) {
+ w = ap_getword_nulls(p, &args, '+');
+ if (strcmp(w, "")) {
+ ap_unescape_url(w);
+ av[idx++] = ap_escape_shell_cmd(p, w);
+ }
+ }
+ av[idx] = NULL;
+ return av;
+}
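+
+/* A worked example (hypothetical request, suexec disabled so path, user and
+ * group are all NULL): a query string of "one+two%3Dtwo+three" yields
+ *
+ *     av[0] = copy of av0 (the script name)
+ *     av[1] = "one"
+ *     av[2] = "two=two"   (URL-unescaped, then shell-escaped)
+ *     av[3] = "three"
+ *     av[4] = NULL
+ *
+ * while a query string containing a literal '=' (an ordinary form submission
+ * such as "a=1&b=2") produces only av[0] and the terminating NULL.
+ */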
+
+#if APR_HAS_OTHER_CHILD
+static void cgid_maint(int reason, void *data, apr_wait_t status)
+{
+ apr_proc_t *proc = data;
+ int mpm_state;
+ int stopping;
+
+ switch (reason) {
+ case APR_OC_REASON_DEATH:
+ apr_proc_other_child_unregister(data);
+ /* If apache is not terminating or restarting,
+ * restart the cgid daemon
+ */
+ stopping = 1; /* if MPM doesn't support query,
+ * assume we shouldn't restart daemon
+ */
+ if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state) == APR_SUCCESS &&
+ mpm_state != AP_MPMQ_STOPPING) {
+ stopping = 0;
+ }
+ if (!stopping) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL,
+ "cgid daemon process died, restarting");
+ cgid_start(root_pool, root_server, proc);
+ }
+ break;
+ case APR_OC_REASON_RESTART:
+ /* don't do anything; server is stopping or restarting */
+ apr_proc_other_child_unregister(data);
+ break;
+ case APR_OC_REASON_LOST:
+ /* Restart the child cgid daemon process */
+ apr_proc_other_child_unregister(data);
+ cgid_start(root_pool, root_server, proc);
+ break;
+ case APR_OC_REASON_UNREGISTER:
+ /* we get here when pcgi is cleaned up; pcgi gets cleaned
+ * up when pconf gets cleaned up
+ */
+ kill(proc->pid, SIGHUP); /* send signal to daemon telling it to die */
+ break;
+ }
+}
+#endif
+
+/* deal with incomplete reads and signals
+ * assume you really have to read buf_size bytes
+ */
+static apr_status_t sock_read(int fd, void *vbuf, size_t buf_size)
+{
+ char *buf = vbuf;
+ int rc;
+ size_t bytes_read = 0;
+
+ do {
+ do {
+ rc = read(fd, buf + bytes_read, buf_size - bytes_read);
+ } while (rc < 0 && errno == EINTR);
+ switch(rc) {
+ case -1:
+ return errno;
+ case 0: /* unexpected */
+ return ECONNRESET;
+ default:
+ bytes_read += rc;
+ }
+ } while (bytes_read < buf_size);
+
+ return APR_SUCCESS;
+}
+
+/* deal with signals
+ */
+static apr_status_t sock_write(int fd, const void *buf, size_t buf_size)
+{
+ int rc;
+
+ do {
+ rc = write(fd, buf, buf_size);
+ } while (rc < 0 && errno == EINTR);
+ if (rc < 0) {
+ return errno;
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t get_req(int fd, request_rec *r, char **argv0, char ***env,
+ cgid_req_t *req)
+{
+ int i;
+ char **environ;
+ core_request_config *temp_core;
+ void **rconf;
+ apr_status_t stat;
+
+ r->server = apr_pcalloc(r->pool, sizeof(server_rec));
+
+ /* read the request header */
+ stat = sock_read(fd, req, sizeof(*req));
+ if (stat != APR_SUCCESS) {
+ return stat;
+ }
+ r->server->loglevel = req->loglevel;
+ if (req->req_type == GETPID_REQ) {
+ /* no more data sent for this request */
+ return APR_SUCCESS;
+ }
+
+ /* handle module indexes and such */
+ rconf = (void **) apr_pcalloc(r->pool, sizeof(void *) * (total_modules + DYNAMIC_MODULE_LIMIT));
+
+ temp_core = (core_request_config *)apr_palloc(r->pool, sizeof(core_module));
+ rconf[req->core_module_index] = (void *)temp_core;
+ r->request_config = (ap_conf_vector_t *)rconf;
+ ap_set_module_config(r->request_config, &cgid_module, (void *)&req->ugid);
+
+ /* Read the filename, argv0, uri, and args */
+ r->filename = apr_pcalloc(r->pool, req->filename_len + 1);
+ *argv0 = apr_pcalloc(r->pool, req->argv0_len + 1);
+ r->uri = apr_pcalloc(r->pool, req->uri_len + 1);
+ if ((stat = sock_read(fd, r->filename, req->filename_len)) != APR_SUCCESS ||
+ (stat = sock_read(fd, *argv0, req->argv0_len)) != APR_SUCCESS ||
+ (stat = sock_read(fd, r->uri, req->uri_len)) != APR_SUCCESS) {
+ return stat;
+ }
+
+ r->args = apr_pcalloc(r->pool, req->args_len + 1); /* empty string if no args */
+ if (req->args_len) {
+ if ((stat = sock_read(fd, r->args, req->args_len)) != APR_SUCCESS) {
+ return stat;
+ }
+ }
+
+ /* read the environment variables */
+ environ = apr_pcalloc(r->pool, (req->env_count + 2) *sizeof(char *));
+ for (i = 0; i < req->env_count; i++) {
+ apr_size_t curlen;
+
+ if ((stat = sock_read(fd, &curlen, sizeof(curlen))) != APR_SUCCESS) {
+ return stat;
+ }
+ environ[i] = apr_pcalloc(r->pool, curlen + 1);
+ if ((stat = sock_read(fd, environ[i], curlen)) != APR_SUCCESS) {
+ return stat;
+ }
+ }
+ *env = environ;
+
+#if 0
+#ifdef RLIMIT_CPU
+ sock_read(fd, &j, sizeof(int));
+ if (j) {
+        temp_core->limit_cpu = (struct rlimit *)apr_palloc(r->pool, sizeof(struct rlimit));
+ sock_read(fd, temp_core->limit_cpu, sizeof(struct rlimit));
+ }
+ else {
+ temp_core->limit_cpu = NULL;
+ }
+#endif
+
+#if defined (RLIMIT_DATA) || defined(RLIMIT_VMEM) || defined(RLIMIT_AS)
+ sock_read(fd, &j, sizeof(int));
+ if (j) {
+ temp_core->limit_mem = (struct rlimit *)apr_palloc(r->pool, sizeof(struct rlimit));
+ sock_read(fd, temp_core->limit_mem, sizeof(struct rlimit));
+ }
+ else {
+ temp_core->limit_mem = NULL;
+ }
+#endif
+
+#ifdef RLIMIT_NPROC
+ sock_read(fd, &j, sizeof(int));
+ if (j) {
+ temp_core->limit_nproc = (struct rlimit *)apr_palloc(r->pool, sizeof(struct rlimit));
+ sock_read(fd, temp_core->limit_nproc, sizeof(struct rlimit));
+ }
+ else {
+ temp_core->limit_nproc = NULL;
+ }
+#endif
+#endif
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t send_req(int fd, request_rec *r, char *argv0, char **env,
+ int req_type)
+{
+ int i;
+ cgid_req_t req = {0};
+ apr_status_t stat;
+ ap_unix_identity_t * ugid = ap_run_get_suexec_identity(r);
+
+ if (ugid == NULL) {
+ req.ugid = empty_ugid;
+ } else {
+ memcpy(&req.ugid, ugid, sizeof(ap_unix_identity_t));
+ }
+
+ req.req_type = req_type;
+ req.conn_id = r->connection->id;
+ req.core_module_index = core_module.module_index;
+ for (req.env_count = 0; env[req.env_count]; req.env_count++) {
+ continue;
+ }
+ req.filename_len = strlen(r->filename);
+ req.argv0_len = strlen(argv0);
+ req.uri_len = strlen(r->uri);
+ req.args_len = r->args ? strlen(r->args) : 0;
+ req.loglevel = r->server->loglevel;
+
+ /* Write the request header */
+ if ((stat = sock_write(fd, &req, sizeof(req))) != APR_SUCCESS) {
+ return stat;
+ }
+
+ /* Write filename, argv0, uri, and args */
+ if ((stat = sock_write(fd, r->filename, req.filename_len)) != APR_SUCCESS ||
+ (stat = sock_write(fd, argv0, req.argv0_len)) != APR_SUCCESS ||
+ (stat = sock_write(fd, r->uri, req.uri_len)) != APR_SUCCESS) {
+ return stat;
+ }
+ if (req.args_len) {
+ if ((stat = sock_write(fd, r->args, req.args_len)) != APR_SUCCESS) {
+ return stat;
+ }
+ }
+
+ /* write the environment variables */
+ for (i = 0; i < req.env_count; i++) {
+ apr_size_t curlen = strlen(env[i]);
+
+ if ((stat = sock_write(fd, &curlen, sizeof(curlen))) != APR_SUCCESS) {
+ return stat;
+ }
+
+ if ((stat = sock_write(fd, env[i], curlen)) != APR_SUCCESS) {
+ return stat;
+ }
+ }
+
+#if 0
+#ifdef RLIMIT_CPU
+ if (conf->limit_cpu) {
+ len = 1;
+ stat = sock_write(fd, &len, sizeof(int));
+ stat = sock_write(fd, conf->limit_cpu, sizeof(struct rlimit));
+ }
+ else {
+ len = 0;
+ stat = sock_write(fd, &len, sizeof(int));
+ }
+#endif
+
+#if defined(RLIMIT_DATA) || defined(RLIMIT_VMEM) || defined(RLIMIT_AS)
+ if (conf->limit_mem) {
+ len = 1;
+ stat = sock_write(fd, &len, sizeof(int));
+ stat = sock_write(fd, conf->limit_mem, sizeof(struct rlimit));
+ }
+ else {
+ len = 0;
+ stat = sock_write(fd, &len, sizeof(int));
+ }
+#endif
+
+#ifdef RLIMIT_NPROC
+ if (conf->limit_nproc) {
+ len = 1;
+ stat = sock_write(fd, &len, sizeof(int));
+ stat = sock_write(fd, conf->limit_nproc, sizeof(struct rlimit));
+ }
+ else {
+ len = 0;
+ stat = sock_write(fd, &len, sizeof(int));
+ }
+#endif
+#endif
+ return APR_SUCCESS;
+}
+
+static void daemon_signal_handler(int sig)
+{
+ if (sig == SIGHUP) {
+ ++daemon_should_exit;
+ }
+}
+
+static void cgid_child_errfn(apr_pool_t *pool, apr_status_t err,
+ const char *description)
+{
+ request_rec *r;
+ void *vr;
+
+ apr_pool_userdata_get(&vr, ERRFN_USERDATA_KEY, pool);
+ r = vr;
+
+ /* sure we got r, but don't call ap_log_rerror() because we don't
+ * have r->headers_in and possibly other storage referenced by
+ * ap_log_rerror()
+ */
+ ap_log_error(APLOG_MARK, APLOG_ERR, err, r->server, "%s",
+#ifdef AP_UNSAFE_ERROR_LOG_UNESCAPED
+ description
+#else
+ ap_escape_logitem(pool, description)
+#endif
+ );
+}
+
+static int cgid_server(void *data)
+{
+ struct sockaddr_un unix_addr;
+ int sd, sd2, rc;
+ mode_t omask;
+ apr_socklen_t len;
+ apr_pool_t *ptrans;
+ server_rec *main_server = data;
+ cgid_server_conf *sconf = ap_get_module_config(main_server->module_config,
+ &cgid_module);
+ apr_hash_t *script_hash = apr_hash_make(pcgi);
+
+ apr_pool_create(&ptrans, pcgi);
+
+ apr_signal(SIGCHLD, SIG_IGN);
+ apr_signal(SIGHUP, daemon_signal_handler);
+
+ if (unlink(sconf->sockname) < 0 && errno != ENOENT) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, errno, main_server,
+ "Couldn't unlink unix domain socket %s",
+ sconf->sockname);
+ /* just a warning; don't bail out */
+ }
+
+ /* cgid should use its own suexec doer */
+ ap_hook_get_suexec_identity(cgid_suexec_id_doer, NULL, NULL,
+ APR_HOOK_REALLY_FIRST);
+ apr_hook_sort_all();
+
+ if ((sd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, errno, main_server,
+ "Couldn't create unix domain socket");
+ return errno;
+ }
+
+ memset(&unix_addr, 0, sizeof(unix_addr));
+ unix_addr.sun_family = AF_UNIX;
+ strcpy(unix_addr.sun_path, sconf->sockname);
+
+ omask = umask(0077); /* so that only Apache can use socket */
+ rc = bind(sd, (struct sockaddr *)&unix_addr, sizeof(unix_addr));
+ umask(omask); /* can't fail, so can't clobber errno */
+ if (rc < 0) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, errno, main_server,
+ "Couldn't bind unix domain socket %s",
+ sconf->sockname);
+ return errno;
+ }
+
+ if (listen(sd, DEFAULT_CGID_LISTENBACKLOG) < 0) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, errno, main_server,
+ "Couldn't listen on unix domain socket");
+ return errno;
+ }
+
+ if (!geteuid()) {
+ if (chown(sconf->sockname, unixd_config.user_id, -1) < 0) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, errno, main_server,
+ "Couldn't change owner of unix domain socket %s",
+ sconf->sockname);
+ return errno;
+ }
+ }
+
+ unixd_setup_child(); /* if running as root, switch to configured user/group */
+
+ while (!daemon_should_exit) {
+ int errfileno = STDERR_FILENO;
+ char *argv0;
+ char **env;
+ const char * const *argv;
+ apr_int32_t in_pipe;
+ apr_int32_t out_pipe;
+ apr_int32_t err_pipe;
+ apr_cmdtype_e cmd_type;
+ request_rec *r;
+ apr_procattr_t *procattr = NULL;
+ apr_proc_t *procnew = NULL;
+ apr_file_t *inout;
+ cgid_req_t cgid_req;
+ apr_status_t stat;
+
+ apr_pool_clear(ptrans);
+
+ len = sizeof(unix_addr);
+ sd2 = accept(sd, (struct sockaddr *)&unix_addr, &len);
+ if (sd2 < 0) {
+ if (errno != EINTR) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, errno,
+ (server_rec *)data,
+ "Error accepting on cgid socket");
+ }
+ continue;
+ }
+
+ r = apr_pcalloc(ptrans, sizeof(request_rec));
+ procnew = apr_pcalloc(ptrans, sizeof(*procnew));
+ r->pool = ptrans;
+ stat = get_req(sd2, r, &argv0, &env, &cgid_req);
+ if (stat != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, stat,
+ main_server,
+ "Error reading request on cgid socket");
+ close(sd2);
+ continue;
+ }
+
+ if (cgid_req.req_type == GETPID_REQ) {
+ pid_t pid;
+
+ pid = (pid_t)apr_hash_get(script_hash, &cgid_req.conn_id, sizeof(cgid_req.conn_id));
+ if (write(sd2, &pid, sizeof(pid)) != sizeof(pid)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0,
+ main_server,
+ "Error writing pid %" APR_PID_T_FMT " to handler", pid);
+ }
+ close(sd2);
+ continue;
+ }
+
+ apr_os_file_put(&r->server->error_log, &errfileno, 0, r->pool);
+ apr_os_file_put(&inout, &sd2, 0, r->pool);
+
+ if (cgid_req.req_type == SSI_REQ) {
+ in_pipe = APR_NO_PIPE;
+ out_pipe = APR_FULL_BLOCK;
+ err_pipe = APR_NO_PIPE;
+ cmd_type = APR_SHELLCMD;
+ }
+ else {
+ in_pipe = APR_CHILD_BLOCK;
+ out_pipe = APR_CHILD_BLOCK;
+ err_pipe = APR_CHILD_BLOCK;
+ cmd_type = APR_PROGRAM;
+ }
+
+ if (((rc = apr_procattr_create(&procattr, ptrans)) != APR_SUCCESS) ||
+ ((cgid_req.req_type == CGI_REQ) &&
+ (((rc = apr_procattr_io_set(procattr,
+ in_pipe,
+ out_pipe,
+ err_pipe)) != APR_SUCCESS) ||
+ /* XXX apr_procattr_child_*_set() is creating an unnecessary
+ * pipe between this process and the child being created...
+ * It is cleaned up with the temporary pool for this request.
+ */
+ ((rc = apr_procattr_child_err_set(procattr, r->server->error_log, NULL)) != APR_SUCCESS) ||
+ ((rc = apr_procattr_child_in_set(procattr, inout, NULL)) != APR_SUCCESS))) ||
+ ((rc = apr_procattr_child_out_set(procattr, inout, NULL)) != APR_SUCCESS) ||
+ ((rc = apr_procattr_dir_set(procattr,
+ ap_make_dirstr_parent(r->pool, r->filename))) != APR_SUCCESS) ||
+ ((rc = apr_procattr_cmdtype_set(procattr, cmd_type)) != APR_SUCCESS) ||
+ ((rc = apr_procattr_child_errfn_set(procattr, cgid_child_errfn)) != APR_SUCCESS)) {
+ /* Something bad happened, tell the world.
+ * ap_log_rerror() won't work because the header table used by
+ * ap_log_rerror() hasn't been replicated in the phony r
+ */
+ ap_log_error(APLOG_MARK, APLOG_ERR, rc, r->server,
+ "couldn't set child process attributes: %s", r->filename);
+ }
+ else {
+ apr_pool_userdata_set(r, ERRFN_USERDATA_KEY, apr_pool_cleanup_null, ptrans);
+
+ argv = (const char * const *)create_argv(r->pool, NULL, NULL, NULL, argv0, r->args);
+
+ /* We want to close sd2 for the new CGI process too.
+ * If it is left open it'll make ap_pass_brigade() block
+ * waiting for EOF if CGI forked something running long.
+ * close(sd2) here should be okay, as CGI channel
+ * is already dup()ed by apr_procattr_child_{in,out}_set()
+ * above.
+ */
+ close(sd2);
+
+ if (memcmp(&empty_ugid, &cgid_req.ugid, sizeof(empty_ugid))) {
+ /* We have a valid identity, and can be sure that
+ * cgid_suexec_id_doer will return a valid ugid
+ */
+ rc = ap_os_create_privileged_process(r, procnew, argv0, argv,
+ (const char * const *)env,
+ procattr, ptrans);
+ } else {
+ rc = apr_proc_create(procnew, argv0, argv,
+ (const char * const *)env,
+ procattr, ptrans);
+ }
+
+ if (rc != APR_SUCCESS) {
+ /* Bad things happened. Everyone should have cleaned up.
+ * ap_log_rerror() won't work because the header table used by
+ * ap_log_rerror() hasn't been replicated in the phony r
+ */
+ ap_log_error(APLOG_MARK, APLOG_ERR, rc, r->server,
+ "couldn't create child process: %d: %s", rc,
+ apr_filename_of_pathname(r->filename));
+ }
+ else {
+ /* We don't want to leak storage for the key, so only allocate
+ * a key if the key doesn't exist yet in the hash; there are
+ * only a limited number of possible keys (one for each
+ * possible thread in the server), so we can allocate a copy
+ * of the key the first time a thread has a cgid request.
+ * Note that apr_hash_set() only uses the storage passed in
+ * for the key if it is adding the key to the hash for the
+ * first time; new key storage isn't needed for replacing the
+ * existing value of a key.
+ */
+ void *key;
+
+ if (apr_hash_get(script_hash, &cgid_req.conn_id, sizeof(cgid_req.conn_id))) {
+ key = &cgid_req.conn_id;
+ }
+ else {
+ key = apr_pcalloc(pcgi, sizeof(cgid_req.conn_id));
+ memcpy(key, &cgid_req.conn_id, sizeof(cgid_req.conn_id));
+ }
+ apr_hash_set(script_hash, key, sizeof(cgid_req.conn_id),
+ (void *)procnew->pid);
+ }
+ }
+ }
+ return -1;
+}
+
+static int cgid_start(apr_pool_t *p, server_rec *main_server,
+ apr_proc_t *procnew)
+{
+ daemon_should_exit = 0; /* clear setting from previous generation */
+ if ((daemon_pid = fork()) < 0) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, errno, main_server,
+ "mod_cgid: Couldn't spawn cgid daemon process");
+ return DECLINED;
+ }
+ else if (daemon_pid == 0) {
+ if (pcgi == NULL) {
+ apr_pool_create(&pcgi, p);
+ }
+ cgid_server(main_server);
+ exit(-1);
+ }
+ procnew->pid = daemon_pid;
+ procnew->err = procnew->in = procnew->out = NULL;
+ apr_pool_note_subprocess(p, procnew, APR_KILL_AFTER_TIMEOUT);
+#if APR_HAS_OTHER_CHILD
+ apr_proc_other_child_register(procnew, cgid_maint, procnew, NULL, p);
+#endif
+ return OK;
+}
+
+static int cgid_init(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp,
+ server_rec *main_server)
+{
+ apr_proc_t *procnew = NULL;
+ int first_time = 0;
+ const char *userdata_key = "cgid_init";
+ module **m;
+ int ret = OK;
+
+ root_server = main_server;
+ root_pool = p;
+
+ apr_pool_userdata_get((void **)&procnew, userdata_key, main_server->process->pool);
+ if (!procnew) {
+ first_time = 1;
+ procnew = apr_pcalloc(main_server->process->pool, sizeof(*procnew));
+ procnew->pid = -1;
+ procnew->err = procnew->in = procnew->out = NULL;
+ apr_pool_userdata_set((const void *)procnew, userdata_key,
+ apr_pool_cleanup_null, main_server->process->pool);
+ }
+
+ if (!first_time) {
+ total_modules = 0;
+ for (m = ap_preloaded_modules; *m != NULL; m++)
+ total_modules++;
+
+ ret = cgid_start(p, main_server, procnew);
+ if (ret != OK ) {
+ return ret;
+ }
+ cgid_pfn_reg_with_ssi = APR_RETRIEVE_OPTIONAL_FN(ap_register_include_handler);
+ cgid_pfn_gtv = APR_RETRIEVE_OPTIONAL_FN(ap_ssi_get_tag_and_value);
+ cgid_pfn_ps = APR_RETRIEVE_OPTIONAL_FN(ap_ssi_parse_string);
+
+ if ((cgid_pfn_reg_with_ssi) && (cgid_pfn_gtv) && (cgid_pfn_ps)) {
+ /* Required by mod_include filter. This is how mod_cgid registers
+ * with mod_include to provide processing of the exec directive.
+ */
+ cgid_pfn_reg_with_ssi("exec", handle_exec);
+ }
+ }
+ return ret;
+}
+
+static void *create_cgid_config(apr_pool_t *p, server_rec *s)
+{
+ cgid_server_conf *c =
+ (cgid_server_conf *) apr_pcalloc(p, sizeof(cgid_server_conf));
+
+ c->logname = NULL;
+ c->logbytes = DEFAULT_LOGBYTES;
+ c->bufbytes = DEFAULT_BUFBYTES;
+ c->sockname = ap_server_root_relative(p, DEFAULT_SOCKET);
+ return c;
+}
+
+static void *merge_cgid_config(apr_pool_t *p, void *basev, void *overridesv)
+{
+ cgid_server_conf *base = (cgid_server_conf *) basev, *overrides = (cgid_server_conf *) overridesv;
+
+ return overrides->logname ? overrides : base;
+}
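+
+/* Note that the merge is all-or-nothing, keyed on ScriptLog: a virtual host
+ * that sets its own ScriptLog uses its entire cgid_server_conf (including
+ * the default Scriptsock and buffer sizes unless it also overrides them),
+ * while one without a ScriptLog inherits the main server's settings
+ * wholesale.
+ */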
+
+static const char *set_scriptlog(cmd_parms *cmd, void *dummy, const char *arg)
+{
+ server_rec *s = cmd->server;
+ cgid_server_conf *conf = ap_get_module_config(s->module_config,
+ &cgid_module);
+
+ conf->logname = ap_server_root_relative(cmd->pool, arg);
+
+ if (!conf->logname) {
+ return apr_pstrcat(cmd->pool, "Invalid ScriptLog path ",
+ arg, NULL);
+ }
+ return NULL;
+}
+
+static const char *set_scriptlog_length(cmd_parms *cmd, void *dummy, const char *arg)
+{
+ server_rec *s = cmd->server;
+ cgid_server_conf *conf = ap_get_module_config(s->module_config,
+ &cgid_module);
+
+ conf->logbytes = atol(arg);
+ return NULL;
+}
+
+static const char *set_scriptlog_buffer(cmd_parms *cmd, void *dummy, const char *arg)
+{
+ server_rec *s = cmd->server;
+ cgid_server_conf *conf = ap_get_module_config(s->module_config,
+ &cgid_module);
+
+ conf->bufbytes = atoi(arg);
+ return NULL;
+}
+
+static const char *set_script_socket(cmd_parms *cmd, void *dummy, const char *arg)
+{
+ server_rec *s = cmd->server;
+ cgid_server_conf *conf = ap_get_module_config(s->module_config,
+ &cgid_module);
+
+ conf->sockname = ap_server_root_relative(cmd->pool, arg);
+
+ if (!conf->sockname) {
+ return apr_pstrcat(cmd->pool, "Invalid Scriptsock path ",
+ arg, NULL);
+ }
+
+ return NULL;
+}
+
+static const command_rec cgid_cmds[] =
+{
+ AP_INIT_TAKE1("ScriptLog", set_scriptlog, NULL, RSRC_CONF,
+ "the name of a log for script debugging info"),
+ AP_INIT_TAKE1("ScriptLogLength", set_scriptlog_length, NULL, RSRC_CONF,
+ "the maximum length (in bytes) of the script debug log"),
+ AP_INIT_TAKE1("ScriptLogBuffer", set_scriptlog_buffer, NULL, RSRC_CONF,
+ "the maximum size (in bytes) to record of a POST request"),
+ AP_INIT_TAKE1("Scriptsock", set_script_socket, NULL, RSRC_CONF,
+ "the name of the socket to use for communication with "
+ "the cgi daemon."),
+ {NULL}
+};
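+
+/* For reference, a hypothetical httpd.conf fragment exercising these
+ * directives (paths are illustrative and resolved relative to ServerRoot;
+ * the two numeric values shown are simply the compiled-in defaults):
+ *
+ *     ScriptLog       logs/cgid_log
+ *     ScriptLogLength 10385760
+ *     ScriptLogBuffer 1024
+ *     Scriptsock      logs/cgisock
+ *
+ * All four are server-scope (RSRC_CONF) directives; setting ScriptLog is
+ * what makes log_scripterror() and log_script() below write to a file at
+ * all.
+ */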
+
+static int log_scripterror(request_rec *r, cgid_server_conf * conf, int ret,
+ apr_status_t rv, char *error)
+{
+ apr_file_t *f = NULL;
+ struct stat finfo;
+ char time_str[APR_CTIME_LEN];
+    int log_flags = APLOG_ERR;
+
+ ap_log_rerror(APLOG_MARK, log_flags, rv, r,
+ "%s: %s", error, r->filename);
+
+ /* XXX Very expensive mainline case! Open, then getfileinfo! */
+ if (!conf->logname ||
+ ((stat(conf->logname, &finfo) == 0)
+ && (finfo.st_size > conf->logbytes)) ||
+ (apr_file_open(&f, conf->logname,
+ APR_APPEND|APR_WRITE|APR_CREATE, APR_OS_DEFAULT, r->pool) != APR_SUCCESS)) {
+ return ret;
+ }
+
+ /* "%% [Wed Jun 19 10:53:21 1996] GET /cgid-bin/printenv HTTP/1.0" */
+ apr_ctime(time_str, apr_time_now());
+ apr_file_printf(f, "%%%% [%s] %s %s%s%s %s\n", time_str, r->method, r->uri,
+ r->args ? "?" : "", r->args ? r->args : "", r->protocol);
+    /* "%% 500 /usr/local/apache/cgid-bin" */
+ apr_file_printf(f, "%%%% %d %s\n", ret, r->filename);
+
+ apr_file_printf(f, "%%error\n%s\n", error);
+
+ apr_file_close(f);
+ return ret;
+}
+
+static int log_script(request_rec *r, cgid_server_conf * conf, int ret,
+ char *dbuf, const char *sbuf, apr_bucket_brigade *bb,
+ apr_file_t *script_err)
+{
+ const apr_array_header_t *hdrs_arr = apr_table_elts(r->headers_in);
+ const apr_table_entry_t *hdrs = (apr_table_entry_t *) hdrs_arr->elts;
+ char argsbuffer[HUGE_STRING_LEN];
+ apr_file_t *f = NULL;
+ apr_bucket *e;
+ const char *buf;
+ apr_size_t len;
+ apr_status_t rv;
+ int first;
+ int i;
+ struct stat finfo;
+ char time_str[APR_CTIME_LEN];
+
+ /* XXX Very expensive mainline case! Open, then getfileinfo! */
+ if (!conf->logname ||
+ ((stat(conf->logname, &finfo) == 0)
+ && (finfo.st_size > conf->logbytes)) ||
+ (apr_file_open(&f, conf->logname,
+ APR_APPEND|APR_WRITE|APR_CREATE, APR_OS_DEFAULT, r->pool) != APR_SUCCESS)) {
+ /* Soak up script output */
+ discard_script_output(bb);
+ if (script_err) {
+ while (apr_file_gets(argsbuffer, HUGE_STRING_LEN,
+ script_err) == APR_SUCCESS)
+ continue;
+ }
+ return ret;
+ }
+
+ /* "%% [Wed Jun 19 10:53:21 1996] GET /cgid-bin/printenv HTTP/1.0" */
+ apr_ctime(time_str, apr_time_now());
+ apr_file_printf(f, "%%%% [%s] %s %s%s%s %s\n", time_str, r->method, r->uri,
+ r->args ? "?" : "", r->args ? r->args : "", r->protocol);
+ /* "%% 500 /usr/local/apache/cgid-bin" */
+ apr_file_printf(f, "%%%% %d %s\n", ret, r->filename);
+
+ apr_file_puts("%request\n", f);
+ for (i = 0; i < hdrs_arr->nelts; ++i) {
+ if (!hdrs[i].key)
+ continue;
+ apr_file_printf(f, "%s: %s\n", hdrs[i].key, hdrs[i].val);
+ }
+ if ((r->method_number == M_POST || r->method_number == M_PUT)
+ && *dbuf) {
+ apr_file_printf(f, "\n%s\n", dbuf);
+ }
+
+ apr_file_puts("%response\n", f);
+ hdrs_arr = apr_table_elts(r->err_headers_out);
+ hdrs = (const apr_table_entry_t *) hdrs_arr->elts;
+
+ for (i = 0; i < hdrs_arr->nelts; ++i) {
+ if (!hdrs[i].key)
+ continue;
+ apr_file_printf(f, "%s: %s\n", hdrs[i].key, hdrs[i].val);
+ }
+
+ if (sbuf && *sbuf)
+ apr_file_printf(f, "%s\n", sbuf);
+
+ first = 1;
+ APR_BRIGADE_FOREACH(e, bb) {
+ if (APR_BUCKET_IS_EOS(e)) {
+ break;
+ }
+ rv = apr_bucket_read(e, &buf, &len, APR_BLOCK_READ);
+ if (!APR_STATUS_IS_SUCCESS(rv) || (len == 0)) {
+ break;
+ }
+ if (first) {
+ apr_file_puts("%stdout\n", f);
+ first = 0;
+ }
+ apr_file_write(f, buf, &len);
+ apr_file_puts("\n", f);
+ }
+
+ if (script_err) {
+ if (apr_file_gets(argsbuffer, HUGE_STRING_LEN,
+ script_err) == APR_SUCCESS) {
+ apr_file_puts("%stderr\n", f);
+ apr_file_puts(argsbuffer, f);
+ while (apr_file_gets(argsbuffer, HUGE_STRING_LEN,
+ script_err) == APR_SUCCESS)
+ apr_file_puts(argsbuffer, f);
+ apr_file_puts("\n", f);
+ }
+ }
+
+ if (script_err) {
+ apr_file_close(script_err);
+ }
+
+ apr_file_close(f);
+ return ret;
+}
+
+static apr_status_t close_unix_socket(void *thefd)
+{
+ int fd = (int)thefd;
+
+ return close(fd);
+}
+
+static int connect_to_daemon(int *sdptr, request_rec *r,
+ cgid_server_conf *conf)
+{
+ struct sockaddr_un unix_addr;
+ int sd;
+ int connect_tries;
+ apr_interval_time_t sliding_timer;
+
+ memset(&unix_addr, 0, sizeof(unix_addr));
+ unix_addr.sun_family = AF_UNIX;
+ strcpy(unix_addr.sun_path, conf->sockname);
+
+ connect_tries = 0;
+ sliding_timer = 100000; /* 100 milliseconds */
+ while (1) {
+ ++connect_tries;
+ if ((sd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
+ return log_scripterror(r, conf, HTTP_INTERNAL_SERVER_ERROR, errno,
+ "unable to create socket to cgi daemon");
+ }
+ if (connect(sd, (struct sockaddr *)&unix_addr, sizeof(unix_addr)) < 0) {
+ if (errno == ECONNREFUSED && connect_tries < DEFAULT_CONNECT_ATTEMPTS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, errno, r,
+ "connect #%d to cgi daemon failed, sleeping before retry",
+ connect_tries);
+ close(sd);
+ apr_sleep(sliding_timer);
+ if (sliding_timer < apr_time_from_sec(2)) {
+ sliding_timer *= 2;
+ }
+ }
+ else {
+ close(sd);
+ return log_scripterror(r, conf, HTTP_SERVICE_UNAVAILABLE, errno,
+ "unable to connect to cgi daemon after multiple tries");
+ }
+ }
+ else {
+ apr_pool_cleanup_register(r->pool, (void *)sd, close_unix_socket,
+ apr_pool_cleanup_null);
+ break; /* we got connected! */
+ }
+ /* gotta try again, but make sure the cgid daemon is still around */
+ if (kill(daemon_pid, 0) != 0) {
+ return log_scripterror(r, conf, HTTP_SERVICE_UNAVAILABLE, errno,
+ "cgid daemon is gone; is Apache terminating?");
+ }
+ }
+ *sdptr = sd;
+ return OK;
+}
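+
+/* With the defaults above, a request that keeps getting ECONNREFUSED backs
+ * off 100ms, 200ms, 400ms, 800ms, 1.6s and then 3.2s per remaining attempt
+ * (the doubling stops once the timer has reached 2 seconds), for up to
+ * DEFAULT_CONNECT_ATTEMPTS (15) tries -- roughly half a minute of retrying
+ * in the worst case before the 503 is returned.
+ */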
+
+static void discard_script_output(apr_bucket_brigade *bb)
+{
+ apr_bucket *e;
+ const char *buf;
+ apr_size_t len;
+ apr_status_t rv;
+ APR_BRIGADE_FOREACH(e, bb) {
+ if (APR_BUCKET_IS_EOS(e)) {
+ break;
+ }
+ rv = apr_bucket_read(e, &buf, &len, APR_BLOCK_READ);
+ if (!APR_STATUS_IS_SUCCESS(rv)) {
+ break;
+ }
+ }
+}
+
+/****************************************************************
+ *
+ * Actual cgid handling...
+ */
+
+struct cleanup_script_info {
+ request_rec *r;
+ unsigned long conn_id;
+ cgid_server_conf *conf;
+};
+
+static apr_status_t dead_yet(pid_t pid, apr_interval_time_t max_wait)
+{
+ apr_interval_time_t interval = 10000; /* 10 ms */
+ apr_interval_time_t total = 0;
+
+ do {
+#ifdef _AIX
+ /* On AIX, for processes like mod_cgid's script children where
+ * SIGCHLD is ignored, kill(pid,0) returns success for up to
+ * one second after the script child exits, based on when a
+ * daemon runs to clean up unnecessary process table entries.
+ * getpgid() can report the proper info (-1/ESRCH) immediately.
+ */
+ if (getpgid(pid) < 0) {
+#else
+ if (kill(pid, 0) < 0) {
+#endif
+ return APR_SUCCESS;
+ }
+ apr_sleep(interval);
+ total = total + interval;
+ if (interval < 500000) {
+ interval *= 2;
+ }
+ } while (total < max_wait);
+ return APR_EGENERAL;
+}
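+
+/* The polling interval above starts at 10ms and doubles until it passes
+ * 500ms (topping out at 640ms), so the three-second max_wait used by
+ * cleanup_nonchild_process() below costs about ten kill()/getpgid() probes
+ * rather than a busy loop.
+ */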
+
+static apr_status_t cleanup_nonchild_process(request_rec *r, pid_t pid)
+{
+ kill(pid, SIGTERM); /* in case it isn't dead yet */
+ if (dead_yet(pid, apr_time_from_sec(3)) == APR_SUCCESS) {
+ return APR_SUCCESS;
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "CGI process %" APR_PID_T_FMT " didn't exit, sending SIGKILL",
+ pid);
+ kill(pid, SIGKILL);
+ if (dead_yet(pid, apr_time_from_sec(3)) == APR_SUCCESS) {
+ return APR_SUCCESS;
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "CGI process %" APR_PID_T_FMT " didn't exit, sending SIGKILL again",
+ pid);
+ kill(pid, SIGKILL);
+
+ return APR_EGENERAL;
+}
+
+static apr_status_t cleanup_script(void *vptr)
+{
+ struct cleanup_script_info *info = vptr;
+ int sd;
+ int rc;
+ cgid_req_t req = {0};
+ pid_t pid;
+ apr_status_t stat;
+
+ rc = connect_to_daemon(&sd, info->r, info->conf);
+ if (rc != OK) {
+ return APR_EGENERAL;
+ }
+
+ /* we got a socket, and there is already a cleanup registered for it */
+
+ req.req_type = GETPID_REQ;
+ req.conn_id = info->r->connection->id;
+
+ stat = sock_write(sd, &req, sizeof(req));
+ if (stat != APR_SUCCESS) {
+ return stat;
+ }
+
+ /* wait for pid of script */
+ stat = sock_read(sd, &pid, sizeof(pid));
+ if (stat != APR_SUCCESS) {
+ return stat;
+ }
+
+ if (pid == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, info->r,
+ "daemon couldn't find CGI process for connection %lu",
+ info->conn_id);
+ return APR_EGENERAL;
+ }
+ return cleanup_nonchild_process(info->r, pid);
+}
+
+static int cgid_handler(request_rec *r)
+{
+ conn_rec *c = r->connection;
+ int retval, nph, dbpos = 0;
+ char *argv0, *dbuf = NULL;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ cgid_server_conf *conf;
+ int is_included;
+ int seen_eos, child_stopped_reading;
+ int sd;
+ char **env;
+ apr_file_t *tempsock;
+ struct cleanup_script_info *info;
+ apr_status_t rv;
+
+ if (strcmp(r->handler,CGI_MAGIC_TYPE) && strcmp(r->handler,"cgi-script"))
+ return DECLINED;
+
+ conf = ap_get_module_config(r->server->module_config, &cgid_module);
+ is_included = !strcmp(r->protocol, "INCLUDED");
+
+ if ((argv0 = strrchr(r->filename, '/')) != NULL)
+ argv0++;
+ else
+ argv0 = r->filename;
+
+ nph = !(strncmp(argv0, "nph-", 4));
+
+ argv0 = r->filename;
+
+ if (!(ap_allow_options(r) & OPT_EXECCGI) && !is_scriptaliased(r))
+ return log_scripterror(r, conf, HTTP_FORBIDDEN, 0,
+ "Options ExecCGI is off in this directory");
+ if (nph && is_included)
+ return log_scripterror(r, conf, HTTP_FORBIDDEN, 0,
+ "attempt to include NPH CGI script");
+
+#if defined(OS2) || defined(WIN32)
+#error mod_cgid does not work on this platform. If you teach it to, look
+#error at mod_cgi.c for required code in this path.
+#else
+ if (r->finfo.filetype == 0)
+ return log_scripterror(r, conf, HTTP_NOT_FOUND, 0,
+ "script not found or unable to stat");
+#endif
+ if (r->finfo.filetype == APR_DIR)
+ return log_scripterror(r, conf, HTTP_FORBIDDEN, 0,
+ "attempt to invoke directory as script");
+
+ if ((r->used_path_info == AP_REQ_REJECT_PATH_INFO) &&
+ r->path_info && *r->path_info)
+ {
+ /* default to accept */
+ return log_scripterror(r, conf, HTTP_NOT_FOUND, 0,
+ "AcceptPathInfo off disallows user's path");
+ }
+/*
+ if (!ap_suexec_enabled) {
+ if (!ap_can_exec(&r->finfo))
+ return log_scripterror(r, conf, HTTP_FORBIDDEN, 0,
+ "file permissions deny server execution");
+ }
+*/
+ ap_add_common_vars(r);
+ ap_add_cgi_vars(r);
+ env = ap_create_environment(r->pool, r->subprocess_env);
+
+ if ((retval = connect_to_daemon(&sd, r, conf)) != OK) {
+ return retval;
+ }
+
+ rv = send_req(sd, r, argv0, env, CGI_REQ);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "write to cgi daemon process");
+ }
+
+ info = apr_palloc(r->pool, sizeof(struct cleanup_script_info));
+ info->r = r;
+ info->conn_id = r->connection->id;
+ info->conf = conf;
+ apr_pool_cleanup_register(r->pool, info,
+ cleanup_script,
+ apr_pool_cleanup_null);
+    /* We are putting the socket descriptor into an apr_file_t so that we can
+ * use a pipe bucket to send the data to the client. APR will create
+ * a cleanup for the apr_file_t which will close the socket, so we'll
+ * get rid of the cleanup we registered when we created the socket.
+ */
+
+ apr_os_pipe_put_ex(&tempsock, &sd, 1, r->pool);
+ apr_pool_cleanup_kill(r->pool, (void *)sd, close_unix_socket);
+
+ if ((argv0 = strrchr(r->filename, '/')) != NULL)
+ argv0++;
+ else
+ argv0 = r->filename;
+
+ /* Transfer any put/post args, CERN style...
+ * Note that we already ignore SIGPIPE in the core server.
+ */
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ seen_eos = 0;
+ child_stopped_reading = 0;
+ if (conf->logname) {
+ dbuf = apr_palloc(r->pool, conf->bufbytes + 1);
+ dbpos = 0;
+ }
+ do {
+ apr_bucket *bucket;
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, HUGE_STRING_LEN);
+
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "Error reading request entity data");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ APR_BRIGADE_FOREACH(bucket, bb) {
+ const char *data;
+ apr_size_t len;
+
+ if (APR_BUCKET_IS_EOS(bucket)) {
+ seen_eos = 1;
+ break;
+ }
+
+ /* We can't do much with this. */
+ if (APR_BUCKET_IS_FLUSH(bucket)) {
+ continue;
+ }
+
+ /* If the child stopped, we still must read to EOS. */
+ if (child_stopped_reading) {
+ continue;
+ }
+
+ /* read */
+ apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
+
+ if (conf->logname && dbpos < conf->bufbytes) {
+ int cursize;
+
+ if ((dbpos + len) > conf->bufbytes) {
+ cursize = conf->bufbytes - dbpos;
+ }
+ else {
+ cursize = len;
+ }
+ memcpy(dbuf + dbpos, data, cursize);
+ dbpos += cursize;
+ }
+
+ /* Keep writing data to the child until done or too much time
+ * elapses with no progress or an error occurs.
+ */
+ rv = apr_file_write_full(tempsock, data, len, NULL);
+
+ if (rv != APR_SUCCESS) {
+ /* silly script stopped reading, soak up remaining message */
+ child_stopped_reading = 1;
+ }
+ }
+ apr_brigade_cleanup(bb);
+ }
+ while (!seen_eos);
+
+ if (conf->logname) {
+ dbuf[dbpos] = '\0';
+ }
+
+ /* we're done writing, or maybe we didn't write at all;
+ * force EOF on child's stdin so that the cgi detects end (or
+ * absence) of data
+ */
+ shutdown(sd, 1);
+
+ /* Handle script return... */
+ if (!nph) {
+ const char *location;
+ char sbuf[MAX_STRING_LEN];
+ int ret;
+
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ b = apr_bucket_pipe_create(tempsock, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+
+ if ((ret = ap_scan_script_header_err_brigade(r, bb, sbuf))) {
+ return log_script(r, conf, ret, dbuf, sbuf, bb, NULL);
+ }
+
+ location = apr_table_get(r->headers_out, "Location");
+
+ if (location && location[0] == '/' && r->status == 200) {
+
+ /* Soak up all the script output */
+ discard_script_output(bb);
+ apr_brigade_destroy(bb);
+ /* This redirect needs to be a GET no matter what the original
+ * method was.
+ */
+ r->method = apr_pstrdup(r->pool, "GET");
+ r->method_number = M_GET;
+
+ /* We already read the message body (if any), so don't allow
+ * the redirected request to think it has one. We can ignore
+ * Transfer-Encoding, since we used REQUEST_CHUNKED_ERROR.
+ */
+ apr_table_unset(r->headers_in, "Content-Length");
+
+ ap_internal_redirect_handler(location, r);
+ return OK;
+ }
+ else if (location && r->status == 200) {
+ /* XX Note that if a script wants to produce its own Redirect
+ * body, it now has to explicitly *say* "Status: 302"
+ */
+ discard_script_output(bb);
+ apr_brigade_destroy(bb);
+ return HTTP_MOVED_TEMPORARILY;
+ }
+
+ ap_pass_brigade(r->output_filters, bb);
+ }
+
+ if (nph) {
+ struct ap_filter_t *cur;
+
+ /* get rid of all filters up through protocol... since we
+ * haven't parsed off the headers, there is no way they can
+ * work
+ */
+
+ cur = r->proto_output_filters;
+ while (cur && cur->frec->ftype < AP_FTYPE_CONNECTION) {
+ cur = cur->next;
+ }
+ r->output_filters = r->proto_output_filters = cur;
+
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ b = apr_bucket_pipe_create(tempsock, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ ap_pass_brigade(r->output_filters, bb);
+ }
+
+ return OK; /* NOT r->status, even if it has changed. */
+}
+
+
+
+
+/*============================================================================
+ *============================================================================
+ * This is the beginning of the cgi filter code moved from mod_include. This
+ * is the code required to handle the "exec" SSI directive.
+ *============================================================================
+ *============================================================================*/
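+
+/* Illustrative note (not part of the original source): the two forms of
+ * the SSI "exec" element handled below are, for example,
+ *
+ * <!--#exec cgi="/cgi-bin/example.cgi" --> handled by include_cgi()
+ * <!--#exec cmd="ls -l" --> handled by include_cmd()
+ *
+ * The element syntax is standard mod_include; the path and command shown
+ * are hypothetical.
+ */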
+static int include_cgi(char *s, request_rec *r, ap_filter_t *next,
+ apr_bucket *head_ptr, apr_bucket **inserted_head)
+{
+ request_rec *rr = ap_sub_req_lookup_uri(s, r, next);
+ int rr_status;
+ apr_bucket *tmp_buck, *tmp2_buck;
+
+ if (rr->status != HTTP_OK) {
+ ap_destroy_sub_req(rr);
+ return -1;
+ }
+
+ /* No hardwired path info or query allowed */
+
+ if ((rr->path_info && rr->path_info[0]) || rr->args) {
+ ap_destroy_sub_req(rr);
+ return -1;
+ }
+ if (rr->finfo.filetype != APR_REG) {
+ ap_destroy_sub_req(rr);
+ return -1;
+ }
+
+ /* Script gets parameters of the *document*, for back compatibility */
+
+ rr->path_info = r->path_info; /* hard to get right; see mod_cgi.c */
+ rr->args = r->args;
+
+ /* Force sub_req to be treated as a CGI request, even if ordinary
+ * typing rules would have called it something else.
+ */
+ ap_set_content_type(rr, CGI_MAGIC_TYPE);
+
+ /* Run it. */
+
+ rr_status = ap_run_sub_req(rr);
+ if (ap_is_HTTP_REDIRECT(rr_status)) {
+ apr_size_t len_loc;
+ const char *location = apr_table_get(rr->headers_out, "Location");
+ conn_rec *c = r->connection;
+
+ location = ap_escape_html(rr->pool, location);
+ len_loc = strlen(location);
+
+ /* XXX: if most of this stuff is going to get copied anyway,
+ * it'd be more efficient to pstrcat it into a single pool buffer
+ * and a single pool bucket */
+
+ tmp_buck = apr_bucket_immortal_create("<A HREF=\"",
+ sizeof("<A HREF=\"") - 1,
+ c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp_buck);
+ tmp2_buck = apr_bucket_heap_create(location, len_loc, NULL,
+ c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp2_buck);
+ tmp2_buck = apr_bucket_immortal_create("\">", sizeof("\">") - 1,
+ c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp2_buck);
+ tmp2_buck = apr_bucket_heap_create(location, len_loc, NULL,
+ c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp2_buck);
+ tmp2_buck = apr_bucket_immortal_create("</A>", sizeof("</A>") - 1,
+ c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(head_ptr, tmp2_buck);
+
+ if (*inserted_head == NULL) {
+ *inserted_head = tmp_buck;
+ }
+ }
+
+ ap_destroy_sub_req(rr);
+
+ return 0;
+}
+
+
+/* This is the special environment used for running the "exec cmd="
+ * variety of SSI directives.
+ */
+static void add_ssi_vars(request_rec *r, ap_filter_t *next)
+{
+ apr_table_t *e = r->subprocess_env;
+
+ if (r->path_info && r->path_info[0] != '\0') {
+ request_rec *pa_req;
+
+ apr_table_setn(e, "PATH_INFO", ap_escape_shell_cmd(r->pool, r->path_info));
+
+ pa_req = ap_sub_req_lookup_uri(ap_escape_uri(r->pool, r->path_info), r, NULL);
+ if (pa_req->filename) {
+ apr_table_setn(e, "PATH_TRANSLATED",
+ apr_pstrcat(r->pool, pa_req->filename, pa_req->path_info, NULL));
+ }
+ ap_destroy_sub_req(pa_req);
+ }
+
+ if (r->args) {
+ char *arg_copy = apr_pstrdup(r->pool, r->args);
+
+ apr_table_setn(e, "QUERY_STRING", r->args);
+ ap_unescape_url(arg_copy);
+ apr_table_setn(e, "QUERY_STRING_UNESCAPED", ap_escape_shell_cmd(r->pool, arg_copy));
+ }
+}
+
+static int include_cmd(include_ctx_t *ctx, apr_bucket_brigade **bb, char *command,
+ request_rec *r, ap_filter_t *f)
+{
+ char **env;
+ int sd;
+ apr_status_t rc = APR_SUCCESS;
+ int retval;
+ apr_bucket_brigade *bcgi;
+ apr_bucket *b;
+ apr_file_t *tempsock = NULL;
+ cgid_server_conf *conf = ap_get_module_config(r->server->module_config,
+ &cgid_module);
+ struct cleanup_script_info *info;
+
+ add_ssi_vars(r, f->next);
+ env = ap_create_environment(r->pool, r->subprocess_env);
+
+ if ((retval = connect_to_daemon(&sd, r, conf)) != OK) {
+ return retval;
+ }
+
+ SPLIT_AND_PASS_PRETAG_BUCKETS(*bb, ctx, f->next, rc);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
+
+ send_req(sd, r, command, env, SSI_REQ);
+
+ info = apr_palloc(r->pool, sizeof(struct cleanup_script_info));
+ info->r = r;
+ info->conn_id = r->connection->id;
+ info->conf = conf;
+ /* for this type of request, the script is invoked through an
+ * intermediate shell process... cleanup_script is only able
+ * to knock out the shell process, not the actual script
+ */
+ apr_pool_cleanup_register(r->pool, info,
+ cleanup_script,
+ apr_pool_cleanup_null);
+ /* We are putting the socket descriptor into an apr_file_t so that we can
+ * use a pipe bucket to send the data to the client. APR will create
+ * a cleanup for the apr_file_t which will close the socket, so we'll
+ * get rid of the cleanup we registered when we created the socket.
+ */
+ apr_os_pipe_put_ex(&tempsock, &sd, 1, r->pool);
+ apr_pool_cleanup_kill(r->pool, (void *)sd, close_unix_socket);
+
+ bcgi = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ b = apr_bucket_pipe_create(tempsock, r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bcgi, b);
+ ap_pass_brigade(f->next, bcgi);
+
+ return 0;
+}
+
+static int handle_exec(include_ctx_t *ctx, apr_bucket_brigade **bb, request_rec *r,
+ ap_filter_t *f, apr_bucket *head_ptr, apr_bucket **inserted_head)
+{
+ char *tag = NULL;
+ char *tag_val = NULL;
+ char *file = r->filename;
+ apr_bucket *tmp_buck;
+ char parsed_string[MAX_STRING_LEN];
+
+ *inserted_head = NULL;
+ if (ctx->flags & FLAG_PRINTING) {
+ if (ctx->flags & FLAG_NO_EXEC) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "exec used but not allowed in %s", r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ }
+ else {
+ while (1) {
+ cgid_pfn_gtv(ctx, &tag, &tag_val, 1);
+ if (tag_val == NULL) {
+ if (tag == NULL) {
+ return (0);
+ }
+ else {
+ return 1;
+ }
+ }
+ if (!strcmp(tag, "cmd")) {
+ cgid_pfn_ps(r, ctx, tag_val, parsed_string, sizeof(parsed_string), 1);
+ if (include_cmd(ctx, bb, parsed_string, r, f) == -1) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "execution failure for parameter \"%s\" "
+ "to tag exec in file %s", tag, r->filename);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ }
+ /* just in case some stooge changed directories */
+ }
+ else if (!strcmp(tag, "cgi")) {
+ apr_status_t retval = APR_SUCCESS;
+
+ cgid_pfn_ps(r, ctx, tag_val, parsed_string, sizeof(parsed_string), 0);
+ SPLIT_AND_PASS_PRETAG_BUCKETS(*bb, ctx, f->next, retval);
+ if (retval != APR_SUCCESS) {
+ return retval;
+ }
+
+ if (include_cgi(parsed_string, r, f->next, head_ptr, inserted_head) == -1) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "invalid CGI ref \"%s\" in %s", tag_val, file);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ }
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "unknown parameter \"%s\" to tag exec in %s", tag, file);
+ CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head);
+ }
+ }
+ }
+ }
+ return 0;
+}
+/*============================================================================
+ *============================================================================
+ * This is the end of the cgi filter code moved from mod_include.
+ *============================================================================
+ *============================================================================*/
+
+
+static void register_hook(apr_pool_t *p)
+{
+ static const char * const aszPre[] = { "mod_include.c", NULL };
+
+ ap_hook_post_config(cgid_init, aszPre, NULL, APR_HOOK_MIDDLE);
+ ap_hook_handler(cgid_handler, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA cgid_module = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ create_cgid_config, /* server config */
+ merge_cgid_config, /* merge server config */
+ cgid_cmds, /* command table */
+ register_hook /* register_handlers */
+};
+
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.exp b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.exp
new file mode 100644
index 00000000..5f10d486
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.exp
@@ -0,0 +1 @@
+cgid_module
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.la b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.la
new file mode 100644
index 00000000..7528f2d9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.la
@@ -0,0 +1,35 @@
+# mod_cgid.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_cgid.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_cgid.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.lo b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.lo
new file mode 100644
index 00000000..3d819a78
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.lo
@@ -0,0 +1,12 @@
+# mod_cgid.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_cgid.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_cgid.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.o b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.o
new file mode 100644
index 00000000..ffbaef81
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_cgid.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_info.c b/rubbos/app/httpd-2.0.64/modules/generators/mod_info.c
new file mode 100644
index 00000000..eb728442
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_info.c
@@ -0,0 +1,533 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Info Module. Display configuration information for the server and
+ * all included modules.
+ *
+ * <Location /server-info>
+ * SetHandler server-info
+ * </Location>
+ *
+ * GET /server-info - Returns full configuration page for server and all modules
+ * GET /server-info?server - Returns server configuration only
+ * GET /server-info?module_name - Returns configuration for a single module
+ * GET /server-info?list - Returns quick list of included modules
+ *
+ * Rasmus Lerdorf <rasmus@vex.net>, May 1996
+ *
+ * 05.01.96 Initial Version
+ *
+ * Lou Langholtz <ldl@usi.utah.edu>, July 1997
+ *
+ * 07.11.97 Addition of the AddModuleInfo directive
+ *
+ * Ryan Morgan <rmorgan@covalent.net>
+ *
+ * 8.11.00 Port to Apache 2.0. Read configuration from the configuration
+ * tree rather than reparse the entire configuration file.
+ *
+ */
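+
+/* Illustrative configuration sketch (not part of the original source):
+ * the AddModuleInfo directive implemented below attaches an extra HTML
+ * snippet to a module's section of the server-info page, e.g.
+ *
+ * AddModuleInfo mod_info.c 'See <a href="http://httpd.apache.org/docs/">the documentation</a>'
+ *
+ * The directive name comes from this file; the argument values are
+ * hypothetical.
+ */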
+
+#define CORE_PRIVATE
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "util_script.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+#include "ap_mpm.h"
+
+typedef struct {
+ const char *name; /* matching module name */
+ const char *info; /* additional info */
+} info_entry;
+
+typedef struct {
+ apr_array_header_t *more_info;
+} info_svr_conf;
+
+module AP_MODULE_DECLARE_DATA info_module;
+
+static void *create_info_config(apr_pool_t *p, server_rec *s)
+{
+ info_svr_conf *conf = (info_svr_conf *) apr_pcalloc(p, sizeof(info_svr_conf));
+
+ conf->more_info = apr_array_make(p, 20, sizeof(info_entry));
+ return conf;
+}
+
+static void *merge_info_config(apr_pool_t *p, void *basev, void *overridesv)
+{
+ info_svr_conf *new = (info_svr_conf *) apr_pcalloc(p, sizeof(info_svr_conf));
+ info_svr_conf *base = (info_svr_conf *) basev;
+ info_svr_conf *overrides = (info_svr_conf *) overridesv;
+
+ new->more_info = apr_array_append(p, overrides->more_info, base->more_info);
+ return new;
+}
+
+static void mod_info_html_cmd_string(request_rec *r, const char *string,
+ int close)
+{
+ const char *s;
+
+ s = string;
+ /* walk the string, escaping HTML metacharacters as we go */
+ while (*s) {
+ if (*s == '<') {
+ if (close) {
+ ap_rputs("&lt;/", r);
+ } else {
+ ap_rputs("&lt;", r);
+ }
+ }
+ else if (*s == '>') {
+ ap_rputs("&gt;", r);
+ }
+ else if (*s == '&') {
+ ap_rputs("&amp;", r);
+ }
+ else if (*s == ' ') {
+ if (close) {
+ ap_rputs("&gt;", r);
+ break;
+ } else {
+ ap_rputc(*s, r);
+ }
+ } else {
+ ap_rputc(*s, r);
+ }
+ s++;
+ }
+}
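+
+/* Worked example (illustrative, not in the original source): with
+ * close == 0, the string "<Directory /web>" is emitted as
+ * "&lt;Directory /web&gt;"; with close == 1 the same input becomes
+ * "&lt;/Directory&gt;": everything after the first space is dropped
+ * and the tag is rendered as a closing tag.
+ */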
+
+static void mod_info_module_cmds(request_rec * r, const command_rec * cmds,
+ ap_directive_t * conftree)
+{
+ const command_rec *cmd;
+ ap_directive_t *tmptree = conftree;
+ char htmlstring[MAX_STRING_LEN];
+ int block_start = 0;
+ int nest = 0;
+
+ while (tmptree != NULL) {
+ cmd = cmds;
+ while (cmd->name) {
+ if ((cmd->name[0] != '<') &&
+ (strcasecmp(cmd->name, tmptree->directive) == 0)) {
+ if (nest > block_start) {
+ block_start++;
+ apr_snprintf(htmlstring, sizeof(htmlstring), "%s %s",
+ tmptree->parent->directive,
+ tmptree->parent->args);
+ ap_rputs("<dd><tt>", r);
+ mod_info_html_cmd_string(r, htmlstring, 0);
+ ap_rputs("</tt></dd>\n", r);
+ }
+ if (nest == 2) {
+ ap_rprintf(r, "<dd><tt>&nbsp;&nbsp;&nbsp;&nbsp;%s "
+ "<i>%s</i></tt></dd>\n",
+ ap_escape_html(r->pool,tmptree->directive),
+ ap_escape_html(r->pool,tmptree->args));
+ } else if (nest == 1) {
+ ap_rprintf(r,
+ "<dd><tt>&nbsp;&nbsp;%s <i>%s</i></tt></dd>\n",
+ ap_escape_html(r->pool,tmptree->directive),
+ ap_escape_html(r->pool,tmptree->args));
+ } else {
+ ap_rputs("<dd><tt>", r);
+ mod_info_html_cmd_string(r, tmptree->directive, 0);
+ ap_rprintf(r, " <i>%s</i></tt></dd>\n",
+ ap_escape_html(r->pool,tmptree->args));
+ }
+ }
+ ++cmd;
+ }
+ if (tmptree->first_child != NULL) {
+ tmptree = tmptree->first_child;
+ nest++;
+ } else if (tmptree->next != NULL) {
+ tmptree = tmptree->next;
+ } else {
+ if (block_start) {
+ apr_snprintf(htmlstring, sizeof(htmlstring), "%s %s",
+ tmptree->parent->directive,
+ tmptree->parent->args);
+ ap_rputs("<dd><tt>", r);
+ mod_info_html_cmd_string(r, htmlstring, 1);
+ ap_rputs("</tt></dd>\n", r);
+ block_start--;
+ }
+ if (tmptree->parent) {
+ tmptree = tmptree->parent->next;
+ }
+ else {
+ tmptree = NULL;
+ }
+ nest--;
+ }
+
+ }
+}
+
+typedef struct { /*XXX: should get something from apr_hooks.h instead */
+ void (*pFunc)(void); /* just to get the right size */
+ const char *szName;
+ const char * const *aszPredecessors;
+ const char * const *aszSuccessors;
+ int nOrder;
+} hook_struct_t;
+
+/*
+ * hook_get_t is a pointer to a function that takes void as an argument and
+ * returns a pointer to an apr_array_header_t. The nasty WIN32 ifdef
+ * is required to account for the fact that the ap_hook* calls all use
+ * STDCALL calling convention.
+ */
+typedef apr_array_header_t * (
+#ifdef WIN32
+__stdcall
+#endif
+* hook_get_t)(void);
+
+typedef struct {
+ const char *name;
+ hook_get_t get;
+} hook_lookup_t;
+
+static hook_lookup_t request_hooks[] = {
+ {"Post-Read Request", ap_hook_get_post_read_request},
+ {"Header Parse", ap_hook_get_header_parser},
+ {"Translate Path", ap_hook_get_translate_name},
+ {"Check Access", ap_hook_get_access_checker},
+ {"Verify User ID", ap_hook_get_check_user_id},
+ {"Verify User Access", ap_hook_get_auth_checker},
+ {"Check Type", ap_hook_get_type_checker},
+ {"Fixups", ap_hook_get_fixups},
+ {"Logging", ap_hook_get_log_transaction},
+ {NULL},
+};
+
+static int module_find_hook(module *modp,
+ hook_get_t hook_get)
+{
+ int i;
+ apr_array_header_t *hooks = hook_get();
+ hook_struct_t *elts;
+
+ if (!hooks) {
+ return 0;
+ }
+
+ elts = (hook_struct_t *)hooks->elts;
+
+ for (i=0; i< hooks->nelts; i++) {
+ if (strcmp(elts[i].szName, modp->name) == 0) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void module_participate(request_rec *r,
+ module *modp,
+ hook_lookup_t *lookup,
+ int *comma)
+{
+ if (module_find_hook(modp, lookup->get)) {
+ if (*comma) {
+ ap_rputs(", ", r);
+ }
+ ap_rvputs(r, "<tt>", lookup->name, "</tt>", NULL);
+ *comma = 1;
+ }
+}
+
+static void module_request_hook_participate(request_rec *r, module *modp)
+{
+ int i, comma=0;
+
+ ap_rputs("<dt><strong>Request Phase Participation:</strong>\n", r);
+
+ for (i=0; request_hooks[i].name; i++) {
+ module_participate(r, modp, &request_hooks[i], &comma);
+ }
+
+ if (!comma) {
+ ap_rputs("<tt> <em>none</em></tt>", r);
+ }
+ ap_rputs("</dt>\n", r);
+}
+
+static const char *find_more_info(server_rec *s, const char *module_name)
+{
+ int i;
+ info_svr_conf *conf = (info_svr_conf *) ap_get_module_config(s->module_config,
+ &info_module);
+ info_entry *entry = (info_entry *) conf->more_info->elts;
+
+ if (!module_name) {
+ return 0;
+ }
+ for (i = 0; i < conf->more_info->nelts; i++) {
+ if (!strcmp(module_name, entry->name)) {
+ return entry->info;
+ }
+ entry++;
+ }
+ return 0;
+}
+
+static int display_info(request_rec *r)
+{
+ module *modp = NULL;
+ const char *more_info;
+ const command_rec *cmd = NULL;
+#ifdef NEVERMORE
+ const handler_rec *hand = NULL;
+#endif
+ server_rec *serv = r->server;
+ int comma = 0;
+
+ if (strcmp(r->handler, "server-info"))
+ return DECLINED;
+
+ r->allowed |= (AP_METHOD_BIT << M_GET);
+ if (r->method_number != M_GET)
+ return DECLINED;
+
+ ap_set_content_type(r, "text/html; charset=ISO-8859-1");
+
+ ap_rputs(DOCTYPE_HTML_3_2
+ "<html><head><title>Server Information</title></head>\n", r);
+ ap_rputs("<body><h1 align=\"center\">Apache Server Information</h1>\n", r);
+ if (!r->args || strcasecmp(r->args, "list")) {
+ if (!r->args) {
+ ap_rputs("<dl><dt><tt><a href=\"#server\">Server Settings</a>, ", r);
+ for (modp = ap_top_module; modp; modp = modp->next) {
+ ap_rprintf(r, "<a href=\"#%s\">%s</a>", modp->name, modp->name);
+ if (modp->next) {
+ ap_rputs(", ", r);
+ }
+ }
+ ap_rputs("</tt></dt></dl><hr />", r);
+
+ }
+ if (!r->args || !strcasecmp(r->args, "server")) {
+ int max_daemons, forked, threaded;
+
+ ap_rprintf(r, "<dl><dt><a name=\"server\"><strong>Server Version:</strong> "
+ "<font size=\"+1\"><tt>%s</tt></font></a></dt>\n",
+ ap_get_server_version());
+ ap_rprintf(r, "<dt><strong>Server Built:</strong> "
+ "<font size=\"+1\"><tt>%s</tt></font></dt>\n",
+ ap_get_server_built());
+ ap_rprintf(r, "<dt><strong>API Version:</strong> "
+ "<tt>%d:%d</tt></dt>\n",
+ MODULE_MAGIC_NUMBER_MAJOR, MODULE_MAGIC_NUMBER_MINOR);
+ ap_rprintf(r, "<dt><strong>Hostname/port:</strong> "
+ "<tt>%s:%u</tt></dt>\n",
+ ap_get_server_name(r), ap_get_server_port(r));
+ ap_rprintf(r, "<dt><strong>Timeouts:</strong> "
+ "<tt>connection: %d &nbsp;&nbsp; "
+ "keep-alive: %d</tt></dt>",
+ (int)(apr_time_sec(serv->timeout)),
+ (int)(apr_time_sec(serv->keep_alive_timeout)));
+ ap_mpm_query(AP_MPMQ_MAX_DAEMON_USED, &max_daemons);
+ ap_mpm_query(AP_MPMQ_IS_THREADED, &threaded);
+ ap_mpm_query(AP_MPMQ_IS_FORKED, &forked);
+ ap_rprintf(r, "<dt><strong>MPM Name:</strong> <tt>%s</tt></dt>\n", ap_show_mpm());
+ ap_rprintf(r, "<dt><strong>MPM Information:</strong> "
+ "<tt>Max Daemons: %d Threaded: %s Forked: %s</tt></dt>\n",
+ max_daemons, threaded ? "yes" : "no",
+ forked ? "yes" : "no");
+ ap_rprintf(r, "<dt><strong>Server Root:</strong> "
+ "<tt>%s</tt></dt>\n", ap_server_root);
+ ap_rprintf(r, "<dt><strong>Config File:</strong> "
+ "<tt>%s</tt></dt>\n", ap_conftree->filename);
+ ap_rputs("</dl><hr />", r);
+ }
+ for (modp = ap_top_module; modp; modp = modp->next) {
+ if (!r->args || !strcasecmp(modp->name, r->args)) {
+ ap_rprintf(r, "<dl><dt><a name=\"%s\"><strong>Module Name:</strong> "
+ "<font size=\"+1\"><tt>%s</tt></font></a></dt>\n",
+ modp->name, modp->name);
+ ap_rputs("<dt><strong>Content handlers:</strong> ", r);
+#ifdef NEVERMORE
+ hand = modp->handlers;
+ if (hand) {
+ while (hand) {
+ if (hand->content_type) {
+ ap_rprintf(r, " <tt>%s</tt>\n", hand->content_type);
+ }
+ else {
+ break;
+ }
+ hand++;
+ if (hand && hand->content_type) {
+ ap_rputs(",", r);
+ }
+ }
+ }
+ else {
+ ap_rputs("<tt> <em>none</em></tt>", r);
+ }
+#else
+ if (module_find_hook(modp, ap_hook_get_handler)) {
+ ap_rputs("<tt> <em>yes</em></tt>", r);
+ }
+ else {
+ ap_rputs("<tt> <em>none</em></tt>", r);
+ }
+#endif
+ ap_rputs("</dt>", r);
+ ap_rputs("<dt><strong>Configuration Phase Participation:</strong>\n",
+ r);
+ if (modp->create_dir_config) {
+ if (comma) {
+ ap_rputs(", ", r);
+ }
+ ap_rputs("<tt>Create Directory Config</tt>", r);
+ comma = 1;
+ }
+ if (modp->merge_dir_config) {
+ if (comma) {
+ ap_rputs(", ", r);
+ }
+ ap_rputs("<tt>Merge Directory Configs</tt>", r);
+ comma = 1;
+ }
+ if (modp->create_server_config) {
+ if (comma) {
+ ap_rputs(", ", r);
+ }
+ ap_rputs("<tt>Create Server Config</tt>", r);
+ comma = 1;
+ }
+ if (modp->merge_server_config) {
+ if (comma) {
+ ap_rputs(", ", r);
+ }
+ ap_rputs("<tt>Merge Server Configs</tt>", r);
+ comma = 1;
+ }
+ if (!comma)
+ ap_rputs("<tt> <em>none</em></tt>", r);
+ comma = 0;
+ ap_rputs("</dt>", r);
+
+ module_request_hook_participate(r, modp);
+
+ cmd = modp->cmds;
+ if (cmd) {
+ ap_rputs("<dt><strong>Module Directives:</strong></dt>", r);
+ while (cmd) {
+ if (cmd->name) {
+ ap_rputs("<dd><tt>", r);
+ mod_info_html_cmd_string(r, cmd->name, 0);
+ ap_rputs(" - <i>", r);
+ if (cmd->errmsg) {
+ ap_rputs(cmd->errmsg, r);
+ }
+ ap_rputs("</i></tt></dd>\n", r);
+ }
+ else {
+ break;
+ }
+ cmd++;
+ }
+ ap_rputs("<dt><strong>Current Configuration:</strong></dt>\n", r);
+ mod_info_module_cmds(r, modp->cmds, ap_conftree);
+ }
+ else {
+ ap_rputs("<dt><strong>Module Directives:</strong> <tt>none</tt></dt>", r);
+ }
+ more_info = find_more_info(serv, modp->name);
+ if (more_info) {
+ ap_rputs("<dt><strong>Additional Information:</strong>\n</dt><dd>",
+ r);
+ ap_rputs(more_info, r);
+ ap_rputs("</dd>", r);
+ }
+ ap_rputs("</dl><hr />\n", r);
+ if (r->args) {
+ break;
+ }
+ }
+ }
+ if (!modp && r->args && strcasecmp(r->args, "server")) {
+ ap_rputs("<p><b>No such module</b></p>\n", r);
+ }
+ }
+ else {
+ ap_rputs("<dl><dt>Server Module List</dt>", r);
+ for (modp = ap_top_module; modp; modp = modp->next) {
+ ap_rputs("<dd>", r);
+ ap_rputs(modp->name, r);
+ ap_rputs("</dd>", r);
+ }
+ ap_rputs("</dl><hr />", r);
+ }
+ ap_rputs(ap_psignature("",r), r);
+ ap_rputs("</body></html>\n", r);
+ /* Done; nothing further to clean up, just return */
+ return 0;
+}
+
+static const char *add_module_info(cmd_parms *cmd, void *dummy,
+ const char *name, const char *info)
+{
+ server_rec *s = cmd->server;
+ info_svr_conf *conf = (info_svr_conf *) ap_get_module_config(s->module_config,
+ &info_module);
+ info_entry *new = apr_array_push(conf->more_info);
+
+ new->name = name;
+ new->info = info;
+ return NULL;
+}
+
+static const command_rec info_cmds[] =
+{
+ AP_INIT_TAKE2("AddModuleInfo", add_module_info, NULL, RSRC_CONF,
+ "a module name and additional information on that module"),
+ {NULL}
+};
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_handler(display_info, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA info_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ create_info_config, /* server config */
+ merge_info_config, /* merge server config */
+ info_cmds, /* command apr_table_t */
+ register_hooks
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_info.dsp b/rubbos/app/httpd-2.0.64/modules/generators/mod_info.dsp
new file mode 100644
index 00000000..7b6a28ec
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_info.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_info" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_info - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_info.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_info.mak" CFG="mod_info - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_info - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_info - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_info - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_info_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_info.so" /base:@..\..\os\win32\BaseAddr.ref,mod_info.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_info.so" /base:@..\..\os\win32\BaseAddr.ref,mod_info.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_info - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_info_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_info.so" /base:@..\..\os\win32\BaseAddr.ref,mod_info.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_info.so" /base:@..\..\os\win32\BaseAddr.ref,mod_info.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_info - Win32 Release"
+# Name "mod_info - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_info.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_info.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_info - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_info.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_info.so "info_module for Apache" ../../include/ap_release.h > .\mod_info.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_info - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_info.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_info.so "info_module for Apache" ../../include/ap_release.h > .\mod_info.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_info.exp b/rubbos/app/httpd-2.0.64/modules/generators/mod_info.exp
new file mode 100644
index 00000000..c304fa77
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_info.exp
@@ -0,0 +1 @@
+info_module
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_status.c b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.c
new file mode 100644
index 00000000..ba978d63
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.c
@@ -0,0 +1,857 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Status Module. Display lots of internal data about how Apache is
+ * performing and the state of all children processes.
+ *
+ * To enable this, add the following lines into any config file:
+ *
+ * <Location /server-status>
+ * SetHandler server-status
+ * </Location>
+ *
+ * You may want to protect this location by password or domain so no one
+ * else can look at it. Then you can access the statistics with a URL like:
+ *
+ * http://your_server_name/server-status
+ *
+ * /server-status - Returns page using tables
+ * /server-status?notable - Returns page for browsers without table support
+ * /server-status?refresh - Returns page with 1 second refresh
+ * /server-status?refresh=6 - Returns page with refresh every 6 seconds
+ * /server-status?auto - Returns page with data for automatic parsing
+ *
+ * Mark Cox, mark@ukweb.com, November 1995
+ *
+ * 12.11.95 Initial version for www.telescope.org
+ * 13.3.96 Updated to remove rprintf's [Mark]
+ * 18.3.96 Added CPU usage, process information, and tidied [Ben Laurie]
+ * 18.3.96 Make extra Scoreboard variables #definable
+ * 25.3.96 Make short report have full precision [Ben Laurie suggested]
+ * 25.3.96 Show uptime better [Mark/Ben Laurie]
+ * 29.3.96 Better HTML and explanation [Mark/Rob Hartill suggested]
+ * 09.4.96 Added message for non-STATUS compiled version
+ * 18.4.96 Added per child and per slot counters [Jim Jagielski]
+ * 01.5.96 Table format, cleanup, even more spiffy data [Chuck Murcko/Jim J.]
+ * 18.5.96 Adapted to use new rprintf() routine, incidentally fixing a missing
+ * piece in short reports [Ben Laurie]
+ * 21.5.96 Additional Status codes (DNS and LOGGING only enabled if
+ * extended STATUS is enabled) [George Burgyan/Jim J.]
+ * 10.8.98 Allow for extended status info at runtime (no more STATUS)
+ * [Jim J.]
+ */
+
+#define CORE_PRIVATE
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "ap_mpm.h"
+#include "util_script.h"
+#include <time.h>
+#include "scoreboard.h"
+#include "http_log.h"
+#include "mod_status.h"
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+#include "apr_strings.h"
+
+#ifdef NEXT
+#if (NX_CURRENT_COMPILER_RELEASE == 410)
+#ifdef m68k
+#define HZ 64
+#else
+#define HZ 100
+#endif
+#else
+#include <machine/param.h>
+#endif
+#endif /* NEXT */
+
+#define STATUS_MAXLINE 64
+
+#define KBYTE 1024
+#define MBYTE 1048576L
+#define GBYTE 1073741824L
+
+#ifndef DEFAULT_TIME_FORMAT
+#define DEFAULT_TIME_FORMAT "%A, %d-%b-%Y %H:%M:%S %Z"
+#endif
+
+#define STATUS_MAGIC_TYPE "application/x-httpd-status"
+
+module AP_MODULE_DECLARE_DATA status_module;
+
+int server_limit, thread_limit;
+
+#ifdef HAVE_TIMES
+/* ugh... need to know if we're running with a pthread implementation
+ * such as linuxthreads that treats individual threads as distinct
+ * processes; that affects how we add up CPU time in a process
+ */
+static pid_t child_pid;
+#endif
+
+/* Implement 'ap_run_status_hook'. */
+APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ap, STATUS, int, status_hook,
+ (request_rec *r, int flags),
+ (r, flags),
+ OK, DECLINED)
+
+/*
+ * command-related code. This is here to prevent use of ExtendedStatus
+ * without status_module included.
+ */
+static const char *set_extended_status(cmd_parms *cmd, void *dummy, int arg)
+{
+ const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (err != NULL) {
+ return err;
+ }
+ ap_extended_status = arg;
+ return NULL;
+}
+
+static const command_rec status_module_cmds[] =
+{
+ AP_INIT_FLAG("ExtendedStatus", set_extended_status, NULL, RSRC_CONF,
+ "\"On\" to enable extended status information, \"Off\" to disable"),
+ {NULL}
+};
+
+/* Format the number of bytes nicely */
+static void format_byte_out(request_rec *r, apr_off_t bytes)
+{
+ if (bytes < (5 * KBYTE))
+ ap_rprintf(r, "%d B", (int) bytes);
+ else if (bytes < (MBYTE / 2))
+ ap_rprintf(r, "%.1f kB", (float) bytes / KBYTE);
+ else if (bytes < (GBYTE / 2))
+ ap_rprintf(r, "%.1f MB", (float) bytes / MBYTE);
+ else
+ ap_rprintf(r, "%.1f GB", (float) bytes / GBYTE);
+}
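+
+/* Worked examples (illustrative, not in the original source):
+ * 4096 bytes is below 5*KBYTE, so "4096 B" is printed;
+ * 524288 bytes equals MBYTE/2, so the MB branch prints "0.5 MB".
+ */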
+
+static void format_kbyte_out(request_rec *r, apr_off_t kbytes)
+{
+ if (kbytes < KBYTE)
+ ap_rprintf(r, "%d kB", (int) kbytes);
+ else if (kbytes < MBYTE)
+ ap_rprintf(r, "%.1f MB", (float) kbytes / KBYTE);
+ else
+ ap_rprintf(r, "%.1f GB", (float) kbytes / MBYTE);
+}
+
+static void show_time(request_rec *r, apr_interval_time_t tsecs)
+{
+ int days, hrs, mins, secs;
+
+ secs = (int)(tsecs % 60);
+ tsecs /= 60;
+ mins = (int)(tsecs % 60);
+ tsecs /= 60;
+ hrs = (int)(tsecs % 24);
+ days = (int)(tsecs / 24);
+
+ if (days)
+ ap_rprintf(r, " %d day%s", days, days == 1 ? "" : "s");
+
+ if (hrs)
+ ap_rprintf(r, " %d hour%s", hrs, hrs == 1 ? "" : "s");
+
+ if (mins)
+ ap_rprintf(r, " %d minute%s", mins, mins == 1 ? "" : "s");
+
+ if (secs)
+ ap_rprintf(r, " %d second%s", secs, secs == 1 ? "" : "s");
+}
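+
+/* Worked example (illustrative, not in the original source):
+ * tsecs = 90061 gives secs = 1, mins = 1, hrs = 1, days = 1,
+ * so the output is " 1 day 1 hour 1 minute 1 second".
+ */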
+
+/* Main handler for x-httpd-status requests */
+
+/* ID values for command table */
+
+#define STAT_OPT_END -1
+#define STAT_OPT_REFRESH 0
+#define STAT_OPT_NOTABLE 1
+#define STAT_OPT_AUTO 2
+
+struct stat_opt {
+ int id;
+ const char *form_data_str;
+ const char *hdr_out_str;
+};
+
+static const struct stat_opt status_options[] = /* see #defines above */
+{
+ {STAT_OPT_REFRESH, "refresh", "Refresh"},
+ {STAT_OPT_NOTABLE, "notable", NULL},
+ {STAT_OPT_AUTO, "auto", NULL},
+ {STAT_OPT_END, NULL, NULL}
+};
+
+static char status_flags[SERVER_NUM_STATUS];
+
+static int status_handler(request_rec *r)
+{
+ const char *loc;
+ apr_time_t nowtime;
+ apr_interval_time_t up_time;
+ int j, i, res;
+ int ready;
+ int busy;
+ unsigned long count;
+ unsigned long lres, my_lres, conn_lres;
+ apr_off_t bytes, my_bytes, conn_bytes;
+ apr_off_t bcount, kbcount;
+ long req_time;
+#ifdef HAVE_TIMES
+ float tick;
+ int times_per_thread = getpid() != child_pid;
+#endif
+ int short_report;
+ int no_table_report;
+ worker_score *ws_record;
+ process_score *ps_record;
+ char *stat_buffer;
+ pid_t *pid_buffer;
+ clock_t tu, ts, tcu, tcs;
+
+ if (strcmp(r->handler, STATUS_MAGIC_TYPE) &&
+ strcmp(r->handler, "server-status")) {
+ return DECLINED;
+ }
+
+#ifdef HAVE_TIMES
+#ifdef _SC_CLK_TCK
+ tick = sysconf(_SC_CLK_TCK);
+#else
+ tick = HZ;
+#endif
+#endif
+
+ ready = 0;
+ busy = 0;
+ count = 0;
+ bcount = 0;
+ kbcount = 0;
+ short_report = 0;
+ no_table_report = 0;
+
+ pid_buffer = apr_palloc(r->pool, server_limit * sizeof(pid_t));
+ stat_buffer = apr_palloc(r->pool, server_limit * thread_limit * sizeof(char));
+
+ nowtime = apr_time_now();
+ tu = ts = tcu = tcs = 0;
+
+ if (!ap_exists_scoreboard_image()) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Server status unavailable in inetd mode");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ r->allowed = (AP_METHOD_BIT << M_GET);
+ if (r->method_number != M_GET)
+ return DECLINED;
+
+ ap_set_content_type(r, "text/html; charset=ISO-8859-1");
+
+ /*
+ * Simple table-driven form data set parser that lets you alter the header
+ */
+
+ if (r->args) {
+ i = 0;
+ while (status_options[i].id != STAT_OPT_END) {
+ if ((loc = ap_strstr_c(r->args,
+ status_options[i].form_data_str)) != NULL) {
+ switch (status_options[i].id) {
+ case STAT_OPT_REFRESH: {
+ apr_size_t len = strlen(status_options[i].form_data_str);
+ long t = 0;
+
+ if (*(loc + len ) == '=') {
+ t = atol(loc + len + 1);
+ }
+ apr_table_set(r->headers_out,
+ status_options[i].hdr_out_str,
+ apr_ltoa(r->pool, t < 1 ? 10 : t));
+ break;
+ }
+ case STAT_OPT_NOTABLE:
+ no_table_report = 1;
+ break;
+ case STAT_OPT_AUTO:
+ ap_set_content_type(r, "text/plain; charset=ISO-8859-1");
+ short_report = 1;
+ break;
+ }
+ }
+
+ i++;
+ }
+ }
+
+ for (i = 0; i < server_limit; ++i) {
+#ifdef HAVE_TIMES
+ clock_t proc_tu = 0, proc_ts = 0, proc_tcu = 0, proc_tcs = 0;
+ clock_t tmp_tu, tmp_ts, tmp_tcu, tmp_tcs;
+#endif
+
+ ps_record = ap_get_scoreboard_process(i);
+ for (j = 0; j < thread_limit; ++j) {
+ int indx = (i * thread_limit) + j;
+
+ ws_record = ap_get_scoreboard_worker(i, j);
+ res = ws_record->status;
+ stat_buffer[indx] = status_flags[res];
+
+ if (!ps_record->quiescing
+ && ps_record->pid) {
+ if (res == SERVER_READY
+ && ps_record->generation == ap_my_generation)
+ ready++;
+ else if (res != SERVER_DEAD &&
+ res != SERVER_STARTING &&
+ res != SERVER_IDLE_KILL)
+ busy++;
+ }
+
+ /* XXX what about the counters for quiescing/seg faulted
+ * processes? should they be counted or not? GLA
+ */
+ if (ap_extended_status) {
+ lres = ws_record->access_count;
+ bytes = ws_record->bytes_served;
+
+ if (lres != 0 || (res != SERVER_READY && res != SERVER_DEAD)) {
+#ifdef HAVE_TIMES
+ tmp_tu = ws_record->times.tms_utime;
+ tmp_ts = ws_record->times.tms_stime;
+ tmp_tcu = ws_record->times.tms_cutime;
+ tmp_tcs = ws_record->times.tms_cstime;
+
+ if (times_per_thread) {
+ proc_tu += tmp_tu;
+ proc_ts += tmp_ts;
+ proc_tcu += tmp_tcu;
+ proc_tcs += tmp_tcs;
+ }
+ else {
+ if (tmp_tu > proc_tu ||
+ tmp_ts > proc_ts ||
+ tmp_tcu > proc_tcu ||
+ tmp_tcs > proc_tcs) {
+ proc_tu = tmp_tu;
+ proc_ts = tmp_ts;
+ proc_tcu = tmp_tcu;
+ proc_tcs = tmp_tcs;
+ }
+ }
+#endif /* HAVE_TIMES */
+
+ count += lres;
+ bcount += bytes;
+
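+ /* carry whole kilobytes from bcount into kbcount: ">> 10" divides
+ * by 1024 and "& 0x3ff" keeps the sub-kilobyte remainder */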
+ if (bcount >= KBYTE) {
+ kbcount += (bcount >> 10);
+ bcount = bcount & 0x3ff;
+ }
+ }
+ }
+ }
+#ifdef HAVE_TIMES
+ tu += proc_tu;
+ ts += proc_ts;
+ tcu += proc_tcu;
+ tcs += proc_tcs;
+#endif
+ pid_buffer[i] = ps_record->pid;
+ }
+
+ /* up_time in seconds */
+ up_time = (apr_uint32_t) apr_time_sec(nowtime -
+ ap_scoreboard_image->global->restart_time);
+
+ if (!short_report) {
+ ap_rputs(DOCTYPE_HTML_3_2
+ "<html><head>\n<title>Apache Status</title>\n</head><body>\n",
+ r);
+ ap_rputs("<h1>Apache Server Status for ", r);
+ ap_rvputs(r, ap_get_server_name(r), "</h1>\n\n", NULL);
+ ap_rvputs(r, "<dl><dt>Server Version: ",
+ ap_get_server_version(), "</dt>\n", NULL);
+ ap_rvputs(r, "<dt>Server Built: ",
+ ap_get_server_built(), "\n</dt></dl><hr /><dl>\n", NULL);
+ ap_rvputs(r, "<dt>Current Time: ",
+ ap_ht_time(r->pool, nowtime, DEFAULT_TIME_FORMAT, 0),
+ "</dt>\n", NULL);
+ ap_rvputs(r, "<dt>Restart Time: ",
+ ap_ht_time(r->pool,
+ ap_scoreboard_image->global->restart_time,
+ DEFAULT_TIME_FORMAT, 0),
+ "</dt>\n", NULL);
+ ap_rprintf(r, "<dt>Parent Server Generation: %d</dt>\n",
+ (int)ap_my_generation);
+ ap_rputs("<dt>Server uptime: ", r);
+ show_time(r, up_time);
+ ap_rputs("</dt>\n", r);
+ }
+
+ if (ap_extended_status) {
+ if (short_report) {
+ ap_rprintf(r, "Total Accesses: %lu\nTotal kBytes: %"
+ APR_OFF_T_FMT "\n",
+ count, kbcount);
+
+#ifdef HAVE_TIMES
+ /* Allow for OS/2 not having CPU stats */
+ if (ts || tu || tcu || tcs)
+ ap_rprintf(r, "CPULoad: %g\n",
+ (tu + ts + tcu + tcs) / tick / up_time * 100.);
+#endif
+
+ ap_rprintf(r, "Uptime: %ld\n", (long) (up_time));
+ if (up_time > 0)
+ ap_rprintf(r, "ReqPerSec: %g\n",
+ (float) count / (float) up_time);
+
+ if (up_time > 0)
+ ap_rprintf(r, "BytesPerSec: %g\n",
+ KBYTE * (float) kbcount / (float) up_time);
+
+ if (count > 0)
+ ap_rprintf(r, "BytesPerReq: %g\n",
+ KBYTE * (float) kbcount / (float) count);
+ }
+ else { /* !short_report */
+ ap_rprintf(r, "<dt>Total accesses: %lu - Total Traffic: ", count);
+ format_kbyte_out(r, kbcount);
+ ap_rputs("</dt>\n", r);
+
+#ifdef HAVE_TIMES
+ /* Allow for OS/2 not having CPU stats */
+ ap_rprintf(r, "<dt>CPU Usage: u%g s%g cu%g cs%g",
+ tu / tick, ts / tick, tcu / tick, tcs / tick);
+
+ if (ts || tu || tcu || tcs)
+ ap_rprintf(r, " - %.3g%% CPU load</dt>\n",
+ (tu + ts + tcu + tcs) / tick / up_time * 100.);
+#endif
+
+ if (up_time > 0)
+ ap_rprintf(r, "<dt>%.3g requests/sec - ",
+ (float) count / (float) up_time);
+
+ if (up_time > 0) {
+ format_byte_out(r, (unsigned long)(KBYTE * (float) kbcount
+ / (float) up_time));
+ ap_rputs("/second - ", r);
+ }
+
+ if (count > 0) {
+ format_byte_out(r, (unsigned long)(KBYTE * (float) kbcount
+ / (float) count));
+ ap_rputs("/request", r);
+ }
+
+ ap_rputs("</dt>\n", r);
+ } /* short_report */
+ } /* ap_extended_status */
+
+ if (!short_report)
+ ap_rprintf(r, "<dt>%d requests currently being processed, "
+ "%d idle workers</dt>\n", busy, ready);
+ else
+ ap_rprintf(r, "BusyWorkers: %d\nIdleWorkers: %d\n", busy, ready);
+
+ /* send the scoreboard 'table' out */
+ if (!short_report)
+ ap_rputs("</dl><pre>", r);
+ else
+ ap_rputs("Scoreboard: ", r);
+
+ for (i = 0; i < server_limit; ++i) {
+ for (j = 0; j < thread_limit; ++j) {
+ int indx = (i * thread_limit) + j;
+ ap_rputc(stat_buffer[indx], r);
+ if ((indx % STATUS_MAXLINE == (STATUS_MAXLINE - 1))
+ && !short_report)
+ ap_rputs("\n", r);
+ }
+ }
+
+ if (short_report)
+ ap_rputs("\n", r);
+ else {
+ ap_rputs("</pre>\n", r);
+ ap_rputs("<p>Scoreboard Key:<br />\n", r);
+ ap_rputs("\"<b><code>_</code></b>\" Waiting for Connection, \n", r);
+ ap_rputs("\"<b><code>S</code></b>\" Starting up, \n", r);
+ ap_rputs("\"<b><code>R</code></b>\" Reading Request,<br />\n", r);
+ ap_rputs("\"<b><code>W</code></b>\" Sending Reply, \n", r);
+ ap_rputs("\"<b><code>K</code></b>\" Keepalive (read), \n", r);
+ ap_rputs("\"<b><code>D</code></b>\" DNS Lookup,<br />\n", r);
+ ap_rputs("\"<b><code>C</code></b>\" Closing connection, \n", r);
+ ap_rputs("\"<b><code>L</code></b>\" Logging, \n", r);
+ ap_rputs("\"<b><code>G</code></b>\" Gracefully finishing,<br /> \n", r);
+ ap_rputs("\"<b><code>I</code></b>\" Idle cleanup of worker, \n", r);
+ ap_rputs("\"<b><code>.</code></b>\" Open slot with no current process</p>\n", r);
+ ap_rputs("<p />\n", r);
+ if (!ap_extended_status) {
+ int j;
+ int k = 0;
+ ap_rputs("PID Key: <br />\n", r);
+ ap_rputs("<pre>\n", r);
+ for (i = 0; i < server_limit; ++i) {
+ for (j = 0; j < thread_limit; ++j) {
+ int indx = (i * thread_limit) + j;
+
+ if (stat_buffer[indx] != '.') {
+ ap_rprintf(r, " %" APR_PID_T_FMT
+ " in state: %c ", pid_buffer[i],
+ stat_buffer[indx]);
+
+ if (++k >= 3) {
+ ap_rputs("\n", r);
+ k = 0;
+ } else
+ ap_rputs(",", r);
+ }
+ }
+ }
+
+ ap_rputs("\n", r);
+ ap_rputs("</pre>\n", r);
+ }
+ }
+
+ if (ap_extended_status && !short_report) {
+ if (no_table_report)
+ ap_rputs("<hr /><h2>Server Details</h2>\n\n", r);
+ else
+ ap_rputs("\n\n<table border=\"0\"><tr>"
+ "<th>Srv</th><th>PID</th><th>Acc</th>"
+ "<th>M</th>"
+#ifdef HAVE_TIMES
+ "<th>CPU\n</th>"
+#endif
+ "<th>SS</th><th>Req</th>"
+ "<th>Conn</th><th>Child</th><th>Slot</th>"
+ "<th>Client</th><th>VHost</th>"
+ "<th>Request</th></tr>\n\n", r);
+
+ for (i = 0; i < server_limit; ++i) {
+ for (j = 0; j < thread_limit; ++j) {
+ ws_record = ap_get_scoreboard_worker(i, j);
+
+ if (ws_record->access_count == 0 &&
+ (ws_record->status == SERVER_READY ||
+ ws_record->status == SERVER_DEAD)) {
+ continue;
+ }
+
+ ps_record = ap_get_scoreboard_process(i);
+
+ if (ws_record->start_time == 0L)
+ req_time = 0L;
+ else
+ req_time = (long)
+ ((ws_record->stop_time -
+ ws_record->start_time) / 1000);
+ if (req_time < 0L)
+ req_time = 0L;
+
+ lres = ws_record->access_count;
+ my_lres = ws_record->my_access_count;
+ conn_lres = ws_record->conn_count;
+ bytes = ws_record->bytes_served;
+ my_bytes = ws_record->my_bytes_served;
+ conn_bytes = ws_record->conn_bytes;
+
+ if (no_table_report) {
+ if (ws_record->status == SERVER_DEAD)
+ ap_rprintf(r,
+ "<b>Server %d-%d</b> (-): %d|%lu|%lu [",
+ i, (int)ps_record->generation,
+ (int)conn_lres, my_lres, lres);
+ else
+ ap_rprintf(r,
+ "<b>Server %d-%d</b> (%"
+ APR_PID_T_FMT "): %d|%lu|%lu [",
+ i, (int) ps_record->generation,
+ ps_record->pid,
+ (int)conn_lres, my_lres, lres);
+
+ switch (ws_record->status) {
+ case SERVER_READY:
+ ap_rputs("Ready", r);
+ break;
+ case SERVER_STARTING:
+ ap_rputs("Starting", r);
+ break;
+ case SERVER_BUSY_READ:
+ ap_rputs("<b>Read</b>", r);
+ break;
+ case SERVER_BUSY_WRITE:
+ ap_rputs("<b>Write</b>", r);
+ break;
+ case SERVER_BUSY_KEEPALIVE:
+ ap_rputs("<b>Keepalive</b>", r);
+ break;
+ case SERVER_BUSY_LOG:
+ ap_rputs("<b>Logging</b>", r);
+ break;
+ case SERVER_BUSY_DNS:
+ ap_rputs("<b>DNS lookup</b>", r);
+ break;
+ case SERVER_CLOSING:
+ ap_rputs("<b>Closing</b>", r);
+ break;
+ case SERVER_DEAD:
+ ap_rputs("Dead", r);
+ break;
+ case SERVER_GRACEFUL:
+ ap_rputs("Graceful", r);
+ break;
+ case SERVER_IDLE_KILL:
+ ap_rputs("Dying", r);
+ break;
+ default:
+ ap_rputs("?STATE?", r);
+ break;
+ }
+
+ ap_rprintf(r, "] "
+#ifdef HAVE_TIMES
+ "u%g s%g cu%g cs%g"
+#endif
+ "\n %ld %ld (",
+#ifdef HAVE_TIMES
+ ws_record->times.tms_utime / tick,
+ ws_record->times.tms_stime / tick,
+ ws_record->times.tms_cutime / tick,
+ ws_record->times.tms_cstime / tick,
+#endif
+ (long)apr_time_sec(nowtime -
+ ws_record->last_used),
+ (long) req_time);
+
+ format_byte_out(r, conn_bytes);
+ ap_rputs("|", r);
+ format_byte_out(r, my_bytes);
+ ap_rputs("|", r);
+ format_byte_out(r, bytes);
+ ap_rputs(")\n", r);
+ ap_rprintf(r,
+ " <i>%s {%s}</i> <b>[%s]</b><br />\n\n",
+ ap_escape_html(r->pool,
+ ws_record->client),
+ ap_escape_html(r->pool,
+ ap_escape_logitem(r->pool,
+ ws_record->request)),
+ ap_escape_html(r->pool,
+ ws_record->vhost));
+ }
+ else { /* !no_table_report */
+ if (ws_record->status == SERVER_DEAD)
+ ap_rprintf(r,
+ "<tr><td><b>%d-%d</b></td><td>-</td><td>%d/%lu/%lu",
+ i, (int)ps_record->generation,
+ (int)conn_lres, my_lres, lres);
+ else
+ ap_rprintf(r,
+ "<tr><td><b>%d-%d</b></td><td>%"
+ APR_PID_T_FMT
+ "</td><td>%d/%lu/%lu",
+ i, (int)ps_record->generation,
+ ps_record->pid, (int)conn_lres,
+ my_lres, lres);
+
+ switch (ws_record->status) {
+ case SERVER_READY:
+ ap_rputs("</td><td>_", r);
+ break;
+ case SERVER_STARTING:
+ ap_rputs("</td><td><b>S</b>", r);
+ break;
+ case SERVER_BUSY_READ:
+ ap_rputs("</td><td><b>R</b>", r);
+ break;
+ case SERVER_BUSY_WRITE:
+ ap_rputs("</td><td><b>W</b>", r);
+ break;
+ case SERVER_BUSY_KEEPALIVE:
+ ap_rputs("</td><td><b>K</b>", r);
+ break;
+ case SERVER_BUSY_LOG:
+ ap_rputs("</td><td><b>L</b>", r);
+ break;
+ case SERVER_BUSY_DNS:
+ ap_rputs("</td><td><b>D</b>", r);
+ break;
+ case SERVER_CLOSING:
+ ap_rputs("</td><td><b>C</b>", r);
+ break;
+ case SERVER_DEAD:
+ ap_rputs("</td><td>.", r);
+ break;
+ case SERVER_GRACEFUL:
+ ap_rputs("</td><td>G", r);
+ break;
+ case SERVER_IDLE_KILL:
+ ap_rputs("</td><td>I", r);
+ break;
+ default:
+ ap_rputs("</td><td>?", r);
+ break;
+ }
+
+ ap_rprintf(r,
+ "\n</td>"
+#ifdef HAVE_TIMES
+ "<td>%.2f</td>"
+#endif
+ "<td>%ld</td><td>%ld",
+#ifdef HAVE_TIMES
+ (ws_record->times.tms_utime +
+ ws_record->times.tms_stime +
+ ws_record->times.tms_cutime +
+ ws_record->times.tms_cstime) / tick,
+#endif
+ (long)apr_time_sec(nowtime -
+ ws_record->last_used),
+ (long)req_time);
+
+ ap_rprintf(r, "</td><td>%-1.1f</td><td>%-2.2f</td><td>%-2.2f\n",
+ (float)conn_bytes / KBYTE, (float) my_bytes / MBYTE,
+ (float)bytes / MBYTE);
+
+ if (ws_record->status == SERVER_BUSY_READ)
+ ap_rprintf(r,
+ "</td><td>?</td><td nowrap>?</td><td nowrap>..reading.. </td></tr>\n\n");
+ else
+ ap_rprintf(r,
+ "</td><td>%s</td><td nowrap>%s</td><td nowrap>%s</td></tr>\n\n",
+ ap_escape_html(r->pool,
+ ws_record->client),
+ ap_escape_html(r->pool,
+ ws_record->vhost),
+ ap_escape_html(r->pool,
+ ap_escape_logitem(r->pool,
+ ws_record->request)));
+ } /* no_table_report */
+ } /* for (j...) */
+ } /* for (i...) */
+
+ if (!no_table_report) {
+ ap_rputs("</table>\n \
+<hr /> \
+<table>\n \
+<tr><th>Srv</th><td>Child Server number - generation</td></tr>\n \
+<tr><th>PID</th><td>OS process ID</td></tr>\n \
+<tr><th>Acc</th><td>Number of accesses this connection / this child / this slot</td></tr>\n \
+<tr><th>M</th><td>Mode of operation</td></tr>\n"
+
+#ifdef HAVE_TIMES
+"<tr><th>CPU</th><td>CPU usage, number of seconds</td></tr>\n"
+#endif
+
+"<tr><th>SS</th><td>Seconds since beginning of most recent request</td></tr>\n \
+<tr><th>Req</th><td>Milliseconds required to process most recent request</td></tr>\n \
+<tr><th>Conn</th><td>Kilobytes transferred this connection</td></tr>\n \
+<tr><th>Child</th><td>Megabytes transferred this child</td></tr>\n \
+<tr><th>Slot</th><td>Total megabytes transferred this slot</td></tr>\n \
+</table>\n", r);
+ }
+ } /* if (ap_extended_status && !short_report) */
+ else {
+
+ if (!short_report) {
+ ap_rputs("<hr />To obtain a full report with current status "
+ "information you need to use the "
+ "<code>ExtendedStatus On</code> directive.\n", r);
+ }
+ }
+
+ {
+ /* Run extension hooks to insert extra content. */
+ int flags =
+ (short_report ? AP_STATUS_SHORT : 0) |
+ (no_table_report ? AP_STATUS_NOTABLE : 0) |
+ (ap_extended_status ? AP_STATUS_EXTENDED : 0);
+
+ ap_run_status_hook(r, flags);
+ }
+
+ if (!short_report) {
+ ap_rputs(ap_psignature("<hr />\n",r), r);
+ ap_rputs("</body></html>\n", r);
+ }
+
+ return 0;
+}
+
+
+static int status_init(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp,
+ server_rec *s)
+{
+ status_flags[SERVER_DEAD] = '.'; /* We don't want to assume these are in */
+ status_flags[SERVER_READY] = '_'; /* any particular order in scoreboard.h */
+ status_flags[SERVER_STARTING] = 'S';
+ status_flags[SERVER_BUSY_READ] = 'R';
+ status_flags[SERVER_BUSY_WRITE] = 'W';
+ status_flags[SERVER_BUSY_KEEPALIVE] = 'K';
+ status_flags[SERVER_BUSY_LOG] = 'L';
+ status_flags[SERVER_BUSY_DNS] = 'D';
+ status_flags[SERVER_CLOSING] = 'C';
+ status_flags[SERVER_GRACEFUL] = 'G';
+ status_flags[SERVER_IDLE_KILL] = 'I';
+ ap_mpm_query(AP_MPMQ_HARD_LIMIT_THREADS, &thread_limit);
+ ap_mpm_query(AP_MPMQ_HARD_LIMIT_DAEMONS, &server_limit);
+ return OK;
+}
+
+#ifdef HAVE_TIMES
+static void status_child_init(apr_pool_t *p, server_rec *s)
+{
+ child_pid = getpid();
+}
+#endif
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_handler(status_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_config(status_init, NULL, NULL, APR_HOOK_MIDDLE);
+#ifdef HAVE_TIMES
+ ap_hook_child_init(status_child_init, NULL, NULL, APR_HOOK_MIDDLE);
+#endif
+}
+
+module AP_MODULE_DECLARE_DATA status_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ status_module_cmds, /* command table */
+ register_hooks /* register_hooks */
+};
+
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_status.dsp b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.dsp
new file mode 100644
index 00000000..5a0eef5d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_status" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_status - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_status.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_status.mak" CFG="mod_status - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_status - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_status - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_status - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "STATUS_DECLARE_EXPORT" /Fd"Release\mod_status_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_status.so" /base:@..\..\os\win32\BaseAddr.ref,mod_status.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /debug /out:"Release/mod_status.so" /base:@..\..\os\win32\BaseAddr.ref,mod_status.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_status - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "STATUS_DECLARE_EXPORT" /Fd"Debug\mod_status_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_status.so" /base:@..\..\os\win32\BaseAddr.ref,mod_status.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_status.so" /base:@..\..\os\win32\BaseAddr.ref,mod_status.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_status - Win32 Release"
+# Name "mod_status - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_status.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_status.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_status - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_status.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_status.so "status_module for Apache" ../../include/ap_release.h > .\mod_status.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_status - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_status.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_status.so "status_module for Apache" ../../include/ap_release.h > .\mod_status.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_status.exp b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.exp
new file mode 100644
index 00000000..54380936
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.exp
@@ -0,0 +1 @@
+status_module
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_status.h b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.h
new file mode 100644
index 00000000..03911a02
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.h
@@ -0,0 +1,54 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MOD_STATUS_H
+#define MOD_STATUS_H
+
+#include "ap_config.h"
+#include "httpd.h"
+
+#define AP_STATUS_SHORT (0x1) /* short, non-HTML report requested */
+#define AP_STATUS_NOTABLE (0x2) /* HTML report without tables */
+#define AP_STATUS_EXTENDED (0x4) /* detailed report */
+
+#if !defined(WIN32)
+#define STATUS_DECLARE(type) type
+#define STATUS_DECLARE_NONSTD(type) type
+#define STATUS_DECLARE_DATA
+#elif defined(STATUS_DECLARE_STATIC)
+#define STATUS_DECLARE(type) type __stdcall
+#define STATUS_DECLARE_NONSTD(type) type
+#define STATUS_DECLARE_DATA
+#elif defined(STATUS_DECLARE_EXPORT)
+#define STATUS_DECLARE(type) __declspec(dllexport) type __stdcall
+#define STATUS_DECLARE_NONSTD(type) __declspec(dllexport) type
+#define STATUS_DECLARE_DATA __declspec(dllexport)
+#else
+#define STATUS_DECLARE(type) __declspec(dllimport) type __stdcall
+#define STATUS_DECLARE_NONSTD(type) __declspec(dllimport) type
+#define STATUS_DECLARE_DATA __declspec(dllimport)
+#endif
+
+/* Optional hooks which can insert extra content into the mod_status
+ * output. FLAGS will be set to the bitwise OR of any of the
+ * AP_STATUS_* flags.
+ *
+ * Implementations of this hook should generate content using
+ * functions in the ap_rputs/ap_rprintf family; each hook should
+ * return OK or DECLINED. */
+APR_DECLARE_EXTERNAL_HOOK(ap, STATUS, int, status_hook,
+ (request_rec *r, int flags))
+#endif
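
The optional status_hook declared above is how other modules append their own sections to the server-status report. The sketch below is a minimal, hypothetical consumer: it assumes the hook is exposed through APR's optional-hook mechanism (as the APR_DECLARE_EXTERNAL_HOOK usage suggests), and the module name my_status_example_module and the handler my_status_hook are illustrative only; none of these names appear in this diff.

#include "httpd.h"
#include "http_config.h"
#include "http_protocol.h"        /* ap_rputs / ap_rprintf */
#include "apr_optional_hooks.h"
#include "mod_status.h"

static int my_status_hook(request_rec *r, int flags)
{
    /* Skip the machine-readable report requested with ?auto */
    if (flags & AP_STATUS_SHORT) {
        return DECLINED;
    }
    ap_rputs("<hr /><h2>my_status_example</h2>\n", r);
    ap_rprintf(r, "<p>Extended status is %s.</p>\n",
               (flags & AP_STATUS_EXTENDED) ? "on" : "off");
    return OK;
}

static void register_hooks(apr_pool_t *p)
{
    /* Register against mod_status's optional hook; this only runs
     * when mod_status is actually loaded. */
    APR_OPTIONAL_HOOK(ap, status_hook, my_status_hook, NULL, NULL,
                      APR_HOOK_MIDDLE);
}

module AP_MODULE_DECLARE_DATA my_status_example_module = {
    STANDARD20_MODULE_STUFF,
    NULL, NULL, NULL, NULL, NULL,
    register_hooks
};
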
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_status.la b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.la
new file mode 100644
index 00000000..fc5f7173
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.la
@@ -0,0 +1,35 @@
+# mod_status.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_status.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_status.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_status.lo b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.lo
new file mode 100644
index 00000000..a740c8dd
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.lo
@@ -0,0 +1,12 @@
+# mod_status.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_status.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_status.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_status.o b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.o
new file mode 100644
index 00000000..107ccfef
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_status.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_suexec.c b/rubbos/app/httpd-2.0.64/modules/generators/mod_suexec.c
new file mode 100644
index 00000000..308c3520
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_suexec.c
@@ -0,0 +1,138 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define CORE_PRIVATE
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_request.h"
+#include "apr_strings.h"
+#include "unixd.h"
+#include "mpm_common.h"
+#include "mod_suexec.h"
+
+module AP_MODULE_DECLARE_DATA suexec_module;
+
+/*
+ * Create a configuration specific to this module for a server or directory
+ * location, and fill it with the default settings.
+ */
+static void *mkconfig(apr_pool_t *p)
+{
+ suexec_config_t *cfg = apr_palloc(p, sizeof(suexec_config_t));
+
+ cfg->active = 0;
+ return cfg;
+}
+
+/*
+ * Respond to a callback to create configuration record for a server or
+ * vhost environment.
+ */
+static void *create_mconfig_for_server(apr_pool_t *p, server_rec *s)
+{
+ return mkconfig(p);
+}
+
+/*
+ * Respond to a callback to create a config record for a specific directory.
+ */
+static void *create_mconfig_for_directory(apr_pool_t *p, char *dir)
+{
+ return mkconfig(p);
+}
+
+static const char *set_suexec_ugid(cmd_parms *cmd, void *mconfig,
+ const char *uid, const char *gid)
+{
+ suexec_config_t *cfg = (suexec_config_t *) mconfig;
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_LOC_FILE|NOT_IN_LIMIT);
+
+ if (err != NULL) {
+ return err;
+ }
+ if (unixd_config.suexec_enabled) {
+ cfg->ugid.uid = ap_uname2id(uid);
+ cfg->ugid.gid = ap_gname2id(gid);
+ cfg->ugid.userdir = 0;
+ cfg->active = 1;
+ }
+ else {
+ fprintf(stderr,
+ "Warning: SuexecUserGroup directive requires SUEXEC wrapper.\n");
+ }
+ return NULL;
+}
+
+static ap_unix_identity_t *get_suexec_id_doer(const request_rec *r)
+{
+ suexec_config_t *cfg =
+ (suexec_config_t *) ap_get_module_config(r->per_dir_config, &suexec_module);
+
+ return cfg->active ? &cfg->ugid : NULL;
+}
+
+#define SUEXEC_POST_CONFIG_USERDATA "suexec_post_config_userdata"
+static int suexec_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ void *reported;
+
+ apr_pool_userdata_get(&reported, SUEXEC_POST_CONFIG_USERDATA,
+ s->process->pool);
+
+ if ((reported == NULL) && unixd_config.suexec_enabled) {
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
+ "suEXEC mechanism enabled (wrapper: %s)", SUEXEC_BIN);
+
+ apr_pool_userdata_set((void *)1, SUEXEC_POST_CONFIG_USERDATA,
+ apr_pool_cleanup_null, s->process->pool);
+ }
+
+ return OK;
+}
+#undef SUEXEC_POST_CONFIG_USERDATA
+
+/*
+ * Define the directives specific to this module. This structure is referenced
+ * later by the 'module' structure.
+ */
+static const command_rec suexec_cmds[] =
+{
+ /* XXX - Another important reason not to allow this in .htaccess is that
+ * the ap_[ug]name2id() is not thread-safe */
+ AP_INIT_TAKE2("SuexecUserGroup", set_suexec_ugid, NULL, RSRC_CONF,
+ "User and group for spawned processes"),
+ { NULL }
+};
+
+static void suexec_hooks(apr_pool_t *p)
+{
+ ap_hook_get_suexec_identity(get_suexec_id_doer,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_post_config(suexec_post_config,NULL,NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA suexec_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_mconfig_for_directory, /* create per-dir config */
+ NULL, /* merge per-dir config */
+ create_mconfig_for_server, /* server config */
+ NULL, /* merge server config */
+ suexec_cmds, /* command table */
+ suexec_hooks /* register hooks */
+};
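
set_suexec_ugid() above only records the target uid/gid; the identity is handed out through the get_suexec_identity hook registered in suexec_hooks(). The fragment below is a hedged illustration of a consumer of that hook: it assumes the hook is declared with AP_DECLARE_HOOK in unixd.h, so a generated runner named ap_run_get_suexec_identity() exists, and log_suexec_identity() itself is purely hypothetical.

#include "httpd.h"
#include "http_log.h"
#include "unixd.h"

/* Hypothetical helper: ask the get_suexec_identity hook whether this
 * request should run under a specific uid/gid, and log the answer. */
static void log_suexec_identity(request_rec *r)
{
    ap_unix_identity_t *ugid = ap_run_get_suexec_identity(r);

    if (ugid == NULL) {
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                      "no suexec identity configured for this request");
        return;
    }
    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                  "suexec identity: uid=%ld gid=%ld userdir=%d",
                  (long)ugid->uid, (long)ugid->gid, ugid->userdir);
}
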
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/mod_suexec.h b/rubbos/app/httpd-2.0.64/modules/generators/mod_suexec.h
new file mode 100644
index 00000000..6adc9ec4
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/mod_suexec.h
@@ -0,0 +1,23 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "unixd.h"
+
+typedef struct {
+ ap_unix_identity_t ugid;
+ int active;
+} suexec_config_t;
+
diff --git a/rubbos/app/httpd-2.0.64/modules/generators/modules.mk b/rubbos/app/httpd-2.0.64/modules/generators/modules.mk
new file mode 100644
index 00000000..5c4be7c6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/generators/modules.mk
@@ -0,0 +1,11 @@
+mod_status.la: mod_status.lo
+ $(MOD_LINK) mod_status.lo $(MOD_STATUS_LDADD)
+mod_autoindex.la: mod_autoindex.lo
+ $(MOD_LINK) mod_autoindex.lo $(MOD_AUTOINDEX_LDADD)
+mod_asis.la: mod_asis.lo
+ $(MOD_LINK) mod_asis.lo $(MOD_ASIS_LDADD)
+mod_cgid.la: mod_cgid.lo
+ $(MOD_LINK) mod_cgid.lo $(MOD_CGID_LDADD)
+DISTCLEAN_TARGETS = modules.mk
+static = mod_status.la mod_autoindex.la mod_asis.la mod_cgid.la
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/http/.deps b/rubbos/app/httpd-2.0.64/modules/http/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/http/.indent.pro b/rubbos/app/httpd-2.0.64/modules/http/.indent.pro
new file mode 100644
index 00000000..a9fbe9f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/rubbos/app/httpd-2.0.64/modules/http/.libs/http_core.o b/rubbos/app/httpd-2.0.64/modules/http/.libs/http_core.o
new file mode 100644
index 00000000..2be9da12
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/.libs/http_core.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/http/.libs/http_protocol.o b/rubbos/app/httpd-2.0.64/modules/http/.libs/http_protocol.o
new file mode 100644
index 00000000..6c2d6acf
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/.libs/http_protocol.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/http/.libs/http_request.o b/rubbos/app/httpd-2.0.64/modules/http/.libs/http_request.o
new file mode 100644
index 00000000..c1a20105
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/.libs/http_request.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_http.a b/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_http.a
new file mode 100644
index 00000000..881fa056
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_http.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_http.la b/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_http.la
new file mode 100644
index 00000000..4f24a965
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_http.la
@@ -0,0 +1,35 @@
+# mod_http.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_http.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_http.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_mime.a b/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_mime.a
new file mode 100644
index 00000000..d798f81b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_mime.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_mime.la b/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_mime.la
new file mode 100644
index 00000000..854bb02d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_mime.la
@@ -0,0 +1,35 @@
+# mod_mime.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_mime.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_mime.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_mime.o b/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_mime.o
new file mode 100644
index 00000000..dae6c77b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/.libs/mod_mime.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/http/Makefile b/rubbos/app/httpd-2.0.64/modules/http/Makefile
new file mode 100644
index 00000000..fc341312
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/http
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/http
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/http
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/http/Makefile.in b/rubbos/app/httpd-2.0.64/modules/http/Makefile.in
new file mode 100644
index 00000000..167b343d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/Makefile.in
@@ -0,0 +1,3 @@
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/http/config2.m4 b/rubbos/app/httpd-2.0.64/modules/http/config2.m4
new file mode 100644
index 00000000..ecb7e6e5
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/config2.m4
@@ -0,0 +1,20 @@
+dnl modules enabled in this directory by default
+
+APACHE_MODPATH_INIT(http)
+
+http_objects="http_core.lo http_protocol.lo http_request.lo"
+
+dnl mod_http should only be built as a static module for now.
+dnl this will hopefully be "fixed" at some point in the future by
+dnl refactoring mod_http and moving some things to the core and
+dnl vice versa so that the core does not depend upon mod_http.
+if test "$enable_http" = "yes"; then
+ enable_http="static"
+elif test "$enable_http" = "shared"; then
+ AC_MSG_ERROR([mod_http can not be built as a shared DSO])
+fi
+
+APACHE_MODULE(http, HTTP protocol handling, $http_objects, , static)
+APACHE_MODULE(mime, mapping of file-extension to MIME, , , yes)
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/http/http_core.c b/rubbos/app/httpd-2.0.64/modules/http/http_core.c
new file mode 100644
index 00000000..c214bc46
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/http_core.c
@@ -0,0 +1,322 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_strings.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#define CORE_PRIVATE
+#include "httpd.h"
+#include "http_config.h"
+#include "http_connection.h"
+#include "http_core.h"
+#include "http_protocol.h" /* For index_of_response(). Grump. */
+#include "http_request.h"
+
+#include "util_filter.h"
+#include "util_ebcdic.h"
+#include "ap_mpm.h"
+#include "scoreboard.h"
+
+#include "mod_core.h"
+
+/* Handles for core filters */
+AP_DECLARE_DATA ap_filter_rec_t *ap_http_input_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_http_header_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_chunk_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_byterange_filter_handle;
+
+static const char *set_keep_alive_timeout(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_LOC_FILE|NOT_IN_LIMIT);
+ if (err != NULL) {
+ return err;
+ }
+
+ cmd->server->keep_alive_timeout = apr_time_from_sec(atoi(arg));
+ return NULL;
+}
+
+static const char *set_keep_alive(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_LOC_FILE|NOT_IN_LIMIT);
+ if (err != NULL) {
+ return err;
+ }
+
+ /* We've changed it to On/Off, but used to use numbers
+ * so we accept anything but "Off" or "0" as "On"
+ */
+ if (!strcasecmp(arg, "off") || !strcmp(arg, "0")) {
+ cmd->server->keep_alive = 0;
+ }
+ else {
+ cmd->server->keep_alive = 1;
+ }
+ return NULL;
+}
+
+static const char *set_keep_alive_max(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_LOC_FILE|NOT_IN_LIMIT);
+ if (err != NULL) {
+ return err;
+ }
+
+ cmd->server->keep_alive_max = atoi(arg);
+ return NULL;
+}
+
+static const command_rec http_cmds[] = {
+ AP_INIT_TAKE1("KeepAliveTimeout", set_keep_alive_timeout, NULL, RSRC_CONF,
+ "Keep-Alive timeout duration (sec)"),
+ AP_INIT_TAKE1("MaxKeepAliveRequests", set_keep_alive_max, NULL, RSRC_CONF,
+ "Maximum number of Keep-Alive requests per connection, or 0 for infinite"),
+ AP_INIT_TAKE1("KeepAlive", set_keep_alive, NULL, RSRC_CONF,
+ "Whether persistent connections should be On or Off"),
+ { NULL }
+};
+
+/*
+ * HTTP/1.1 chunked transfer encoding filter.
+ */
+static apr_status_t chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
+{
+#define ASCII_CRLF "\015\012"
+#define ASCII_ZERO "\060"
+ conn_rec *c = f->r->connection;
+ apr_bucket_brigade *more;
+ apr_bucket *e;
+ apr_status_t rv;
+
+ for (more = NULL; b; b = more, more = NULL) {
+ apr_off_t bytes = 0;
+ apr_bucket *eos = NULL;
+ apr_bucket *flush = NULL;
+ /* XXX: chunk_hdr must remain at this scope since it is used in a
+ * transient bucket.
+ */
+ char chunk_hdr[20]; /* enough space for the snprintf below */
+
+ APR_BRIGADE_FOREACH(e, b) {
+ if (APR_BUCKET_IS_EOS(e)) {
+ /* there shouldn't be anything after the eos */
+ eos = e;
+ break;
+ }
+ if (APR_BUCKET_IS_FLUSH(e)) {
+ flush = e;
+ more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
+ break;
+ }
+ else if (e->length == (apr_size_t)-1) {
+ /* unknown amount of data (e.g. a pipe) */
+ const char *data;
+ apr_size_t len;
+
+ rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ if (len > 0) {
+ /*
+ * There may be a new next bucket representing the
+ * rest of the data stream on which a read() may
+ * block so we pass down what we have so far.
+ */
+ bytes += len;
+ more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
+ break;
+ }
+ else {
+ /* If there was nothing in this bucket then we can
+ * safely move on to the next one without pausing
+ * to pass down what we have counted up so far.
+ */
+ continue;
+ }
+ }
+ else {
+ bytes += e->length;
+ }
+ }
+
+ /*
+ * XXX: if there aren't very many bytes at this point it may
+ * be a good idea to set them aside and return for more,
+ * unless we haven't finished counting this brigade yet.
+ */
+ /* if there are content bytes, then wrap them in a chunk */
+ if (bytes > 0) {
+ apr_size_t hdr_len;
+ /*
+ * Insert the chunk header, specifying the number of bytes in
+ * the chunk.
+ */
+ /* XXX might be nice to have APR_OFF_T_FMT_HEX */
+ hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
+ "%qx" CRLF, (apr_uint64_t)bytes);
+ ap_xlate_proto_to_ascii(chunk_hdr, hdr_len);
+ e = apr_bucket_transient_create(chunk_hdr, hdr_len,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(b, e);
+
+ /*
+ * Insert the end-of-chunk CRLF before an EOS or
+ * FLUSH bucket, or appended to the brigade
+ */
+ e = apr_bucket_immortal_create(ASCII_CRLF, 2, c->bucket_alloc);
+ if (eos != NULL) {
+ APR_BUCKET_INSERT_BEFORE(eos, e);
+ }
+ else if (flush != NULL) {
+ APR_BUCKET_INSERT_BEFORE(flush, e);
+ }
+ else {
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ }
+ }
+
+ /* RFC 2616, Section 3.6.1
+ *
+ * If there is an EOS bucket, then prefix it with:
+ * 1) the last-chunk marker ("0" CRLF)
+ * 2) the trailer
+ * 3) the end-of-chunked body CRLF
+ *
+ * If there is no EOS bucket, then do nothing.
+ *
+ * XXX: it would be nice to combine this with the end-of-chunk
+ * marker above, but this is a bit more straight-forward for
+ * now.
+ */
+ if (eos != NULL) {
+ /* XXX: (2) trailers ... does not yet exist */
+ e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF
+ /* <trailers> */
+ ASCII_CRLF, 5, c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(eos, e);
+ }
+
+ /* pass the brigade to the next filter. */
+ rv = ap_pass_brigade(f->next, b);
+ if (rv != APR_SUCCESS || eos != NULL) {
+ return rv;
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static const char *http_method(const request_rec *r)
+ { return "http"; }
+
+static apr_port_t http_port(const request_rec *r)
+ { return DEFAULT_HTTP_PORT; }
+
+static int ap_process_http_connection(conn_rec *c)
+{
+ request_rec *r;
+ int csd_set = 0;
+ apr_socket_t *csd = NULL;
+
+ /*
+ * Read and process each request found on our connection
+ * until no requests are left or we decide to close.
+ */
+
+ ap_update_child_status(c->sbh, SERVER_BUSY_READ, NULL);
+ while ((r = ap_read_request(c)) != NULL) {
+
+ c->keepalive = AP_CONN_UNKNOWN;
+ /* process the request if it was read without error */
+
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
+ if (r->status == HTTP_OK)
+ ap_process_request(r);
+
+ if (ap_extended_status)
+ ap_increment_counts(c->sbh, r);
+
+ if (c->keepalive != AP_CONN_KEEPALIVE || c->aborted)
+ break;
+
+ ap_update_child_status(c->sbh, SERVER_BUSY_KEEPALIVE, r);
+ apr_pool_destroy(r->pool);
+
+ if (ap_graceful_stop_signalled())
+ break;
+ /* Go straight to select() to wait for the next request */
+ if (!csd_set) {
+ csd = ap_get_module_config(c->conn_config, &core_module);
+ csd_set = 1;
+ }
+ apr_socket_opt_set(csd, APR_INCOMPLETE_READ, 1);
+ }
+
+ return OK;
+}
+
+static int http_create_request(request_rec *r)
+{
+ if (!r->main && !r->prev) {
+ ap_add_output_filter_handle(ap_byterange_filter_handle,
+ NULL, r, r->connection);
+ ap_add_output_filter_handle(ap_content_length_filter_handle,
+ NULL, r, r->connection);
+ ap_add_output_filter_handle(ap_http_header_filter_handle,
+ NULL, r, r->connection);
+ }
+
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_process_connection(ap_process_http_connection,NULL,NULL,
+ APR_HOOK_REALLY_LAST);
+ ap_hook_map_to_storage(ap_send_http_trace,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_http_method(http_method,NULL,NULL,APR_HOOK_REALLY_LAST);
+ ap_hook_default_port(http_port,NULL,NULL,APR_HOOK_REALLY_LAST);
+ ap_hook_create_request(http_create_request, NULL, NULL, APR_HOOK_REALLY_LAST);
+ ap_http_input_filter_handle =
+ ap_register_input_filter("HTTP_IN", ap_http_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_http_header_filter_handle =
+ ap_register_output_filter("HTTP_HEADER", ap_http_header_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_chunk_filter_handle =
+ ap_register_output_filter("CHUNK", chunk_filter,
+ NULL, AP_FTYPE_TRANSCODE);
+ ap_byterange_filter_handle =
+ ap_register_output_filter("BYTERANGE", ap_byterange_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_method_registry_init(p);
+}
+
+module AP_MODULE_DECLARE_DATA http_module = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ http_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
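
chunk_filter() above frames each outgoing brigade in the HTTP/1.1 chunked transfer-coding: a hexadecimal size line, the chunk data, a CRLF, and (at EOS) a zero-length last chunk. The standalone C program below is not httpd code and has no APR dependencies; it simply prints that framing for a fixed string so the wire format the filter builds is easy to see.

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *body = "Hello, chunked world.";
    size_t len = strlen(body);

    /* chunk-size line: length in hex, then CRLF */
    printf("%zx\r\n", len);
    /* chunk data followed by the end-of-chunk CRLF */
    fwrite(body, 1, len, stdout);
    printf("\r\n");
    /* last-chunk marker ("0" CRLF), no trailers, end-of-body CRLF */
    printf("0\r\n\r\n");
    return 0;
}
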
diff --git a/rubbos/app/httpd-2.0.64/modules/http/http_core.lo b/rubbos/app/httpd-2.0.64/modules/http/http_core.lo
new file mode 100644
index 00000000..3ab9338f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/http_core.lo
@@ -0,0 +1,12 @@
+# http_core.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/http_core.o'
+
+# Name of the non-PIC object.
+non_pic_object='http_core.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/http/http_core.o b/rubbos/app/httpd-2.0.64/modules/http/http_core.o
new file mode 100644
index 00000000..2be9da12
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/http_core.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/http/http_protocol.c b/rubbos/app/httpd-2.0.64/modules/http/http_protocol.c
new file mode 100644
index 00000000..163a9091
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/http_protocol.c
@@ -0,0 +1,3212 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_protocol.c --- routines which directly communicate with the client.
+ *
+ * Code originally by Rob McCool; much redone by Robert S. Thau
+ * and the Apache Software Foundation.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_lib.h"
+#include "apr_signal.h"
+
+#define APR_WANT_STDIO /* for sscanf */
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#define CORE_PRIVATE
+#include "util_filter.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_log.h" /* For errors detected in basic auth common
+ * support code... */
+#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
+#include "util_charset.h"
+#include "util_ebcdic.h"
+#include "util_time.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+/* New Apache routine to map status codes into array indices
+ * e.g. 100 -> 0, 101 -> 1, 200 -> 2 ...
+ * The number of status lines must equal the value of RESPONSE_CODES (httpd.h)
+ * and must be listed in order.
+ */
+
+#ifdef UTS21
+/* The second const triggers an assembler bug on UTS 2.1.
+ * Another workaround is to move some code out of this file into another,
+ * but this is easier. Dave Dykstra, 3/31/99
+ */
+static const char * status_lines[RESPONSE_CODES] =
+#else
+static const char * const status_lines[RESPONSE_CODES] =
+#endif
+{
+ "100 Continue",
+ "101 Switching Protocols",
+ "102 Processing",
+#define LEVEL_200 3
+ "200 OK",
+ "201 Created",
+ "202 Accepted",
+ "203 Non-Authoritative Information",
+ "204 No Content",
+ "205 Reset Content",
+ "206 Partial Content",
+ "207 Multi-Status",
+#define LEVEL_300 11
+ "300 Multiple Choices",
+ "301 Moved Permanently",
+ "302 Found",
+ "303 See Other",
+ "304 Not Modified",
+ "305 Use Proxy",
+ "306 unused",
+ "307 Temporary Redirect",
+#define LEVEL_400 19
+ "400 Bad Request",
+ "401 Authorization Required",
+ "402 Payment Required",
+ "403 Forbidden",
+ "404 Not Found",
+ "405 Method Not Allowed",
+ "406 Not Acceptable",
+ "407 Proxy Authentication Required",
+ "408 Request Time-out",
+ "409 Conflict",
+ "410 Gone",
+ "411 Length Required",
+ "412 Precondition Failed",
+ "413 Request Entity Too Large",
+ "414 Request-URI Too Large",
+ "415 Unsupported Media Type",
+ "416 Requested Range Not Satisfiable",
+ "417 Expectation Failed",
+ "418 unused",
+ "419 unused",
+ "420 unused",
+ "421 unused",
+ "422 Unprocessable Entity",
+ "423 Locked",
+ "424 Failed Dependency",
+ /* This is a hack, but it is required for ap_index_of_response
+ * to work with 426.
+ */
+ "425 No code",
+ "426 Upgrade Required",
+#define LEVEL_500 46
+ "500 Internal Server Error",
+ "501 Method Not Implemented",
+ "502 Bad Gateway",
+ "503 Service Temporarily Unavailable",
+ "504 Gateway Time-out",
+ "505 HTTP Version Not Supported",
+ "506 Variant Also Negotiates",
+ "507 Insufficient Storage",
+ "508 unused",
+ "509 unused",
+ "510 Not Extended"
+};
+
+APR_HOOK_STRUCT(
+ APR_HOOK_LINK(insert_error_filter)
+)
+
+AP_IMPLEMENT_HOOK_VOID(insert_error_filter, (request_rec *r), (r))
+
+/* The index of the first bit field that is used to index into a limit
+ * bitmask. M_INVALID + 1 to METHOD_NUMBER_LAST.
+ */
+#define METHOD_NUMBER_FIRST (M_INVALID + 1)
+
+/* The max method number. Method numbers are used to shift bitmasks,
+ * so this cannot exceed 63, and all bits high is equal to -1, which is a
+ * special flag, so the last bit used has index 62.
+ */
+#define METHOD_NUMBER_LAST 62
+
+
+AP_DECLARE(int) ap_set_keepalive(request_rec *r)
+{
+ int ka_sent = 0;
+ int wimpy = ap_find_token(r->pool,
+ apr_table_get(r->headers_out, "Connection"),
+ "close");
+ const char *conn = apr_table_get(r->headers_in, "Connection");
+
+ /* The following convoluted conditional determines whether or not
+ * the current connection should remain persistent after this response
+ * (a.k.a. HTTP Keep-Alive) and whether or not the output message
+ * body should use the HTTP/1.1 chunked transfer-coding. In English,
+ *
+ * IF we have not marked this connection as errored;
+ * and the response body has a defined length due to the status code
+ * being 304 or 204, the request method being HEAD, already
+ * having defined Content-Length or Transfer-Encoding: chunked, or
+ * the request version being HTTP/1.1 and thus capable of being set
+ * as chunked [we know the (r->chunked = 1) side-effect is ugly];
+ * and the server configuration enables keep-alive;
+ * and the server configuration has a reasonable inter-request timeout;
+ * and there is no maximum # requests or the max hasn't been reached;
+ * and the response status does not require a close;
+ * and the response generator has not already indicated close;
+ * and the client did not request non-persistence (Connection: close);
+ * and we haven't been configured to ignore the buggy twit
+ * or they're a buggy twit coming through an HTTP/1.1 proxy
+ * and the client is requesting an HTTP/1.0-style keep-alive
+ * or the client claims to be HTTP/1.1 compliant (perhaps a proxy);
+ * THEN we can be persistent, which requires more headers be output.
+ *
+ * Note that the condition evaluation order is extremely important.
+ */
+ if ((r->connection->keepalive != AP_CONN_CLOSE)
+ && ((r->status == HTTP_NOT_MODIFIED)
+ || (r->status == HTTP_NO_CONTENT)
+ || r->header_only
+ || apr_table_get(r->headers_out, "Content-Length")
+ || ap_find_last_token(r->pool,
+ apr_table_get(r->headers_out,
+ "Transfer-Encoding"),
+ "chunked")
+ || ((r->proto_num >= HTTP_VERSION(1,1))
+ && (r->chunked = 1))) /* THIS CODE IS CORRECT, see above. */
+ && r->server->keep_alive
+ && (r->server->keep_alive_timeout > 0)
+ && ((r->server->keep_alive_max == 0)
+ || (r->server->keep_alive_max > r->connection->keepalives))
+ && !ap_status_drops_connection(r->status)
+ && !wimpy
+ && !ap_find_token(r->pool, conn, "close")
+ && (!apr_table_get(r->subprocess_env, "nokeepalive")
+ || apr_table_get(r->headers_in, "Via"))
+ && ((ka_sent = ap_find_token(r->pool, conn, "keep-alive"))
+ || (r->proto_num >= HTTP_VERSION(1,1)))) {
+ int left = r->server->keep_alive_max - r->connection->keepalives;
+
+ r->connection->keepalive = AP_CONN_KEEPALIVE;
+ r->connection->keepalives++;
+
+ /* If they sent a Keep-Alive token, send one back */
+ if (ka_sent) {
+ if (r->server->keep_alive_max) {
+ apr_table_setn(r->headers_out, "Keep-Alive",
+ apr_psprintf(r->pool, "timeout=%d, max=%d",
+ (int)apr_time_sec(r->server->keep_alive_timeout),
+ left));
+ }
+ else {
+ apr_table_setn(r->headers_out, "Keep-Alive",
+ apr_psprintf(r->pool, "timeout=%d",
+ (int)apr_time_sec(r->server->keep_alive_timeout)));
+ }
+ apr_table_mergen(r->headers_out, "Connection", "Keep-Alive");
+ }
+
+ return 1;
+ }
+
+ /* Otherwise, we need to indicate that we will be closing this
+ * connection immediately after the current response.
+ *
+ * We only really need to send "close" to HTTP/1.1 clients, but we
+ * always send it anyway, because a broken proxy may identify itself
+ * as HTTP/1.0, but pass our request along with our HTTP/1.1 tag
+ * to an HTTP/1.1 client. Better safe than sorry.
+ */
+ if (!wimpy) {
+ apr_table_mergen(r->headers_out, "Connection", "close");
+ }
+
+ r->connection->keepalive = AP_CONN_CLOSE;
+
+ return 0;
+}
+
+AP_DECLARE(int) ap_meets_conditions(request_rec *r)
+{
+ const char *etag;
+ const char *if_match, *if_modified_since, *if_unmodified, *if_nonematch;
+ apr_time_t tmp_time;
+ apr_int64_t mtime;
+
+ /* Check for conditional requests --- note that we only want to do
+ * this if we are successful so far and we are not processing a
+ * subrequest or an ErrorDocument.
+ *
+ * The order of the checks is important, since ETag checks are supposed
+ * to be more accurate than checks relative to the modification time.
+ * However, not all documents are guaranteed to *have* ETags, and some
+ * might have Last-Modified values w/o ETags, so this gets a little
+ * complicated.
+ */
+
+ if (!ap_is_HTTP_SUCCESS(r->status) || r->no_local_copy) {
+ return OK;
+ }
+
+ etag = apr_table_get(r->headers_out, "ETag");
+
+ /* All of our comparisons must be in seconds, because that's the
+ * highest time resolution the HTTP specification allows.
+ */
+ /* XXX: we should define a "time unset" constant */
+ tmp_time = ((r->mtime != 0) ? r->mtime : apr_time_now());
+ mtime = apr_time_sec(tmp_time);
+
+ /* If an If-Match request-header field was given
+ * AND the field value is not "*" (meaning match anything)
+ * AND if our strong ETag does not match any entity tag in that field,
+ * respond with a status of 412 (Precondition Failed).
+ */
+ if ((if_match = apr_table_get(r->headers_in, "If-Match")) != NULL) {
+ if (if_match[0] != '*'
+ && (etag == NULL || etag[0] == 'W'
+ || !ap_find_list_item(r->pool, if_match, etag))) {
+ return HTTP_PRECONDITION_FAILED;
+ }
+ }
+ else {
+ /* Else if a valid If-Unmodified-Since request-header field was given
+ * AND the requested resource has been modified since the time
+ * specified in this field, then the server MUST
+ * respond with a status of 412 (Precondition Failed).
+ */
+ if_unmodified = apr_table_get(r->headers_in, "If-Unmodified-Since");
+ if (if_unmodified != NULL) {
+ apr_time_t ius = apr_date_parse_http(if_unmodified);
+
+ if ((ius != APR_DATE_BAD) && (mtime > apr_time_sec(ius))) {
+ return HTTP_PRECONDITION_FAILED;
+ }
+ }
+ }
+
+ /* If an If-None-Match request-header field was given
+ * AND the field value is "*" (meaning match anything)
+ * OR our ETag matches any of the entity tags in that field, fail.
+ *
+ * If the request method was GET or HEAD, failure means the server
+ * SHOULD respond with a 304 (Not Modified) response.
+ * For all other request methods, failure means the server MUST
+ * respond with a status of 412 (Precondition Failed).
+ *
+ * GET or HEAD allow weak etag comparison, all other methods require
+ * strong comparison. We can only use weak if it's not a range request.
+ */
+ if_nonematch = apr_table_get(r->headers_in, "If-None-Match");
+ if (if_nonematch != NULL) {
+ if (r->method_number == M_GET) {
+ if (if_nonematch[0] == '*') {
+ return HTTP_NOT_MODIFIED;
+ }
+ if (etag != NULL) {
+ if (apr_table_get(r->headers_in, "Range")) {
+ if (etag[0] != 'W'
+ && ap_find_list_item(r->pool, if_nonematch, etag)) {
+ return HTTP_NOT_MODIFIED;
+ }
+ }
+ else if (ap_strstr_c(if_nonematch, etag)) {
+ return HTTP_NOT_MODIFIED;
+ }
+ }
+ }
+ else if (if_nonematch[0] == '*'
+ || (etag != NULL
+ && ap_find_list_item(r->pool, if_nonematch, etag))) {
+ return HTTP_PRECONDITION_FAILED;
+ }
+ }
+ /* Else if a valid If-Modified-Since request-header field was given
+ * AND it is a GET or HEAD request
+ * AND the requested resource has not been modified since the time
+ * specified in this field, then the server MUST
+ * respond with a status of 304 (Not Modified).
+ * A date later than the server's current request time is invalid.
+ */
+ else if ((r->method_number == M_GET)
+ && ((if_modified_since =
+ apr_table_get(r->headers_in,
+ "If-Modified-Since")) != NULL)) {
+ apr_time_t ims_time;
+ apr_int64_t ims, reqtime;
+
+ ims_time = apr_date_parse_http(if_modified_since);
+ ims = apr_time_sec(ims_time);
+ reqtime = apr_time_sec(r->request_time);
+
+ if ((ims >= mtime) && (ims <= reqtime)) {
+ return HTTP_NOT_MODIFIED;
+ }
+ }
+ return OK;
+}
+
+/**
+ * Singleton registry of additional methods. This maps new method names
+ * such as "MYGET" to methnums, which are int offsets into bitmasks.
+ *
+ * This follows the same technique as standard M_GET, M_POST, etc. These
+ * are dynamically assigned when modules are loaded and <Limit GET MYGET>
+ * directives are processed.
+ */
+static apr_hash_t *methods_registry = NULL;
+static int cur_method_number = METHOD_NUMBER_FIRST;
+
+/* internal function to register one method/number pair */
+static void register_one_method(apr_pool_t *p, const char *methname,
+ int methnum)
+{
+ int *pnum = apr_palloc(p, sizeof(*pnum));
+
+ *pnum = methnum;
+ apr_hash_set(methods_registry, methname, APR_HASH_KEY_STRING, pnum);
+}
+
+/* This internal function is used to clear the method registry
+ * and reset the cur_method_number counter.
+ */
+static apr_status_t ap_method_registry_destroy(void *notused)
+{
+ methods_registry = NULL;
+ cur_method_number = METHOD_NUMBER_FIRST;
+ return APR_SUCCESS;
+}
+
+AP_DECLARE(void) ap_method_registry_init(apr_pool_t *p)
+{
+ methods_registry = apr_hash_make(p);
+ apr_pool_cleanup_register(p, NULL,
+ ap_method_registry_destroy,
+ apr_pool_cleanup_null);
+
+ /* put all the standard methods into the registry hash to ease the
+ mapping operations between name and number */
+ register_one_method(p, "GET", M_GET);
+ register_one_method(p, "PUT", M_PUT);
+ register_one_method(p, "POST", M_POST);
+ register_one_method(p, "DELETE", M_DELETE);
+ register_one_method(p, "CONNECT", M_CONNECT);
+ register_one_method(p, "OPTIONS", M_OPTIONS);
+ register_one_method(p, "TRACE", M_TRACE);
+ register_one_method(p, "PATCH", M_PATCH);
+ register_one_method(p, "PROPFIND", M_PROPFIND);
+ register_one_method(p, "PROPPATCH", M_PROPPATCH);
+ register_one_method(p, "MKCOL", M_MKCOL);
+ register_one_method(p, "COPY", M_COPY);
+ register_one_method(p, "MOVE", M_MOVE);
+ register_one_method(p, "LOCK", M_LOCK);
+ register_one_method(p, "UNLOCK", M_UNLOCK);
+ register_one_method(p, "VERSION-CONTROL", M_VERSION_CONTROL);
+ register_one_method(p, "CHECKOUT", M_CHECKOUT);
+ register_one_method(p, "UNCHECKOUT", M_UNCHECKOUT);
+ register_one_method(p, "CHECKIN", M_CHECKIN);
+ register_one_method(p, "UPDATE", M_UPDATE);
+ register_one_method(p, "LABEL", M_LABEL);
+ register_one_method(p, "REPORT", M_REPORT);
+ register_one_method(p, "MKWORKSPACE", M_MKWORKSPACE);
+ register_one_method(p, "MKACTIVITY", M_MKACTIVITY);
+ register_one_method(p, "BASELINE-CONTROL", M_BASELINE_CONTROL);
+ register_one_method(p, "MERGE", M_MERGE);
+}
+
+AP_DECLARE(int) ap_method_register(apr_pool_t *p, const char *methname)
+{
+ int *methnum;
+
+ if (methods_registry == NULL) {
+ ap_method_registry_init(p);
+ }
+
+ if (methname == NULL) {
+ return M_INVALID;
+ }
+
+ /* Check if the method was previously registered. If it was
+ * return the associated method number.
+ */
+ methnum = (int *)apr_hash_get(methods_registry, methname,
+ APR_HASH_KEY_STRING);
+ if (methnum != NULL)
+ return *methnum;
+
+ if (cur_method_number > METHOD_NUMBER_LAST) {
+ /* The method registry has run out of dynamically
+ * assignable method numbers. Log this and return M_INVALID.
+ */
+ ap_log_perror(APLOG_MARK, APLOG_ERR, 0, p,
+ "Maximum new request methods %d reached while "
+ "registering method %s.",
+ METHOD_NUMBER_LAST, methname);
+ return M_INVALID;
+ }
+
+ register_one_method(p, methname, cur_method_number);
+ return cur_method_number++;
+}
+
+#define UNKNOWN_METHOD (-1)
+
+static int lookup_builtin_method(const char *method, apr_size_t len)
+{
+ /* Note: the following code was generated by the "shilka" tool from
+ the "cocom" parsing/compilation toolkit. It is an optimized lookup
+ based on analysis of the input keywords. Postprocessing was done
+ on the shilka output, but the basic structure and analysis is
+ from there. Should new HTTP methods be added, then manual insertion
+ into this code is fine, or simply re-running the shilka tool on
+ the appropriate input. */
+
+ /* Note: it is also quite reasonable to just use our method_registry,
+ but I'm assuming (probably incorrectly) we want more speed here
+ (based on the optimizations the previous code was doing). */
+
+ switch (len)
+ {
+ case 3:
+ switch (method[0])
+ {
+ case 'P':
+ return (method[1] == 'U'
+ && method[2] == 'T'
+ ? M_PUT : UNKNOWN_METHOD);
+ case 'G':
+ return (method[1] == 'E'
+ && method[2] == 'T'
+ ? M_GET : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 4:
+ switch (method[0])
+ {
+ case 'H':
+ return (method[1] == 'E'
+ && method[2] == 'A'
+ && method[3] == 'D'
+ ? M_GET : UNKNOWN_METHOD);
+ case 'P':
+ return (method[1] == 'O'
+ && method[2] == 'S'
+ && method[3] == 'T'
+ ? M_POST : UNKNOWN_METHOD);
+ case 'M':
+ return (method[1] == 'O'
+ && method[2] == 'V'
+ && method[3] == 'E'
+ ? M_MOVE : UNKNOWN_METHOD);
+ case 'L':
+ return (method[1] == 'O'
+ && method[2] == 'C'
+ && method[3] == 'K'
+ ? M_LOCK : UNKNOWN_METHOD);
+ case 'C':
+ return (method[1] == 'O'
+ && method[2] == 'P'
+ && method[3] == 'Y'
+ ? M_COPY : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 5:
+ switch (method[2])
+ {
+ case 'T':
+ return (memcmp(method, "PATCH", 5) == 0
+ ? M_PATCH : UNKNOWN_METHOD);
+ case 'R':
+ return (memcmp(method, "MERGE", 5) == 0
+ ? M_MERGE : UNKNOWN_METHOD);
+ case 'C':
+ return (memcmp(method, "MKCOL", 5) == 0
+ ? M_MKCOL : UNKNOWN_METHOD);
+ case 'B':
+ return (memcmp(method, "LABEL", 5) == 0
+ ? M_LABEL : UNKNOWN_METHOD);
+ case 'A':
+ return (memcmp(method, "TRACE", 5) == 0
+ ? M_TRACE : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 6:
+ switch (method[0])
+ {
+ case 'U':
+ switch (method[5])
+ {
+ case 'K':
+ return (memcmp(method, "UNLOCK", 6) == 0
+ ? M_UNLOCK : UNKNOWN_METHOD);
+ case 'E':
+ return (memcmp(method, "UPDATE", 6) == 0
+ ? M_UPDATE : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+ case 'R':
+ return (memcmp(method, "REPORT", 6) == 0
+ ? M_REPORT : UNKNOWN_METHOD);
+ case 'D':
+ return (memcmp(method, "DELETE", 6) == 0
+ ? M_DELETE : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 7:
+ switch (method[1])
+ {
+ case 'P':
+ return (memcmp(method, "OPTIONS", 7) == 0
+ ? M_OPTIONS : UNKNOWN_METHOD);
+ case 'O':
+ return (memcmp(method, "CONNECT", 7) == 0
+ ? M_CONNECT : UNKNOWN_METHOD);
+ case 'H':
+ return (memcmp(method, "CHECKIN", 7) == 0
+ ? M_CHECKIN : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 8:
+ switch (method[0])
+ {
+ case 'P':
+ return (memcmp(method, "PROPFIND", 8) == 0
+ ? M_PROPFIND : UNKNOWN_METHOD);
+ case 'C':
+ return (memcmp(method, "CHECKOUT", 8) == 0
+ ? M_CHECKOUT : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 9:
+ return (memcmp(method, "PROPPATCH", 9) == 0
+ ? M_PROPPATCH : UNKNOWN_METHOD);
+
+ case 10:
+ switch (method[0])
+ {
+ case 'U':
+ return (memcmp(method, "UNCHECKOUT", 10) == 0
+ ? M_UNCHECKOUT : UNKNOWN_METHOD);
+ case 'M':
+ return (memcmp(method, "MKACTIVITY", 10) == 0
+ ? M_MKACTIVITY : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 11:
+ return (memcmp(method, "MKWORKSPACE", 11) == 0
+ ? M_MKWORKSPACE : UNKNOWN_METHOD);
+
+ case 15:
+ return (memcmp(method, "VERSION-CONTROL", 15) == 0
+ ? M_VERSION_CONTROL : UNKNOWN_METHOD);
+
+ case 16:
+ return (memcmp(method, "BASELINE-CONTROL", 16) == 0
+ ? M_BASELINE_CONTROL : UNKNOWN_METHOD);
+
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ /* NOTREACHED */
+}
+
+/* Get the method number associated with the given string, assumed to
+ * contain an HTTP method. Returns M_INVALID if not recognized.
+ *
+ * This is the first step toward placing method names in a configurable
+ * list. Hopefully it (and other routines) can eventually be moved to
+ * something like a mod_http_methods.c, complete with config stuff.
+ */
+AP_DECLARE(int) ap_method_number_of(const char *method)
+{
+ int len = strlen(method);
+ int which = lookup_builtin_method(method, len);
+
+ if (which != UNKNOWN_METHOD)
+ return which;
+
+ /* check if the method has been dynamically registered */
+ if (methods_registry != NULL) {
+ int *methnum = apr_hash_get(methods_registry, method, len);
+
+ if (methnum != NULL) {
+ return *methnum;
+ }
+ }
+
+ return M_INVALID;
+}
+
+/*
+ * Turn a known method number into a name.
+ */
+AP_DECLARE(const char *) ap_method_name_of(apr_pool_t *p, int methnum)
+{
+ apr_hash_index_t *hi = apr_hash_first(p, methods_registry);
+
+ /* scan through the hash table, looking for a value that matches
+ the provided method number. */
+ for (; hi; hi = apr_hash_next(hi)) {
+ const void *key;
+ void *val;
+
+ apr_hash_this(hi, &key, NULL, &val);
+ if (*(int *)val == methnum)
+ return key;
+ }
+
+ /* it wasn't found in the hash */
+ return NULL;
+}
+
+static long get_chunk_size(char *);
+
+typedef struct http_filter_ctx {
+ apr_off_t remaining;
+ apr_off_t limit;
+ apr_off_t limit_used;
+ enum {
+ BODY_NONE,
+ BODY_LENGTH,
+ BODY_CHUNK
+ } state;
+ int eos_sent;
+} http_ctx_t;
+
+/* This is the HTTP_INPUT filter for HTTP requests and responses from
+ * proxied servers (mod_proxy). It handles chunked and content-length
+ * bodies. This can only be inserted/used after the headers
+ * are successfully parsed.
+ */
+apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
+ ap_input_mode_t mode, apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ apr_bucket *e;
+ http_ctx_t *ctx = f->ctx;
+ apr_status_t rv;
+ apr_off_t totalread;
+
+ /* just get out of the way of things we don't want. */
+ if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) {
+ return ap_get_brigade(f->next, b, mode, block, readbytes);
+ }
+
+ if (!ctx) {
+ const char *tenc, *lenp;
+ f->ctx = ctx = apr_palloc(f->r->pool, sizeof(*ctx));
+ ctx->state = BODY_NONE;
+ ctx->remaining = 0;
+ ctx->limit_used = 0;
+ ctx->eos_sent = 0;
+
+ /* LimitRequestBody does not apply to proxied responses.
+ * Consider implementing this check in its own filter.
+ * Would adding a directive to limit the size of proxied
+ * responses be useful?
+ */
+ if (!f->r->proxyreq) {
+ ctx->limit = ap_get_limit_req_body(f->r);
+ }
+ else {
+ ctx->limit = 0;
+ }
+
+ tenc = apr_table_get(f->r->headers_in, "Transfer-Encoding");
+ lenp = apr_table_get(f->r->headers_in, "Content-Length");
+
+ if (tenc) {
+ if (!strcasecmp(tenc, "chunked")) {
+ ctx->state = BODY_CHUNK;
+ }
+ }
+ else if (lenp) {
+ int conversion_error = 0;
+ char *endstr;
+
+ ctx->state = BODY_LENGTH;
+ errno = 0;
+ ctx->remaining = strtol(lenp, &endstr, 10); /* we depend on ANSI */
+
+ /* This protects us from over/underflow (the errno check),
+ * non-digit chars in the string (excluding leading space)
+ * (the endstr checks) and a negative number. Depending
+ * on the strtol implementation, the errno check may also
+ * trigger on an all whitespace string */
+ if (errno || (endstr && *endstr) || (ctx->remaining < 0)) {
+ conversion_error = 1;
+ }
+
+ if (conversion_error) {
+ apr_bucket_brigade *bb;
+
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
+ "Invalid Content-Length");
+
+ bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+ e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE, NULL,
+ f->r->pool, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ ctx->eos_sent = 1;
+ return ap_pass_brigade(f->r->output_filters, bb);
+ }
+
+ /* If we have a limit in effect and we know the C-L ahead of
+ * time, stop it here if it is invalid.
+ */
+ if (ctx->limit && ctx->limit < ctx->remaining) {
+ apr_bucket_brigade *bb;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
+ "Requested content-length of %" APR_OFF_T_FMT
+ " is larger than the configured limit"
+ " of %" APR_OFF_T_FMT, ctx->remaining, ctx->limit);
+ bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+ e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE, NULL,
+ f->r->pool, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ ctx->eos_sent = 1;
+ return ap_pass_brigade(f->r->output_filters, bb);
+ }
+ }
+
+ /* If we don't have a request entity indicated by the headers, EOS.
+ * (BODY_NONE is a valid intermediate state due to trailers,
+ * but it isn't a valid starting state.)
+ *
+ * RFC 2616 Section 4.4 note 5 states that connection-close
+ * is invalid for a request entity - request bodies must be
+ * denoted by C-L or T-E: chunked.
+ *
+ * Note that since the proxy uses this filter to handle the
+ * proxied *response*, proxy responses MUST be exempt.
+ */
+ if (ctx->state == BODY_NONE && f->r->proxyreq != PROXYREQ_RESPONSE) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ return APR_SUCCESS;
+ }
+
+ /* Since we're about to read data, send 100-Continue if needed.
+ * Only valid on chunked and C-L bodies where the C-L is > 0. */
+ if ((ctx->state == BODY_CHUNK ||
+ (ctx->state == BODY_LENGTH && ctx->remaining > 0)) &&
+ f->r->expecting_100 && f->r->proto_num >= HTTP_VERSION(1,1)) {
+ char *tmp;
+ apr_bucket_brigade *bb;
+
+ tmp = apr_pstrcat(f->r->pool, AP_SERVER_PROTOCOL, " ",
+ status_lines[0], CRLF CRLF, NULL);
+ bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+ e = apr_bucket_pool_create(tmp, strlen(tmp), f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(bb, e);
+ e = apr_bucket_flush_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+
+ ap_pass_brigade(f->c->output_filters, bb);
+ }
+
+ /* We can't read the chunk until after sending 100 if required. */
+ if (ctx->state == BODY_CHUNK) {
+ char line[30];
+ apr_bucket_brigade *bb;
+ apr_size_t len = 30;
+
+ bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+
+ rv = ap_get_brigade(f->next, bb, AP_MODE_GETLINE,
+ APR_BLOCK_READ, 0);
+
+ if (rv == APR_SUCCESS) {
+ rv = apr_brigade_flatten(bb, line, &len);
+ if (rv == APR_SUCCESS) {
+ ctx->remaining = get_chunk_size(line);
+ }
+ }
+ apr_brigade_cleanup(bb);
+
+ /* Detect chunksize error (such as overflow) */
+ if (rv != APR_SUCCESS || ctx->remaining < 0) {
+ ctx->remaining = 0; /* Reset it in case we have to
+ * come back here later */
+ e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE, NULL,
+ f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ ctx->eos_sent = 1;
+ return ap_pass_brigade(f->r->output_filters, bb);
+ }
+
+ if (!ctx->remaining) {
+ /* Handle trailers by calling ap_get_mime_headers again! */
+ ctx->state = BODY_NONE;
+ ap_get_mime_headers(f->r);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ return APR_SUCCESS;
+ }
+ }
+ }
+
+ if (ctx->eos_sent) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ return APR_SUCCESS;
+ }
+
+ if (!ctx->remaining) {
+ switch (ctx->state) {
+ case BODY_NONE:
+ break;
+ case BODY_LENGTH:
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ return APR_SUCCESS;
+ case BODY_CHUNK:
+ {
+ char line[30];
+ apr_bucket_brigade *bb;
+ apr_size_t len = 30;
+
+ bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+
+ /* We need to read the CRLF after the chunk. */
+ rv = ap_get_brigade(f->next, bb, AP_MODE_GETLINE,
+ APR_BLOCK_READ, 0);
+ apr_brigade_cleanup(bb);
+
+ if (rv == APR_SUCCESS) {
+ /* Read the real chunk line. */
+ rv = ap_get_brigade(f->next, bb, AP_MODE_GETLINE,
+ APR_BLOCK_READ, 0);
+ if (rv == APR_SUCCESS) {
+ rv = apr_brigade_flatten(bb, line, &len);
+ if (rv == APR_SUCCESS) {
+ ctx->remaining = get_chunk_size(line);
+ }
+ }
+ apr_brigade_cleanup(bb);
+ }
+
+ /* Detect chunksize error (such as overflow) */
+ if (rv != APR_SUCCESS || ctx->remaining < 0) {
+ ctx->remaining = 0; /* Reset it in case we have to
+ * come back here later */
+ e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE,
+ NULL, f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ ctx->eos_sent = 1;
+ return ap_pass_brigade(f->r->output_filters, bb);
+ }
+
+ if (!ctx->remaining) {
+ /* Handle trailers by calling ap_get_mime_headers again! */
+ ctx->state = BODY_NONE;
+ ap_get_mime_headers(f->r);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ return APR_SUCCESS;
+ }
+ }
+ break;
+ }
+ }
+
+ /* Ensure that the caller can not go over our boundary point. */
+ if (ctx->state == BODY_LENGTH || ctx->state == BODY_CHUNK) {
+ if (ctx->remaining < readbytes) {
+ readbytes = ctx->remaining;
+ }
+ AP_DEBUG_ASSERT(readbytes > 0);
+ }
+
+ rv = ap_get_brigade(f->next, b, mode, block, readbytes);
+
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ /* How many bytes did we just read? */
+ apr_brigade_length(b, 0, &totalread);
+
+ /* If this happens, we have a bucket of unknown length. Die because
+ * it means our assumptions have changed. */
+ AP_DEBUG_ASSERT(totalread >= 0);
+
+ if (ctx->state != BODY_NONE) {
+ ctx->remaining -= totalread;
+ }
+
+ /* If we have no more bytes remaining on a C-L request,
+     * save the caller a roundtrip to discover EOS.
+ */
+ if (ctx->state == BODY_LENGTH && ctx->remaining == 0) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ }
+
+ /* We have a limit in effect. */
+ if (ctx->limit) {
+ /* FIXME: Note that we might get slightly confused on chunked inputs
+ * as we'd need to compensate for the chunk lengths which may not
+ * really count. This seems to be up for interpretation. */
+ ctx->limit_used += totalread;
+ if (ctx->limit < ctx->limit_used) {
+ apr_bucket_brigade *bb;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
+ "Read content-length of %" APR_OFF_T_FMT
+ " is larger than the configured limit"
+ " of %" APR_OFF_T_FMT, ctx->limit_used, ctx->limit);
+ bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+ e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE, NULL,
+ f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ ctx->eos_sent = 1;
+ return ap_pass_brigade(f->r->output_filters, bb);
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+/* The index is found by its offset from the x00 code of each level.
+ * Although this is fast, it will need to be replaced if some nutcase
+ * decides to define a high-numbered code before the lower numbers.
+ * If that sad event occurs, replace the code below with a linear search
+ * from status_lines[shortcut[i]] to status_lines[shortcut[i+1]-1];
+ */
+AP_DECLARE(int) ap_index_of_response(int status)
+{
+ static int shortcut[6] = {0, LEVEL_200, LEVEL_300, LEVEL_400,
+ LEVEL_500, RESPONSE_CODES};
+ int i, pos;
+
+ if (status < 100) { /* Below 100 is illegal for HTTP status */
+ return LEVEL_500;
+ }
+
+ for (i = 0; i < 5; i++) {
+ status -= 100;
+ if (status < 100) {
+ pos = (status + shortcut[i]);
+ if (pos < shortcut[i + 1]) {
+ return pos;
+ }
+ else {
+ return LEVEL_500; /* status unknown (falls in gap) */
+ }
+ }
+ }
+ return LEVEL_500; /* 600 or above is also illegal */
+}
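+
+/* Illustrative sketch (not part of the original source): how the shortcut
+ * table resolves a status code, assuming the stock status_lines layout:
+ *
+ *     ap_index_of_response(404);   returns LEVEL_400 + 4 ("404 Not Found")
+ *     ap_index_of_response(200);   returns LEVEL_200     ("200 OK")
+ *     ap_index_of_response(42);    below 100  -> LEVEL_500
+ *     ap_index_of_response(734);   600 and up -> LEVEL_500
+ *
+ * The returned index is what ap_get_status_line() below feeds into
+ * status_lines[].
+ */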
+
+AP_DECLARE(const char *) ap_get_status_line(int status)
+{
+ return status_lines[ap_index_of_response(status)];
+}
+
+typedef struct header_struct {
+ apr_pool_t *pool;
+ apr_bucket_brigade *bb;
+} header_struct;
+
+/* Send a single HTTP header field to the client. Note that this function
+ * is used in calls to table_do(), so their interfaces are co-dependent.
+ * In other words, don't change this one without checking table_do in alloc.c.
+ * It returns true unless there was a write error of some kind.
+ */
+static int form_header_field(header_struct *h,
+ const char *fieldname, const char *fieldval)
+{
+#if APR_CHARSET_EBCDIC
+ char *headfield;
+ apr_size_t len;
+ apr_size_t name_len;
+ apr_size_t val_len;
+ char *next;
+
+ name_len = strlen(fieldname);
+ val_len = strlen(fieldval);
+ len = name_len + val_len + 4; /* 4 for ": " plus CRLF */
+ headfield = (char *)apr_palloc(h->pool, len + 1);
+ memcpy(headfield, fieldname, name_len);
+ next = headfield + name_len;
+ *next++ = ':';
+ *next++ = ' ';
+ memcpy(next, fieldval, val_len);
+ next += val_len;
+ *next++ = CR;
+ *next++ = LF;
+ *next = 0;
+ ap_xlate_proto_to_ascii(headfield, len);
+ apr_brigade_write(h->bb, NULL, NULL, headfield, len);
+#else
+ struct iovec vec[4];
+ struct iovec *v = vec;
+ v->iov_base = (void *)fieldname;
+ v->iov_len = strlen(fieldname);
+ v++;
+ v->iov_base = ": ";
+ v->iov_len = sizeof(": ") - 1;
+ v++;
+ v->iov_base = (void *)fieldval;
+ v->iov_len = strlen(fieldval);
+ v++;
+ v->iov_base = CRLF;
+ v->iov_len = sizeof(CRLF) - 1;
+ apr_brigade_writev(h->bb, NULL, NULL, vec, 4);
+#endif /* !APR_CHARSET_EBCDIC */
+ return 1;
+}
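+
+/* Usage sketch (illustrative, not in the original source): callers fill in
+ * a header_struct and either call this directly or hand it to
+ * apr_table_do(), as the code further down does.  For a single field,
+ *
+ *     header_struct h;
+ *     h.pool = r->pool;
+ *     h.bb   = bb;
+ *     form_header_field(&h, "Content-Type", "text/html");
+ *
+ * appends "Content-Type: text/html" CRLF to the brigade h.bb.
+ */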
+
+/* Send a request's HTTP response headers to the client.
+ */
+static apr_status_t send_all_header_fields(header_struct *h,
+ const request_rec *r)
+{
+ const apr_array_header_t *elts;
+ const apr_table_entry_t *t_elt;
+ const apr_table_entry_t *t_end;
+ struct iovec *vec;
+ struct iovec *vec_next;
+
+ elts = apr_table_elts(r->headers_out);
+ if (elts->nelts == 0) {
+ return APR_SUCCESS;
+ }
+ t_elt = (const apr_table_entry_t *)(elts->elts);
+ t_end = t_elt + elts->nelts;
+ vec = (struct iovec *)apr_palloc(h->pool, 4 * elts->nelts *
+ sizeof(struct iovec));
+ vec_next = vec;
+
+ /* For each field, generate
+ * name ": " value CRLF
+ */
+ do {
+ vec_next->iov_base = (void*)(t_elt->key);
+ vec_next->iov_len = strlen(t_elt->key);
+ vec_next++;
+ vec_next->iov_base = ": ";
+ vec_next->iov_len = sizeof(": ") - 1;
+ vec_next++;
+ vec_next->iov_base = (void*)(t_elt->val);
+ vec_next->iov_len = strlen(t_elt->val);
+ vec_next++;
+ vec_next->iov_base = CRLF;
+ vec_next->iov_len = sizeof(CRLF) - 1;
+ vec_next++;
+ t_elt++;
+ } while (t_elt < t_end);
+
+#if APR_CHARSET_EBCDIC
+ {
+ apr_size_t len;
+ char *tmp = apr_pstrcatv(r->pool, vec, vec_next - vec, &len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ return apr_brigade_write(h->bb, NULL, NULL, tmp, len);
+ }
+#else
+ return apr_brigade_writev(h->bb, NULL, NULL, vec, vec_next - vec);
+#endif
+}
+
+/* Confirm that the status line is well-formed and matches r->status.
+ * Otherwise, a filter may have negated the status line set by a
+ * handler.
+ * Zap r->status_line if bad.
+ */
+static void validate_status_line(request_rec *r)
+{
+ char *end;
+
+ if (r->status_line
+ && (strlen(r->status_line) <= 4
+ || apr_strtoi64(r->status_line, &end, 10) != r->status
+ || *end != ' '
+ || (end - 3) != r->status_line)) {
+ r->status_line = NULL;
+ }
+}
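+
+/* Illustrative examples (not part of the original source) of what the
+ * check above accepts and rejects, assuming r->status == 404:
+ *
+ *     r->status_line = "404 Not Found";   kept: three digits, matches status
+ *     r->status_line = "Not Found";       zapped: does not start with "404"
+ *     r->status_line = "200 OK";          zapped: code disagrees with 404
+ *     r->status_line = "404";             zapped: too short, no reason phrase
+ */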
+
+/*
+ * Determine the protocol to use for the response. Potentially downgrade
+ * to HTTP/1.0 in some situations and/or turn off keepalives.
+ *
+ * also prepare r->status_line.
+ */
+static void basic_http_header_check(request_rec *r,
+ const char **protocol)
+{
+ if (r->assbackwards) {
+ /* no such thing as a response protocol */
+ return;
+ }
+
+ validate_status_line(r);
+
+ if (!r->status_line) {
+ r->status_line = status_lines[ap_index_of_response(r->status)];
+ }
+
+ /* Note that we must downgrade before checking for force responses. */
+ if (r->proto_num > HTTP_VERSION(1,0)
+ && apr_table_get(r->subprocess_env, "downgrade-1.0")) {
+ r->proto_num = HTTP_VERSION(1,0);
+ }
+
+ /* kludge around broken browsers when indicated by force-response-1.0
+ */
+ if (r->proto_num == HTTP_VERSION(1,0)
+ && apr_table_get(r->subprocess_env, "force-response-1.0")) {
+ *protocol = "HTTP/1.0";
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ *protocol = AP_SERVER_PROTOCOL;
+ }
+
+}
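+
+/* Configuration sketch (illustrative): the downgrade-1.0 and
+ * force-response-1.0 checks above are normally driven by per-client
+ * environment variables, e.g. the stock httpd.conf contains lines such as
+ *
+ *     BrowserMatch "RealPlayer 4\.0" force-response-1.0
+ *     BrowserMatch "Java/1\.0"       force-response-1.0
+ *
+ * When such a client sends an HTTP/1.0 request, the status line is
+ * emitted as "HTTP/1.0" and keepalive is turned off, per the code above.
+ */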
+
+/* fill "bb" with a barebones/initial HTTP response header */
+static void basic_http_header(request_rec *r, apr_bucket_brigade *bb,
+ const char *protocol)
+{
+ char *date;
+ const char *server;
+ header_struct h;
+ struct iovec vec[4];
+
+ if (r->assbackwards) {
+ /* there are no headers to send */
+ return;
+ }
+
+ /* Output the HTTP/1.x Status-Line and the Date and Server fields */
+
+ vec[0].iov_base = (void *)protocol;
+ vec[0].iov_len = strlen(protocol);
+ vec[1].iov_base = (void *)" ";
+ vec[1].iov_len = sizeof(" ") - 1;
+ vec[2].iov_base = (void *)(r->status_line);
+ vec[2].iov_len = strlen(r->status_line);
+ vec[3].iov_base = (void *)CRLF;
+ vec[3].iov_len = sizeof(CRLF) - 1;
+#if APR_CHARSET_EBCDIC
+ {
+ char *tmp;
+ apr_size_t len;
+ tmp = apr_pstrcatv(r->pool, vec, 4, &len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ apr_brigade_write(bb, NULL, NULL, tmp, len);
+ }
+#else
+ apr_brigade_writev(bb, NULL, NULL, vec, 4);
+#endif
+
+ date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+
+ h.pool = r->pool;
+ h.bb = bb;
+ form_header_field(&h, "Date", date);
+
+ /* keep the set-by-proxy server header, otherwise
+ * generate a new server header */
+ if (r->proxyreq != PROXYREQ_NONE) {
+ server = apr_table_get(r->headers_out, "Server");
+ if (server) {
+ form_header_field(&h, "Server", server);
+ }
+ }
+ else {
+ form_header_field(&h, "Server", ap_get_server_version());
+ }
+
+ /* unset so we don't send them again */
+ apr_table_unset(r->headers_out, "Date"); /* Avoid bogosity */
+ apr_table_unset(r->headers_out, "Server");
+}
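+
+/* Illustrative output (not in the original source): for a typical 200
+ * response the brigade now begins with something like
+ *
+ *     HTTP/1.1 200 OK\r\n
+ *     Date: Sat, 20 Nov 2010 12:00:00 GMT\r\n
+ *     Server: Apache/2.0.64 (Unix)\r\n
+ *
+ * The remaining fields and the terminating blank line are appended later
+ * by ap_http_header_filter() and terminate_header().
+ */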
+
+AP_DECLARE(void) ap_basic_http_header(request_rec *r, apr_bucket_brigade *bb)
+{
+ const char *protocol;
+
+ basic_http_header_check(r, &protocol);
+ basic_http_header(r, bb, protocol);
+}
+
+/* Navigator versions 2.x, 3.x and 4.0 betas up to and including 4.0b2
+ * have a header parsing bug. If the terminating \r\n occurs starting
+ * at offset 256, 257 or 258 of output then it will not properly parse
+ * the headers. Curiously it doesn't exhibit this problem at 512, 513.
+ * We are guessing that this is because their initial read of a new request
+ * uses a 256 byte buffer, and subsequent reads use a larger buffer.
+ * So the problem might exist at different offsets as well.
+ *
+ * This should also work on keepalive connections assuming they use the
+ * same small buffer for the first read of each new request.
+ *
+ * At any rate, we check the bytes written so far and, if we are about to
+ * tickle the bug, we instead insert a bogus padding header. Since the bug
+ * manifests as a broken image in Navigator, users blame the server. :(
+ * It is more expensive to check the User-Agent than it is to just add the
+ * bytes, so we haven't used the BrowserMatch feature here.
+ */
+static void terminate_header(apr_bucket_brigade *bb)
+{
+ char tmp[] = "X-Pad: avoid browser bug" CRLF;
+ char crlf[] = CRLF;
+ apr_off_t len;
+ apr_size_t buflen;
+
+ (void) apr_brigade_length(bb, 1, &len);
+
+ if (len >= 255 && len <= 257) {
+ buflen = strlen(tmp);
+ ap_xlate_proto_to_ascii(tmp, buflen);
+ apr_brigade_write(bb, NULL, NULL, tmp, buflen);
+ }
+ buflen = strlen(crlf);
+ ap_xlate_proto_to_ascii(crlf, buflen);
+ apr_brigade_write(bb, NULL, NULL, crlf, buflen);
+}
+
+/* Build the Allow field-value from the request handler method mask.
+ * Note that we always allow TRACE, since it is handled below.
+ */
+static char *make_allow(request_rec *r)
+{
+ char *list;
+ apr_int64_t mask;
+ apr_array_header_t *allow = apr_array_make(r->pool, 10, sizeof(char *));
+ apr_hash_index_t *hi = apr_hash_first(r->pool, methods_registry);
+ /* For TRACE below */
+ core_server_config *conf =
+ ap_get_module_config(r->server->module_config, &core_module);
+
+ mask = r->allowed_methods->method_mask;
+
+ for (; hi; hi = apr_hash_next(hi)) {
+ const void *key;
+ void *val;
+
+ apr_hash_this(hi, &key, NULL, &val);
+ if ((mask & (AP_METHOD_BIT << *(int *)val)) != 0) {
+ *(const char **)apr_array_push(allow) = key;
+
+ /* the M_GET method actually refers to two methods */
+ if (*(int *)val == M_GET)
+ *(const char **)apr_array_push(allow) = "HEAD";
+ }
+ }
+
+ /* TRACE is tested on a per-server basis */
+ if (conf->trace_enable != AP_TRACE_DISABLE)
+ *(const char **)apr_array_push(allow) = "TRACE";
+
+ list = apr_array_pstrcat(r->pool, allow, ',');
+
+ /* ### this is rather annoying. we should enforce registration of
+ ### these methods */
+ if ((mask & (AP_METHOD_BIT << M_INVALID))
+ && (r->allowed_methods->method_list != NULL)
+ && (r->allowed_methods->method_list->nelts != 0)) {
+ int i;
+ char **xmethod = (char **) r->allowed_methods->method_list->elts;
+
+ /*
+ * Append all of the elements of r->allowed_methods->method_list
+ */
+ for (i = 0; i < r->allowed_methods->method_list->nelts; ++i) {
+ list = apr_pstrcat(r->pool, list, ",", xmethod[i], NULL);
+ }
+ }
+
+ return list;
+}
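+
+/* Illustrative result (not in the original source): for a handler whose
+ * method mask allows GET and OPTIONS, with TRACE enabled server-wide,
+ * the list built above reads roughly
+ *
+ *     "GET,HEAD,OPTIONS,TRACE"
+ *
+ * (the order depends on hash iteration).  HEAD rides along whenever M_GET
+ * is allowed, and any unregistered methods in method_list are appended
+ * at the end.
+ */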
+
+AP_DECLARE_NONSTD(int) ap_send_http_trace(request_rec *r)
+{
+ core_server_config *conf;
+ int rv;
+ apr_bucket_brigade *bb;
+ header_struct h;
+ apr_bucket *b;
+ int body;
+ char *bodyread = NULL, *bodyoff;
+ apr_size_t bodylen = 0;
+ apr_size_t bodybuf;
+ long res;
+
+ if (r->method_number != M_TRACE) {
+ return DECLINED;
+ }
+
+ /* Get the original request */
+ while (r->prev) {
+ r = r->prev;
+ }
+ conf = (core_server_config *)ap_get_module_config(r->server->module_config,
+ &core_module);
+
+ if (conf->trace_enable == AP_TRACE_DISABLE) {
+ apr_table_setn(r->notes, "error-notes",
+ "TRACE denied by server configuration");
+ return HTTP_FORBIDDEN;
+ }
+
+ if (conf->trace_enable == AP_TRACE_EXTENDED)
+ /* XX should be = REQUEST_CHUNKED_PASS */
+ body = REQUEST_CHUNKED_DECHUNK;
+ else
+ body = REQUEST_NO_BODY;
+
+ if ((rv = ap_setup_client_block(r, body))) {
+ if (rv == HTTP_REQUEST_ENTITY_TOO_LARGE)
+ apr_table_setn(r->notes, "error-notes",
+ "TRACE with a request body is not allowed");
+ return rv;
+ }
+
+ if (ap_should_client_block(r)) {
+
+ if (r->remaining > 0) {
+ if (r->remaining > 65536) {
+ apr_table_setn(r->notes, "error-notes",
+ "Extended TRACE request bodies cannot exceed 64k");
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+ /* always 32 extra bytes to catch chunk header exceptions */
+ bodybuf = (apr_size_t)r->remaining + 32;
+ }
+ else {
+ /* Add an extra 8192 for chunk headers */
+ bodybuf = 73730;
+ }
+
+ bodyoff = bodyread = apr_palloc(r->pool, bodybuf);
+
+ /* only while we have enough for a chunked header */
+ while ((!bodylen || bodybuf >= 32) &&
+ (res = ap_get_client_block(r, bodyoff, bodybuf)) > 0) {
+ bodylen += res;
+ bodybuf -= res;
+ bodyoff += res;
+ }
+ if (res > 0 && bodybuf < 32) {
+ /* discard_rest_of_request_body into our buffer */
+ while (ap_get_client_block(r, bodyread, bodylen) > 0)
+ ;
+ apr_table_setn(r->notes, "error-notes",
+ "Extended TRACE request bodies cannot exceed 64k");
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
+ if (res < 0) {
+ return HTTP_BAD_REQUEST;
+ }
+ }
+
+ ap_set_content_type(r, "message/http");
+
+ /* Now we recreate the request, and echo it back */
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ apr_brigade_putstrs(bb, NULL, NULL, r->the_request, CRLF, NULL);
+ h.pool = r->pool;
+ h.bb = bb;
+ apr_table_do((int (*) (void *, const char *, const char *))
+ form_header_field, (void *) &h, r->headers_in, NULL);
+ apr_brigade_puts(bb, NULL, NULL, CRLF);
+
+ /* If configured to accept a body, echo the body */
+ if (bodylen) {
+ b = apr_bucket_pool_create(bodyread, bodylen,
+ r->pool, bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ }
+
+ ap_pass_brigade(r->output_filters, bb);
+
+ return DONE;
+}
+
+AP_DECLARE(int) ap_send_http_options(request_rec *r)
+{
+ if (r->assbackwards) {
+ return DECLINED;
+ }
+
+ apr_table_setn(r->headers_out, "Allow", make_allow(r));
+
+ /* the request finalization will send an EOS, which will flush all
+ * the headers out (including the Allow header)
+ */
+
+ return OK;
+}
+
+/* This routine is called by apr_table_do and merges all instances of
+ * the passed field values into a single array that will be further
+ * processed by some later routine. Originally intended to help split
+ * and recombine multiple Vary fields, though it is generic to any field
+ * consisting of comma/space-separated tokens.
+ */
+static int uniq_field_values(void *d, const char *key, const char *val)
+{
+ apr_array_header_t *values;
+ char *start;
+ char *e;
+ char **strpp;
+ int i;
+
+ values = (apr_array_header_t *)d;
+
+ e = apr_pstrdup(values->pool, val);
+
+ do {
+ /* Find a non-empty fieldname */
+
+ while (*e == ',' || apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e == '\0') {
+ break;
+ }
+ start = e;
+ while (*e != '\0' && *e != ',' && !apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e != '\0') {
+ *e++ = '\0';
+ }
+
+ /* Now add it to values if it isn't already represented.
+ * Could be replaced by a ap_array_strcasecmp() if we had one.
+ */
+ for (i = 0, strpp = (char **) values->elts; i < values->nelts;
+ ++i, ++strpp) {
+ if (*strpp && strcasecmp(*strpp, start) == 0) {
+ break;
+ }
+ }
+ if (i == values->nelts) { /* if not found */
+ *(char **)apr_array_push(values) = start;
+ }
+ } while (*e != '\0');
+
+ return 1;
+}
+
+/*
+ * Since some clients choke violently on multiple Vary fields, or
+ * Vary fields with duplicate tokens, combine any multiples and remove
+ * any duplicates.
+ */
+static void fixup_vary(request_rec *r)
+{
+ apr_array_header_t *varies;
+
+ varies = apr_array_make(r->pool, 5, sizeof(char *));
+
+ /* Extract all Vary fields from the headers_out, separate each into
+ * its comma-separated fieldname values, and then add them to varies
+ * if not already present in the array.
+ */
+ apr_table_do((int (*)(void *, const char *, const char *))uniq_field_values,
+ (void *) varies, r->headers_out, "Vary", NULL);
+
+ /* If we found any, replace old Vary fields with unique-ified value */
+
+ if (varies->nelts > 0) {
+ apr_table_setn(r->headers_out, "Vary",
+ apr_array_pstrcat(r->pool, varies, ','));
+ }
+}
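+
+/* Illustrative example (not part of the original source): given the
+ * outgoing fields
+ *
+ *     Vary: Accept, Accept-Language
+ *     Vary: accept, Cookie
+ *
+ * uniq_field_values() splits the tokens and drops case-insensitive
+ * duplicates, so the table ends up with a single field roughly equal to
+ *
+ *     Vary: Accept,Accept-Language,Cookie
+ */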
+
+AP_DECLARE(void) ap_set_content_type(request_rec *r, const char *ct)
+{
+ if (!ct) {
+ r->content_type = NULL;
+ }
+ else if (!r->content_type || strcmp(r->content_type, ct)) {
+ r->content_type = ct;
+
+ /* Insert filters requested by the AddOutputFiltersByType
+ * configuration directive. Content-type filters must be
+ * inserted after the content handlers have run because
+         * only then do we reliably know the content-type.
+ */
+ ap_add_output_filters_by_type(r);
+ }
+}
+
+typedef struct header_filter_ctx {
+ int headers_sent;
+} header_filter_ctx;
+
+AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
+ apr_bucket_brigade *b)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ const char *clheader;
+ const char *protocol;
+ apr_bucket *e;
+ apr_bucket_brigade *b2;
+ header_struct h;
+ header_filter_ctx *ctx = f->ctx;
+
+ AP_DEBUG_ASSERT(!r->main);
+
+ if (r->header_only) {
+ if (!ctx) {
+ ctx = f->ctx = apr_pcalloc(r->pool, sizeof(header_filter_ctx));
+ }
+ else if (ctx->headers_sent) {
+ apr_brigade_destroy(b);
+ return OK;
+ }
+ }
+
+ APR_BRIGADE_FOREACH(e, b) {
+ if (e->type == &ap_bucket_type_error) {
+ ap_bucket_error *eb = e->data;
+
+ ap_die(eb->status, r);
+ return AP_FILTER_ERROR;
+ }
+ }
+
+ if (r->assbackwards) {
+ r->sent_bodyct = 1;
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, b);
+ }
+
+ /*
+ * Now that we are ready to send a response, we need to combine the two
+ * header field tables into a single table. If we don't do this, our
+ * later attempts to set or unset a given fieldname might be bypassed.
+ */
+ if (!apr_is_empty_table(r->err_headers_out)) {
+ r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
+ r->headers_out);
+ }
+
+ /*
+ * Remove the 'Vary' header field if the client can't handle it.
+ * Since this will have nasty effects on HTTP/1.1 caches, force
+ * the response into HTTP/1.0 mode.
+ *
+ * Note: the force-response-1.0 should come before the call to
+ * basic_http_header_check()
+ */
+ if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) {
+ apr_table_unset(r->headers_out, "Vary");
+ r->proto_num = HTTP_VERSION(1,0);
+ apr_table_set(r->subprocess_env, "force-response-1.0", "1");
+ }
+ else {
+ fixup_vary(r);
+ }
+
+ /*
+ * Now remove any ETag response header field if earlier processing
+ * says so (such as a 'FileETag None' directive).
+ */
+ if (apr_table_get(r->notes, "no-etag") != NULL) {
+ apr_table_unset(r->headers_out, "ETag");
+ }
+
+ /* determine the protocol and whether we should use keepalives. */
+ basic_http_header_check(r, &protocol);
+ ap_set_keepalive(r);
+
+ if (r->chunked) {
+ apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked");
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ apr_table_setn(r->headers_out, "Content-Type",
+ ap_make_content_type(r, r->content_type));
+
+ if (r->content_encoding) {
+ apr_table_setn(r->headers_out, "Content-Encoding",
+ r->content_encoding);
+ }
+
+ if (!apr_is_empty_array(r->content_languages)) {
+ int i;
+ char **languages = (char **)(r->content_languages->elts);
+ for (i = 0; i < r->content_languages->nelts; ++i) {
+ apr_table_mergen(r->headers_out, "Content-Language", languages[i]);
+ }
+ }
+
+ /*
+ * Control cachability for non-cachable responses if not already set by
+ * some other part of the server configuration.
+ */
+ if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
+ char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ apr_table_addn(r->headers_out, "Expires", date);
+ }
+
+    /* This is a hack, but I can't find any way around it. The idea is that
+ * we don't want to send out 0 Content-Lengths if it is a head request.
+ * This happens when modules try to outsmart the server, and return
+ * if they see a HEAD request. Apache 1.3 handlers were supposed to
+ * just return in that situation, and the core handled the HEAD. In
+ * 2.0, if a handler returns, then the core sends an EOS bucket down
+ * the filter stack, and the content-length filter computes a C-L of
+ * zero and that gets put in the headers, and we end up sending a
+ * zero C-L to the client. We can't just remove the C-L filter,
+     * because well-behaved 2.0 handlers will send their data down the stack,
+ * and we will compute a real C-L for the head request. RBB
+ */
+ if (r->header_only
+ && (clheader = apr_table_get(r->headers_out, "Content-Length"))
+ && !strcmp(clheader, "0")) {
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ b2 = apr_brigade_create(r->pool, c->bucket_alloc);
+ basic_http_header(r, b2, protocol);
+
+ h.pool = r->pool;
+ h.bb = b2;
+
+ if (r->status == HTTP_NOT_MODIFIED) {
+ apr_table_do((int (*)(void *, const char *, const char *)) form_header_field,
+ (void *) &h, r->headers_out,
+ "Connection",
+ "Keep-Alive",
+ "ETag",
+ "Content-Location",
+ "Expires",
+ "Cache-Control",
+ "Vary",
+ "Warning",
+ "WWW-Authenticate",
+ "Proxy-Authenticate",
+ "Set-Cookie",
+ "Set-Cookie2",
+ NULL);
+ }
+ else {
+ send_all_header_fields(&h, r);
+ }
+
+ terminate_header(b2);
+
+ ap_pass_brigade(f->next, b2);
+
+ if (r->header_only) {
+ apr_brigade_destroy(b);
+ ctx->headers_sent = 1;
+ return OK;
+ }
+
+ r->sent_bodyct = 1; /* Whatever follows is real body stuff... */
+
+ if (r->chunked) {
+ /* We can't add this filter until we have already sent the headers.
+ * If we add it before this point, then the headers will be chunked
+ * as well, and that is just wrong.
+ */
+ ap_add_output_filter("CHUNK", NULL, r, r->connection);
+ }
+
+ /* Don't remove this filter until after we have added the CHUNK filter.
+ * Otherwise, f->next won't be the CHUNK filter and thus the first
+ * brigade won't be chunked properly.
+ */
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, b);
+}
+
+/* Here we deal with getting the request message body from the client.
+ * Whether or not the request contains a body is signaled by the presence
+ * of a non-zero Content-Length or by a Transfer-Encoding: chunked.
+ *
+ * Note that this is more complicated than it was in Apache 1.1 and prior
+ * versions, because chunked support means that the module does less.
+ *
+ * The proper procedure is this:
+ *
+ * 1. Call setup_client_block() near the beginning of the request
+ * handler. This will set up all the necessary properties, and will
+ * return either OK, or an error code. If the latter, the module should
+ * return that error code. The second parameter selects the policy to
+ * apply if the request message indicates a body, and how a chunked
+ * transfer-coding should be interpreted. Choose one of
+ *
+ * REQUEST_NO_BODY Send 413 error if message has any body
+ * REQUEST_CHUNKED_ERROR Send 411 error if body without Content-Length
+ * REQUEST_CHUNKED_DECHUNK If chunked, remove the chunks for me.
+ *
+ * In order to use the last two options, the caller MUST provide a buffer
+ * large enough to hold a chunk-size line, including any extensions.
+ *
+ * 2. When you are ready to read a body (if any), call should_client_block().
+ * This will tell the module whether or not to read input. If it is 0,
+ * the module should assume that there is no message body to read.
+ * This step also sends a 100 Continue response to HTTP/1.1 clients,
+ * so should not be called until the module is *definitely* ready to
+ * read content. (otherwise, the point of the 100 response is defeated).
+ * Never call this function more than once.
+ *
+ * 3. Finally, call get_client_block in a loop. Pass it a buffer and its size.
+ * It will put data into the buffer (not necessarily a full buffer), and
+ * return the length of the input block. When it is done reading, it will
+ * return 0 if EOF, or -1 if there was an error.
+ * If an error occurs on input, we force an end to keepalive.
+ */
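+
+/* A minimal usage sketch of the three calls described above (illustrative
+ * only, not part of the original source).  A handler that simply drains
+ * the request body into a fixed buffer might look like:
+ *
+ *     int rc;
+ *     long nread;
+ *     char buf[HUGE_STRING_LEN];
+ *
+ *     if ((rc = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK)) != OK) {
+ *         return rc;
+ *     }
+ *     if (ap_should_client_block(r)) {
+ *         while ((nread = ap_get_client_block(r, buf, sizeof(buf))) > 0) {
+ *             ... consume nread bytes of body data here ...
+ *         }
+ *         if (nread < 0) {
+ *             return HTTP_BAD_REQUEST;
+ *         }
+ *     }
+ *
+ * This mirrors the pattern used by ap_send_http_trace() above.
+ */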
+
+AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy)
+{
+ const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
+ const char *lenp = apr_table_get(r->headers_in, "Content-Length");
+
+ r->read_body = read_policy;
+ r->read_chunked = 0;
+ r->remaining = 0;
+
+ if (tenc) {
+ if (strcasecmp(tenc, "chunked")) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Unknown Transfer-Encoding %s", tenc);
+ return HTTP_NOT_IMPLEMENTED;
+ }
+ if (r->read_body == REQUEST_CHUNKED_ERROR) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "chunked Transfer-Encoding forbidden: %s", r->uri);
+ return (lenp) ? HTTP_BAD_REQUEST : HTTP_LENGTH_REQUIRED;
+ }
+
+ r->read_chunked = 1;
+ }
+ else if (lenp) {
+ int conversion_error = 0;
+ char *endstr;
+
+ errno = 0;
+ r->remaining = strtol(lenp, &endstr, 10); /* depend on ANSI */
+
+ /* See comments in ap_http_filter() */
+ if (errno || (endstr && *endstr) || (r->remaining < 0)) {
+ conversion_error = 1;
+ }
+
+ if (conversion_error) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid Content-Length");
+ return HTTP_BAD_REQUEST;
+ }
+ }
+
+ if ((r->read_body == REQUEST_NO_BODY)
+ && (r->read_chunked || (r->remaining > 0))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "%s with body is not allowed for %s", r->method, r->uri);
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
+#ifdef AP_DEBUG
+ {
+ /* Make sure ap_getline() didn't leave any droppings. */
+ core_request_config *req_cfg =
+ (core_request_config *)ap_get_module_config(r->request_config,
+ &core_module);
+ AP_DEBUG_ASSERT(APR_BRIGADE_EMPTY(req_cfg->bb));
+ }
+#endif
+
+ return OK;
+}
+
+AP_DECLARE(int) ap_should_client_block(request_rec *r)
+{
+ /* First check if we have already read the request body */
+
+ if (r->read_length || (!r->read_chunked && (r->remaining <= 0))) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * Parse a chunk extension, detect overflow.
+ * There are two error cases:
+ * 1) If the conversion would require too many bits, a -1 is returned.
+ * 2) If the conversion used the correct number of bits, but an overflow
+ * caused only the sign bit to flip, then that negative number is
+ * returned.
+ * In general, any negative number can be considered an overflow error.
+ */
+static long get_chunk_size(char *b)
+{
+ long chunksize = 0;
+ size_t chunkbits = sizeof(long) * 8;
+
+ ap_xlate_proto_from_ascii(b, strlen(b));
+
+ /* Skip leading zeros */
+ while (*b == '0') {
+ ++b;
+ }
+
+ while (apr_isxdigit(*b) && (chunkbits > 0)) {
+ int xvalue = 0;
+
+ if (*b >= '0' && *b <= '9') {
+ xvalue = *b - '0';
+ }
+ else if (*b >= 'A' && *b <= 'F') {
+ xvalue = *b - 'A' + 0xa;
+ }
+ else if (*b >= 'a' && *b <= 'f') {
+ xvalue = *b - 'a' + 0xa;
+ }
+
+ chunksize = (chunksize << 4) | xvalue;
+ chunkbits -= 4;
+ ++b;
+ }
+ if (apr_isxdigit(*b) && (chunkbits <= 0)) {
+ /* overflow */
+ return -1;
+ }
+
+ return chunksize;
+}
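+
+/* Illustrative values (not in the original source), assuming a 32-bit long:
+ *
+ *     get_chunk_size("0")               returns  0     (last chunk)
+ *     get_chunk_size("1a3")             returns  0x1a3 (419 bytes)
+ *     get_chunk_size("1A3;name=val")    returns  0x1a3 (extension ignored)
+ *     get_chunk_size("fffffffff")       returns -1     (needs > 32 bits)
+ *
+ * Any negative return is treated as an overflow/parse error by the
+ * callers in ap_http_filter() above.
+ */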
+
+/* get_client_block is called in a loop to get the request message body.
+ * This is quite simple if the client includes a content-length
+ * (the normal case), but gets messy if the body is chunked. Note that
+ * r->remaining is used to maintain state across calls and that
+ * r->read_length is the total number of bytes given to the caller
+ * across all invocations. It is messy because we have to be careful not
+ * to read past the data provided by the client, since these reads block.
+ * Returns 0 on End-of-body, -1 on error or premature chunk end.
+ *
+ */
+AP_DECLARE(long) ap_get_client_block(request_rec *r, char *buffer,
+ apr_size_t bufsiz)
+{
+ apr_status_t rv;
+ apr_bucket_brigade *bb;
+
+ if (r->remaining < 0 || (!r->read_chunked && r->remaining == 0)) {
+ return 0;
+ }
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ if (bb == NULL) {
+ r->connection->keepalive = AP_CONN_CLOSE;
+ return -1;
+ }
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, bufsiz);
+
+ /* We lose the failure code here. This is why ap_get_client_block should
+ * not be used.
+ */
+ if (rv != APR_SUCCESS) {
+ /* if we actually fail here, we want to just return and
+ * stop trying to read data from the client.
+ */
+ r->connection->keepalive = AP_CONN_CLOSE;
+ apr_brigade_destroy(bb);
+ return -1;
+ }
+
+ /* If this fails, it means that a filter is written incorrectly and that
+ * it needs to learn how to properly handle APR_BLOCK_READ requests by
+ * returning data when requested.
+ */
+ AP_DEBUG_ASSERT(!APR_BRIGADE_EMPTY(bb));
+
+ /* Check to see if EOS in the brigade.
+ *
+ * If so, we have to leave a nugget for the *next* ap_get_client_block
+ * call to return 0.
+ */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
+ if (r->read_chunked) {
+ r->remaining = -1;
+ }
+ else {
+ r->remaining = 0;
+ }
+ }
+
+ rv = apr_brigade_flatten(bb, buffer, &bufsiz);
+ if (rv != APR_SUCCESS) {
+ apr_brigade_destroy(bb);
+ return -1;
+ }
+
+ /* XXX yank me? */
+ r->read_length += bufsiz;
+
+ apr_brigade_destroy(bb);
+ return bufsiz;
+}
+
+/* In HTTP/1.1, any method can have a body. However, most GET handlers
+ * wouldn't know what to do with a request body if they received one.
+ * This helper routine tests for and reads any message body in the request,
+ * simply discarding whatever it receives. We need to do this because
+ * failing to read the request body would cause it to be interpreted
+ * as the next request on a persistent connection.
+ *
+ * Since we return an error status if the request is malformed, this
+ * routine should be called at the beginning of a no-body handler, e.g.,
+ *
+ * if ((retval = ap_discard_request_body(r)) != OK) {
+ * return retval;
+ * }
+ */
+AP_DECLARE(int) ap_discard_request_body(request_rec *r)
+{
+ apr_bucket_brigade *bb;
+ int rv, seen_eos;
+
+ /* Sometimes we'll get in a state where the input handling has
+ * detected an error where we want to drop the connection, so if
+ * that's the case, don't read the data as that is what we're trying
+ * to avoid.
+ *
+ * This function is also a no-op on a subrequest.
+ */
+ if (r->main || r->connection->keepalive == AP_CONN_CLOSE ||
+ ap_status_drops_connection(r->status)) {
+ return OK;
+ }
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ seen_eos = 0;
+ do {
+ apr_bucket *bucket;
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, HUGE_STRING_LEN);
+
+ if (rv != APR_SUCCESS) {
+ /* FIXME: If we ever have a mapping from filters (apr_status_t)
+ * to HTTP error codes, this would be a good place for them.
+ *
+ * If we received the special case AP_FILTER_ERROR, it means
+ * that the filters have already handled this error.
+ * Otherwise, we should assume we have a bad request.
+ */
+ if (rv == AP_FILTER_ERROR) {
+ apr_brigade_destroy(bb);
+ return rv;
+ }
+ else {
+ apr_brigade_destroy(bb);
+ return HTTP_BAD_REQUEST;
+ }
+ }
+
+ APR_BRIGADE_FOREACH(bucket, bb) {
+ const char *data;
+ apr_size_t len;
+
+ if (APR_BUCKET_IS_EOS(bucket)) {
+ seen_eos = 1;
+ break;
+ }
+
+ /* These are metadata buckets. */
+ if (bucket->length == 0) {
+ continue;
+ }
+
+ /* We MUST read because in case we have an unknown-length
+ * bucket or one that morphs, we want to exhaust it.
+ */
+ rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ apr_brigade_destroy(bb);
+ return HTTP_BAD_REQUEST;
+ }
+ }
+ apr_brigade_cleanup(bb);
+ } while (!seen_eos);
+
+ return OK;
+}
+
+static const char *add_optional_notes(request_rec *r,
+ const char *prefix,
+ const char *key,
+ const char *suffix)
+{
+ const char *notes, *result;
+
+ if ((notes = apr_table_get(r->notes, key)) == NULL) {
+ result = apr_pstrcat(r->pool, prefix, suffix, NULL);
+ }
+ else {
+ result = apr_pstrcat(r->pool, prefix, notes, suffix, NULL);
+ }
+
+ return result;
+}
+
+/* construct and return the default error message for a given
+ * HTTP defined error code
+ */
+static const char *get_canned_error_string(int status,
+ request_rec *r,
+ const char *location)
+{
+ apr_pool_t *p = r->pool;
+ const char *error_notes, *h1, *s1;
+
+ switch (status) {
+ case HTTP_MOVED_PERMANENTLY:
+ case HTTP_MOVED_TEMPORARILY:
+ case HTTP_TEMPORARY_REDIRECT:
+ return(apr_pstrcat(p,
+ "<p>The document has moved <a href=\"",
+ ap_escape_html(r->pool, location),
+ "\">here</a>.</p>\n",
+ NULL));
+ case HTTP_SEE_OTHER:
+ return(apr_pstrcat(p,
+ "<p>The answer to your request is located "
+ "<a href=\"",
+ ap_escape_html(r->pool, location),
+ "\">here</a>.</p>\n",
+ NULL));
+ case HTTP_USE_PROXY:
+ return(apr_pstrcat(p,
+ "<p>This resource is only accessible "
+ "through the proxy\n",
+ ap_escape_html(r->pool, location),
+ "<br />\nYou will need to configure "
+ "your client to use that proxy.</p>\n",
+ NULL));
+ case HTTP_PROXY_AUTHENTICATION_REQUIRED:
+ case HTTP_UNAUTHORIZED:
+ return("<p>This server could not verify that you\n"
+ "are authorized to access the document\n"
+ "requested. Either you supplied the wrong\n"
+ "credentials (e.g., bad password), or your\n"
+ "browser doesn't understand how to supply\n"
+ "the credentials required.</p>\n");
+ case HTTP_BAD_REQUEST:
+ return(add_optional_notes(r,
+ "<p>Your browser sent a request that "
+ "this server could not understand.<br />\n",
+ "error-notes",
+ "</p>\n"));
+ case HTTP_FORBIDDEN:
+ return(apr_pstrcat(p,
+ "<p>You don't have permission to access ",
+ ap_escape_html(r->pool, r->uri),
+ "\non this server.</p>\n",
+ NULL));
+ case HTTP_NOT_FOUND:
+ return(apr_pstrcat(p,
+ "<p>The requested URL ",
+ ap_escape_html(r->pool, r->uri),
+ " was not found on this server.</p>\n",
+ NULL));
+ case HTTP_METHOD_NOT_ALLOWED:
+ return(apr_pstrcat(p,
+ "<p>The requested method ",
+ ap_escape_html(r->pool, r->method),
+ " is not allowed for the URL ",
+ ap_escape_html(r->pool, r->uri),
+ ".</p>\n",
+ NULL));
+ case HTTP_NOT_ACCEPTABLE:
+ s1 = apr_pstrcat(p,
+ "<p>An appropriate representation of the "
+ "requested resource ",
+ ap_escape_html(r->pool, r->uri),
+ " could not be found on this server.</p>\n",
+ NULL);
+ return(add_optional_notes(r, s1, "variant-list", ""));
+ case HTTP_MULTIPLE_CHOICES:
+ return(add_optional_notes(r, "", "variant-list", ""));
+ case HTTP_LENGTH_REQUIRED:
+ s1 = apr_pstrcat(p,
+ "<p>A request of the requested method ",
+ ap_escape_html(r->pool, r->method),
+ " requires a valid Content-length.<br />\n",
+ NULL);
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_PRECONDITION_FAILED:
+ return(apr_pstrcat(p,
+ "<p>The precondition on the request "
+ "for the URL ",
+ ap_escape_html(r->pool, r->uri),
+ " evaluated to false.</p>\n",
+ NULL));
+ case HTTP_NOT_IMPLEMENTED:
+ s1 = apr_pstrcat(p,
+ "<p>",
+ ap_escape_html(r->pool, r->method), " to ",
+ ap_escape_html(r->pool, r->uri),
+ " not supported.<br />\n",
+ NULL);
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_BAD_GATEWAY:
+ s1 = "<p>The proxy server received an invalid" CRLF
+ "response from an upstream server.<br />" CRLF;
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_VARIANT_ALSO_VARIES:
+ return(apr_pstrcat(p,
+ "<p>A variant for the requested "
+ "resource\n<pre>\n",
+ ap_escape_html(r->pool, r->uri),
+ "\n</pre>\nis itself a negotiable resource. "
+ "This indicates a configuration error.</p>\n",
+ NULL));
+ case HTTP_REQUEST_TIME_OUT:
+ return("<p>Server timeout waiting for the HTTP request from the client.</p>\n");
+ case HTTP_GONE:
+ return(apr_pstrcat(p,
+ "<p>The requested resource<br />",
+ ap_escape_html(r->pool, r->uri),
+ "<br />\nis no longer available on this server "
+ "and there is no forwarding address.\n"
+ "Please remove all references to this "
+ "resource.</p>\n",
+ NULL));
+ case HTTP_REQUEST_ENTITY_TOO_LARGE:
+ return(apr_pstrcat(p,
+ "The requested resource<br />",
+ ap_escape_html(r->pool, r->uri), "<br />\n",
+ "does not allow request data with ",
+ ap_escape_html(r->pool, r->method),
+ " requests, or the amount of data provided in\n"
+ "the request exceeds the capacity limit.\n",
+ NULL));
+ case HTTP_REQUEST_URI_TOO_LARGE:
+ s1 = "<p>The requested URL's length exceeds the capacity\n"
+ "limit for this server.<br />\n";
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_UNSUPPORTED_MEDIA_TYPE:
+ return("<p>The supplied request data is not in a format\n"
+ "acceptable for processing by this resource.</p>\n");
+ case HTTP_RANGE_NOT_SATISFIABLE:
+ return("<p>None of the range-specifier values in the Range\n"
+ "request-header field overlap the current extent\n"
+ "of the selected resource.</p>\n");
+ case HTTP_EXPECTATION_FAILED:
+ return(apr_pstrcat(p,
+ "<p>The expectation given in the Expect "
+ "request-header"
+ "\nfield could not be met by this server.</p>\n"
+ "<p>The client sent<pre>\n Expect: ",
+ ap_escape_html(r->pool, apr_table_get(r->headers_in, "Expect")),
+ "\n</pre>\n"
+ "but we only allow the 100-continue "
+ "expectation.</p>\n",
+ NULL));
+ case HTTP_UNPROCESSABLE_ENTITY:
+ return("<p>The server understands the media type of the\n"
+ "request entity, but was unable to process the\n"
+ "contained instructions.</p>\n");
+ case HTTP_LOCKED:
+ return("<p>The requested resource is currently locked.\n"
+ "The lock must be released or proper identification\n"
+ "given before the method can be applied.</p>\n");
+ case HTTP_FAILED_DEPENDENCY:
+ return("<p>The method could not be performed on the resource\n"
+ "because the requested action depended on another\n"
+ "action and that other action failed.</p>\n");
+ case HTTP_UPGRADE_REQUIRED:
+ return("<p>The requested resource can only be retrieved\n"
+ "using SSL. The server is willing to upgrade the current\n"
+ "connection to SSL, but your client doesn't support it.\n"
+ "Either upgrade your client, or try requesting the page\n"
+ "using https://\n");
+ case HTTP_INSUFFICIENT_STORAGE:
+ return("<p>The method could not be performed on the resource\n"
+ "because the server is unable to store the\n"
+ "representation needed to successfully complete the\n"
+ "request. There is insufficient free space left in\n"
+ "your storage allocation.</p>\n");
+ case HTTP_SERVICE_UNAVAILABLE:
+ return("<p>The server is temporarily unable to service your\n"
+ "request due to maintenance downtime or capacity\n"
+ "problems. Please try again later.</p>\n");
+ case HTTP_GATEWAY_TIME_OUT:
+ return("<p>The proxy server did not receive a timely response\n"
+ "from the upstream server.</p>\n");
+ case HTTP_NOT_EXTENDED:
+ return("<p>A mandatory extension policy in the request is not\n"
+ "accepted by the server for this resource.</p>\n");
+ default: /* HTTP_INTERNAL_SERVER_ERROR */
+ /*
+ * This comparison to expose error-notes could be modified to
+ * use a configuration directive and export based on that
+ * directive. For now "*" is used to designate an error-notes
+         * that is totally safe for any user to see (i.e. lacks paths,
+ * database passwords, etc.)
+ */
+ if (((error_notes = apr_table_get(r->notes,
+ "error-notes")) != NULL)
+ && (h1 = apr_table_get(r->notes, "verbose-error-to")) != NULL
+ && (strcmp(h1, "*") == 0)) {
+ return(apr_pstrcat(p, error_notes, "<p />\n", NULL));
+ }
+ else {
+ return(apr_pstrcat(p,
+ "<p>The server encountered an internal "
+ "error or\n"
+ "misconfiguration and was unable to complete\n"
+ "your request.</p>\n"
+ "<p>Please contact the server "
+ "administrator,\n ",
+ ap_escape_html(r->pool,
+ r->server->server_admin),
+ " and inform them of the time the "
+ "error occurred,\n"
+ "and anything you might have done that "
+ "may have\n"
+ "caused the error.</p>\n"
+ "<p>More information about this error "
+ "may be available\n"
+ "in the server error log.</p>\n",
+ NULL));
+ }
+ /*
+ * It would be nice to give the user the information they need to
+ * fix the problem directly since many users don't have access to
+ * the error_log (think University sites) even though they can easily
+ * get this error by misconfiguring an htaccess file. However, the
+         * error notes tend to include the real file pathname in this case,
+ * which some people consider to be a breach of privacy. Until we
+ * can figure out a way to remove the pathname, leave this commented.
+ *
+ * if ((error_notes = apr_table_get(r->notes,
+ * "error-notes")) != NULL) {
+ * return(apr_pstrcat(p, error_notes, "<p />\n", NULL);
+ * }
+ * else {
+ * return "";
+ * }
+ */
+ }
+}
+
+/* We should have named this send_canned_response, since it is used for any
+ * response that can be generated by the server from the request record.
+ * This includes all 204 (no content), 3xx (redirect), 4xx (client error),
+ * and 5xx (server error) messages that have not been redirected to another
+ * handler via the ErrorDocument feature.
+ */
+AP_DECLARE(void) ap_send_error_response(request_rec *r, int recursive_error)
+{
+ int status = r->status;
+ int idx = ap_index_of_response(status);
+ char *custom_response;
+ const char *location = apr_table_get(r->headers_out, "Location");
+
+ /* At this point, we are starting the response over, so we have to reset
+ * this value.
+ */
+ r->eos_sent = 0;
+
+ /* and we need to get rid of any RESOURCE filters that might be lurking
+ * around, thinking they are in the middle of the original request
+ */
+
+ r->output_filters = r->proto_output_filters;
+
+ ap_run_insert_error_filter(r);
+
+ /*
+ * It's possible that the Location field might be in r->err_headers_out
+ * instead of r->headers_out; use the latter if possible, else the
+ * former.
+ */
+ if (location == NULL) {
+ location = apr_table_get(r->err_headers_out, "Location");
+ }
+ /* We need to special-case the handling of 204 and 304 responses,
+ * since they have specific HTTP requirements and do not include a
+ * message body. Note that being assbackwards here is not an option.
+ */
+ if (status == HTTP_NOT_MODIFIED) {
+ ap_finalize_request_protocol(r);
+ return;
+ }
+
+ if (status == HTTP_NO_CONTENT) {
+ ap_finalize_request_protocol(r);
+ return;
+ }
+
+ if (!r->assbackwards) {
+ apr_table_t *tmp = r->headers_out;
+
+ /* For all HTTP/1.x responses for which we generate the message,
+ * we need to avoid inheriting the "normal status" header fields
+ * that may have been set by the request handler before the
+ * error or redirect, except for Location on external redirects.
+ */
+ r->headers_out = r->err_headers_out;
+ r->err_headers_out = tmp;
+ apr_table_clear(r->err_headers_out);
+
+ if (ap_is_HTTP_REDIRECT(status) || (status == HTTP_CREATED)) {
+ if ((location != NULL) && *location) {
+ apr_table_setn(r->headers_out, "Location", location);
+ }
+ else {
+ location = ""; /* avoids coredump when printing, below */
+ }
+ }
+
+ r->content_languages = NULL;
+ r->content_encoding = NULL;
+ r->clength = 0;
+
+ if (apr_table_get(r->subprocess_env,
+ "suppress-error-charset") != NULL) {
+ core_request_config *request_conf =
+ ap_get_module_config(r->request_config, &core_module);
+ request_conf->suppress_charset = 1; /* avoid adding default
+ * charset later
+ */
+ ap_set_content_type(r, "text/html");
+ }
+ else {
+ ap_set_content_type(r, "text/html; charset=iso-8859-1");
+ }
+
+ if ((status == HTTP_METHOD_NOT_ALLOWED)
+ || (status == HTTP_NOT_IMPLEMENTED)) {
+ apr_table_setn(r->headers_out, "Allow", make_allow(r));
+ }
+
+ if (r->header_only) {
+ ap_finalize_request_protocol(r);
+ return;
+ }
+ }
+
+ if ((custom_response = ap_response_code_string(r, idx))) {
+ /*
+ * We have a custom response output. This should only be
+ * a text-string to write back. But if the ErrorDocument
+ * was a local redirect and the requested resource failed
+ * for any reason, the custom_response will still hold the
+ * redirect URL. We don't really want to output this URL
+ * as a text message, so first check the custom response
+ * string to ensure that it is a text-string (using the
+ * same test used in ap_die(), i.e. does it start with a ").
+ *
+ * If it's not a text string, we've got a recursive error or
+ * an external redirect. If it's a recursive error, ap_die passes
+ * us the second error code so we can write both, and has already
+ * backed up to the original error. If it's an external redirect,
+ * it hasn't happened yet; we may never know if it fails.
+ */
+ if (custom_response[0] == '\"') {
+ ap_rputs(custom_response + 1, r);
+ ap_finalize_request_protocol(r);
+ return;
+ }
+ }
+ {
+ const char *title = status_lines[idx];
+ const char *h1;
+
+ /* Accept a status_line set by a module, but only if it begins
+ * with the 3 digit status code
+ */
+ if (r->status_line != NULL
+ && strlen(r->status_line) > 4 /* long enough */
+ && apr_isdigit(r->status_line[0])
+ && apr_isdigit(r->status_line[1])
+ && apr_isdigit(r->status_line[2])
+ && apr_isspace(r->status_line[3])
+ && apr_isalnum(r->status_line[4])) {
+ title = r->status_line;
+ }
+
+ /* folks decided they didn't want the error code in the H1 text */
+ h1 = &title[4];
+
+ /* can't count on a charset filter being in place here,
+ * so do ebcdic->ascii translation explicitly (if needed)
+ */
+
+ ap_rvputs_proto_in_ascii(r,
+ DOCTYPE_HTML_2_0
+ "<html><head>\n<title>", title,
+ "</title>\n</head><body>\n<h1>", h1, "</h1>\n",
+ NULL);
+
+ ap_rvputs_proto_in_ascii(r,
+ get_canned_error_string(status, r, location),
+ NULL);
+
+ if (recursive_error) {
+ ap_rvputs_proto_in_ascii(r, "<p>Additionally, a ",
+ status_lines[ap_index_of_response(recursive_error)],
+ "\nerror was encountered while trying to use an "
+ "ErrorDocument to handle the request.</p>\n", NULL);
+ }
+ ap_rvputs_proto_in_ascii(r, ap_psignature("<hr>\n", r), NULL);
+ ap_rvputs_proto_in_ascii(r, "</body></html>\n", NULL);
+ }
+ ap_finalize_request_protocol(r);
+}
+
+/*
+ * Create a new method list with the specified number of preallocated
+ * extension slots.
+ */
+AP_DECLARE(ap_method_list_t *) ap_make_method_list(apr_pool_t *p, int nelts)
+{
+ ap_method_list_t *ml;
+
+ ml = (ap_method_list_t *) apr_palloc(p, sizeof(ap_method_list_t));
+ ml->method_mask = 0;
+ ml->method_list = apr_array_make(p, nelts, sizeof(char *));
+ return ml;
+}
+
+/*
+ * Make a copy of a method list (primarily for subrequests that may
+ * subsequently change it; don't want them changing the parent's, too!).
+ */
+AP_DECLARE(void) ap_copy_method_list(ap_method_list_t *dest,
+ ap_method_list_t *src)
+{
+ int i;
+ char **imethods;
+ char **omethods;
+
+ dest->method_mask = src->method_mask;
+ imethods = (char **) src->method_list->elts;
+ for (i = 0; i < src->method_list->nelts; ++i) {
+ omethods = (char **) apr_array_push(dest->method_list);
+ *omethods = apr_pstrdup(dest->method_list->pool, imethods[i]);
+ }
+}
+
+/*
+ * Invoke a callback routine for each method in the specified list.
+ */
+AP_DECLARE_NONSTD(void) ap_method_list_do(int (*comp) (void *urec,
+ const char *mname,
+ int mnum),
+ void *rec,
+ const ap_method_list_t *ml, ...)
+{
+ va_list vp;
+ va_start(vp, ml);
+ ap_method_list_vdo(comp, rec, ml, vp);
+ va_end(vp);
+}
+
+AP_DECLARE(void) ap_method_list_vdo(int (*comp) (void *mrec,
+ const char *mname,
+ int mnum),
+ void *rec, const ap_method_list_t *ml,
+ va_list vp)
+{
+
+}
+
+/*
+ * Return true if the specified HTTP method is in the provided
+ * method list.
+ */
+AP_DECLARE(int) ap_method_in_list(ap_method_list_t *l, const char *method)
+{
+ int methnum;
+ int i;
+ char **methods;
+
+ /*
+ * If it's one of our known methods, use the shortcut and check the
+ * bitmask.
+ */
+ methnum = ap_method_number_of(method);
+ if (methnum != M_INVALID) {
+ return !!(l->method_mask & (AP_METHOD_BIT << methnum));
+ }
+ /*
+ * Otherwise, see if the method name is in the array or string names
+ */
+ if ((l->method_list == NULL) || (l->method_list->nelts == 0)) {
+ return 0;
+ }
+ methods = (char **)l->method_list->elts;
+ for (i = 0; i < l->method_list->nelts; ++i) {
+ if (strcmp(method, methods[i]) == 0) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Add the specified method to a method list (if it isn't already there).
+ */
+AP_DECLARE(void) ap_method_list_add(ap_method_list_t *l, const char *method)
+{
+ int methnum;
+ int i;
+ const char **xmethod;
+ char **methods;
+
+ /*
+ * If it's one of our known methods, use the shortcut and use the
+ * bitmask.
+ */
+ methnum = ap_method_number_of(method);
+ l->method_mask |= (AP_METHOD_BIT << methnum);
+ if (methnum != M_INVALID) {
+ return;
+ }
+ /*
+ * Otherwise, see if the method name is in the array of string names.
+ */
+ if (l->method_list->nelts != 0) {
+ methods = (char **)l->method_list->elts;
+ for (i = 0; i < l->method_list->nelts; ++i) {
+ if (strcmp(method, methods[i]) == 0) {
+ return;
+ }
+ }
+ }
+ xmethod = (const char **) apr_array_push(l->method_list);
+ *xmethod = method;
+}
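+
+/* Illustrative behaviour (not part of the original source):
+ *
+ *     ap_method_list_add(l, "GET");    sets the M_GET bit and returns
+ *     ap_method_list_add(l, "BREW");   assuming "BREW" was never registered,
+ *                                      this sets the M_INVALID bit and
+ *                                      stores the string in l->method_list
+ *
+ * The string pointer is stored as-is (no copy), so the caller must pass
+ * a name that outlives the list.
+ */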
+
+/*
+ * Remove the specified method from a method list.
+ */
+AP_DECLARE(void) ap_method_list_remove(ap_method_list_t *l,
+ const char *method)
+{
+ int methnum;
+ char **methods;
+
+ /*
+     * If it's a known method, either builtin or registered
+ * by a module, use the bitmask.
+ */
+ methnum = ap_method_number_of(method);
+    l->method_mask &= ~(AP_METHOD_BIT << methnum);
+ if (methnum != M_INVALID) {
+ return;
+ }
+ /*
+ * Otherwise, see if the method name is in the array of string names.
+ */
+ if (l->method_list->nelts != 0) {
+ register int i, j, k;
+ methods = (char **)l->method_list->elts;
+ for (i = 0; i < l->method_list->nelts; ) {
+ if (strcmp(method, methods[i]) == 0) {
+ for (j = i, k = i + 1; k < l->method_list->nelts; ++j, ++k) {
+ methods[j] = methods[k];
+ }
+ --l->method_list->nelts;
+ }
+ else {
+ ++i;
+ }
+ }
+ }
+}
+
+/*
+ * Reset a method list to be completely empty.
+ */
+AP_DECLARE(void) ap_clear_method_list(ap_method_list_t *l)
+{
+ l->method_mask = 0;
+ l->method_list->nelts = 0;
+}
+
+/* Generate the human-readable hex representation of an unsigned long
+ * (basically a faster version of 'sprintf("%lx")')
+ */
+#define HEX_DIGITS "0123456789abcdef"
+static char *etag_ulong_to_hex(char *next, unsigned long u)
+{
+ int printing = 0;
+ int shift = sizeof(unsigned long) * 8 - 4;
+ do {
+ unsigned long next_digit = ((u >> shift) & (unsigned long)0xf);
+ if (next_digit) {
+ *next++ = HEX_DIGITS[next_digit];
+ printing = 1;
+ }
+ else if (printing) {
+ *next++ = HEX_DIGITS[next_digit];
+ }
+ shift -= 4;
+ } while (shift);
+ *next++ = HEX_DIGITS[u & (unsigned long)0xf];
+ return next;
+}
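+
+/* Illustrative example (not in the original source):
+ *
+ *     char buf[2 * sizeof(unsigned long)];
+ *     char *end = etag_ulong_to_hex(buf, 0x1a2bUL);
+ *
+ * writes the four characters "1a2b" into buf and returns a pointer just
+ * past them.  No terminating NUL is appended; ap_make_etag() below adds
+ * its own quotes, dashes and terminator.
+ */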
+
+#define ETAG_WEAK "W/"
+#define CHARS_PER_UNSIGNED_LONG (sizeof(unsigned long) * 2)
+/*
+ * Construct an entity tag (ETag) from resource information. If it's a real
+ * file, build in some of the file characteristics. If the modification time
+ * is newer than (request-time minus 1 second), mark the ETag as weak - it
+ * could be modified again in as short an interval. We rationalize the
+ * modification time we're given to keep it from being in the future.
+ */
+AP_DECLARE(char *) ap_make_etag(request_rec *r, int force_weak)
+{
+ char *weak;
+ apr_size_t weak_len;
+ char *etag;
+ char *next;
+ core_dir_config *cfg;
+ etag_components_t etag_bits;
+ etag_components_t bits_added;
+
+ cfg = (core_dir_config *)ap_get_module_config(r->per_dir_config,
+ &core_module);
+ etag_bits = (cfg->etag_bits & (~ cfg->etag_remove)) | cfg->etag_add;
+
+ /*
+ * If it's a file (or we wouldn't be here) and no ETags
+ * should be set for files, return an empty string and
+ * note it for the header-sender to ignore.
+ */
+ if (etag_bits & ETAG_NONE) {
+ apr_table_setn(r->notes, "no-etag", "omit");
+ return "";
+ }
+
+ if (etag_bits == ETAG_UNSET) {
+ etag_bits = ETAG_BACKWARD;
+ }
+ /*
+ * Make an ETag header out of various pieces of information. We use
+ * the last-modified date and, if we have a real file, the
+ * length and inode number - note that this doesn't have to match
+ * the content-length (i.e. includes), it just has to be unique
+ * for the file.
+ *
+ * If the request was made within a second of the last-modified date,
+ * we send a weak tag instead of a strong one, since it could
+ * be modified again later in the second, and the validation
+ * would be incorrect.
+ */
+ if ((r->request_time - r->mtime > (1 * APR_USEC_PER_SEC)) &&
+ !force_weak) {
+ weak = NULL;
+ weak_len = 0;
+ }
+ else {
+ weak = ETAG_WEAK;
+ weak_len = sizeof(ETAG_WEAK);
+ }
+
+ if (r->finfo.filetype != 0) {
+ /*
+ * ETag gets set to [W/]"inode-size-mtime", modulo any
+ * FileETag keywords.
+ */
+ etag = apr_palloc(r->pool, weak_len + sizeof("\"--\"") +
+ 3 * CHARS_PER_UNSIGNED_LONG + 1);
+ next = etag;
+ if (weak) {
+ while (*weak) {
+ *next++ = *weak++;
+ }
+ }
+ *next++ = '"';
+ bits_added = 0;
+ if (etag_bits & ETAG_INODE) {
+ next = etag_ulong_to_hex(next, (unsigned long)r->finfo.inode);
+ bits_added |= ETAG_INODE;
+ }
+ if (etag_bits & ETAG_SIZE) {
+ if (bits_added != 0) {
+ *next++ = '-';
+ }
+ next = etag_ulong_to_hex(next, (unsigned long)r->finfo.size);
+ bits_added |= ETAG_SIZE;
+ }
+ if (etag_bits & ETAG_MTIME) {
+ if (bits_added != 0) {
+ *next++ = '-';
+ }
+ next = etag_ulong_to_hex(next, (unsigned long)r->mtime);
+ }
+ *next++ = '"';
+ *next = '\0';
+ }
+ else {
+ /*
+ * Not a file document, so just use the mtime: [W/]"mtime"
+ */
+ etag = apr_palloc(r->pool, weak_len + sizeof("\"\"") +
+ CHARS_PER_UNSIGNED_LONG + 1);
+ next = etag;
+ if (weak) {
+ while (*weak) {
+ *next++ = *weak++;
+ }
+ }
+ *next++ = '"';
+ next = etag_ulong_to_hex(next, (unsigned long)r->mtime);
+ *next++ = '"';
+ *next = '\0';
+ }
+
+ return etag;
+}
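+
+/* Illustrative results (not in the original source), assuming the default
+ * "FileETag INode MTime Size" configuration; the hex values are made up:
+ *
+ *     regular file, not recently modified:       "2d81a-5b-3c18e6f2"
+ *     same file, modified within one second
+ *     of the request (or force_weak set):        W/"2d81a-5b-3c18e6f2"
+ *     non-file resource:                         "3c18e6f2"  (mtime only)
+ *
+ * The fields are inode, size and mtime, in that order, separated by '-'.
+ */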
+
+AP_DECLARE(void) ap_set_etag(request_rec *r)
+{
+ char *etag;
+ char *variant_etag, *vlv;
+ int vlv_weak;
+
+ if (!r->vlist_validator) {
+ etag = ap_make_etag(r, 0);
+
+ /* If we get a blank etag back, don't set the header. */
+ if (!etag[0]) {
+ return;
+ }
+ }
+ else {
+ /* If we have a variant list validator (vlv) due to the
+ * response being negotiated, then we create a structured
+ * entity tag which merges the variant etag with the variant
+ * list validator (vlv). This merging makes revalidation
+ * somewhat safer, ensures that caches which can deal with
+ * Vary will (eventually) be updated if the set of variants is
+ * changed, and is also a protocol requirement for transparent
+ * content negotiation.
+ */
+
+ /* if the variant list validator is weak, we make the whole
+ * structured etag weak. If we would not, then clients could
+ * have problems merging range responses if we have different
+ * variants with the same non-globally-unique strong etag.
+ */
+
+ vlv = r->vlist_validator;
+ vlv_weak = (vlv[0] == 'W');
+
+ variant_etag = ap_make_etag(r, vlv_weak);
+
+ /* If we get a blank etag back, don't append vlv and stop now. */
+ if (!variant_etag[0]) {
+ return;
+ }
+
+ /* merge variant_etag and vlv into a structured etag */
+ variant_etag[strlen(variant_etag) - 1] = '\0';
+ if (vlv_weak) {
+ vlv += 3;
+ }
+ else {
+ vlv++;
+ }
+ etag = apr_pstrcat(r->pool, variant_etag, ";", vlv, NULL);
+ }
+
+ apr_table_setn(r->headers_out, "ETag", etag);
+}
+
+static int parse_byterange(char *range, apr_off_t clength,
+ apr_off_t *start, apr_off_t *end)
+{
+ char *dash = strchr(range, '-');
+
+ if (!dash) {
+ return 0;
+ }
+
+ if ((dash == range)) {
+ /* In the form "-5" */
+ *start = clength - apr_atoi64(dash + 1);
+ *end = clength - 1;
+ }
+ else {
+ *dash = '\0';
+ dash++;
+ *start = apr_atoi64(range);
+ if (*dash) {
+ *end = apr_atoi64(dash);
+ }
+ else { /* "5-" */
+ *end = clength - 1;
+ }
+ }
+
+ if (*start < 0) {
+ *start = 0;
+ }
+
+ if (*end >= clength) {
+ *end = clength - 1;
+ }
+
+ if (*start > *end) {
+ return -1;
+ }
+
+ return (*start > 0 || *end < clength);
+}
+
+static int ap_set_byterange(request_rec *r);
+
+typedef struct byterange_ctx {
+ apr_bucket_brigade *bb;
+ int num_ranges;
+ char *boundary;
+ char *bound_head;
+} byterange_ctx;
+
+/*
+ * Here we try to be compatible with clients that want multipart/x-byteranges
+ * instead of multipart/byteranges (also see above), as per HTTP/1.1. We
+ * look for the Request-Range header (e.g. Netscape 2 and 3) as an indication
+ * that the browser supports an older protocol. We also check User-Agent
+ * for Microsoft Internet Explorer 3, which needs this as well.
+ */
+static int use_range_x(request_rec *r)
+{
+ const char *ua;
+ return (apr_table_get(r->headers_in, "Request-Range")
+ || ((ua = apr_table_get(r->headers_in, "User-Agent"))
+ && ap_strstr_c(ua, "MSIE 3")));
+}
+
+#define BYTERANGE_FMT "%" APR_OFF_T_FMT "-%" APR_OFF_T_FMT "/%" APR_OFF_T_FMT
+#define PARTITION_ERR_FMT "apr_brigade_partition() failed " \
+ "[%" APR_OFF_T_FMT ",%" APR_OFF_T_FMT "]"
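+/* BYTERANGE_FMT expands to start-end/total, e.g. "0-499/1234" (illustrative
+ * values); the "bytes " prefix is added where the Content-Range header and
+ * the multipart part headers are built below. */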
+
+AP_CORE_DECLARE_NONSTD(apr_status_t) ap_byterange_filter(ap_filter_t *f,
+ apr_bucket_brigade *bb)
+{
+#define MIN_LENGTH(len1, len2) ((len1 > len2) ? len2 : len1)
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ byterange_ctx *ctx;
+ apr_bucket *e;
+ apr_bucket_brigade *bsend;
+ apr_off_t range_start;
+ apr_off_t range_end;
+ char *current;
+ apr_off_t clength = 0;
+ apr_status_t rv;
+ int found = 0;
+
+ /* Iterate through the brigade until reaching EOS or a bucket with
+ * unknown length. */
+ for (e = APR_BRIGADE_FIRST(bb);
+ (e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e)
+ && e->length != (apr_size_t)-1);
+ e = APR_BUCKET_NEXT(e)) {
+ clength += e->length;
+ }
+
+ /* Don't attempt to do byte range work if this brigade doesn't
+ * contain an EOS, or if any of the buckets has an unknown length;
+ * this avoids the cases where it is expensive to perform
+ * byteranging (i.e. may require arbitrary amounts of memory). */
+ if (!APR_BUCKET_IS_EOS(e) || clength <= 0) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ {
+ int num_ranges = ap_set_byterange(r);
+
+ /* We have nothing to do, get out of the way. */
+ if (num_ranges == 0) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ ctx = apr_pcalloc(r->pool, sizeof(*ctx));
+ ctx->num_ranges = num_ranges;
+ /* create a brigade in case we never call ap_save_brigade() */
+ ctx->bb = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ if (ctx->num_ranges > 1) {
+ /* Is ap_make_content_type required here? */
+ const char *orig_ct = ap_make_content_type(r, r->content_type);
+ /* need APR_TIME_T_FMT_HEX */
+ ctx->boundary = apr_psprintf(r->pool, "%qx%lx",
+ r->request_time, (long) getpid());
+
+ ap_set_content_type(r, apr_pstrcat(r->pool, "multipart",
+ use_range_x(r) ? "/x-" : "/",
+ "byteranges; boundary=",
+ ctx->boundary, NULL));
+
+ ctx->bound_head = apr_pstrcat(r->pool,
+ CRLF "--", ctx->boundary,
+ CRLF "Content-type: ",
+ orig_ct,
+ CRLF "Content-range: bytes ",
+ NULL);
+ ap_xlate_proto_to_ascii(ctx->bound_head, strlen(ctx->bound_head));
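+            /*
+             * Illustrative sketch of the framing built here (boundary and
+             * values made up):
+             *   Content-Type: multipart/byteranges; boundary=4b2c1a3f9e0
+             *   ...
+             *   --4b2c1a3f9e0
+             *   Content-type: text/html
+             *   Content-range: bytes 0-499/1234
+             */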
+ }
+ }
+
+ /* this brigade holds what we will be sending */
+ bsend = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ while ((current = ap_getword(r->pool, &r->range, ','))
+ && (rv = parse_byterange(current, clength, &range_start,
+ &range_end))) {
+ apr_bucket *e2;
+ apr_bucket *ec;
+
+ if (rv == -1) {
+ continue;
+ }
+
+        /* these calls to apr_brigade_partition() should theoretically
+         * never fail because of the length scan at the top of this filter,
+         * but what the heck, we'll check for an error anyway */
+ if ((rv = apr_brigade_partition(bb, range_start, &ec)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ PARTITION_ERR_FMT, range_start, clength);
+ continue;
+ }
+ if ((rv = apr_brigade_partition(bb, range_end+1, &e2)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ PARTITION_ERR_FMT, range_end+1, clength);
+ continue;
+ }
+
+ found = 1;
+
+ /* For single range requests, we must produce Content-Range header.
+ * Otherwise, we need to produce the multipart boundaries.
+ */
+ if (ctx->num_ranges == 1) {
+ apr_table_setn(r->headers_out, "Content-Range",
+ apr_psprintf(r->pool, "bytes " BYTERANGE_FMT,
+ range_start, range_end, clength));
+ }
+ else {
+ char *ts;
+
+ e = apr_bucket_pool_create(ctx->bound_head, strlen(ctx->bound_head),
+ r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+
+ ts = apr_psprintf(r->pool, BYTERANGE_FMT CRLF CRLF,
+ range_start, range_end, clength);
+ ap_xlate_proto_to_ascii(ts, strlen(ts));
+ e = apr_bucket_pool_create(ts, strlen(ts), r->pool,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ }
+
+ do {
+ apr_bucket *foo;
+ const char *str;
+ apr_size_t len;
+
+ if (apr_bucket_copy(ec, &foo) != APR_SUCCESS) {
+                /* this shouldn't ever happen, since the length scan at the
+                 * top of this filter already rejected brigades containing
+                 * indeterminate-length buckets. just to be sure,
+ * though, this takes care of uncopyable buckets that
+ * do somehow manage to slip through.
+ */
+ /* XXX: check for failure? */
+ apr_bucket_read(ec, &str, &len, APR_BLOCK_READ);
+ apr_bucket_copy(ec, &foo);
+ }
+ APR_BRIGADE_INSERT_TAIL(bsend, foo);
+ ec = APR_BUCKET_NEXT(ec);
+ } while (ec != e2);
+ }
+
+ if (found == 0) {
+ ap_remove_output_filter(f);
+ r->status = HTTP_OK;
+ /* bsend is assumed to be empty if we get here. */
+ e = ap_bucket_error_create(HTTP_RANGE_NOT_SATISFIABLE, NULL,
+ r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ return ap_pass_brigade(f->next, bsend);
+ }
+
+ if (ctx->num_ranges > 1) {
+ char *end;
+
+ /* add the final boundary */
+ end = apr_pstrcat(r->pool, CRLF "--", ctx->boundary, "--" CRLF, NULL);
+ ap_xlate_proto_to_ascii(end, strlen(end));
+ e = apr_bucket_pool_create(end, strlen(end), r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ }
+
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+
+ /* we're done with the original content - all of our data is in bsend. */
+ apr_brigade_destroy(bb);
+
+ /* send our multipart output */
+ return ap_pass_brigade(f->next, bsend);
+}
+
+static int ap_set_byterange(request_rec *r)
+{
+ const char *range;
+ const char *if_range;
+ const char *match;
+ const char *ct;
+ int num_ranges;
+
+ if (r->assbackwards) {
+ return 0;
+ }
+
+ /* Check for Range request-header (HTTP/1.1) or Request-Range for
+ * backwards-compatibility with second-draft Luotonen/Franks
+ * byte-ranges (e.g. Netscape Navigator 2-3).
+ *
+ * We support this form, with Request-Range, and (farther down) we
+ * send multipart/x-byteranges instead of multipart/byteranges for
+ * Request-Range based requests to work around a bug in Netscape
+ * Navigator 2-3 and MSIE 3.
+ */
+
+ if (!(range = apr_table_get(r->headers_in, "Range"))) {
+ range = apr_table_get(r->headers_in, "Request-Range");
+ }
+
+ if (!range || strncasecmp(range, "bytes=", 6) || r->status != HTTP_OK) {
+ return 0;
+ }
+
+ /* is content already a single range? */
+ if (apr_table_get(r->headers_out, "Content-Range")) {
+ return 0;
+ }
+
+ /* is content already a multiple range? */
+ if ((ct = apr_table_get(r->headers_out, "Content-Type"))
+ && (!strncasecmp(ct, "multipart/byteranges", 20)
+ || !strncasecmp(ct, "multipart/x-byteranges", 22))) {
+ return 0;
+ }
+
+ /* Check the If-Range header for Etag or Date.
+ * Note that this check will return false (as required) if either
+     * of the two etags is weak.
+ */
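+    /* Illustrative values: If-Range: "686897696a7c876b7e" must match the
+     * ETag response header exactly, and If-Range: Sat, 29 Oct 1994 19:43:31 GMT
+     * must match Last-Modified exactly, or the Range request is ignored.
+     */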
+ if ((if_range = apr_table_get(r->headers_in, "If-Range"))) {
+ if (if_range[0] == '"') {
+ if (!(match = apr_table_get(r->headers_out, "Etag"))
+ || (strcmp(if_range, match) != 0)) {
+ return 0;
+ }
+ }
+ else if (!(match = apr_table_get(r->headers_out, "Last-Modified"))
+ || (strcmp(if_range, match) != 0)) {
+ return 0;
+ }
+ }
+
+ if (!ap_strchr_c(range, ',')) {
+ /* a single range */
+ num_ranges = 1;
+ }
+ else {
+ /* a multiple range */
+ num_ranges = 2;
+ }
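+    /* Note that 2 only means "more than one" here; the byterange filter
+     * just needs to know whether multipart framing is required, not the
+     * exact number of ranges.
+     */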
+
+ r->status = HTTP_PARTIAL_CONTENT;
+ r->range = range + 6;
+
+ return num_ranges;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/http/http_protocol.lo b/rubbos/app/httpd-2.0.64/modules/http/http_protocol.lo
new file mode 100644
index 00000000..dc7313d2
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/http_protocol.lo
@@ -0,0 +1,12 @@
+# http_protocol.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/http_protocol.o'
+
+# Name of the non-PIC object.
+non_pic_object='http_protocol.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/http/http_protocol.o b/rubbos/app/httpd-2.0.64/modules/http/http_protocol.o
new file mode 100644
index 00000000..6c2d6acf
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/http_protocol.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/http/http_request.c b/rubbos/app/httpd-2.0.64/modules/http/http_request.c
new file mode 100644
index 00000000..c80816d2
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/http_request.c
@@ -0,0 +1,548 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_request.c: functions to get and process requests
+ *
+ * Rob McCool 3/21/93
+ *
+ * Thoroughly revamped by rst for Apache. NB this file reads
+ * best from the bottom up.
+ *
+ */
+
+#include "apr_strings.h"
+#include "apr_file_io.h"
+#include "apr_fnmatch.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#define CORE_PRIVATE
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "util_filter.h"
+#include "util_charset.h"
+
+#include "mod_core.h"
+#include "scoreboard.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+
+/*****************************************************************
+ *
+ * Mainline request processing...
+ */
+
+/* XXX A cleaner and faster way to do this might be to pass the request_rec
+ * down the filter chain as a parameter. It would need to change for
+ * subrequest vs. main request filters; perhaps the subrequest filter could
+ * make the switch.
+ */
+static void update_r_in_filters(ap_filter_t *f,
+ request_rec *from,
+ request_rec *to)
+{
+ while (f) {
+ if (f->r == from) {
+ f->r = to;
+ }
+ f = f->next;
+ }
+}
+
+AP_DECLARE(void) ap_die(int type, request_rec *r)
+{
+ int error_index = ap_index_of_response(type);
+ char *custom_response = ap_response_code_string(r, error_index);
+ int recursive_error = 0;
+ request_rec *r_1st_err = r;
+
+ if (type == AP_FILTER_ERROR) {
+ return;
+ }
+
+ if (type == DONE) {
+ ap_finalize_request_protocol(r);
+ return;
+ }
+
+ /*
+     * The following takes care of Apache redirects to custom response URLs.
+ * Note that if we are already dealing with the response to some other
+ * error condition, we just report on the original error, and give up on
+ * any attempt to handle the other thing "intelligently"...
+ */
+ if (r->status != HTTP_OK) {
+ recursive_error = type;
+
+ while (r_1st_err->prev && (r_1st_err->prev->status != HTTP_OK))
+ r_1st_err = r_1st_err->prev; /* Get back to original error */
+
+ if (r_1st_err != r) {
+ /* The recursive error was caused by an ErrorDocument specifying
+ * an internal redirect to a bad URI. ap_internal_redirect has
+ * changed the filter chains to point to the ErrorDocument's
+ * request_rec. Back out those changes so we can safely use the
+ * original failing request_rec to send the canned error message.
+ *
+ * ap_send_error_response gets rid of existing resource filters
+ * on the output side, so we can skip those.
+ */
+ update_r_in_filters(r_1st_err->proto_output_filters, r, r_1st_err);
+ update_r_in_filters(r_1st_err->input_filters, r, r_1st_err);
+ }
+
+ custom_response = NULL; /* Do NOT retry the custom thing! */
+ }
+
+ r->status = type;
+
+ /*
+ * This test is done here so that none of the auth modules needs to know
+ * about proxy authentication. They treat it like normal auth, and then
+ * we tweak the status.
+ */
+ if (HTTP_UNAUTHORIZED == r->status && PROXYREQ_PROXY == r->proxyreq) {
+ r->status = HTTP_PROXY_AUTHENTICATION_REQUIRED;
+ }
+
+ /* If we don't want to keep the connection, make sure we mark that the
+ * connection is not eligible for keepalive. If we want to keep the
+ * connection, be sure that the request body (if any) has been read.
+ */
+ if (ap_status_drops_connection(r->status)) {
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+
+ /*
+ * Two types of custom redirects --- plain text, and URLs. Plain text has
+ * a leading '"', so the URL code, here, is triggered on its absence
+ */
+
+ if (custom_response && custom_response[0] != '"') {
+
+ if (ap_is_url(custom_response)) {
+ /*
+             * The URL isn't local, so let's drop through the rest of this
+             * Apache code, and continue with the usual REDIRECT handler.
+ * But note that the client will ultimately see the wrong
+ * status...
+ */
+ r->status = HTTP_MOVED_TEMPORARILY;
+ apr_table_setn(r->headers_out, "Location", custom_response);
+ }
+ else if (custom_response[0] == '/') {
+ const char *error_notes;
+ r->no_local_copy = 1; /* Do NOT send HTTP_NOT_MODIFIED for
+ * error documents! */
+ /*
+ * This redirect needs to be a GET no matter what the original
+ * method was.
+ */
+ apr_table_setn(r->subprocess_env, "REQUEST_METHOD", r->method);
+
+ /*
+ * Provide a special method for modules to communicate
+ * more informative (than the plain canned) messages to us.
+ * Propagate them to ErrorDocuments via the ERROR_NOTES variable:
+ */
+ if ((error_notes = apr_table_get(r->notes,
+ "error-notes")) != NULL) {
+ apr_table_setn(r->subprocess_env, "ERROR_NOTES", error_notes);
+ }
+ r->method = apr_pstrdup(r->pool, "GET");
+ r->method_number = M_GET;
+ ap_internal_redirect(custom_response, r);
+ return;
+ }
+ else {
+ /*
+ * Dumb user has given us a bad url to redirect to --- fake up
+ * dying with a recursive server error...
+ */
+ recursive_error = HTTP_INTERNAL_SERVER_ERROR;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid error redirection directive: %s",
+ custom_response);
+ }
+ }
+ ap_send_error_response(r_1st_err, recursive_error);
+}
+
+static void check_pipeline_flush(request_rec *r)
+{
+ conn_rec *c = r->connection;
+    /* ### it would be nice if we could PEEK without a brigade. that would
+ ### allow us to defer creation of the brigade to when we actually
+ ### need to send a FLUSH. */
+ apr_bucket_brigade *bb = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ /* Flush the filter contents if:
+ *
+ * 1) the connection will be closed
+ * 2) there isn't a request ready to be read
+ */
+ /* ### shouldn't this read from the connection input filters? */
+ /* ### is zero correct? that means "read one line" */
+ if (r->connection->keepalive == AP_CONN_CLOSE ||
+ ap_get_brigade(r->input_filters, bb, AP_MODE_EATCRLF,
+ APR_NONBLOCK_READ, 0) != APR_SUCCESS) {
+ apr_bucket *e = apr_bucket_flush_create(c->bucket_alloc);
+
+ /* We just send directly to the connection based filters. At
+ * this point, we know that we have seen all of the data
+ * (request finalization sent an EOS bucket, which empties all
+ * of the request filters). We just want to flush the buckets
+ * if something hasn't been sent to the network yet.
+ */
+ APR_BRIGADE_INSERT_HEAD(bb, e);
+ ap_pass_brigade(r->connection->output_filters, bb);
+ }
+}
+
+void ap_process_request(request_rec *r)
+{
+ int access_status;
+
+ /* Give quick handlers a shot at serving the request on the fast
+ * path, bypassing all of the other Apache hooks.
+ *
+ * This hook was added to enable serving files out of a URI keyed
+ * content cache ( e.g., Mike Abbott's Quick Shortcut Cache,
+ * described here: http://oss.sgi.com/projects/apache/mod_qsc.html )
+ *
+ * It may have other uses as well, such as routing requests directly to
+ * content handlers that have the ability to grok HTTP and do their
+ * own access checking, etc (e.g. servlet engines).
+ *
+ * Use this hook with extreme care and only if you know what you are
+ * doing.
+ */
+ if (ap_extended_status)
+ ap_time_process_request(r->connection->sbh, START_PREQUEST);
+ access_status = ap_run_quick_handler(r, 0); /* Not a look-up request */
+ if (access_status == DECLINED) {
+ access_status = ap_process_request_internal(r);
+ if (access_status == OK) {
+ access_status = ap_invoke_handler(r);
+ }
+ }
+
+ if (access_status == DONE) {
+ /* e.g., something not in storage like TRACE */
+ access_status = OK;
+ }
+
+ if (access_status == OK) {
+ ap_finalize_request_protocol(r);
+ }
+ else {
+ r->status = HTTP_OK;
+ ap_die(access_status, r);
+ }
+
+ /*
+ * We want to flush the last packet if this isn't a pipelining connection
+ * *before* we start into logging. Suppose that the logging causes a DNS
+ * lookup to occur, which may have a high latency. If we hold off on
+ * this packet, then it'll appear like the link is stalled when really
+ * it's the application that's stalled.
+ */
+ check_pipeline_flush(r);
+ ap_update_child_status(r->connection->sbh, SERVER_BUSY_LOG, r);
+ ap_run_log_transaction(r);
+ if (ap_extended_status)
+ ap_time_process_request(r->connection->sbh, STOP_PREQUEST);
+}
+
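+/* Copy the subprocess environment into a fresh table, prefixing every key
+ * with "REDIRECT_" so that scripts run for the internally redirected request
+ * (e.g. an ErrorDocument CGI) can still see the original request's variables;
+ * for example QUERY_STRING becomes REDIRECT_QUERY_STRING (illustrative).
+ */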
+static apr_table_t *rename_original_env(apr_pool_t *p, apr_table_t *t)
+{
+ const apr_array_header_t *env_arr = apr_table_elts(t);
+ const apr_table_entry_t *elts = (const apr_table_entry_t *) env_arr->elts;
+ apr_table_t *new = apr_table_make(p, env_arr->nalloc);
+ int i;
+
+ for (i = 0; i < env_arr->nelts; ++i) {
+ if (!elts[i].key)
+ continue;
+ apr_table_setn(new, apr_pstrcat(p, "REDIRECT_", elts[i].key, NULL),
+ elts[i].val);
+ }
+
+ return new;
+}
+
+static request_rec *internal_internal_redirect(const char *new_uri,
+ request_rec *r) {
+ int access_status;
+ request_rec *new;
+
+ if (ap_is_recursion_limit_exceeded(r)) {
+ ap_die(HTTP_INTERNAL_SERVER_ERROR, r);
+ return NULL;
+ }
+
+ new = (request_rec *) apr_pcalloc(r->pool, sizeof(request_rec));
+
+ new->connection = r->connection;
+ new->server = r->server;
+ new->pool = r->pool;
+
+ /*
+ * A whole lot of this really ought to be shared with http_protocol.c...
+ * another missing cleanup. It's particularly inappropriate to be
+ * setting header_only, etc., here.
+ */
+
+ new->method = r->method;
+ new->method_number = r->method_number;
+ new->allowed_methods = ap_make_method_list(new->pool, 2);
+ ap_parse_uri(new, new_uri);
+
+ new->request_config = ap_create_request_config(r->pool);
+
+ new->per_dir_config = r->server->lookup_defaults;
+
+ new->prev = r;
+ r->next = new;
+
+ /* Must have prev and next pointers set before calling create_request
+ * hook.
+ */
+ ap_run_create_request(new);
+
+ /* Inherit the rest of the protocol info... */
+
+ new->the_request = r->the_request;
+
+ new->allowed = r->allowed;
+
+ new->status = r->status;
+ new->assbackwards = r->assbackwards;
+ new->header_only = r->header_only;
+ new->protocol = r->protocol;
+ new->proto_num = r->proto_num;
+ new->hostname = r->hostname;
+ new->request_time = r->request_time;
+ new->main = r->main;
+
+ new->headers_in = r->headers_in;
+ new->headers_out = apr_table_make(r->pool, 12);
+ new->err_headers_out = r->err_headers_out;
+ new->subprocess_env = rename_original_env(r->pool, r->subprocess_env);
+ new->notes = apr_table_make(r->pool, 5);
+ new->allowed_methods = ap_make_method_list(new->pool, 2);
+
+ new->htaccess = r->htaccess;
+ new->no_cache = r->no_cache;
+ new->expecting_100 = r->expecting_100;
+ new->no_local_copy = r->no_local_copy;
+ new->read_length = r->read_length; /* We can only read it once */
+ new->vlist_validator = r->vlist_validator;
+
+ new->proto_output_filters = r->proto_output_filters;
+ new->proto_input_filters = r->proto_input_filters;
+
+ new->output_filters = new->proto_output_filters;
+ new->input_filters = new->proto_input_filters;
+
+ if (new->main) {
+ /* Add back the subrequest filter, which we lost when
+ * we set output_filters to include only the protocol
+ * output filters from the original request.
+ */
+ ap_add_output_filter_handle(ap_subreq_core_filter_handle,
+ NULL, new, new->connection);
+ }
+
+ update_r_in_filters(new->input_filters, r, new);
+ update_r_in_filters(new->output_filters, r, new);
+
+ apr_table_setn(new->subprocess_env, "REDIRECT_STATUS",
+ apr_itoa(r->pool, r->status));
+
+ /*
+ * XXX: hmm. This is because mod_setenvif and mod_unique_id really need
+ * to do their thing on internal redirects as well. Perhaps this is a
+ * misnamed function.
+ */
+ if ((access_status = ap_run_post_read_request(new))) {
+ ap_die(access_status, new);
+ return NULL;
+ }
+
+ return new;
+}
+
+/* XXX: Is this function so bogus and fragile that we should deep-6 it? */
+AP_DECLARE(void) ap_internal_fast_redirect(request_rec *rr, request_rec *r)
+{
+ /* We need to tell POOL_DEBUG that we're guaranteeing that rr->pool
+ * will exist as long as r->pool. Otherwise we run into troubles because
+ * some values in this request will be allocated in r->pool, and others in
+ * rr->pool.
+ */
+ apr_pool_join(r->pool, rr->pool);
+ r->proxyreq = rr->proxyreq;
+ r->no_cache = (r->no_cache && rr->no_cache);
+ r->no_local_copy = (r->no_local_copy && rr->no_local_copy);
+ r->mtime = rr->mtime;
+ r->uri = rr->uri;
+ r->filename = rr->filename;
+ r->canonical_filename = rr->canonical_filename;
+ r->path_info = rr->path_info;
+ r->args = rr->args;
+ r->finfo = rr->finfo;
+ r->handler = rr->handler;
+ ap_set_content_type(r, rr->content_type);
+ r->content_encoding = rr->content_encoding;
+ r->content_languages = rr->content_languages;
+ r->per_dir_config = rr->per_dir_config;
+ /* copy output headers from subrequest, but leave negotiation headers */
+ r->notes = apr_table_overlay(r->pool, rr->notes, r->notes);
+ r->headers_out = apr_table_overlay(r->pool, rr->headers_out,
+ r->headers_out);
+ r->err_headers_out = apr_table_overlay(r->pool, rr->err_headers_out,
+ r->err_headers_out);
+ r->subprocess_env = apr_table_overlay(r->pool, rr->subprocess_env,
+ r->subprocess_env);
+
+ r->output_filters = rr->output_filters;
+ r->input_filters = rr->input_filters;
+
+ if (r->main) {
+ ap_add_output_filter_handle(ap_subreq_core_filter_handle,
+ NULL, r, r->connection);
+ }
+ else if (r->output_filters->frec == ap_subreq_core_filter_handle) {
+ ap_remove_output_filter(r->output_filters);
+ r->output_filters = r->output_filters->next;
+ }
+
+ /* If any filters pointed at the now-defunct rr, we must point them
+ * at our "new" instance of r. In particular, some of rr's structures
+ * will now be bogus (say rr->headers_out). If a filter tried to modify
+ * their f->r structure when it is pointing to rr, the real request_rec
+ * will not get updated. Fix that here.
+ */
+ update_r_in_filters(r->input_filters, rr, r);
+ update_r_in_filters(r->output_filters, rr, r);
+}
+
+AP_DECLARE(void) ap_internal_redirect(const char *new_uri, request_rec *r)
+{
+ request_rec *new = internal_internal_redirect(new_uri, r);
+ int access_status;
+
+    /* ap_die was already called if an error occurred */
+ if (!new) {
+ return;
+ }
+
+ access_status = ap_process_request_internal(new);
+ if (access_status == OK) {
+ if ((access_status = ap_invoke_handler(new)) != 0) {
+ ap_die(access_status, new);
+ return;
+ }
+ ap_finalize_request_protocol(new);
+ }
+ else {
+ ap_die(access_status, new);
+ }
+}
+
+/* This function is designed for things like actions or CGI scripts, when
+ * using AddHandler, and you want to preserve the content type across
+ * an internal redirect.
+ */
+AP_DECLARE(void) ap_internal_redirect_handler(const char *new_uri, request_rec *r)
+{
+ int access_status;
+ request_rec *new = internal_internal_redirect(new_uri, r);
+
+    /* ap_die was already called if an error occurred */
+ if (!new) {
+ return;
+ }
+
+ if (r->handler)
+ ap_set_content_type(new, r->content_type);
+ access_status = ap_process_request_internal(new);
+ if (access_status == OK) {
+ if ((access_status = ap_invoke_handler(new)) != 0) {
+ ap_die(access_status, new);
+ return;
+ }
+ ap_finalize_request_protocol(new);
+ }
+ else {
+ ap_die(access_status, new);
+ }
+}
+
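+/* Typical (illustrative) usage: replace the current list with a fixed set of
+ * method names, terminating the variadic list with NULL:
+ *
+ *     ap_allow_methods(r, 1, "GET", "HEAD", "OPTIONS", NULL);
+ */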
+AP_DECLARE(void) ap_allow_methods(request_rec *r, int reset, ...)
+{
+ const char *method;
+ va_list methods;
+
+ /*
+ * Get rid of any current settings if requested; not just the
+ * well-known methods but any extensions as well.
+ */
+ if (reset) {
+ ap_clear_method_list(r->allowed_methods);
+ }
+
+ va_start(methods, reset);
+ while ((method = va_arg(methods, const char *)) != NULL) {
+ ap_method_list_add(r->allowed_methods, method);
+ }
+ va_end(methods);
+}
+
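+/* Illustrative usage, terminating the list with -1 and using the standard
+ * M_* method numbers from httpd.h:
+ *
+ *     ap_allow_standard_methods(r, 1, M_GET, M_POST, -1);
+ */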
+AP_DECLARE(void) ap_allow_standard_methods(request_rec *r, int reset, ...)
+{
+ int method;
+ va_list methods;
+ apr_int64_t mask;
+
+ /*
+ * Get rid of any current settings if requested; not just the
+ * well-known methods but any extensions as well.
+ */
+ if (reset) {
+ ap_clear_method_list(r->allowed_methods);
+ }
+
+ mask = 0;
+ va_start(methods, reset);
+ while ((method = va_arg(methods, int)) != -1) {
+ mask |= (AP_METHOD_BIT << method);
+ }
+ va_end(methods);
+
+ r->allowed_methods->method_mask |= mask;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/http/http_request.lo b/rubbos/app/httpd-2.0.64/modules/http/http_request.lo
new file mode 100644
index 00000000..678ea930
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/http_request.lo
@@ -0,0 +1,12 @@
+# http_request.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/http_request.o'
+
+# Name of the non-PIC object.
+non_pic_object='http_request.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/http/http_request.o b/rubbos/app/httpd-2.0.64/modules/http/http_request.o
new file mode 100644
index 00000000..c1a20105
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/http_request.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/http/mod_core.h b/rubbos/app/httpd-2.0.64/modules/http/mod_core.h
new file mode 100644
index 00000000..093f38d1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/mod_core.h
@@ -0,0 +1,80 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MOD_CORE_H
+#define MOD_CORE_H
+
+#include "apr.h"
+#include "apr_buckets.h"
+
+#include "httpd.h"
+#include "util_filter.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @package mod_core private header file
+ */
+
+/* Handles for core filters */
+extern AP_DECLARE_DATA ap_filter_rec_t *ap_http_input_filter_handle;
+extern AP_DECLARE_DATA ap_filter_rec_t *ap_http_header_filter_handle;
+extern AP_DECLARE_DATA ap_filter_rec_t *ap_chunk_filter_handle;
+extern AP_DECLARE_DATA ap_filter_rec_t *ap_byterange_filter_handle;
+
+/*
+ * These (input) filters are internal to the mod_core operation.
+ */
+apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
+ ap_input_mode_t mode, apr_read_type_e block,
+ apr_off_t readbytes);
+
+char *ap_response_code_string(request_rec *r, int error_index);
+
+/**
+ * Send the minimal part of an HTTP response header.
+ * @param r The current request
+ * @param bb The brigade to add the header to.
+ * @warning Modules should be very careful about using this, and should
+ *          prefer the default behavior. Much of the HTTP/1.1 implementation
+ * correctness depends on the full headers.
+ * @deffunc void ap_basic_http_header(request_rec *r, apr_bucket_brigade *bb)
+ */
+AP_DECLARE(void) ap_basic_http_header(request_rec *r, apr_bucket_brigade *bb);
+
+/**
+ * Send an appropriate response to an http TRACE request.
+ * @param r The current request
+ * @tip returns DONE or the HTTP status error if it handles the TRACE,
+ *      or DECLINED if the request method was not TRACE.
+ */
+AP_DECLARE_NONSTD(int) ap_send_http_trace(request_rec *r);
+
+/**
+ * Send an appropriate response to an http OPTIONS request.
+ * @param r The current request
+ */
+AP_DECLARE(int) ap_send_http_options(request_rec *r);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !MOD_CORE_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/http/mod_http.la b/rubbos/app/httpd-2.0.64/modules/http/mod_http.la
new file mode 100644
index 00000000..4f24a965
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/mod_http.la
@@ -0,0 +1,35 @@
+# mod_http.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_http.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_http.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/http/mod_mime.c b/rubbos/app/httpd-2.0.64/modules/http/mod_mime.c
new file mode 100644
index 00000000..214cd8bf
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/mod_mime.c
@@ -0,0 +1,987 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_mime.c: Sends/gets MIME headers for requests
+ *
+ * Rob McCool
+ *
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+#include "apr_hash.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "http_request.h"
+#include "http_protocol.h"
+
+/* XXXX - fix me / EBCDIC
+ * there was a kludge here which would use its
+ * own version of apr_isascii(), indicating that
+ * on some platforms that might be needed.
+ *
+ * #define OS_ASC(c) (c) -- for mere mortals
+ * or
+ * #define OS_ASC(c) (ebcdic2ascii[c]) -- for dino's
+ *
+ * #define apr_isascii(c) ((OS_ASC(c) & 0x80) == 0)
+ */
+
+/* XXXXX - fix me - See note with NOT_PROXY
+ */
+
+typedef struct attrib_info {
+ char *name;
+ int offset;
+} attrib_info;
+
+/* Information to which an extension can be mapped
+ */
+typedef struct extension_info {
+    char *forced_type;            /* Added with AddType... */
+ char *encoding_type; /* Added with AddEncoding... */
+ char *language_type; /* Added with AddLanguage... */
+ char *handler; /* Added with AddHandler... */
+ char *charset_type; /* Added with AddCharset... */
+ char *input_filters; /* Added with AddInputFilter... */
+ char *output_filters; /* Added with AddOutputFilter... */
+} extension_info;
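+/* Illustrative example: "AddType text/html .htm" stores "text/html" in
+ * forced_type under the hash key "htm", and "AddLanguage fr .fr" fills
+ * language_type the same way (example directives, not defaults).
+ */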
+
+#define MULTIMATCH_UNSET 0
+#define MULTIMATCH_ANY 1
+#define MULTIMATCH_NEGOTIATED 2
+#define MULTIMATCH_HANDLERS 4
+#define MULTIMATCH_FILTERS 8
+
+typedef struct {
+ apr_hash_t *extension_mappings; /* Map from extension name to
+ * extension_info structure */
+
+ apr_array_header_t *remove_mappings; /* A simple list, walked once */
+
+ char *default_language; /* Language if no AddLanguage ext found */
+
+ int multimatch; /* Extensions to include in multiview matching
+ * for filenames, e.g. Filters and Handlers
+ */
+ int use_path_info; /* If set to 0, only use filename.
+ * If set to 1, append PATH_INFO to filename for
+ * lookups.
+ * If set to 2, this value is unset and is
+ * effectively 0.
+ */
+} mime_dir_config;
+
+typedef struct param_s {
+ char *attr;
+ char *val;
+ struct param_s *next;
+} param;
+
+typedef struct {
+ const char *type;
+ apr_size_t type_len;
+ const char *subtype;
+ apr_size_t subtype_len;
+ param *param;
+} content_type;
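+/* Illustrative example: analyze_ct() below parses
+ *     text/html; charset=ISO-8859-1
+ * into type "text", subtype "html" and a single param with attr "charset"
+ * and val "ISO-8859-1".
+ */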
+
+static char tspecial[] = {
+ '(', ')', '<', '>', '@', ',', ';', ':',
+ '\\', '"', '/', '[', ']', '?', '=',
+ '\0'
+};
+
+module AP_MODULE_DECLARE_DATA mime_module;
+
+static void *create_mime_dir_config(apr_pool_t *p, char *dummy)
+{
+ mime_dir_config *new = apr_palloc(p, sizeof(mime_dir_config));
+
+ new->extension_mappings = NULL;
+ new->remove_mappings = NULL;
+
+ new->default_language = NULL;
+
+ new->multimatch = MULTIMATCH_UNSET;
+
+ new->use_path_info = 2;
+
+ return new;
+}
+/*
+ * Overlay one hash table of extension_mappings onto another
+ */
+static void *overlay_extension_mappings(apr_pool_t *p,
+ const void *key,
+ apr_ssize_t klen,
+ const void *overlay_val,
+ const void *base_val,
+ const void *data)
+{
+ extension_info *new_info = apr_palloc(p, sizeof(extension_info));
+ const extension_info *overlay_info = (const extension_info *)overlay_val;
+ const extension_info *base_info = (const extension_info *)base_val;
+
+ memcpy(new_info, base_info, sizeof(extension_info));
+ if (overlay_info->forced_type) {
+ new_info->forced_type = overlay_info->forced_type;
+ }
+ if (overlay_info->encoding_type) {
+ new_info->encoding_type = overlay_info->encoding_type;
+ }
+ if (overlay_info->language_type) {
+ new_info->language_type = overlay_info->language_type;
+ }
+ if (overlay_info->handler) {
+ new_info->handler = overlay_info->handler;
+ }
+ if (overlay_info->charset_type) {
+ new_info->charset_type = overlay_info->charset_type;
+ }
+ if (overlay_info->input_filters) {
+ new_info->input_filters = overlay_info->input_filters;
+ }
+ if (overlay_info->output_filters) {
+ new_info->output_filters = overlay_info->output_filters;
+ }
+
+ return new_info;
+}
+
+/* Member is the offset within an extension_info of the pointer to reset
+ */
+static void remove_items(apr_pool_t *p, apr_array_header_t *remove,
+ apr_hash_t *mappings)
+{
+ attrib_info *suffix = (attrib_info *) remove->elts;
+ int i;
+ for (i = 0; i < remove->nelts; i++) {
+ extension_info *exinfo = apr_hash_get(mappings,
+ suffix[i].name,
+ APR_HASH_KEY_STRING);
+ if (exinfo && *(const char**)((char *)exinfo + suffix[i].offset)) {
+ extension_info *copyinfo = exinfo;
+ exinfo = (extension_info*)apr_palloc(p, sizeof(*exinfo));
+ apr_hash_set(mappings, suffix[i].name,
+ APR_HASH_KEY_STRING, exinfo);
+ memcpy(exinfo, copyinfo, sizeof(*exinfo));
+ *(const char**)((char *)exinfo + suffix[i].offset) = NULL;
+ }
+ }
+}
+
+static void *merge_mime_dir_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ mime_dir_config *base = (mime_dir_config *)basev;
+ mime_dir_config *add = (mime_dir_config *)addv;
+ mime_dir_config *new = apr_palloc(p, sizeof(mime_dir_config));
+
+ if (base->extension_mappings && add->extension_mappings) {
+ new->extension_mappings = apr_hash_merge(p, add->extension_mappings,
+ base->extension_mappings,
+ overlay_extension_mappings,
+ NULL);
+ }
+ else {
+ if (base->extension_mappings == NULL) {
+ new->extension_mappings = add->extension_mappings;
+ }
+ else {
+ new->extension_mappings = base->extension_mappings;
+ }
+        /* We may not be merging the tables, but if we might later change
+         * an exinfo member, then we are about to trounce it anyway.
+ * We must have a copy for safety.
+ */
+ if (new->extension_mappings && add->remove_mappings) {
+ new->extension_mappings =
+ apr_hash_copy(p, new->extension_mappings);
+ }
+ }
+
+ if (new->extension_mappings) {
+ if (add->remove_mappings)
+ remove_items(p, add->remove_mappings, new->extension_mappings);
+ }
+ new->remove_mappings = NULL;
+
+ new->default_language = add->default_language ?
+ add->default_language : base->default_language;
+
+ new->multimatch = (add->multimatch != MULTIMATCH_UNSET) ?
+ add->multimatch : base->multimatch;
+
+ if ((add->use_path_info & 2) == 0) {
+ new->use_path_info = add->use_path_info;
+ }
+ else {
+ new->use_path_info = base->use_path_info;
+ }
+
+ return new;
+}
+
+static const char *add_extension_info(cmd_parms *cmd, void *m_,
+ const char *value_, const char* ext)
+{
+ mime_dir_config *m=m_;
+ extension_info *exinfo;
+ int offset = (int) (long) cmd->info;
+ char *key = apr_pstrdup(cmd->temp_pool, ext);
+ char *value = apr_pstrdup(cmd->pool, value_);
+ ap_str_tolower(value);
+ ap_str_tolower(key);
+
+ if (*key == '.') {
+ ++key;
+ }
+ if (!m->extension_mappings) {
+ m->extension_mappings = apr_hash_make(cmd->pool);
+ exinfo = NULL;
+ }
+ else {
+ exinfo = (extension_info*)apr_hash_get(m->extension_mappings, key,
+ APR_HASH_KEY_STRING);
+ }
+ if (!exinfo) {
+ exinfo = apr_pcalloc(cmd->pool, sizeof(extension_info));
+ key = apr_pstrdup(cmd->pool, key);
+ apr_hash_set(m->extension_mappings, key, APR_HASH_KEY_STRING, exinfo);
+ }
+ *(const char**)((char *)exinfo + offset) = value;
+ return NULL;
+}
+
+/*
+ * Note handler names are un-added with each per_dir_config merge.
+ * This keeps the association from being inherited, but not
+ * from being re-added at a subordinate level.
+ */
+static const char *remove_extension_info(cmd_parms *cmd, void *m_,
+ const char *ext)
+{
+ mime_dir_config *m = (mime_dir_config *) m_;
+ attrib_info *suffix;
+ if (*ext == '.') {
+ ++ext;
+ }
+ if (!m->remove_mappings) {
+ m->remove_mappings = apr_array_make(cmd->pool, 4, sizeof(*suffix));
+ }
+ suffix = (attrib_info *)apr_array_push(m->remove_mappings);
+ suffix->name = apr_pstrdup(cmd->pool, ext);
+ ap_str_tolower(suffix->name);
+ suffix->offset = (int) (long) cmd->info;
+ return NULL;
+}
+
+/* The sole bit of server configuration that the MIME module has is
+ * the name of its config file, so...
+ */
+
+static const char *set_types_config(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ ap_set_module_config(cmd->server->module_config, &mime_module,
+ (void *)arg);
+ return NULL;
+}
+
+static const char *multiviews_match(cmd_parms *cmd, void *m_,
+ const char *include)
+{
+ mime_dir_config *m = (mime_dir_config *) m_;
+
+ if (strcasecmp(include, "Any") == 0) {
+ if (m->multimatch && (m->multimatch & ~MULTIMATCH_ANY)) {
+ return "Any is incompatible with NegotiatedOnly, "
+ "Filters and Handlers";
+ }
+ m->multimatch |= MULTIMATCH_ANY;
+ }
+ else if (strcasecmp(include, "NegotiatedOnly") == 0) {
+ if (m->multimatch && (m->multimatch & ~MULTIMATCH_NEGOTIATED)) {
+ return "Any is incompatible with NegotiatedOnly, "
+ "Filters and Handlers";
+ }
+ m->multimatch |= MULTIMATCH_NEGOTIATED;
+ }
+ else if (strcasecmp(include, "Filters") == 0) {
+ if (m->multimatch && (m->multimatch & (MULTIMATCH_NEGOTIATED
+ | MULTIMATCH_ANY))) {
+ return "Filters is incompatible with Any and NegotiatedOnly";
+ }
+ m->multimatch |= MULTIMATCH_FILTERS;
+ }
+ else if (strcasecmp(include, "Handlers") == 0) {
+ if (m->multimatch && (m->multimatch & (MULTIMATCH_NEGOTIATED
+ | MULTIMATCH_ANY))) {
+ return "Handlers is incompatible with Any and NegotiatedOnly";
+ }
+ m->multimatch |= MULTIMATCH_HANDLERS;
+ }
+ else {
+ return "Unrecognized option";
+ }
+
+ return NULL;
+}
+
+static const command_rec mime_cmds[] =
+{
+ AP_INIT_ITERATE2("AddCharset", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, charset_type), OR_FILEINFO,
+ "a charset (e.g., iso-2022-jp), followed by one or more "
+ "file extensions"),
+ AP_INIT_ITERATE2("AddEncoding", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, encoding_type), OR_FILEINFO,
+ "an encoding (e.g., gzip), followed by one or more file extensions"),
+ AP_INIT_ITERATE2("AddHandler", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, handler), OR_FILEINFO,
+ "a handler name followed by one or more file extensions"),
+ AP_INIT_ITERATE2("AddInputFilter", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, input_filters), OR_FILEINFO,
+ "input filter name (or ; delimited names) followed by one or "
+ "more file extensions"),
+ AP_INIT_ITERATE2("AddLanguage", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, language_type), OR_FILEINFO,
+ "a language (e.g., fr), followed by one or more file extensions"),
+ AP_INIT_ITERATE2("AddOutputFilter", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, output_filters), OR_FILEINFO,
+ "output filter name (or ; delimited names) followed by one or "
+ "more file extensions"),
+ AP_INIT_ITERATE2("AddType", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, forced_type), OR_FILEINFO,
+ "a mime type followed by one or more file extensions"),
+ AP_INIT_TAKE1("DefaultLanguage", ap_set_string_slot,
+ (void*)APR_OFFSETOF(mime_dir_config, default_language), OR_FILEINFO,
+ "language to use for documents with no other language file extension"),
+ AP_INIT_ITERATE("MultiviewsMatch", multiviews_match, NULL, OR_FILEINFO,
+ "NegotiatedOnly (default), Handlers and/or Filters, or Any"),
+ AP_INIT_ITERATE("RemoveCharset", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, charset_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveEncoding", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, encoding_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveHandler", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, handler), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveInputFilter", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, input_filters), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveLanguage", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, language_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveOutputFilter", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, output_filters), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveType", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, forced_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_TAKE1("TypesConfig", set_types_config, NULL, RSRC_CONF,
+ "the MIME types config file"),
+ AP_INIT_FLAG("ModMimeUsePathInfo", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mime_dir_config, use_path_info), ACCESS_CONF,
+ "Set to 'yes' to allow mod_mime to use path info for type checking"),
+ {NULL}
+};
+
+static apr_hash_t *mime_type_extensions;
+
+static int mime_post_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
+{
+ ap_configfile_t *f;
+ char l[MAX_STRING_LEN];
+ const char *types_confname = ap_get_module_config(s->module_config,
+ &mime_module);
+ apr_status_t status;
+
+ if (!types_confname) {
+ types_confname = AP_TYPES_CONFIG_FILE;
+ }
+
+ types_confname = ap_server_root_relative(p, types_confname);
+ if (!types_confname) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, APR_EBADPATH, s,
+ "Invalid mime types config path %s",
+ (const char *)ap_get_module_config(s->module_config,
+ &mime_module));
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ if ((status = ap_pcfg_openfile(&f, ptemp, types_confname))
+ != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, s,
+ "could not open mime types config file %s.",
+ types_confname);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ mime_type_extensions = apr_hash_make(p);
+
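+    /* Each non-comment line of the types file has the form
+     *     media/type  extension1 extension2 ...
+     * e.g. "text/html html htm" (illustrative); every listed extension is
+     * mapped back to its media type in mime_type_extensions.
+     */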
+ while (!(ap_cfg_getline(l, MAX_STRING_LEN, f))) {
+ const char *ll = l, *ct;
+
+ if (l[0] == '#') {
+ continue;
+ }
+ ct = ap_getword_conf(p, &ll);
+
+ while (ll[0]) {
+ char *ext = ap_getword_conf(p, &ll);
+ ap_str_tolower(ext);
+ apr_hash_set(mime_type_extensions, ext, APR_HASH_KEY_STRING, ct);
+ }
+ }
+ ap_cfg_closefile(f);
+ return OK;
+}
+
+static const char *zap_sp(const char *s)
+{
+ if (s == NULL) {
+ return (NULL);
+ }
+ if (*s == '\0') {
+ return (s);
+ }
+
+ /* skip prefixed white space */
+ for (; *s == ' ' || *s == '\t' || *s == '\n'; s++)
+ ;
+
+ return (s);
+}
+
+static char *zap_sp_and_dup(apr_pool_t *p, const char *start,
+ const char *end, apr_size_t *len)
+{
+ while ((start < end) && apr_isspace(*start)) {
+ start++;
+ }
+ while ((end > start) && apr_isspace(*(end - 1))) {
+ end--;
+ }
+ if (len) {
+ *len = end - start;
+ }
+ return apr_pstrmemdup(p, start, end - start);
+}
+
+static int is_token(char c)
+{
+ int res;
+
+ res = (apr_isascii(c) && apr_isgraph(c)
+ && (strchr(tspecial, c) == NULL)) ? 1 : -1;
+ return res;
+}
+
+static int is_qtext(char c)
+{
+ int res;
+
+ res = (apr_isascii(c) && (c != '"') && (c != '\\') && (c != '\n'))
+ ? 1 : -1;
+ return res;
+}
+
+static int is_quoted_pair(const char *s)
+{
+ int res = -1;
+ int c;
+
+ if (((s + 1) != NULL) && (*s == '\\')) {
+ c = (int) *(s + 1);
+ if (apr_isascii(c)) {
+ res = 1;
+ }
+ }
+ return (res);
+}
+
+static content_type *analyze_ct(request_rec *r, const char *s)
+{
+ const char *cp, *mp;
+ char *attribute, *value;
+ int quoted = 0;
+ server_rec * ss = r->server;
+ apr_pool_t * p = r->pool;
+
+ content_type *ctp;
+ param *pp, *npp;
+
+ /* initialize ctp */
+ ctp = (content_type *)apr_palloc(p, sizeof(content_type));
+ ctp->type = NULL;
+ ctp->subtype = NULL;
+ ctp->param = NULL;
+
+ mp = s;
+
+ /* getting a type */
+ cp = mp;
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+ if (!*cp) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss,
+ "mod_mime: analyze_ct: cannot get media type from '%s'",
+ (const char *) mp);
+ return (NULL);
+ }
+ ctp->type = cp;
+ do {
+ cp++;
+ } while (*cp && (*cp != '/') && !apr_isspace(*cp) && (*cp != ';'));
+ if (!*cp || (*cp == ';')) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss,
+ "Cannot get media type from '%s'",
+ (const char *) mp);
+ return (NULL);
+ }
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+ if (*cp != '/') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss,
+ "mod_mime: analyze_ct: cannot get media type from '%s'",
+ (const char *) mp);
+ return (NULL);
+ }
+ ctp->type_len = cp - ctp->type;
+
+ cp++; /* skip the '/' */
+
+ /* getting a subtype */
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+ if (!*cp) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss,
+ "Cannot get media subtype.");
+ return (NULL);
+ }
+ ctp->subtype = cp;
+ do {
+ cp++;
+ } while (*cp && !apr_isspace(*cp) && (*cp != ';'));
+ ctp->subtype_len = cp - ctp->subtype;
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+
+ if (*cp == '\0') {
+ return (ctp);
+ }
+
+ /* getting parameters */
+ cp++; /* skip the ';' */
+ cp = zap_sp(cp);
+ if (cp == NULL || *cp == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss,
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ mp = cp;
+ attribute = NULL;
+ value = NULL;
+
+ while (cp != NULL && *cp != '\0') {
+ if (attribute == NULL) {
+ if (is_token(*cp) > 0) {
+ cp++;
+ continue;
+ }
+ else if (*cp == ' ' || *cp == '\t' || *cp == '\n') {
+ cp++;
+ continue;
+ }
+ else if (*cp == '=') {
+ attribute = zap_sp_and_dup(p, mp, cp, NULL);
+ if (attribute == NULL || *attribute == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss,
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ cp++;
+ cp = zap_sp(cp);
+ if (cp == NULL || *cp == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss,
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ mp = cp;
+ continue;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss,
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ }
+ else {
+ if (mp == cp) {
+ if (*cp == '"') {
+ quoted = 1;
+ cp++;
+ }
+ else {
+ quoted = 0;
+ }
+ }
+ if (quoted > 0) {
+ while (quoted && *cp != '\0') {
+ if (is_qtext(*cp) > 0) {
+ cp++;
+ }
+ else if (is_quoted_pair(cp) > 0) {
+ cp += 2;
+ }
+ else if (*cp == '"') {
+ cp++;
+ while (*cp == ' ' || *cp == '\t' || *cp == '\n') {
+ cp++;
+ }
+ if (*cp != ';' && *cp != '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss,
+ "Cannot get media parameter.");
+ return(NULL);
+ }
+ quoted = 0;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss,
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ }
+ }
+ else {
+ while (1) {
+ if (is_token(*cp) > 0) {
+ cp++;
+ }
+ else if (*cp == '\0' || *cp == ';') {
+ break;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss,
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ }
+ }
+ value = zap_sp_and_dup(p, mp, cp, NULL);
+ if (value == NULL || *value == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss,
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+
+ pp = apr_palloc(p, sizeof(param));
+ pp->attr = attribute;
+ pp->val = value;
+ pp->next = NULL;
+
+ if (ctp->param == NULL) {
+ ctp->param = pp;
+ }
+ else {
+ npp = ctp->param;
+ while (npp->next) {
+ npp = npp->next;
+ }
+ npp->next = pp;
+ }
+ quoted = 0;
+ attribute = NULL;
+ value = NULL;
+ if (*cp == '\0') {
+ break;
+ }
+ cp++;
+ mp = cp;
+ }
+ }
+ return (ctp);
+}
+
+/*
+ * find_ct is the hook routine for determining content-type and other
+ * MIME-related metadata. It assumes that r->filename has already been
+ * set and stat has been called for r->finfo. It also assumes that the
+ * non-path base file name is not the empty string unless it is a dir.
+ */
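+/* Illustrative example: for a file named report.en.html (and assuming an
+ * AddLanguage mapping for "en"), the "en" extension contributes
+ * Content-Language and "html" supplies the Content-Type, in whichever order
+ * the extensions appear.
+ */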
+static int find_ct(request_rec *r)
+{
+ mime_dir_config *conf;
+ apr_array_header_t *exception_list;
+ char *ext;
+ const char *fn, *type, *charset = NULL, *resource_name;
+ int found_metadata = 0;
+
+ if (r->finfo.filetype == APR_DIR) {
+ ap_set_content_type(r, DIR_MAGIC_TYPE);
+ return OK;
+ }
+
+ if (!r->filename) {
+ return DECLINED;
+ }
+
+ conf = (mime_dir_config *)ap_get_module_config(r->per_dir_config,
+ &mime_module);
+ exception_list = apr_array_make(r->pool, 2, sizeof(char *));
+
+ /* If use_path_info is explicitly set to on (value & 1 == 1), append. */
+ if (conf->use_path_info & 1) {
+ resource_name = apr_pstrcat(r->pool, r->filename, r->path_info, NULL);
+ }
+ else {
+ resource_name = r->filename;
+ }
+
+ /* Always drop the path leading up to the file name.
+ */
+ if ((fn = ap_strrchr_c(resource_name, '/')) == NULL) {
+ fn = resource_name;
+ }
+ else {
+ ++fn;
+ }
+
+ /* The exception list keeps track of those filename components that
+ * are not associated with extensions indicating metadata.
+ * The base name is always the first exception (i.e., "txt.html" has
+ * a basename of "txt" even though it might look like an extension).
+ */
+ ext = ap_getword(r->pool, &fn, '.');
+ *((const char **)apr_array_push(exception_list)) = ext;
+
+ /* Parse filename extensions which can be in any order
+ */
+ while (*fn && (ext = ap_getword(r->pool, &fn, '.'))) {
+ const extension_info *exinfo = NULL;
+ int found;
+
+ if (*ext == '\0') { /* ignore empty extensions "bad..html" */
+ continue;
+ }
+
+ found = 0;
+
+ ap_str_tolower(ext);
+
+ if (conf->extension_mappings != NULL) {
+ exinfo = (extension_info*)apr_hash_get(conf->extension_mappings,
+ ext, APR_HASH_KEY_STRING);
+ }
+
+ if (exinfo == NULL || !exinfo->forced_type) {
+ if ((type = apr_hash_get(mime_type_extensions, ext,
+ APR_HASH_KEY_STRING)) != NULL) {
+ ap_set_content_type(r, (char*) type);
+ found = 1;
+ }
+ }
+
+ if (exinfo != NULL) {
+
+ if (exinfo->forced_type) {
+ ap_set_content_type(r, exinfo->forced_type);
+ found = 1;
+ }
+
+ if (exinfo->charset_type) {
+ charset = exinfo->charset_type;
+ found = 1;
+ }
+ if (exinfo->language_type) {
+ if (!r->content_languages) {
+ r->content_languages = apr_array_make(r->pool, 2,
+ sizeof(char *));
+ }
+ *((const char **)apr_array_push(r->content_languages))
+ = exinfo->language_type;
+ found = 1;
+ }
+ if (exinfo->encoding_type) {
+ if (!r->content_encoding) {
+ r->content_encoding = exinfo->encoding_type;
+ }
+ else {
+ /* XXX should eliminate duplicate entities */
+ r->content_encoding = apr_pstrcat(r->pool,
+ r->content_encoding,
+ ", ",
+ exinfo->encoding_type,
+ NULL);
+ }
+ found = 1;
+ }
+ /* The following extensions are not 'Found'. That is, they don't
+             * make any contribution to metadata negotiation, so they must have
+ * been explicitly requested by name.
+ */
+ if (exinfo->handler && r->proxyreq == PROXYREQ_NONE) {
+ r->handler = exinfo->handler;
+ if (conf->multimatch & MULTIMATCH_HANDLERS) {
+ found = 1;
+ }
+ }
+ /* XXX Two significant problems; 1, we don't check to see if we are
+ * setting redundant filters. 2, we insert these in the types config
+ * hook, which may be too early (dunno.)
+ */
+ if (exinfo->input_filters && r->proxyreq == PROXYREQ_NONE) {
+ const char *filter, *filters = exinfo->input_filters;
+ while (*filters
+ && (filter = ap_getword(r->pool, &filters, ';'))) {
+ ap_add_input_filter(filter, NULL, r, r->connection);
+ }
+ if (conf->multimatch & MULTIMATCH_FILTERS) {
+ found = 1;
+ }
+ }
+ if (exinfo->output_filters && r->proxyreq == PROXYREQ_NONE) {
+ const char *filter, *filters = exinfo->output_filters;
+ while (*filters
+ && (filter = ap_getword(r->pool, &filters, ';'))) {
+ ap_add_output_filter(filter, NULL, r, r->connection);
+ }
+ if (conf->multimatch & MULTIMATCH_FILTERS) {
+ found = 1;
+ }
+ }
+ }
+
+ if (found || (conf->multimatch & MULTIMATCH_ANY)) {
+ found_metadata = 1;
+ }
+ else {
+ *((const char **) apr_array_push(exception_list)) = ext;
+ }
+ }
+
+ /*
+ * Need to set a notes entry on r for unrecognized elements.
+ * Somebody better claim them! If we did absolutely nothing,
+ * skip the notes to alert mod_negotiation we are clueless.
+ */
+ if (found_metadata) {
+ apr_table_setn(r->notes, "ap-mime-exceptions-list",
+ (void *)exception_list);
+ }
+
+ if (r->content_type) {
+ content_type *ctp;
+ int override = 0;
+
+ if ((ctp = analyze_ct(r, r->content_type))) {
+ param *pp = ctp->param;
+ char *base_content_type = apr_palloc(r->pool, ctp->type_len +
+ ctp->subtype_len +
+ sizeof("/"));
+ char *tmp = base_content_type;
+ memcpy(tmp, ctp->type, ctp->type_len);
+ tmp += ctp->type_len;
+ *tmp++ = '/';
+ memcpy(tmp, ctp->subtype, ctp->subtype_len);
+ tmp += ctp->subtype_len;
+ *tmp = 0;
+ ap_set_content_type(r, base_content_type);
+ while (pp != NULL) {
+ if (charset && !strcmp(pp->attr, "charset")) {
+ if (!override) {
+ ap_set_content_type(r,
+ apr_pstrcat(r->pool,
+ r->content_type,
+ "; charset=",
+ charset,
+ NULL));
+ override = 1;
+ }
+ }
+ else {
+ ap_set_content_type(r,
+ apr_pstrcat(r->pool,
+ r->content_type,
+ "; ", pp->attr,
+ "=", pp->val,
+ NULL));
+ }
+ pp = pp->next;
+ }
+ if (charset && !override) {
+ ap_set_content_type(r, apr_pstrcat(r->pool, r->content_type,
+ "; charset=", charset,
+ NULL));
+ }
+ }
+ }
+
+ /* Set default language, if none was specified by the extensions
+ * and we have a DefaultLanguage setting in force
+ */
+
+ if (!r->content_languages && conf->default_language) {
+ const char **new;
+
+ if (!r->content_languages) {
+ r->content_languages = apr_array_make(r->pool, 2, sizeof(char *));
+ }
+ new = (const char **)apr_array_push(r->content_languages);
+ *new = conf->default_language;
+ }
+
+ if (!r->content_type) {
+ return DECLINED;
+ }
+
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(mime_post_config,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_type_checker(find_ct,NULL,NULL,APR_HOOK_MIDDLE);
+ /*
+ * this hook seems redundant ... is there any reason a type checker isn't
+ * allowed to do this already? I'd think that fixups in general would be
+ * the last opportunity to get the filters right.
+ * ap_hook_insert_filter(mime_insert_filters,NULL,NULL,APR_HOOK_MIDDLE);
+ */
+}
+
+module AP_MODULE_DECLARE_DATA mime_module = {
+ STANDARD20_MODULE_STUFF,
+ create_mime_dir_config, /* create per-directory config structure */
+ merge_mime_dir_configs, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ mime_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/http/mod_mime.dsp b/rubbos/app/httpd-2.0.64/modules/http/mod_mime.dsp
new file mode 100644
index 00000000..2d50e032
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/mod_mime.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_mime" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_mime - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mime.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mime.mak" CFG="mod_mime - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_mime - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_mime - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_mime_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_mime_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_mime - Win32 Release"
+# Name "mod_mime - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_mime.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_mime.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_mime.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_mime.so "mime_module for Apache" ../../include/ap_release.h > .\mod_mime.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_mime.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_mime.so "mime_module for Apache" ../../include/ap_release.h > .\mod_mime.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/http/mod_mime.exp b/rubbos/app/httpd-2.0.64/modules/http/mod_mime.exp
new file mode 100644
index 00000000..f2e38dbd
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/mod_mime.exp
@@ -0,0 +1 @@
+mime_module
diff --git a/rubbos/app/httpd-2.0.64/modules/http/mod_mime.la b/rubbos/app/httpd-2.0.64/modules/http/mod_mime.la
new file mode 100644
index 00000000..854bb02d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/mod_mime.la
@@ -0,0 +1,35 @@
+# mod_mime.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_mime.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_mime.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/http/mod_mime.lo b/rubbos/app/httpd-2.0.64/modules/http/mod_mime.lo
new file mode 100644
index 00000000..e64d8500
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/mod_mime.lo
@@ -0,0 +1,12 @@
+# mod_mime.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_mime.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_mime.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/http/mod_mime.o b/rubbos/app/httpd-2.0.64/modules/http/mod_mime.o
new file mode 100644
index 00000000..dae6c77b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/mod_mime.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/http/modules.mk b/rubbos/app/httpd-2.0.64/modules/http/modules.mk
new file mode 100644
index 00000000..a94da85b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/http/modules.mk
@@ -0,0 +1,7 @@
+mod_http.la: http_core.lo http_protocol.lo http_request.lo
+ $(MOD_LINK) http_core.lo http_protocol.lo http_request.lo $(MOD_HTTP_LDADD)
+mod_mime.la: mod_mime.lo
+ $(MOD_LINK) mod_mime.lo $(MOD_MIME_LDADD)
+DISTCLEAN_TARGETS = modules.mk
+static = mod_http.la mod_mime.la
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/.deps b/rubbos/app/httpd-2.0.64/modules/loggers/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/.indent.pro b/rubbos/app/httpd-2.0.64/modules/loggers/.indent.pro
new file mode 100644
index 00000000..a9fbe9f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/.libs/mod_log_config.a b/rubbos/app/httpd-2.0.64/modules/loggers/.libs/mod_log_config.a
new file mode 100644
index 00000000..a9e57678
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/.libs/mod_log_config.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/.libs/mod_log_config.la b/rubbos/app/httpd-2.0.64/modules/loggers/.libs/mod_log_config.la
new file mode 100644
index 00000000..e08ddd31
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/.libs/mod_log_config.la
@@ -0,0 +1,35 @@
+# mod_log_config.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_log_config.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_log_config.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/.libs/mod_log_config.o b/rubbos/app/httpd-2.0.64/modules/loggers/.libs/mod_log_config.o
new file mode 100644
index 00000000..55dc12bc
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/.libs/mod_log_config.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/Makefile b/rubbos/app/httpd-2.0.64/modules/loggers/Makefile
new file mode 100644
index 00000000..223684b2
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/loggers
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/loggers
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/loggers
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/Makefile.in b/rubbos/app/httpd-2.0.64/modules/loggers/Makefile.in
new file mode 100644
index 00000000..167b343d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/Makefile.in
@@ -0,0 +1,3 @@
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/NWGNUforensic b/rubbos/app/httpd-2.0.64/modules/loggers/NWGNUforensic
new file mode 100644
index 00000000..e4978550
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/NWGNUforensic
@@ -0,0 +1,257 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = forensic
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Forensic Logging Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Forensic Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/forensic.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_log_forensic.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ log_forensic_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/NWGNUmakefile b/rubbos/app/httpd-2.0.64/modules/loggers/NWGNUmakefile
new file mode 100644
index 00000000..9c42c52f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/NWGNUmakefile
@@ -0,0 +1,247 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME =
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION =
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME =
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE =
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/modlogio.nlm \
+ $(OBJDIR)/forensic.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/NWGNUmodlogio b/rubbos/app/httpd-2.0.64/modules/loggers/NWGNUmodlogio
new file mode 100644
index 00000000..58ed2a47
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/NWGNUmodlogio
@@ -0,0 +1,257 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = logio
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) IO Logging Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Logio Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/modlogio.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_logio.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ logio_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/config.m4 b/rubbos/app/httpd-2.0.64/modules/loggers/config.m4
new file mode 100644
index 00000000..8efc14c3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/config.m4
@@ -0,0 +1,18 @@
+dnl modules enabled in this directory by default
+
+dnl APACHE_MODULE(name, helptext[, objects[, structname[, default[, config]]]])
+
+APACHE_MODPATH_INIT(loggers)
+
+APACHE_MODULE(log_config, logging configuration, , , yes)
+
+APACHE_MODULE(log_forensic, forensic logging)
+
+if test "x$enable_log_forensic" != "xno"; then
+ # mod_log_forensic needs test_char.h
+ APR_ADDTO(INCLUDES, [-I\$(top_builddir)/server])
+fi
+
+APACHE_MODULE(logio, input and output logging, , , no)
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.c b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.c
new file mode 100644
index 00000000..2bfdf348
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.c
@@ -0,0 +1,1519 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Modified by djm@va.pubnix.com:
+ * If no TransferLog is given explicitly, decline to log.
+ *
+ * This module implements the TransferLog directive (same as the
+ * common log module), plus two additional directives, LogFormat and CustomLog.
+ *
+ *
+ * Syntax:
+ *
+ * TransferLog fn Logs transfers to fn in standard log format, unless
+ * a custom format is set with LogFormat
+ * LogFormat format Set the default log format for TransferLog files
+ * CustomLog fn format
+ * Log to file fn with format given by the format
+ * argument
+ *
+ * CookieLog fn For backwards compatibility with the old Cookie
+ * logging module - now deprecated.
+ *
+ * There can be any number of TransferLog and CustomLog
+ * commands. Each request will be logged to _ALL_ the
+ * named files, in the appropriate format.
+ *
+ * If no TransferLog or CustomLog directive appears in a VirtualHost,
+ * the request will be logged to the log file(s) defined outside
+ * the virtual host section. If a TransferLog or CustomLog directive
+ * appears in the VirtualHost section, the log files defined outside
+ * the VirtualHost will _not_ be used. This makes this module compatible
+ * with the CLF and config log modules, where the use of TransferLog
+ * inside the VirtualHost section overrides its use outside.
+ *
+ * Examples:
+ *
+ * TransferLog logs/access_log
+ * <VirtualHost>
+ * LogFormat "... custom format ..."
+ * TransferLog log/virtual_only
+ * CustomLog log/virtual_useragents "%t %{user-agent}i"
+ * </VirtualHost>
+ *
+ * This will log using CLF to access_log any requests handled by the
+ * main server, while any requests to the virtual host will be logged
+ * with the "... custom format..." to virtual_only _AND_ using
+ * the custom user-agent log to virtual_useragents.
+ *
+ * Note that the NCSA referer and user-agent logs are easily added with
+ * CustomLog:
+ * CustomLog logs/referer "%{referer}i -> %U"
+ * CustomLog logs/agent "%{user-agent}i"
+ *
+ * RefererIgnore functionality can be obtained with conditional
+ * logging (SetEnvIf and CustomLog ... env=!VAR).
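+ * For example (an illustrative sketch added here, not part of the
+ * original comment; "local_referer" is a made-up variable name and
+ * mod_setenvif must be loaded):
+ * SetEnvIf Referer "^http://www\.example\.com/" local_referer
+ * CustomLog logs/referer "%{Referer}i -> %U" env=!local_referer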
+ *
+ * But using this method allows much easier modification of the
+ * log format, e.g. to log hosts along with UA:
+ * CustomLog logs/referer "%{referer}i %U %h"
+ *
+ * The argument to LogFormat and CustomLog is a string, which can include
+ * literal characters copied into the log files, and '%' directives as
+ * follows:
+ *
+ * %...B: bytes sent, excluding HTTP headers.
+ * %...b: bytes sent, excluding HTTP headers in CLF format, i.e. a '-'
+ * when no bytes were sent (rather than a '0').
+ * %...{FOOBAR}C: The contents of the HTTP cookie FOOBAR
+ * %...{FOOBAR}e: The contents of the environment variable FOOBAR
+ * %...f: filename
+ * %...h: remote host
+ * %...a: remote IP-address
+ * %...A: local IP-address
+ * %...{Foobar}i: The contents of Foobar: header line(s) in the request
+ * sent to the client.
+ * %...l: remote logname (from identd, if supplied)
+ * %...{Foobar}n: The contents of note "Foobar" from another module.
+ * %...{Foobar}o: The contents of Foobar: header line(s) in the reply.
+ * %...p: the port the request was served to
+ * %...P: the process ID of the child that serviced the request.
+ * %...{format}P: the process ID or thread ID of the child/thread that
+ * serviced the request
+ * %...r: first line of request
+ * %...s: status. For requests that got internally redirected, this
+ * is the status of the *original* request --- %...>s for the last.
+ * %...t: time, in common log format time format
+ * %...{format}t: The time, in the form given by format, which should
+ * be in strftime(3) format.
+ * %...T: the time taken to serve the request, in seconds.
+ * %...D: the time taken to serve the request, in microseconds.
+ * %...u: remote user (from auth; may be bogus if return status (%s) is 401)
+ * %...U: the URL path requested.
+ * %...v: the configured name of the server (i.e. which virtual host?)
+ * %...V: the server name according to the UseCanonicalName setting
+ * %...m: the request method
+ * %...H: the request protocol
+ * %...q: the query string prepended by "?", or empty if no query string
+ * %...X: Status of the connection.
+ * 'X' = connection aborted before the response completed.
+ * '+' = connection may be kept alive after the response is sent.
+ * '-' = connection will be closed after the response is sent.
+ * (This directive was %...c in late versions of Apache 1.3, but
+ * this conflicted with the historical ssl %...{var}c syntax.)
+ *
+ * The '...' can be nothing at all (e.g. "%h %u %r %s %b"), or it can
+ * indicate conditions for inclusion of the item (which will cause it
+ * to be replaced with '-' if the condition is not met). Note that
+ * there is no escaping performed on the strings from %r, %...i and
+ * %...o; some with long memories may remember that I thought this was
+ * a bad idea, once upon a time, and I'm still not comfortable with
+ * it, but it is difficult to see how to "do the right thing" with all
+ * of '%..i', unless we URL-escape everything and break with CLF.
+ *
+ * The forms of condition are a list of HTTP status codes, which may
+ * or may not be preceded by '!'. Thus, '%400,501{User-agent}i' logs
+ * User-agent: on 400 errors and 501 errors (Bad Request, Not
+ * Implemented) only; '%!200,304,302{Referer}i' logs Referer: on all
+ * requests which did *not* return some sort of normal status.
+ *
+ * The default LogFormat reproduces CLF; see below.
+ *
+ * The way this is supposed to work with virtual hosts is as follows:
+ * a virtual host can have its own LogFormat, or its own TransferLog.
+ * If it doesn't have its own LogFormat, it inherits from the main
+ * server. If it doesn't have its own TransferLog, it writes to the
+ * same descriptor (meaning the same process for "| ...").
+ *
+ * --- rst */
+
+#include "apr_strings.h"
+#include "apr_lib.h"
+#include "apr_hash.h"
+#include "apr_optional.h"
+#include "apr_anylock.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "mod_log_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h" /* For REMOTE_NAME */
+#include "http_log.h"
+#include "http_protocol.h"
+#include "util_time.h"
+#include "ap_mpm.h"
+
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef HAVE_LIMITS_H
+#include <limits.h>
+#endif
+
+#define DEFAULT_LOG_FORMAT "%h %l %u %t \"%r\" %>s %b"
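+/* For illustration only (added comment; the values are made up): the
+ * default CLF format above yields lines of the form
+ * 127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326
+ */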
+
+module AP_MODULE_DECLARE_DATA log_config_module;
+
+#ifndef APR_LARGEFILE
+#define APR_LARGEFILE 0
+#endif
+
+static int xfer_flags = (APR_WRITE | APR_APPEND | APR_CREATE | APR_LARGEFILE);
+static apr_fileperms_t xfer_perms = APR_OS_DEFAULT;
+static apr_hash_t *log_hash;
+static apr_status_t ap_default_log_writer(request_rec *r,
+ void *handle,
+ const char **strs,
+ int *strl,
+ int nelts,
+ apr_size_t len);
+static apr_status_t ap_buffered_log_writer(request_rec *r,
+ void *handle,
+ const char **strs,
+ int *strl,
+ int nelts,
+ apr_size_t len);
+static void *ap_default_log_writer_init(apr_pool_t *p, server_rec *s,
+ const char* name);
+static void *ap_buffered_log_writer_init(apr_pool_t *p, server_rec *s,
+ const char* name);
+
+static void ap_log_set_writer_init(ap_log_writer_init *handle);
+static void ap_log_set_writer(ap_log_writer *handle);
+static ap_log_writer *log_writer = ap_default_log_writer;
+static ap_log_writer_init *log_writer_init = ap_default_log_writer_init;
+static int buffered_logs = 0; /* default unbuffered */
+static apr_array_header_t *all_buffered_logs = NULL;
+
+/* POSIX.1 defines PIPE_BUF as the maximum number of bytes that is
+ * guaranteed to be atomic when writing a pipe. And PIPE_BUF >= 512
+ * is guaranteed. So we'll just guess 512 in the event the system
+ * doesn't have this. Now, for file writes there is actually no limit,
+ * the entire write is atomic. Whether all systems implement this
+ * correctly is another question entirely ... so we'll just use PIPE_BUF
+ * because it's probably a good guess as to what is implemented correctly
+ * everywhere.
+ */
+#ifdef PIPE_BUF
+#define LOG_BUFSIZE PIPE_BUF
+#else
+#define LOG_BUFSIZE (512)
+#endif
+
+/*
+ * multi_log_state is our per-(virtual)-server configuration. We store
+ * an array of the logs we are going to use, each of type config_log_state.
+ * If a default log format is given by LogFormat, store in default_format
+ * (backward compat. with mod_log_config). We also store for each virtual
+ * server a pointer to the logs specified for the main server, so that if this
+ * vhost has no logs defined, we can use the main server's logs instead.
+ *
+ * So, for the main server, config_logs contains a list of the log files
+ * and server_config_logs is empty. For a vhost, server_config_logs
+ * points to the same array as config_logs in the main server, and
+ * config_logs points to the array of logs defined inside this vhost,
+ * which might be empty.
+ */
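+/* Added note (not in the original source): multi_log_transaction() below
+ * logs to this vhost's own config_logs when any are defined and only
+ * falls back to server_config_logs (the main server's logs) otherwise;
+ * the two sets are never mixed for a single request.
+ */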
+
+typedef struct {
+ const char *default_format_string;
+ apr_array_header_t *default_format;
+ apr_array_header_t *config_logs;
+ apr_array_header_t *server_config_logs;
+ apr_table_t *formats;
+} multi_log_state;
+
+/*
+ * config_log_state holds the status of a single log file. fname might
+ * be NULL, which means this module does no logging for this
+ * request. format might be NULL, in which case the default_format
+ * from the multi_log_state should be used, or if that is NULL as
+ * well, use the CLF.
+ * log_writer is NULL before the log file is opened and is
+ * set to an opaque structure (usually a fd) after it is opened.
+ *
+ */
+typedef struct {
+ apr_file_t *handle;
+ apr_size_t outcnt;
+ char outbuf[LOG_BUFSIZE];
+ apr_anylock_t mutex;
+} buffered_log;
+
+typedef struct {
+ const char *fname;
+ const char *format_string;
+ apr_array_header_t *format;
+ void *log_writer;
+ char *condition_var;
+} config_log_state;
+
+/*
+ * Format items...
+ * Note that many of these could have ap_sprintfs replaced with static buffers.
+ */
+
+typedef struct {
+ ap_log_handler_fn_t *func;
+ char *arg;
+ int condition_sense;
+ int want_orig;
+ apr_array_header_t *conditions;
+} log_format_item;
+
+static char *format_integer(apr_pool_t *p, int i)
+{
+ return apr_itoa(p, i);
+}
+
+static char *pfmt(apr_pool_t *p, int i)
+{
+ if (i <= 0) {
+ return "-";
+ }
+ else {
+ return format_integer(p, i);
+ }
+}
+
+static const char *constant_item(request_rec *dummy, char *stuff)
+{
+ return stuff;
+}
+
+static const char *log_remote_host(request_rec *r, char *a)
+{
+ return ap_escape_logitem(r->pool, ap_get_remote_host(r->connection,
+ r->per_dir_config,
+ REMOTE_NAME, NULL));
+}
+
+static const char *log_remote_address(request_rec *r, char *a)
+{
+ return r->connection->remote_ip;
+}
+
+static const char *log_local_address(request_rec *r, char *a)
+{
+ return r->connection->local_ip;
+}
+
+static const char *log_remote_logname(request_rec *r, char *a)
+{
+ return ap_escape_logitem(r->pool, ap_get_remote_logname(r));
+}
+
+static const char *log_remote_user(request_rec *r, char *a)
+{
+ char *rvalue = r->user;
+
+ if (rvalue == NULL) {
+ rvalue = "-";
+ }
+ else if (strlen(rvalue) == 0) {
+ rvalue = "\"\"";
+ }
+ else {
+ rvalue = ap_escape_logitem(r->pool, rvalue);
+ }
+
+ return rvalue;
+}
+
+static const char *log_request_line(request_rec *r, char *a)
+{
+ /* NOTE: If the original request contained a password, we
+ * re-write the request line here to contain XXXXXX instead:
+ * (note the truncation before the protocol string for HTTP/0.9 requests)
+ * (note also that r->the_request contains the unmodified request)
+ */
+ return ap_escape_logitem(r->pool,
+ (r->parsed_uri.password)
+ ? apr_pstrcat(r->pool, r->method, " ",
+ apr_uri_unparse(r->pool,
+ &r->parsed_uri, 0),
+ r->assbackwards ? NULL : " ",
+ r->protocol, NULL)
+ : r->the_request);
+}
+
+static const char *log_request_file(request_rec *r, char *a)
+{
+ return ap_escape_logitem(r->pool, r->filename);
+}
+static const char *log_request_uri(request_rec *r, char *a)
+{
+ return ap_escape_logitem(r->pool, r->uri);
+}
+static const char *log_request_method(request_rec *r, char *a)
+{
+ return ap_escape_logitem(r->pool, r->method);
+}
+static const char *log_request_protocol(request_rec *r, char *a)
+{
+ return ap_escape_logitem(r->pool, r->protocol);
+}
+static const char *log_request_query(request_rec *r, char *a)
+{
+ return (r->args) ? apr_pstrcat(r->pool, "?",
+ ap_escape_logitem(r->pool, r->args), NULL)
+ : "";
+}
+static const char *log_status(request_rec *r, char *a)
+{
+ return pfmt(r->pool, r->status);
+}
+
+static const char *clf_log_bytes_sent(request_rec *r, char *a)
+{
+ if (!r->sent_bodyct || !r->bytes_sent) {
+ return "-";
+ }
+ else {
+ return apr_off_t_toa(r->pool, r->bytes_sent);
+ }
+}
+
+static const char *log_bytes_sent(request_rec *r, char *a)
+{
+ if (!r->sent_bodyct || !r->bytes_sent) {
+ return "0";
+ }
+ else {
+ return apr_psprintf(r->pool, "%" APR_OFF_T_FMT, r->bytes_sent);
+ }
+}
+
+
+static const char *log_header_in(request_rec *r, char *a)
+{
+ return ap_escape_logitem(r->pool, apr_table_get(r->headers_in, a));
+}
+
+static APR_INLINE char *find_multiple_headers(apr_pool_t *pool,
+ const apr_table_t *table,
+ const char *key)
+{
+ const apr_array_header_t *elts;
+ const apr_table_entry_t *t_elt;
+ const apr_table_entry_t *t_end;
+ apr_size_t len;
+ struct sle {
+ struct sle *next;
+ const char *value;
+ apr_size_t len;
+ } *result_list, *rp;
+
+ elts = apr_table_elts(table);
+
+ if (!elts->nelts) {
+ return NULL;
+ }
+
+ t_elt = (const apr_table_entry_t *)elts->elts;
+ t_end = t_elt + elts->nelts;
+ len = 1; /* \0 */
+ result_list = rp = NULL;
+
+ do {
+ if (!strcasecmp(t_elt->key, key)) {
+ if (!result_list) {
+ result_list = rp = apr_palloc(pool, sizeof(*rp));
+ }
+ else {
+ rp = rp->next = apr_palloc(pool, sizeof(*rp));
+ len += 2; /* ", " */
+ }
+
+ rp->next = NULL;
+ rp->value = t_elt->val;
+ rp->len = strlen(rp->value);
+
+ len += rp->len;
+ }
+ ++t_elt;
+ } while (t_elt < t_end);
+
+ if (result_list) {
+ char *result = apr_palloc(pool, len);
+ char *cp = result;
+
+ rp = result_list;
+ while (rp) {
+ if (rp != result_list) {
+ *cp++ = ',';
+ *cp++ = ' ';
+ }
+ memcpy(cp, rp->value, rp->len);
+ cp += rp->len;
+ rp = rp->next;
+ }
+ *cp = '\0';
+
+ return result;
+ }
+
+ return NULL;
+}
+
+static const char *log_header_out(request_rec *r, char *a)
+{
+ const char *cp = NULL;
+
+ if (!strcasecmp(a, "Content-type") && r->content_type) {
+ cp = ap_field_noparam(r->pool, r->content_type);
+ }
+ else if (!strcasecmp(a, "Set-Cookie")) {
+ cp = find_multiple_headers(r->pool, r->headers_out, a);
+ }
+ else {
+ cp = apr_table_get(r->headers_out, a);
+ }
+
+ return ap_escape_logitem(r->pool, cp);
+}
+
+static const char *log_note(request_rec *r, char *a)
+{
+ return ap_escape_logitem(r->pool, apr_table_get(r->notes, a));
+}
+static const char *log_env_var(request_rec *r, char *a)
+{
+ return ap_escape_logitem(r->pool, apr_table_get(r->subprocess_env, a));
+}
+
+static const char *log_cookie(request_rec *r, char *a)
+{
+ const char *cookies;
+ const char *start_cookie;
+
+ if ((cookies = apr_table_get(r->headers_in, "Cookie"))) {
+ if ((start_cookie = ap_strstr_c(cookies,a))) {
+ char *cookie, *end_cookie;
+ start_cookie += strlen(a) + 1; /* cookie_name + '=' */
+ cookie = apr_pstrdup(r->pool, start_cookie);
+ /* kill everything in cookie after ';' */
+ end_cookie = strchr(cookie, ';');
+ if (end_cookie) {
+ *end_cookie = '\0';
+ }
+ return ap_escape_logitem(r->pool, cookie);
+ }
+ }
+ return NULL;
+}
+
+static const char *log_request_time_custom(request_rec *r, char *a,
+ apr_time_exp_t *xt)
+{
+ apr_size_t retcode;
+ char tstr[MAX_STRING_LEN];
+ apr_strftime(tstr, &retcode, sizeof(tstr), a, xt);
+ return apr_pstrdup(r->pool, tstr);
+}
+
+#define DEFAULT_REQUEST_TIME_SIZE 32
+typedef struct {
+ unsigned t;
+ char timestr[DEFAULT_REQUEST_TIME_SIZE];
+ unsigned t_validate;
+} cached_request_time;
+
+#define TIME_CACHE_SIZE 4
+#define TIME_CACHE_MASK 3
+static cached_request_time request_time_cache[TIME_CACHE_SIZE];
+
+static const char *log_request_time(request_rec *r, char *a)
+{
+ apr_time_exp_t xt;
+
+ /* ### I think getting the time again at the end of the request
+ * just for logging is dumb. i know it's "required" for CLF.
+ * folks writing log parsing tools don't realise that out of order
+ * times have always been possible (consider what happens if one
+ * process calculates the time to log, but then there's a context
+ * switch before it writes and before that process is run again the
+ * log rotation occurs) and they should just fix their tools rather
+ * than force the server to pay extra cpu cycles. if you've got
+ * a problem with this, you can set the define. -djg
+ */
+ if (a && *a) { /* Custom format */
+ /* The custom time formatting uses a very large temp buffer
+ * on the stack. To avoid using so much stack space in the
+ * common case where we're not using a custom format, the code
+ * for the custom format in a separate function. (That's why
+ * log_request_time_custom is not inlined right here.)
+ */
+#ifdef I_INSIST_ON_EXTRA_CYCLES_FOR_CLF_COMPLIANCE
+ ap_explode_recent_localtime(&xt, apr_time_now());
+#else
+ ap_explode_recent_localtime(&xt, r->request_time);
+#endif
+ return log_request_time_custom(r, a, &xt);
+ }
+ else { /* CLF format */
+ /* This code uses the same technique as ap_explode_recent_localtime():
+ * optimistic caching with logic to detect and correct race conditions.
+ * See the comments in server/util_time.c for more information.
+ */
+ cached_request_time* cached_time = apr_palloc(r->pool,
+ sizeof(*cached_time));
+#ifdef I_INSIST_ON_EXTRA_CYCLES_FOR_CLF_COMPLIANCE
+ apr_time_t request_time = apr_time_now();
+#else
+ apr_time_t request_time = r->request_time;
+#endif
+ unsigned t_seconds = (unsigned)apr_time_sec(request_time);
+ unsigned i = t_seconds & TIME_CACHE_MASK;
+ memcpy(cached_time, &(request_time_cache[i]), sizeof(*cached_time));
+ if ((t_seconds != cached_time->t) ||
+ (t_seconds != cached_time->t_validate)) {
+
+ /* Invalid or old snapshot, so compute the proper time string
+ * and store it in the cache
+ */
+ char sign;
+ int timz;
+
+ ap_explode_recent_localtime(&xt, request_time);
+ timz = xt.tm_gmtoff;
+ if (timz < 0) {
+ timz = -timz;
+ sign = '-';
+ }
+ else {
+ sign = '+';
+ }
+ cached_time->t = t_seconds;
+ apr_snprintf(cached_time->timestr, DEFAULT_REQUEST_TIME_SIZE,
+ "[%02d/%s/%d:%02d:%02d:%02d %c%.2d%.2d]",
+ xt.tm_mday, apr_month_snames[xt.tm_mon],
+ xt.tm_year+1900, xt.tm_hour, xt.tm_min, xt.tm_sec,
+ sign, timz / (60*60), (timz % (60*60)) / 60);
+ cached_time->t_validate = t_seconds;
+ memcpy(&(request_time_cache[i]), cached_time,
+ sizeof(*cached_time));
+ }
+ return cached_time->timestr;
+ }
+}
+
+static const char *log_request_duration(request_rec *r, char *a)
+{
+ apr_time_t duration = apr_time_now() - r->request_time;
+ return apr_psprintf(r->pool, "%" APR_TIME_T_FMT, apr_time_sec(duration));
+}
+
+static const char *log_request_duration_microseconds(request_rec *r, char *a)
+{
+ return apr_psprintf(r->pool, "%" APR_TIME_T_FMT,
+ (apr_time_now() - r->request_time));
+}
+
+/* These next two routines use the canonical name:port so that log
+ * parsers don't need to duplicate all the vhost parsing crud.
+ */
+static const char *log_virtual_host(request_rec *r, char *a)
+{
+ return ap_escape_logitem(r->pool, r->server->server_hostname);
+}
+
+static const char *log_server_port(request_rec *r, char *a)
+{
+ return apr_psprintf(r->pool, "%u",
+ r->server->port ? r->server->port : ap_default_port(r));
+}
+
+/* This respects the setting of UseCanonicalName so that
+ * the dynamic mass virtual hosting trick works better.
+ */
+static const char *log_server_name(request_rec *r, char *a)
+{
+ return ap_escape_logitem(r->pool, ap_get_server_name(r));
+}
+
+static const char *log_pid_tid(request_rec *r, char *a)
+{
+ if (*a == '\0' || !strcmp(a, "pid")) {
+ return apr_psprintf(r->pool, "%" APR_PID_T_FMT, getpid());
+ }
+ else if (!strcmp(a, "tid")) {
+#if APR_HAS_THREADS
+ apr_os_thread_t tid = apr_os_thread_current();
+#else
+ int tid = 0; /* APR will format "0" anyway but an arg is needed */
+#endif
+ return apr_psprintf(r->pool, "%pT", &tid);
+ }
+ /* bogus format */
+ return a;
+}
+
+static const char *log_connection_status(request_rec *r, char *a)
+{
+ if (r->connection->aborted)
+ return "X";
+
+ if (r->connection->keepalive == AP_CONN_KEEPALIVE &&
+ (!r->server->keep_alive_max ||
+ (r->server->keep_alive_max - r->connection->keepalives) > 0)) {
+ return "+";
+ }
+ return "-";
+}
+
+/*****************************************************************
+ *
+ * Parsing the log format string
+ */
+
+static char *parse_log_misc_string(apr_pool_t *p, log_format_item *it,
+ const char **sa)
+{
+ const char *s;
+ char *d;
+
+ it->func = constant_item;
+ it->conditions = NULL;
+
+ s = *sa;
+ while (*s && *s != '%') {
+ s++;
+ }
+ /*
+ * This might allocate a few chars extra if there's a backslash
+ * escape in the format string.
+ */
+ it->arg = apr_palloc(p, s - *sa + 1);
+
+ d = it->arg;
+ s = *sa;
+ while (*s && *s != '%') {
+ if (*s != '\\') {
+ *d++ = *s++;
+ }
+ else {
+ s++;
+ switch (*s) {
+ case '\\':
+ *d++ = '\\';
+ s++;
+ break;
+ case 'r':
+ *d++ = '\r';
+ s++;
+ break;
+ case 'n':
+ *d++ = '\n';
+ s++;
+ break;
+ case 't':
+ *d++ = '\t';
+ s++;
+ break;
+ default:
+ /* copy verbatim */
+ *d++ = '\\';
+ /*
+ * Allow the loop to deal with this *s in the normal
+ * fashion so that it handles end of string etc.
+ * properly.
+ */
+ break;
+ }
+ }
+ }
+ *d = '\0';
+
+ *sa = s;
+ return NULL;
+}
+
+static char *parse_log_item(apr_pool_t *p, log_format_item *it, const char **sa)
+{
+ const char *s = *sa;
+ ap_log_handler *handler;
+
+ if (*s != '%') {
+ return parse_log_misc_string(p, it, sa);
+ }
+
+ ++s;
+ it->condition_sense = 0;
+ it->conditions = NULL;
+
+ if (*s == '%') {
+ it->arg = "%";
+ it->func = constant_item;
+ *sa = ++s;
+
+ return NULL;
+ }
+
+ it->want_orig = -1;
+ it->arg = ""; /* For safety's sake... */
+
+ while (*s) {
+ int i;
+
+ switch (*s) {
+ case '!':
+ ++s;
+ it->condition_sense = !it->condition_sense;
+ break;
+
+ case '<':
+ ++s;
+ it->want_orig = 1;
+ break;
+
+ case '>':
+ ++s;
+ it->want_orig = 0;
+ break;
+
+ case ',':
+ ++s;
+ break;
+
+ case '{':
+ ++s;
+ it->arg = ap_getword(p, &s, '}');
+ break;
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ i = *s - '0';
+ while (apr_isdigit(*++s)) {
+ i = i * 10 + (*s) - '0';
+ }
+ if (!it->conditions) {
+ it->conditions = apr_array_make(p, 4, sizeof(int));
+ }
+ *(int *) apr_array_push(it->conditions) = i;
+ break;
+
+ default:
+ handler = (ap_log_handler *)apr_hash_get(log_hash, s++, 1);
+ if (!handler) {
+ char dummy[2];
+
+ dummy[0] = s[-1];
+ dummy[1] = '\0';
+ return apr_pstrcat(p, "Unrecognized LogFormat directive %",
+ dummy, NULL);
+ }
+ it->func = handler->func;
+ if (it->want_orig == -1) {
+ it->want_orig = handler->want_orig_default;
+ }
+ *sa = s;
+ return NULL;
+ }
+ }
+
+ return "Ran off end of LogFormat parsing args to some directive";
+}
+
+static apr_array_header_t *parse_log_string(apr_pool_t *p, const char *s, const char **err)
+{
+ apr_array_header_t *a = apr_array_make(p, 30, sizeof(log_format_item));
+ char *res;
+
+ while (*s) {
+ if ((res = parse_log_item(p, (log_format_item *) apr_array_push(a), &s))) {
+ *err = res;
+ return NULL;
+ }
+ }
+
+ s = APR_EOL_STR;
+ parse_log_item(p, (log_format_item *) apr_array_push(a), &s);
+ return a;
+}
+
+/*****************************************************************
+ *
+ * Actually logging.
+ */
+
+static const char *process_item(request_rec *r, request_rec *orig,
+ log_format_item *item)
+{
+ const char *cp;
+
+ /* First, see if we need to process this thing at all... */
+
+ if (item->conditions && item->conditions->nelts != 0) {
+ int i;
+ int *conds = (int *) item->conditions->elts;
+ int in_list = 0;
+
+ for (i = 0; i < item->conditions->nelts; ++i) {
+ if (r->status == conds[i]) {
+ in_list = 1;
+ break;
+ }
+ }
+
+ if ((item->condition_sense && in_list)
+ || (!item->condition_sense && !in_list)) {
+ return "-";
+ }
+ }
+
+ /* We do. Do it... */
+
+ cp = (*item->func) (item->want_orig ? orig : r, item->arg);
+ return cp ? cp : "-";
+}
+
+static void flush_log(buffered_log *buf)
+{
+ if (buf->outcnt && buf->handle != NULL) {
+ apr_file_write(buf->handle, buf->outbuf, &buf->outcnt);
+ buf->outcnt = 0;
+ }
+}
+
+
+static int config_log_transaction(request_rec *r, config_log_state *cls,
+ apr_array_header_t *default_format)
+{
+ log_format_item *items;
+ const char **strs;
+ int *strl;
+ request_rec *orig;
+ int i;
+ apr_size_t len = 0;
+ apr_array_header_t *format;
+ char *envar;
+ apr_status_t rv;
+
+ if (cls->fname == NULL) {
+ return DECLINED;
+ }
+
+ /*
+ * See if we've got any conditional environment-variable-controlled
+ * logging decisions to make.
+ */
+ if (cls->condition_var != NULL) {
+ envar = cls->condition_var;
+ if (*envar != '!') {
+ if (apr_table_get(r->subprocess_env, envar) == NULL) {
+ return DECLINED;
+ }
+ }
+ else {
+ if (apr_table_get(r->subprocess_env, &envar[1]) != NULL) {
+ return DECLINED;
+ }
+ }
+ }
+
+ format = cls->format ? cls->format : default_format;
+
+ strs = apr_palloc(r->pool, sizeof(char *) * (format->nelts));
+ strl = apr_palloc(r->pool, sizeof(int) * (format->nelts));
+ items = (log_format_item *) format->elts;
+
+ orig = r;
+ while (orig->prev) {
+ orig = orig->prev;
+ }
+ while (r->next) {
+ r = r->next;
+ }
+
+ for (i = 0; i < format->nelts; ++i) {
+ strs[i] = process_item(r, orig, &items[i]);
+ }
+
+ for (i = 0; i < format->nelts; ++i) {
+ len += strl[i] = strlen(strs[i]);
+ }
+ if (!log_writer) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, r,
+ "log writer isn't correctly setup");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ rv = log_writer(r, cls->log_writer, strs, strl, format->nelts, len);
+ /* xxx: do we return an error on log_writer? */
+ return OK;
+}
+
+static int multi_log_transaction(request_rec *r)
+{
+ multi_log_state *mls = ap_get_module_config(r->server->module_config,
+ &log_config_module);
+ config_log_state *clsarray;
+ int i;
+
+ /*
+ * Log this transaction..
+ */
+ if (mls->config_logs->nelts) {
+ clsarray = (config_log_state *) mls->config_logs->elts;
+ for (i = 0; i < mls->config_logs->nelts; ++i) {
+ config_log_state *cls = &clsarray[i];
+
+ config_log_transaction(r, cls, mls->default_format);
+ }
+ }
+ else if (mls->server_config_logs) {
+ clsarray = (config_log_state *) mls->server_config_logs->elts;
+ for (i = 0; i < mls->server_config_logs->nelts; ++i) {
+ config_log_state *cls = &clsarray[i];
+
+ config_log_transaction(r, cls, mls->default_format);
+ }
+ }
+
+ return OK;
+}
+
+/*****************************************************************
+ *
+ * Module glue...
+ */
+
+static void *make_config_log_state(apr_pool_t *p, server_rec *s)
+{
+ multi_log_state *mls;
+
+ mls = (multi_log_state *) apr_palloc(p, sizeof(multi_log_state));
+ mls->config_logs = apr_array_make(p, 1, sizeof(config_log_state));
+ mls->default_format_string = NULL;
+ mls->default_format = NULL;
+ mls->server_config_logs = NULL;
+ mls->formats = apr_table_make(p, 4);
+ apr_table_setn(mls->formats, "CLF", DEFAULT_LOG_FORMAT);
+
+ return mls;
+}
+
+/*
+ * Use the merger to simply add a pointer from the vhost log state
+ * to the list of logs specified for the non-vhost configuration. Make sure
+ * vhosts inherit any globally-defined format names.
+ */
+
+static void *merge_config_log_state(apr_pool_t *p, void *basev, void *addv)
+{
+ multi_log_state *base = (multi_log_state *) basev;
+ multi_log_state *add = (multi_log_state *) addv;
+
+ add->server_config_logs = base->config_logs;
+ if (!add->default_format) {
+ add->default_format_string = base->default_format_string;
+ add->default_format = base->default_format;
+ }
+ add->formats = apr_table_overlay(p, base->formats, add->formats);
+
+ return add;
+}
+
+/*
+ * Set the default logfile format, or define a nickname for a format string.
+ */
+static const char *log_format(cmd_parms *cmd, void *dummy, const char *fmt,
+ const char *name)
+{
+ const char *err_string = NULL;
+ multi_log_state *mls = ap_get_module_config(cmd->server->module_config,
+ &log_config_module);
+
+ /*
+ * If we were given two arguments, the second is a name to be given to the
+ * format. This syntax just defines the nickname - it doesn't actually
+ * make the format the default.
+ */
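+ /* Illustrative example (added; "myformat" is a made-up nickname):
+ * LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\"" myformat
+ * CustomLog logs/access_log myformat
+ * The first line only defines the nickname; the second line uses it.
+ */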
+ if (name != NULL) {
+ parse_log_string(cmd->pool, fmt, &err_string);
+ if (err_string == NULL) {
+ apr_table_setn(mls->formats, name, fmt);
+ }
+ }
+ else {
+ mls->default_format_string = fmt;
+ mls->default_format = parse_log_string(cmd->pool, fmt, &err_string);
+ }
+ return err_string;
+}
+
+
+static const char *add_custom_log(cmd_parms *cmd, void *dummy, const char *fn,
+ const char *fmt, const char *envclause)
+{
+ const char *err_string = NULL;
+ multi_log_state *mls = ap_get_module_config(cmd->server->module_config,
+ &log_config_module);
+ config_log_state *cls;
+
+ cls = (config_log_state *) apr_array_push(mls->config_logs);
+ cls->condition_var = NULL;
+ if (envclause != NULL) {
+ if (strncasecmp(envclause, "env=", 4) != 0) {
+ return "error in condition clause";
+ }
+ if ((envclause[4] == '\0')
+ || ((envclause[4] == '!') && (envclause[5] == '\0'))) {
+ return "missing environment variable name";
+ }
+ cls->condition_var = apr_pstrdup(cmd->pool, &envclause[4]);
+ }
+
+ cls->fname = fn;
+ cls->format_string = fmt;
+ if (fmt == NULL) {
+ cls->format = NULL;
+ }
+ else {
+ cls->format = parse_log_string(cmd->pool, fmt, &err_string);
+ }
+ cls->log_writer = NULL;
+
+ return err_string;
+}
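+
+/*
+ * Usage sketch for the env= clause parsed above (assumed httpd.conf lines;
+ * the environment variable name is made up for illustration):
+ *
+ *   SetEnvIf Request_URI "\.gif$" image-request
+ *   CustomLog logs/access_log common env=!image-request
+ *
+ * A leading '!' inverts the test; config_log_transaction() consults
+ * r->subprocess_env for the named variable before writing the entry.
+ */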
+
+static const char *set_transfer_log(cmd_parms *cmd, void *dummy,
+ const char *fn)
+{
+ return add_custom_log(cmd, dummy, fn, NULL, NULL);
+}
+
+static const char *set_cookie_log(cmd_parms *cmd, void *dummy, const char *fn)
+{
+ return add_custom_log(cmd, dummy, fn, "%{Cookie}n \"%r\" %t", NULL);
+}
+
+static const char *set_buffered_logs_on(cmd_parms *parms, void *dummy, int flag)
+{
+ buffered_logs = flag;
+ if (buffered_logs) {
+ ap_log_set_writer_init(ap_buffered_log_writer_init);
+ ap_log_set_writer(ap_buffered_log_writer);
+ }
+ return NULL;
+}
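+
+/*
+ * Usage sketch (assumed single config line):
+ *
+ *   BufferedLogs On
+ *
+ * Turning the flag on swaps in ap_buffered_log_writer_init and
+ * ap_buffered_log_writer before the logs are opened, so each log opened
+ * afterwards is written through an in-memory buffer (see below).
+ */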
+static const command_rec config_log_cmds[] =
+{
+AP_INIT_TAKE23("CustomLog", add_custom_log, NULL, RSRC_CONF,
+ "a file name, a custom log format string or format name, "
+ "and an optional \"env=\" clause (see docs)"),
+AP_INIT_TAKE1("TransferLog", set_transfer_log, NULL, RSRC_CONF,
+ "the filename of the access log"),
+AP_INIT_TAKE12("LogFormat", log_format, NULL, RSRC_CONF,
+ "a log format string (see docs) and an optional format name"),
+AP_INIT_TAKE1("CookieLog", set_cookie_log, NULL, RSRC_CONF,
+ "the filename of the cookie log"),
+AP_INIT_FLAG("BufferedLogs", set_buffered_logs_on, NULL, RSRC_CONF,
+ "Enable Buffered Logging (experimental)"),
+ {NULL}
+};
+
+static config_log_state *open_config_log(server_rec *s, apr_pool_t *p,
+ config_log_state *cls,
+ apr_array_header_t *default_format)
+{
+ if (cls->log_writer != NULL) {
+ return cls; /* virtual config shared w/main server */
+ }
+
+ if (cls->fname == NULL) {
+ return cls; /* Leave it NULL to decline. */
+ }
+
+ cls->log_writer = log_writer_init(p, s, cls->fname);
+ if (cls->log_writer == NULL)
+ return NULL;
+
+ return cls;
+}
+
+static int open_multi_logs(server_rec *s, apr_pool_t *p)
+{
+ int i;
+ multi_log_state *mls = ap_get_module_config(s->module_config,
+ &log_config_module);
+ config_log_state *clsarray;
+ const char *dummy;
+ const char *format;
+
+ if (mls->default_format_string) {
+ format = apr_table_get(mls->formats, mls->default_format_string);
+ if (format) {
+ mls->default_format = parse_log_string(p, format, &dummy);
+ }
+ }
+
+ if (!mls->default_format) {
+ mls->default_format = parse_log_string(p, DEFAULT_LOG_FORMAT, &dummy);
+ }
+
+ if (mls->config_logs->nelts) {
+ clsarray = (config_log_state *) mls->config_logs->elts;
+ for (i = 0; i < mls->config_logs->nelts; ++i) {
+ config_log_state *cls = &clsarray[i];
+
+ if (cls->format_string) {
+ format = apr_table_get(mls->formats, cls->format_string);
+ if (format) {
+ cls->format = parse_log_string(p, format, &dummy);
+ }
+ }
+
+ if (!open_config_log(s, p, cls, mls->default_format)) {
+ /* Failure already logged by open_config_log */
+ return DONE;
+ }
+ }
+ }
+ else if (mls->server_config_logs) {
+ clsarray = (config_log_state *) mls->server_config_logs->elts;
+ for (i = 0; i < mls->server_config_logs->nelts; ++i) {
+ config_log_state *cls = &clsarray[i];
+
+ if (cls->format_string) {
+ format = apr_table_get(mls->formats, cls->format_string);
+ if (format) {
+ cls->format = parse_log_string(p, format, &dummy);
+ }
+ }
+
+ if (!open_config_log(s, p, cls, mls->default_format)) {
+ /* Failure already logged by open_config_log */
+ return DONE;
+ }
+ }
+ }
+
+ return OK;
+}
+
+
+static apr_status_t flush_all_logs(void *data)
+{
+ server_rec *s = data;
+ multi_log_state *mls;
+ apr_array_header_t *log_list;
+ config_log_state *clsarray;
+ buffered_log *buf;
+ int i;
+
+ if (!buffered_logs)
+ return APR_SUCCESS;
+
+ for (; s; s = s->next) {
+ mls = ap_get_module_config(s->module_config, &log_config_module);
+ log_list = NULL;
+ if (mls->config_logs->nelts) {
+ log_list = mls->config_logs;
+ }
+ else if (mls->server_config_logs) {
+ log_list = mls->server_config_logs;
+ }
+ if (log_list) {
+ clsarray = (config_log_state *) log_list->elts;
+ for (i = 0; i < log_list->nelts; ++i) {
+ buf = clsarray[i].log_writer;
+ flush_log(buf);
+ }
+ }
+ }
+ return APR_SUCCESS;
+}
+
+
+static int init_config_log(apr_pool_t *pc, apr_pool_t *p, apr_pool_t *pt, server_rec *s)
+{
+ int res;
+
+ /* First init the buffered logs array, which is needed when opening the logs. */
+ if (buffered_logs) {
+ all_buffered_logs = apr_array_make(p, 5, sizeof(buffered_log *));
+ }
+
+    /* Next, do the "physical" (main) server, which provides the default
+     * log fd and format for the virtual servers, if they don't override...
+     */
+ res = open_multi_logs(s, p);
+
+ /* Then, virtual servers */
+
+ for (s = s->next; (res == OK) && s; s = s->next) {
+ res = open_multi_logs(s, p);
+ }
+
+ return res;
+}
+
+static void init_child(apr_pool_t *p, server_rec *s)
+{
+ int mpm_threads;
+
+ ap_mpm_query(AP_MPMQ_MAX_THREADS, &mpm_threads);
+
+ /* Now register the last buffer flush with the cleanup engine */
+ if (buffered_logs) {
+ int i;
+ buffered_log **array = (buffered_log **)all_buffered_logs->elts;
+
+ apr_pool_cleanup_register(p, s, flush_all_logs, flush_all_logs);
+
+ for (i = 0; i < all_buffered_logs->nelts; i++) {
+ buffered_log *this = array[i];
+
+#if APR_HAS_THREADS
+ if (mpm_threads > 1) {
+ apr_status_t rv;
+
+ this->mutex.type = apr_anylock_threadmutex;
+ rv = apr_thread_mutex_create(&this->mutex.lock.tm,
+ APR_THREAD_MUTEX_DEFAULT,
+ p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s,
+ "could not initialize buffered log mutex, "
+ "transfer log may become corrupted");
+ this->mutex.type = apr_anylock_none;
+ }
+ }
+ else
+#endif
+ {
+ this->mutex.type = apr_anylock_none;
+ }
+ }
+ }
+}
+
+static void ap_register_log_handler(apr_pool_t *p, char *tag,
+ ap_log_handler_fn_t *handler, int def)
+{
+ ap_log_handler *log_struct = apr_palloc(p, sizeof(*log_struct));
+ log_struct->func = handler;
+ log_struct->want_orig_default = def;
+
+ apr_hash_set(log_hash, tag, 1, (const void *)log_struct);
+}
+static void ap_log_set_writer_init(ap_log_writer_init *handle)
+{
+ log_writer_init = handle;
+
+}
+static void ap_log_set_writer(ap_log_writer *handle)
+{
+ log_writer = handle;
+}
+
+static apr_status_t ap_default_log_writer( request_rec *r,
+ void *handle,
+ const char **strs,
+ int *strl,
+ int nelts,
+ apr_size_t len)
+
+{
+ char *str;
+ char *s;
+ int i;
+ apr_status_t rv;
+
+ str = apr_palloc(r->pool, len + 1);
+
+ for (i = 0, s = str; i < nelts; ++i) {
+ memcpy(s, strs[i], strl[i]);
+ s += strl[i];
+ }
+
+ rv = apr_file_write((apr_file_t*)handle, str, &len);
+
+ return rv;
+}
+static void *ap_default_log_writer_init(apr_pool_t *p, server_rec *s,
+ const char* name)
+{
+ if (*name == '|') {
+ piped_log *pl;
+
+ pl = ap_open_piped_log(p, name + 1);
+ if (pl == NULL) {
+            return NULL;
+ }
+ return ap_piped_log_write_fd(pl);
+ }
+ else {
+ const char *fname = ap_server_root_relative(p, name);
+ apr_file_t *fd;
+ apr_status_t rv;
+
+ if (!fname) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, APR_EBADPATH, s,
+ "invalid transfer log path %s.", name);
+ return NULL;
+ }
+ rv = apr_file_open(&fd, fname, xfer_flags, xfer_perms, p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "could not open transfer log file %s.", fname);
+ return NULL;
+ }
+ return fd;
+ }
+}
+static void *ap_buffered_log_writer_init(apr_pool_t *p, server_rec *s,
+ const char* name)
+{
+ buffered_log *b;
+ b = apr_pcalloc(p, sizeof(buffered_log));
+ b->handle = ap_default_log_writer_init(p, s, name);
+
+ if (b->handle) {
+ *(buffered_log **)apr_array_push(all_buffered_logs) = b;
+ return b;
+ }
+ else
+ return NULL;
+}
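+
+/*
+ * Buffered counterpart of ap_default_log_writer.  Summary of the logic
+ * below: if appending the new entry would overflow the LOG_BUFSIZE buffer,
+ * the buffer is flushed first; entries of LOG_BUFSIZE bytes or more are
+ * written straight to the file; everything else is appended to the per-log
+ * buffer and written later by flush_log()/flush_all_logs().
+ */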
+static apr_status_t ap_buffered_log_writer(request_rec *r,
+ void *handle,
+ const char **strs,
+ int *strl,
+ int nelts,
+ apr_size_t len)
+
+{
+ char *str;
+ char *s;
+ int i;
+ apr_status_t rv;
+ buffered_log *buf = (buffered_log*)handle;
+
+ if ((rv = APR_ANYLOCK_LOCK(&buf->mutex)) != APR_SUCCESS) {
+ return rv;
+ }
+
+ if (len + buf->outcnt > LOG_BUFSIZE) {
+ flush_log(buf);
+ }
+ if (len >= LOG_BUFSIZE) {
+ apr_size_t w;
+
+ str = apr_palloc(r->pool, len + 1);
+ for (i = 0, s = str; i < nelts; ++i) {
+ memcpy(s, strs[i], strl[i]);
+ s += strl[i];
+ }
+ w = len;
+ rv = apr_file_write(buf->handle, str, &w);
+
+ }
+ else {
+ for (i = 0, s = &buf->outbuf[buf->outcnt]; i < nelts; ++i) {
+ memcpy(s, strs[i], strl[i]);
+ s += strl[i];
+ }
+ buf->outcnt += len;
+ rv = APR_SUCCESS;
+ }
+
+ APR_ANYLOCK_UNLOCK(&buf->mutex);
+ return rv;
+}
+
+static int log_pre_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp)
+{
+ static APR_OPTIONAL_FN_TYPE(ap_register_log_handler) *log_pfn_register;
+
+ log_pfn_register = APR_RETRIEVE_OPTIONAL_FN(ap_register_log_handler);
+
+ if (log_pfn_register) {
+ log_pfn_register(p, "h", log_remote_host, 0);
+ log_pfn_register(p, "a", log_remote_address, 0 );
+ log_pfn_register(p, "A", log_local_address, 0 );
+ log_pfn_register(p, "l", log_remote_logname, 0);
+ log_pfn_register(p, "u", log_remote_user, 0);
+ log_pfn_register(p, "t", log_request_time, 0);
+ log_pfn_register(p, "f", log_request_file, 0);
+ log_pfn_register(p, "b", clf_log_bytes_sent, 0);
+ log_pfn_register(p, "B", log_bytes_sent, 0);
+ log_pfn_register(p, "i", log_header_in, 0);
+ log_pfn_register(p, "o", log_header_out, 0);
+ log_pfn_register(p, "n", log_note, 0);
+ log_pfn_register(p, "e", log_env_var, 0);
+ log_pfn_register(p, "V", log_server_name, 0);
+ log_pfn_register(p, "v", log_virtual_host, 0);
+ log_pfn_register(p, "p", log_server_port, 0);
+ log_pfn_register(p, "P", log_pid_tid, 0);
+ log_pfn_register(p, "H", log_request_protocol, 0);
+ log_pfn_register(p, "m", log_request_method, 0);
+ log_pfn_register(p, "q", log_request_query, 0);
+ log_pfn_register(p, "X", log_connection_status, 0);
+ log_pfn_register(p, "C", log_cookie, 0);
+ log_pfn_register(p, "r", log_request_line, 1);
+ log_pfn_register(p, "D", log_request_duration_microseconds, 1);
+ log_pfn_register(p, "T", log_request_duration, 1);
+ log_pfn_register(p, "U", log_request_uri, 1);
+ log_pfn_register(p, "s", log_status, 1);
+ }
+
+ return OK;
+}
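+
+/*
+ * The registrations above bind the standard format characters to their
+ * handlers (e.g. "h" -> log_remote_host, "r" -> log_request_line).  The
+ * final argument sets want_orig_default, i.e. whether the item refers to
+ * the original request by default when an internal redirect has occurred
+ * (compare %s with %>s).
+ */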
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_pre_config(log_pre_config,NULL,NULL,APR_HOOK_REALLY_FIRST);
+ ap_hook_child_init(init_child,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_open_logs(init_config_log,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_log_transaction(multi_log_transaction,NULL,NULL,APR_HOOK_MIDDLE);
+
+ /* Init log_hash before we register the optional function. It is
+ * possible for the optional function, ap_register_log_handler,
+ * to be called before any other mod_log_config hooks are called.
+ * As a policy, we should init everything required by an optional function
+ * before calling APR_REGISTER_OPTIONAL_FN.
+ */
+ log_hash = apr_hash_make(p);
+ APR_REGISTER_OPTIONAL_FN(ap_register_log_handler);
+ APR_REGISTER_OPTIONAL_FN(ap_log_set_writer_init);
+ APR_REGISTER_OPTIONAL_FN(ap_log_set_writer);
+}
+
+module AP_MODULE_DECLARE_DATA log_config_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-dir config */
+ NULL, /* merge per-dir config */
+ make_config_log_state, /* server config */
+ merge_config_log_state, /* merge server config */
+ config_log_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
+
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.dsp b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.dsp
new file mode 100644
index 00000000..dcd522ea
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_log_config" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_log_config - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_log_config.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_log_config.mak" CFG="mod_log_config - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_log_config - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_log_config - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_log_config - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_log_config_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_log_config.so" /base:@..\..\os\win32\BaseAddr.ref,mod_log_config.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_log_config.so" /base:@..\..\os\win32\BaseAddr.ref,mod_log_config.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_log_config - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_log_config_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_log_config.so" /base:@..\..\os\win32\BaseAddr.ref,mod_log_config.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_log_config.so" /base:@..\..\os\win32\BaseAddr.ref,mod_log_config.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_log_config - Win32 Release"
+# Name "mod_log_config - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_log_config.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_log_config.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_log_config - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_log_config.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_log_config.so "log_config_module for Apache" ../../include/ap_release.h > .\mod_log_config.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_log_config - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_log_config.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_log_config.so "log_config_module for Apache" ../../include/ap_release.h > .\mod_log_config.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.exp b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.exp
new file mode 100644
index 00000000..0749e527
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.exp
@@ -0,0 +1 @@
+log_config_module
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.h b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.h
new file mode 100644
index 00000000..00e79014
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.h
@@ -0,0 +1,63 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_optional.h"
+#include "httpd.h"
+#include "scoreboard.h"
+
+#ifndef _MOD_LOG_CONFIG_H
+#define _MOD_LOG_CONFIG_H 1
+
+/**
+ * Callback function prototype for an external log handler.
+ */
+typedef const char *ap_log_handler_fn_t(request_rec *r, char *a);
+
+/**
+ * callback function prototype for external writer initialization.
+ */
+typedef void *ap_log_writer_init(apr_pool_t *p, server_rec *s,
+ const char *name);
+/**
+ * Callback which gets called when there is a log line to write.
+ */
+typedef apr_status_t ap_log_writer(
+ request_rec *r,
+ void *handle,
+ const char **portions,
+ int *lengths,
+ int nelts,
+ apr_size_t len);
+
+typedef struct ap_log_handler {
+ ap_log_handler_fn_t *func;
+ int want_orig_default;
+} ap_log_handler;
+
+APR_DECLARE_OPTIONAL_FN(void, ap_register_log_handler,
+ (apr_pool_t *p, char *tag, ap_log_handler_fn_t *func,
+ int def));
+/**
+ * You will need to set your init handler *BEFORE* the open_logs hook
+ * in mod_log_config is executed.
+ */
+APR_DECLARE_OPTIONAL_FN(void, ap_log_set_writer_init,(ap_log_writer_init *func));
+/**
+ * You should probably set the writer at the same time (i.e., before open_logs).
+ */
+APR_DECLARE_OPTIONAL_FN(void, ap_log_set_writer, (ap_log_writer* func));
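+
+/*
+ * Consumer sketch (hypothetical module; it mirrors the retrieval pattern
+ * mod_logio uses).  "Z" and my_log_item are made-up names used only for
+ * illustration:
+ *
+ *   static int my_pre_config(apr_pool_t *p, apr_pool_t *plog,
+ *                            apr_pool_t *ptemp)
+ *   {
+ *       APR_OPTIONAL_FN_TYPE(ap_register_log_handler) *reg =
+ *           APR_RETRIEVE_OPTIONAL_FN(ap_register_log_handler);
+ *       if (reg) {
+ *           reg(p, "Z", my_log_item, 0);   -- handles the %...Z item
+ *       }
+ *       return OK;
+ *   }
+ */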
+
+#endif /* _MOD_LOG_CONFIG_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.la b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.la
new file mode 100644
index 00000000..e08ddd31
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.la
@@ -0,0 +1,35 @@
+# mod_log_config.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_log_config.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_log_config.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.lo b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.lo
new file mode 100644
index 00000000..8fb4c5e6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.lo
@@ -0,0 +1,12 @@
+# mod_log_config.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_log_config.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_log_config.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.o b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.o
new file mode 100644
index 00000000..55dc12bc
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_config.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_forensic.c b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_forensic.c
new file mode 100644
index 00000000..35ea369d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_forensic.c
@@ -0,0 +1,288 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * See also support/check_forensic.
+ * Relate the forensic log to the transfer log by including
+ * %{forensic-id}n in the custom log format, for example:
+ * CustomLog logs/custom "%h %l %u %t \"%r\" %>s %b %{forensic-id}n"
+ *
+ * Credit is due to Tina Bird <tbird precision-guesswork.com>, whose
+ * idea this module was.
+ *
+ * Ben Laurie 29/12/2003
+ */
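+
+/*
+ * Minimal configuration sketch (assumed httpd.conf lines):
+ *
+ *   ForensicLog logs/forensic_log
+ *   CustomLog   logs/access_log "%h %l %u %t \"%r\" %>s %b %{forensic-id}n"
+ *
+ * log_before() writes "+<id>|<escaped request>|<header>:<value>|..." as
+ * soon as the request headers have been read, and log_after() writes
+ * "-<id>" when the transaction completes, so an id with no matching '-'
+ * line marks a request that never finished.
+ */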
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "apr_strings.h"
+#include "apr_atomic.h"
+#include "http_protocol.h"
+#include "test_char.h"
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+module AP_MODULE_DECLARE_DATA log_forensic_module;
+
+typedef struct fcfg {
+ const char *logname;
+ apr_file_t *fd;
+} fcfg;
+
+static void *make_forensic_log_scfg(apr_pool_t *p, server_rec *s)
+{
+ fcfg *cfg = apr_pcalloc(p, sizeof *cfg);
+
+ cfg->logname = NULL;
+ cfg->fd = NULL;
+
+ return cfg;
+}
+
+static void *merge_forensic_log_scfg(apr_pool_t *p, void *parent, void *new)
+{
+ fcfg *cfg = apr_pcalloc(p, sizeof *cfg);
+ fcfg *pc = parent;
+ fcfg *nc = new;
+
+ cfg->logname = apr_pstrdup(p, nc->logname ? nc->logname : pc->logname);
+ cfg->fd = NULL;
+
+ return cfg;
+}
+
+static int open_log(server_rec *s, apr_pool_t *p)
+{
+ fcfg *cfg = ap_get_module_config(s->module_config, &log_forensic_module);
+
+ if (!cfg->logname || cfg->fd)
+ return 1;
+
+ if (*cfg->logname == '|') {
+ piped_log *pl;
+ const char *pname = ap_server_root_relative(p, cfg->logname + 1);
+
+ pl = ap_open_piped_log(p, pname);
+ if (pl == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "couldn't spawn forensic log pipe %s", cfg->logname);
+ return 0;
+ }
+ cfg->fd = ap_piped_log_write_fd(pl);
+ }
+ else {
+ const char *fname = ap_server_root_relative(p, cfg->logname);
+ apr_status_t rv;
+
+ if ((rv = apr_file_open(&cfg->fd, fname,
+ APR_WRITE | APR_APPEND | APR_CREATE,
+ APR_OS_DEFAULT, p)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "could not open forensic log file %s.", fname);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int log_init(apr_pool_t *pc, apr_pool_t *p, apr_pool_t *pt,
+ server_rec *s)
+{
+ for ( ; s ; s = s->next) {
+ if (!open_log(s, p)) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+
+ return OK;
+}
+
+
+/* e is the first _invalid_ location in q.
+   N.B. returns a pointer to the terminating NUL.
+ */
+static char *log_escape(char *q, const char *e, const char *p)
+{
+ for ( ; *p ; ++p) {
+ ap_assert(q < e);
+ if (test_char_table[*(unsigned char *)p]&T_ESCAPE_FORENSIC) {
+ ap_assert(q+2 < e);
+ *q++ = '%';
+ sprintf(q, "%02x", *(unsigned char *)p);
+ q += 2;
+ }
+ else
+ *q++ = *p;
+ }
+ ap_assert(q < e);
+ *q = '\0';
+
+ return q;
+}
+
+typedef struct hlog {
+ char *log;
+ char *pos;
+ char *end;
+ apr_pool_t *p;
+ apr_size_t count;
+} hlog;
+
+static int count_string(const char *p)
+{
+ int n;
+
+ for (n = 0 ; *p ; ++p, ++n)
+ if (test_char_table[*(unsigned char *)p]&T_ESCAPE_FORENSIC)
+ n += 2;
+ return n;
+}
+
+static int count_headers(void *h_, const char *key, const char *value)
+{
+ hlog *h = h_;
+
+    /* one '|' separator, the escaped key, one ':', and the escaped value */
+    h->count += count_string(key)+count_string(value)+2;
+
+ return 1;
+}
+
+static int log_headers(void *h_, const char *key, const char *value)
+{
+ hlog *h = h_;
+
+    /* note that we don't have to check h->pos here, because it's been
+       done for us by log_escape */
+ *h->pos++ = '|';
+ h->pos = log_escape(h->pos, h->end, key);
+ *h->pos++ = ':';
+ h->pos = log_escape(h->pos, h->end, value);
+
+ return 1;
+}
+
+static int log_before(request_rec *r)
+{
+ fcfg *cfg = ap_get_module_config(r->server->module_config,
+ &log_forensic_module);
+ const char *id;
+ hlog h;
+ apr_size_t n;
+ apr_status_t rv;
+
+ if (!cfg->fd || r->prev) {
+ return DECLINED;
+ }
+
+ if (!(id = apr_table_get(r->subprocess_env, "UNIQUE_ID"))) {
+ /* we make the assumption that we can't go through all the PIDs in
+ under 1 second */
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "mod_log_forensic: mod_unique_id must also be active");
+ return DECLINED;
+ }
+ ap_set_module_config(r->request_config, &log_forensic_module, (char *)id);
+
+ h.p = r->pool;
+ h.count = 0;
+
+ apr_table_do(count_headers, &h, r->headers_in, NULL);
+
+    /* '+' marker + id + '|' + escaped request + '\n', plus one spare byte
+       for log_escape's trailing NUL */
+    h.count += 1+strlen(id)+1+count_string(r->the_request)+1+1;
+ h.log = apr_palloc(r->pool, h.count);
+ h.pos = h.log;
+ h.end = h.log+h.count;
+
+ *h.pos++ = '+';
+ strcpy(h.pos, id);
+ h.pos += strlen(h.pos);
+ *h.pos++ = '|';
+ h.pos = log_escape(h.pos, h.end, r->the_request);
+
+ apr_table_do(log_headers, &h, r->headers_in, NULL);
+
+ ap_assert(h.pos < h.end);
+ *h.pos++ = '\n';
+
+ n = h.count-1;
+ rv = apr_file_write(cfg->fd, h.log, &n);
+ ap_assert(rv == APR_SUCCESS && n == h.count-1);
+
+ apr_table_setn(r->notes, "forensic-id", id);
+
+ return OK;
+}
+
+static int log_after(request_rec *r)
+{
+ fcfg *cfg = ap_get_module_config(r->server->module_config,
+ &log_forensic_module);
+ const char *id = ap_get_module_config(r->request_config,
+ &log_forensic_module);
+ char *s;
+ apr_size_t l, n;
+ apr_status_t rv;
+
+ if (!cfg->fd) {
+ return DECLINED;
+ }
+
+ s = apr_pstrcat(r->pool, "-", id, "\n", NULL);
+ l = n = strlen(s);
+ rv = apr_file_write(cfg->fd, s, &n);
+ ap_assert(rv == APR_SUCCESS && n == l);
+
+ return OK;
+}
+
+static const char *set_forensic_log(cmd_parms *cmd, void *dummy, const char *fn)
+{
+ fcfg *cfg = ap_get_module_config(cmd->server->module_config,
+ &log_forensic_module);
+
+ cfg->logname = fn;
+ return NULL;
+}
+
+static const command_rec forensic_log_cmds[] =
+{
+ AP_INIT_TAKE1("ForensicLog", set_forensic_log, NULL, RSRC_CONF,
+ "the filename of the forensic log"),
+ { NULL }
+};
+
+static void register_hooks(apr_pool_t *p)
+{
+ static const char * const pre[] = { "mod_unique_id.c", NULL };
+
+ ap_hook_open_logs(log_init,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_post_read_request(log_before,pre,NULL,APR_HOOK_REALLY_FIRST);
+ ap_hook_log_transaction(log_after,NULL,NULL,APR_HOOK_REALLY_LAST);
+}
+
+module AP_MODULE_DECLARE_DATA log_forensic_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-dir config */
+ NULL, /* merge per-dir config */
+ make_forensic_log_scfg, /* server config */
+ merge_forensic_log_scfg, /* merge server config */
+ forensic_log_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_forensic.dsp b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_forensic.dsp
new file mode 100644
index 00000000..b41c267e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_forensic.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_log_forensic" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_log_forensic - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_log_forensic.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_log_forensic.mak" CFG="mod_log_forensic - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_log_forensic - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_log_forensic - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_log_forensic - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../server" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_log_forensic_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_log_forensic.so" /base:@..\..\os\win32\BaseAddr.ref,mod_log_forensic.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_log_forensic.so" /base:@..\..\os\win32\BaseAddr.ref,mod_log_forensic.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_log_forensic - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../server" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_log_forensic_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_log_forensic.so" /base:@..\..\os\win32\BaseAddr.ref,mod_log_forensic.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_log_forensic.so" /base:@..\..\os\win32\BaseAddr.ref,mod_log_forensic.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_log_forensic - Win32 Release"
+# Name "mod_log_forensic - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_log_forensic.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_log_forensic.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_log_forensic - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_log_forensic.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_log_forensic.so "log_forensic_module for Apache" ../../include/ap_release.h > .\mod_log_forensic.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_log_forensic - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_log_forensic.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_log_forensic.so "log_forensic_module for Apache" ../../include/ap_release.h > .\mod_log_forensic.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_forensic.exp b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_forensic.exp
new file mode 100644
index 00000000..92f5075b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/mod_log_forensic.exp
@@ -0,0 +1 @@
+log_forensic_module
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/mod_logio.c b/rubbos/app/httpd-2.0.64/modules/loggers/mod_logio.c
new file mode 100644
index 00000000..ba337ed2
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/mod_logio.c
@@ -0,0 +1,192 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Written by Bojan Smojver <bojan@rexursive.com>:
+ *
+ * The argument to LogFormat and CustomLog is a string, which can include
+ * literal characters copied into the log files, and '%' directives as
+ * follows:
+ *
+ * %...I: bytes received, including request and headers, cannot be zero
+ * %...O: bytes sent, including headers, cannot be zero
+ *
+ */
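+
+/*
+ * Example format using these items ("combinedio" is a conventional
+ * nickname, assumed here rather than defined by this module):
+ *
+ *   LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
+ *   CustomLog logs/access_log combinedio
+ */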
+
+#include "apr_strings.h"
+#include "apr_lib.h"
+#include "apr_hash.h"
+#include "apr_optional.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "mod_log_config.h"
+#include "httpd.h"
+#include "http_core.h"
+#include "http_config.h"
+#include "http_connection.h"
+#include "http_protocol.h"
+
+module AP_MODULE_DECLARE_DATA logio_module;
+
+static const char logio_filter_name[] = "LOG_INPUT_OUTPUT";
+
+/*
+ * Logging of input and output config...
+ */
+
+typedef struct logio_config_t {
+ apr_off_t bytes_in;
+ apr_off_t bytes_out;
+} logio_config_t;
+
+/*
+ * Optional function for the core to add to bytes_out
+ */
+
+static void ap_logio_add_bytes_out(conn_rec *c, apr_off_t bytes){
+ logio_config_t *cf = ap_get_module_config(c->conn_config, &logio_module);
+
+ cf->bytes_out += bytes;
+}
+
+/*
+ * Format items...
+ */
+
+static const char *log_bytes_in(request_rec *r, char *a)
+{
+ logio_config_t *cf = ap_get_module_config(r->connection->conn_config,
+ &logio_module);
+
+ return apr_off_t_toa(r->pool, cf->bytes_in);
+}
+
+static const char *log_bytes_out(request_rec *r, char *a)
+{
+ logio_config_t *cf = ap_get_module_config(r->connection->conn_config,
+ &logio_module);
+
+ return apr_off_t_toa(r->pool, cf->bytes_out);
+}
+
+/*
+ * Reset counters after logging...
+ */
+
+static int logio_transaction(request_rec *r)
+{
+ logio_config_t *cf = ap_get_module_config(r->connection->conn_config,
+ &logio_module);
+
+ cf->bytes_in = cf->bytes_out = 0;
+
+ return OK;
+}
+
+/*
+ * Logging of input and output filters...
+ */
+
+static apr_status_t logio_in_filter(ap_filter_t *f,
+ apr_bucket_brigade *bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes) {
+ apr_off_t length;
+ apr_status_t status;
+ logio_config_t *cf = ap_get_module_config(f->c->conn_config, &logio_module);
+
+ status = ap_get_brigade(f->next, bb, mode, block, readbytes);
+
+ apr_brigade_length (bb, 0, &length);
+
+ if (length > 0)
+ cf->bytes_in += length;
+
+ return status;
+}
+
+static apr_status_t logio_out_filter(ap_filter_t *f,
+ apr_bucket_brigade *bb) {
+ apr_bucket *b = APR_BRIGADE_LAST(bb);
+
+ /* End of data, make sure we flush */
+ if (APR_BUCKET_IS_EOS(b)) {
+ APR_BUCKET_INSERT_BEFORE(b,
+ apr_bucket_flush_create(f->c->bucket_alloc));
+ }
+
+ return ap_pass_brigade(f->next, bb);
+}
+
+/*
+ * The hooks...
+ */
+
+static int logio_pre_conn(conn_rec *c, void *csd) {
+ logio_config_t *cf = apr_pcalloc(c->pool, sizeof(*cf));
+
+ ap_set_module_config(c->conn_config, &logio_module, cf);
+
+ ap_add_input_filter(logio_filter_name, NULL, NULL, c);
+ ap_add_output_filter(logio_filter_name, NULL, NULL, c);
+
+ return OK;
+}
+
+static int logio_pre_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp)
+{
+ static APR_OPTIONAL_FN_TYPE(ap_register_log_handler) *log_pfn_register;
+
+ log_pfn_register = APR_RETRIEVE_OPTIONAL_FN(ap_register_log_handler);
+
+ if (log_pfn_register) {
+ log_pfn_register(p, "I", log_bytes_in, 0);
+ log_pfn_register(p, "O", log_bytes_out, 0);
+ }
+
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ static const char *pre[] = { "mod_log_config.c", NULL };
+
+ ap_hook_pre_connection(logio_pre_conn, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_pre_config(logio_pre_config, NULL, NULL, APR_HOOK_REALLY_FIRST);
+ ap_hook_log_transaction(logio_transaction, pre, NULL, APR_HOOK_MIDDLE);
+
+ ap_register_input_filter(logio_filter_name, logio_in_filter, NULL,
+ AP_FTYPE_NETWORK - 1);
+ ap_register_output_filter(logio_filter_name, logio_out_filter, NULL,
+ AP_FTYPE_NETWORK - 1);
+
+ APR_REGISTER_OPTIONAL_FN(ap_logio_add_bytes_out);
+}
+
+module AP_MODULE_DECLARE_DATA logio_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-dir config */
+ NULL, /* merge per-dir config */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ NULL, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/mod_logio.dsp b/rubbos/app/httpd-2.0.64/modules/loggers/mod_logio.dsp
new file mode 100644
index 00000000..c312c045
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/mod_logio.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_logio" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_logio - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_logio.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_logio.mak" CFG="mod_logio - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_logio - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_logio - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_logio - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_logio_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_logio.so" /base:@..\..\os\win32\BaseAddr.ref,mod_logio.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_logio.so" /base:@..\..\os\win32\BaseAddr.ref,mod_logio.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_logio - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_logio_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_logio.so" /base:@..\..\os\win32\BaseAddr.ref,mod_logio.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_logio.so" /base:@..\..\os\win32\BaseAddr.ref,mod_logio.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_logio - Win32 Release"
+# Name "mod_logio - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_logio.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_logio.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_logio - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_logio.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_logio.so "logio_module for Apache" ../../include/ap_release.h > .\mod_logio.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_logio - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_logio.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_logio.so "logio_module for Apache" ../../include/ap_release.h > .\mod_logio.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/loggers/modules.mk b/rubbos/app/httpd-2.0.64/modules/loggers/modules.mk
new file mode 100644
index 00000000..8aacac02
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/loggers/modules.mk
@@ -0,0 +1,5 @@
+mod_log_config.la: mod_log_config.lo
+ $(MOD_LINK) mod_log_config.lo $(MOD_LOG_CONFIG_LDADD)
+DISTCLEAN_TARGETS = modules.mk
+static = mod_log_config.la
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.deps b/rubbos/app/httpd-2.0.64/modules/mappers/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.indent.pro b/rubbos/app/httpd-2.0.64/modules/mappers/.indent.pro
new file mode 100644
index 00000000..a9fbe9f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_actions.a b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_actions.a
new file mode 100644
index 00000000..86545023
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_actions.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_actions.la b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_actions.la
new file mode 100644
index 00000000..4bc62514
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_actions.la
@@ -0,0 +1,35 @@
+# mod_actions.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_actions.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_actions.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_actions.o b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_actions.o
new file mode 100644
index 00000000..91f684c1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_actions.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_alias.a b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_alias.a
new file mode 100644
index 00000000..efd4192a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_alias.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_alias.la b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_alias.la
new file mode 100644
index 00000000..f008aff6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_alias.la
@@ -0,0 +1,35 @@
+# mod_alias.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_alias.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_alias.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_alias.o b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_alias.o
new file mode 100644
index 00000000..178414cd
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_alias.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_dir.a b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_dir.a
new file mode 100644
index 00000000..0fa02416
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_dir.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_dir.la b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_dir.la
new file mode 100644
index 00000000..9fd727d8
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_dir.la
@@ -0,0 +1,35 @@
+# mod_dir.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_dir.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_dir.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_dir.o b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_dir.o
new file mode 100644
index 00000000..40dbfa1e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_dir.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_imap.a b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_imap.a
new file mode 100644
index 00000000..e30d4fd5
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_imap.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_imap.la b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_imap.la
new file mode 100644
index 00000000..a51962d8
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_imap.la
@@ -0,0 +1,35 @@
+# mod_imap.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_imap.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_imap.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_imap.o b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_imap.o
new file mode 100644
index 00000000..e824bbdd
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_imap.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_negotiation.a b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_negotiation.a
new file mode 100644
index 00000000..83353892
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_negotiation.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_negotiation.la b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_negotiation.la
new file mode 100644
index 00000000..7b42e96c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_negotiation.la
@@ -0,0 +1,35 @@
+# mod_negotiation.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_negotiation.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_negotiation.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_negotiation.o b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_negotiation.o
new file mode 100644
index 00000000..38f00426
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_negotiation.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_so.a b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_so.a
new file mode 100644
index 00000000..ef52ec98
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_so.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_so.la b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_so.la
new file mode 100644
index 00000000..1c9e8489
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_so.la
@@ -0,0 +1,35 @@
+# mod_so.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_so.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_so.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_so.o b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_so.o
new file mode 100644
index 00000000..71d442fa
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_so.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_userdir.a b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_userdir.a
new file mode 100644
index 00000000..2f3d25ea
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_userdir.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_userdir.la b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_userdir.la
new file mode 100644
index 00000000..2f7eeb0b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_userdir.la
@@ -0,0 +1,35 @@
+# mod_userdir.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_userdir.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_userdir.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_userdir.o b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_userdir.o
new file mode 100644
index 00000000..677c39d1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/.libs/mod_userdir.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/Makefile b/rubbos/app/httpd-2.0.64/modules/mappers/Makefile
new file mode 100644
index 00000000..536b0888
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/mappers
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/mappers
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/mappers
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/Makefile.in b/rubbos/app/httpd-2.0.64/modules/mappers/Makefile.in
new file mode 100644
index 00000000..167b343d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/Makefile.in
@@ -0,0 +1,3 @@
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/NWGNUmakefile b/rubbos/app/httpd-2.0.64/modules/mappers/NWGNUmakefile
new file mode 100644
index 00000000..62c9624d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/NWGNUmakefile
@@ -0,0 +1,247 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME =
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION =
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME =
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE =
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/rewrite.nlm \
+ $(OBJDIR)/speling.nlm \
+ $(OBJDIR)/vhost.nlm \
+ $(EOLIST)
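+# (Each NLM listed above has a corresponding makefile in this directory:
+# NWGNUrewrite, NWGNUspeling and NWGNUvhost.)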
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ $(EOLIST)
+
+#
+# Any exported symbols go here
+#
+FILES_nlm_exports = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Update this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/NWGNUrewrite b/rubbos/app/httpd-2.0.64/modules/mappers/NWGNUrewrite
new file mode 100644
index 00000000..960fca7c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/NWGNUrewrite
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = rewrite
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Rewrite Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Rewrite Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/rewrite.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_rewrite.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any exported symbols go here
+#
+FILES_nlm_exports = \
+ rewrite_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Update this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/NWGNUspeling b/rubbos/app/httpd-2.0.64/modules/mappers/NWGNUspeling
new file mode 100644
index 00000000..550def1a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/NWGNUspeling
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = speling
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Speling Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Speling Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/speling.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_speling.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any exported symbols go here
+#
+FILES_nlm_exports = \
+ speling_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Update this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/NWGNUvhost b/rubbos/app/httpd-2.0.64/modules/mappers/NWGNUvhost
new file mode 100644
index 00000000..8d68295a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/NWGNUvhost
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = vhost
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Vhost Alias Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Vhost Alias Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/vhost.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_vhost_alias.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any exported symbols go here
+#
+FILES_nlm_exports = \
+ vhost_alias_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Update this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/config9.m4 b/rubbos/app/httpd-2.0.64/modules/mappers/config9.m4
new file mode 100644
index 00000000..adb66ea8
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/config9.m4
@@ -0,0 +1,57 @@
+dnl modules enabled in this directory by default
+
+dnl APACHE_MODULE(name, helptext[, objects[, structname[, default[, config]]]])
+
+APACHE_MODPATH_INIT(mappers)
+
+APACHE_MODULE(vhost_alias, mass virtual hosting module, , , most)
+APACHE_MODULE(negotiation, content negotiation, , , yes)
+APACHE_MODULE(dir, directory request handling, , , yes)
+APACHE_MODULE(imap, server-side imagemaps, , , yes)
+APACHE_MODULE(actions, Action triggering on requests, , , yes)
+APACHE_MODULE(speling, correct common URL misspellings, , , most)
+APACHE_MODULE(userdir, mapping of requests to user-specific directories, , , yes)
+APACHE_MODULE(alias, mapping of requests to different filesystem parts, , , yes)
+
+APACHE_MODULE(rewrite, rule based URL manipulation, , , most)
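+
+dnl As an illustration of the APACHE_MODULE signature above: the speling
+dnl entry passes only name, helptext and a default of "most" (objects,
+dnl structname and config are left at their defaults), so mod_speling is
+dnl pulled in when a broad module selection such as --enable-mods-shared=most
+dnl is requested at configure time.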
+
+
+APR_CHECK_APR_DEFINE(APR_HAS_DSO)
+
+case "x$enable_so" in
+ "xyes")
+ if test $ac_cv_define_APR_HAS_DSO = "no"; then
+ AC_MSG_ERROR([mod_so has been requested but cannot be built on your system])
+ fi
+ ;;
+ "xshared")
+ AC_MSG_ERROR([mod_so can not be built as a shared DSO])
+ ;;
+ "xno")
+ ;;
+ "x")
+ enable_so=$ac_cv_define_APR_HAS_DSO
+ ;;
+esac
+
+dnl mod_so can only be built statically. If the user wants modules to
+dnl be built as DSOs by default (eg. ./configure --enable-mods-shared=most)
+dnl then we must override the default here.
+if test "x$enable_so" = "xyes"; then
+ enable_so="static"
+fi
+
+if test "$sharedobjs" = "yes"; then
+ if test $ac_cv_define_APR_HAS_DSO = "no"; then
+ AC_MSG_ERROR([shared objects have been requested but cannot be built since mod_so cannot be built])
+ elif test $enable_so = "no"; then
+ AC_MSG_ERROR([shared objects have been requested but cannot be built since mod_so was disabled])
+ fi
+fi
+
+APACHE_MODULE(so, DSO capability, , , $enable_so)
+
+dnl ### why save the cache?
+AC_CACHE_SAVE
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.c b/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.c
new file mode 100644
index 00000000..f8af935a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.c
@@ -0,0 +1,198 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_actions.c: executes scripts based on MIME type or HTTP method
+ *
+ * by Alexei Kosut; based on mod_cgi.c, mod_mime.c and mod_includes.c,
+ * adapted by rst from original NCSA code by Rob McCool
+ *
+ * Usage instructions:
+ *
+ * Action mime/type /cgi-bin/script
+ *
+ * will activate /cgi-bin/script when a file of content type mime/type is
+ * requested. It sends the URL and file path of the requested document using
+ * the standard CGI PATH_INFO and PATH_TRANSLATED environment variables.
+ *
+ * Script PUT /cgi-bin/script
+ *
+ * will activate /cgi-bin/script when a request is received with the
+ * HTTP method "PUT". The available method names are defined in httpd.h.
+ * If the method is GET, the script will only be activated if the requested
+ * URI includes query information (stuff after a ?-mark).
+ */
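+
+/*
+ * A concrete configuration sketch (illustrative only; the script paths are
+ * hypothetical):
+ *
+ *   Action image/gif /cgi-bin/gif-filter.cgi
+ *   Script PUT /cgi-bin/put-handler.cgi
+ *
+ * The first line runs the filter whenever a file of content type image/gif
+ * is requested; the second routes PUT requests to the upload handler. As
+ * the command table below declares, Action is usable wherever FileInfo
+ * overrides are permitted, while Script is a directory/server-level
+ * directive.
+ */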
+
+#include "apr_strings.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_log.h"
+#include "util_script.h"
+
+typedef struct {
+ apr_table_t *action_types; /* Added with Action... */
+ const char *scripted[METHODS]; /* Added with Script... */
+ int configured; /* True if Action or Script has been
+ * called at least once
+ */
+} action_dir_config;
+
+module AP_MODULE_DECLARE_DATA actions_module;
+
+static void *create_action_dir_config(apr_pool_t *p, char *dummy)
+{
+ action_dir_config *new =
+ (action_dir_config *) apr_pcalloc(p, sizeof(action_dir_config));
+
+ new->action_types = apr_table_make(p, 4);
+
+ return new;
+}
+
+static void *merge_action_dir_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ action_dir_config *base = (action_dir_config *) basev;
+ action_dir_config *add = (action_dir_config *) addv;
+ action_dir_config *new = (action_dir_config *) apr_palloc(p,
+ sizeof(action_dir_config));
+ int i;
+
+ new->action_types = apr_table_overlay(p, add->action_types,
+ base->action_types);
+
+ for (i = 0; i < METHODS; ++i) {
+ new->scripted[i] = add->scripted[i] ? add->scripted[i]
+ : base->scripted[i];
+ }
+
+ new->configured = (base->configured || add->configured);
+ return new;
+}
+
+static const char *add_action(cmd_parms *cmd, void *m_v,
+ const char *type, const char *script)
+{
+ action_dir_config *m = (action_dir_config *)m_v;
+ apr_table_setn(m->action_types, type, script);
+ m->configured = 1;
+ return NULL;
+}
+
+static const char *set_script(cmd_parms *cmd, void *m_v,
+ const char *method, const char *script)
+{
+ action_dir_config *m = (action_dir_config *)m_v;
+ int methnum;
+
+ methnum = ap_method_number_of(method);
+ if (methnum == M_TRACE)
+ return "TRACE not allowed for Script";
+ else if (methnum == M_INVALID)
+ return "Unknown method type for Script";
+ else
+ m->scripted[methnum] = script;
+
+ m->configured = 1;
+ return NULL;
+}
+
+static const command_rec action_cmds[] =
+{
+ AP_INIT_TAKE2("Action", add_action, NULL, OR_FILEINFO,
+ "a media type followed by a script name"),
+ AP_INIT_TAKE2("Script", set_script, NULL, ACCESS_CONF | RSRC_CONF,
+ "a method followed by a script name"),
+ {NULL}
+};
+
+static int action_handler(request_rec *r)
+{
+ action_dir_config *conf = (action_dir_config *)
+ ap_get_module_config(r->per_dir_config, &actions_module);
+ const char *t, *action;
+ const char *script;
+ int i;
+
+ if (!conf->configured) {
+ return DECLINED;
+ }
+
+ /* Note that this handler handles _all_ types, so handler is unchecked */
+
+ /* Set allowed stuff */
+ for (i = 0; i < METHODS; ++i) {
+ if (conf->scripted[i])
+ r->allowed |= (AP_METHOD_BIT << i);
+ }
+
+ /* First, check for the method-handling scripts */
+ if (r->method_number == M_GET) {
+ if (r->args)
+ script = conf->scripted[M_GET];
+ else
+ script = NULL;
+ }
+ else {
+ script = conf->scripted[r->method_number];
+ }
+
+    /* Check for looping internal redirects (the script itself may trigger another action) */
+ if (script && r->prev && r->prev->prev)
+ return DECLINED;
+
+ /* Second, check for actions (which override the method scripts) */
+ action = r->handler ? r->handler :
+ ap_field_noparam(r->pool, r->content_type);
+ if ((t = apr_table_get(conf->action_types,
+ action ? action : ap_default_type(r)))) {
+ script = t;
+ if (r->finfo.filetype == 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "File does not exist: %s", r->filename);
+ return HTTP_NOT_FOUND;
+ }
+ }
+
+ if (script == NULL)
+ return DECLINED;
+
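+    /* Internally redirect to "<script><escaped original URI>", appending
+     * "?<args>" when a query string is present; the invoked script then
+     * receives the original document via PATH_INFO / PATH_TRANSLATED, as
+     * described in the usage notes at the top of this file. */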
+ ap_internal_redirect_handler(apr_pstrcat(r->pool, script,
+ ap_escape_uri(r->pool, r->uri),
+ r->args ? "?" : NULL,
+ r->args, NULL), r);
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_handler(action_handler,NULL,NULL,APR_HOOK_LAST);
+}
+
+module AP_MODULE_DECLARE_DATA actions_module =
+{
+ STANDARD20_MODULE_STUFF,
+    create_action_dir_config,  /* dir config creator */
+ merge_action_dir_configs, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ action_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.dsp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.dsp
new file mode 100644
index 00000000..f78be8f0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_actions" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_actions - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_actions.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_actions.mak" CFG="mod_actions - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_actions - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_actions - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_actions - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_actions_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_actions.so" /base:@..\..\os\win32\BaseAddr.ref,mod_actions.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_actions.so" /base:@..\..\os\win32\BaseAddr.ref,mod_actions.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_actions - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_actions_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_actions.so" /base:@..\..\os\win32\BaseAddr.ref,mod_actions.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_actions.so" /base:@..\..\os\win32\BaseAddr.ref,mod_actions.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_actions - Win32 Release"
+# Name "mod_actions - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_actions.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_actions.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_actions - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_actions.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_actions.so "actions_module for Apache" ../../include/ap_release.h > .\mod_actions.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_actions - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_actions.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_actions.so "actions_module for Apache" ../../include/ap_release.h > .\mod_actions.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.exp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.exp
new file mode 100644
index 00000000..8cc6cdb1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.exp
@@ -0,0 +1 @@
+actions_module
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.la b/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.la
new file mode 100644
index 00000000..4bc62514
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.la
@@ -0,0 +1,35 @@
+# mod_actions.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_actions.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_actions.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.lo b/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.lo
new file mode 100644
index 00000000..a7d3e371
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.lo
@@ -0,0 +1,12 @@
+# mod_actions.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_actions.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_actions.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.o b/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.o
new file mode 100644
index 00000000..91f684c1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_actions.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.c b/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.c
new file mode 100644
index 00000000..4e5b6ef0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.c
@@ -0,0 +1,484 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_alias.c: Stuff for dealing with directory aliases
+ *
+ * Original by Rob McCool, rewritten in succession by David Robinson
+ * and rst.
+ *
+ */
+
+#include "apr_strings.h"
+#include "apr_lib.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_core.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_log.h"
+
+
+typedef struct {
+ const char *real;
+ const char *fake;
+ char *handler;
+ regex_t *regexp;
+ int redir_status; /* 301, 302, 303, 410, etc */
+} alias_entry;
+
+typedef struct {
+ apr_array_header_t *aliases;
+ apr_array_header_t *redirects;
+} alias_server_conf;
+
+typedef struct {
+ apr_array_header_t *redirects;
+} alias_dir_conf;
+
+module AP_MODULE_DECLARE_DATA alias_module;
+
+static void *create_alias_config(apr_pool_t *p, server_rec *s)
+{
+ alias_server_conf *a =
+ (alias_server_conf *) apr_pcalloc(p, sizeof(alias_server_conf));
+
+ a->aliases = apr_array_make(p, 20, sizeof(alias_entry));
+ a->redirects = apr_array_make(p, 20, sizeof(alias_entry));
+ return a;
+}
+
+static void *create_alias_dir_config(apr_pool_t *p, char *d)
+{
+ alias_dir_conf *a =
+ (alias_dir_conf *) apr_pcalloc(p, sizeof(alias_dir_conf));
+ a->redirects = apr_array_make(p, 2, sizeof(alias_entry));
+ return a;
+}
+
+static void *merge_alias_config(apr_pool_t *p, void *basev, void *overridesv)
+{
+ alias_server_conf *a =
+ (alias_server_conf *) apr_pcalloc(p, sizeof(alias_server_conf));
+ alias_server_conf *base = (alias_server_conf *) basev;
+ alias_server_conf *overrides = (alias_server_conf *) overridesv;
+
+ a->aliases = apr_array_append(p, overrides->aliases, base->aliases);
+ a->redirects = apr_array_append(p, overrides->redirects, base->redirects);
+ return a;
+}
+
+static void *merge_alias_dir_config(apr_pool_t *p, void *basev, void *overridesv)
+{
+ alias_dir_conf *a =
+ (alias_dir_conf *) apr_pcalloc(p, sizeof(alias_dir_conf));
+ alias_dir_conf *base = (alias_dir_conf *) basev;
+ alias_dir_conf *overrides = (alias_dir_conf *) overridesv;
+ a->redirects = apr_array_append(p, overrides->redirects, base->redirects);
+ return a;
+}
+
+/* need prototype for overlap check */
+static int alias_matches(const char *uri, const char *alias_fakename);
+
+static const char *add_alias_internal(cmd_parms *cmd, void *dummy,
+ const char *f, const char *r,
+ int use_regex)
+{
+ server_rec *s = cmd->server;
+ alias_server_conf *conf = ap_get_module_config(s->module_config,
+ &alias_module);
+ alias_entry *new = apr_array_push(conf->aliases);
+ alias_entry *entries = (alias_entry *)conf->aliases->elts;
+ int i;
+
+ /* XX r can NOT be relative to DocumentRoot here... compat bug. */
+
+ if (use_regex) {
+ new->regexp = ap_pregcomp(cmd->pool, f, REG_EXTENDED);
+ if (new->regexp == NULL)
+ return "Regular expression could not be compiled.";
+ new->real = r;
+ }
+ else {
+ /* XXX This may be optimized, but we must know that new->real
+         * exists. If so, we can dir merge later, trusting new->real
+ * and just canonicalizing the remainder. Not till I finish
+ * cleaning out the old ap_canonical stuff first.
+ */
+ new->real = r;
+ }
+ new->fake = f;
+ new->handler = cmd->info;
+
+ /* check for overlapping (Script)Alias directives
+     * and emit a warning if one is found
+ */
+ if (!use_regex) {
+ for (i = 0; i < conf->aliases->nelts - 1; ++i) {
+ alias_entry *p = &entries[i];
+
+ if ( (!p->regexp && alias_matches(f, p->fake) > 0)
+ || (p->regexp && !ap_regexec(p->regexp, f, 0, NULL, 0))) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server,
+ "The %s directive in %s at line %d will probably "
+ "never match because it overlaps an earlier "
+ "%sAlias%s.",
+ cmd->cmd->name, cmd->directive->filename,
+ cmd->directive->line_num,
+ p->handler ? "Script" : "",
+ p->regexp ? "Match" : "");
+
+ break; /* one warning per alias should be sufficient */
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static const char *add_alias(cmd_parms *cmd, void *dummy, const char *f,
+ const char *r)
+{
+ return add_alias_internal(cmd, dummy, f, r, 0);
+}
+
+static const char *add_alias_regex(cmd_parms *cmd, void *dummy, const char *f,
+ const char *r)
+{
+ return add_alias_internal(cmd, dummy, f, r, 1);
+}
+
+static const char *add_redirect_internal(cmd_parms *cmd,
+ alias_dir_conf *dirconf,
+ const char *arg1, const char *arg2,
+ const char *arg3, int use_regex)
+{
+ alias_entry *new;
+ server_rec *s = cmd->server;
+ alias_server_conf *serverconf = ap_get_module_config(s->module_config,
+ &alias_module);
+ int status = (int) (long) cmd->info;
+ regex_t *r = NULL;
+ const char *f = arg2;
+ const char *url = arg3;
+
+ if (!strcasecmp(arg1, "gone"))
+ status = HTTP_GONE;
+ else if (!strcasecmp(arg1, "permanent"))
+ status = HTTP_MOVED_PERMANENTLY;
+ else if (!strcasecmp(arg1, "temp"))
+ status = HTTP_MOVED_TEMPORARILY;
+ else if (!strcasecmp(arg1, "seeother"))
+ status = HTTP_SEE_OTHER;
+ else if (apr_isdigit(*arg1))
+ status = atoi(arg1);
+ else {
+ f = arg1;
+ url = arg2;
+ }
+
+ if (use_regex) {
+ r = ap_pregcomp(cmd->pool, f, REG_EXTENDED);
+ if (r == NULL)
+ return "Regular expression could not be compiled.";
+ }
+
+ if (ap_is_HTTP_REDIRECT(status)) {
+ if (!url)
+ return "URL to redirect to is missing";
+ if (!use_regex && !ap_is_url(url))
+ return "Redirect to non-URL";
+ }
+ else {
+ if (url)
+ return "Redirect URL not valid for this status";
+ }
+
+ if (cmd->path)
+ new = apr_array_push(dirconf->redirects);
+ else
+ new = apr_array_push(serverconf->redirects);
+
+ new->fake = f;
+ new->real = url;
+ new->regexp = r;
+ new->redir_status = status;
+ return NULL;
+}
+
+static const char *add_redirect(cmd_parms *cmd, void *dirconf,
+ const char *arg1, const char *arg2,
+ const char *arg3)
+{
+ return add_redirect_internal(cmd, dirconf, arg1, arg2, arg3, 0);
+}
+
+static const char *add_redirect2(cmd_parms *cmd, void *dirconf,
+ const char *arg1, const char *arg2)
+{
+ return add_redirect_internal(cmd, dirconf, arg1, arg2, NULL, 0);
+}
+
+static const char *add_redirect_regex(cmd_parms *cmd, void *dirconf,
+ const char *arg1, const char *arg2,
+ const char *arg3)
+{
+ return add_redirect_internal(cmd, dirconf, arg1, arg2, arg3, 1);
+}
+
+static const command_rec alias_cmds[] =
+{
+ AP_INIT_TAKE2("Alias", add_alias, NULL, RSRC_CONF,
+ "a fakename and a realname"),
+ AP_INIT_TAKE2("ScriptAlias", add_alias, "cgi-script", RSRC_CONF,
+ "a fakename and a realname"),
+ AP_INIT_TAKE23("Redirect", add_redirect, (void *) HTTP_MOVED_TEMPORARILY,
+ OR_FILEINFO,
+ "an optional status, then document to be redirected and "
+ "destination URL"),
+ AP_INIT_TAKE2("AliasMatch", add_alias_regex, NULL, RSRC_CONF,
+ "a regular expression and a filename"),
+ AP_INIT_TAKE2("ScriptAliasMatch", add_alias_regex, "cgi-script", RSRC_CONF,
+ "a regular expression and a filename"),
+ AP_INIT_TAKE23("RedirectMatch", add_redirect_regex,
+ (void *) HTTP_MOVED_TEMPORARILY, OR_FILEINFO,
+ "an optional status, then a regular expression and "
+ "destination URL"),
+ AP_INIT_TAKE2("RedirectTemp", add_redirect2,
+ (void *) HTTP_MOVED_TEMPORARILY, OR_FILEINFO,
+ "a document to be redirected, then the destination URL"),
+ AP_INIT_TAKE2("RedirectPermanent", add_redirect2,
+ (void *) HTTP_MOVED_PERMANENTLY, OR_FILEINFO,
+ "a document to be redirected, then the destination URL"),
+ {NULL}
+};
+
+static int alias_matches(const char *uri, const char *alias_fakename)
+{
+ const char *aliasp = alias_fakename, *urip = uri;
+
+ while (*aliasp) {
+ if (*aliasp == '/') {
+ /* any number of '/' in the alias matches any number in
+ * the supplied URI, but there must be at least one...
+ */
+ if (*urip != '/')
+ return 0;
+
+ do {
+ ++aliasp;
+ } while (*aliasp == '/');
+ do {
+ ++urip;
+ } while (*urip == '/');
+ }
+ else {
+ /* Other characters are compared literally */
+ if (*urip++ != *aliasp++)
+ return 0;
+ }
+ }
+
+ /* Check last alias path component matched all the way */
+
+ if (aliasp[-1] != '/' && *urip != '\0' && *urip != '/')
+ return 0;
+
+ /* Return number of characters from URI which matched (may be
+ * greater than length of alias, since we may have matched
+ * doubled slashes)
+ */
+
+ return urip - uri;
+}
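+
+/*
+ * Worked example for alias_matches() above (illustrative):
+ * alias_matches("/icons//foo.gif", "/icons/") returns 8, since the doubled
+ * slash in the URI is absorbed by the single trailing slash of the alias,
+ * while alias_matches("/iconsfoo", "/icons") returns 0 because the last
+ * alias component does not end on a path boundary in the URI.
+ */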
+
+static char *try_alias_list(request_rec *r, apr_array_header_t *aliases,
+ int doesc, int *status)
+{
+ alias_entry *entries = (alias_entry *) aliases->elts;
+ regmatch_t regm[AP_MAX_REG_MATCH];
+ char *found = NULL;
+ int i;
+
+ for (i = 0; i < aliases->nelts; ++i) {
+ alias_entry *p = &entries[i];
+ int l;
+
+ if (p->regexp) {
+ if (!ap_regexec(p->regexp, r->uri, AP_MAX_REG_MATCH, regm, 0)) {
+ if (p->real) {
+ found = ap_pregsub(r->pool, p->real, r->uri,
+ AP_MAX_REG_MATCH, regm);
+ if (found && doesc) {
+ apr_uri_t uri;
+ apr_uri_parse(r->pool, found, &uri);
+ /* Do not escape the query string or fragment. */
+ found = apr_uri_unparse(r->pool, &uri,
+ APR_URI_UNP_OMITQUERY);
+ found = ap_escape_uri(r->pool, found);
+ if (uri.query) {
+ found = apr_pstrcat(r->pool, found, "?",
+ uri.query, NULL);
+ }
+ if (uri.fragment) {
+ found = apr_pstrcat(r->pool, found, "#",
+ uri.fragment, NULL);
+ }
+ }
+ }
+ else {
+ /* need something non-null */
+ found = apr_pstrdup(r->pool, "");
+ }
+ }
+ }
+ else {
+ l = alias_matches(r->uri, p->fake);
+
+ if (l > 0) {
+ if (doesc) {
+ char *escurl;
+ escurl = ap_os_escape_path(r->pool, r->uri + l, 1);
+
+ found = apr_pstrcat(r->pool, p->real, escurl, NULL);
+ }
+ else
+ found = apr_pstrcat(r->pool, p->real, r->uri + l, NULL);
+ }
+ }
+
+ if (found) {
+ if (p->handler) { /* Set handler, and leave a note for mod_cgi */
+ r->handler = p->handler;
+ apr_table_setn(r->notes, "alias-forced-type", r->handler);
+ }
+ /* XXX This is as SLOW as can be, next step, we optimize
+ * and merge to whatever part of the found path was already
+ * canonicalized. After I finish eliminating os canonical.
+ * Better fail test for ap_server_root_relative needed here.
+ */
+ if (!doesc) {
+ found = ap_server_root_relative(r->pool, found);
+ }
+ if (found) {
+ *status = p->redir_status;
+ }
+ return found;
+ }
+
+ }
+
+ return NULL;
+}
+
+static int translate_alias_redir(request_rec *r)
+{
+ ap_conf_vector_t *sconf = r->server->module_config;
+ alias_server_conf *serverconf = ap_get_module_config(sconf, &alias_module);
+ char *ret;
+ int status;
+
+ if (r->uri[0] != '/' && r->uri[0] != '\0') {
+ return DECLINED;
+ }
+
+ if ((ret = try_alias_list(r, serverconf->redirects, 1, &status)) != NULL) {
+ if (ap_is_HTTP_REDIRECT(status)) {
+ /* include QUERY_STRING if any */
+ if (r->args) {
+ ret = apr_pstrcat(r->pool, ret, "?", r->args, NULL);
+ }
+ apr_table_setn(r->headers_out, "Location", ret);
+ }
+ return status;
+ }
+
+ if ((ret = try_alias_list(r, serverconf->aliases, 0, &status)) != NULL) {
+ r->filename = ret;
+ return OK;
+ }
+
+ return DECLINED;
+}
+
+static int fixup_redir(request_rec *r)
+{
+ void *dconf = r->per_dir_config;
+ alias_dir_conf *dirconf =
+ (alias_dir_conf *) ap_get_module_config(dconf, &alias_module);
+ char *ret;
+ int status;
+
+ /* It may have changed since last time, so try again */
+
+ if ((ret = try_alias_list(r, dirconf->redirects, 1, &status)) != NULL) {
+ if (ap_is_HTTP_REDIRECT(status)) {
+ if (ret[0] == '/') {
+ char *orig_target = ret;
+
+ ret = ap_construct_url(r->pool, ret, r);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "incomplete redirection target of '%s' for "
+ "URI '%s' modified to '%s'",
+ orig_target, r->uri, ret);
+ }
+ if (!ap_is_url(ret)) {
+ status = HTTP_INTERNAL_SERVER_ERROR;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "cannot redirect '%s' to '%s'; "
+ "target is not a valid absoluteURI or abs_path",
+ r->uri, ret);
+ }
+ else {
+ /* append requested query only, if the config didn't
+ * supply its own.
+ */
+ if (r->args && !ap_strchr(ret, '?')) {
+ ret = apr_pstrcat(r->pool, ret, "?", r->args, NULL);
+ }
+ apr_table_setn(r->headers_out, "Location", ret);
+ }
+ }
+ return status;
+ }
+
+ return DECLINED;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ static const char * const aszSucc[]={ "mod_userdir.c",
+ "mod_vhost_alias.c",NULL };
+
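+    /* aszSucc names hooks that must run after this one, so Alias/Redirect
+     * translation takes precedence over mod_userdir and mod_vhost_alias. */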
+ ap_hook_translate_name(translate_alias_redir,NULL,aszSucc,APR_HOOK_MIDDLE);
+ ap_hook_fixups(fixup_redir,NULL,NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA alias_module =
+{
+ STANDARD20_MODULE_STUFF,
+    create_alias_dir_config,   /* dir config creator */
+ merge_alias_dir_config, /* dir merger --- default is to override */
+ create_alias_config, /* server config */
+ merge_alias_config, /* merge server configs */
+ alias_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.dsp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.dsp
new file mode 100644
index 00000000..b13219d1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_alias" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_alias - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_alias.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_alias.mak" CFG="mod_alias - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_alias - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_alias - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_alias - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_alias_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_alias.so" /base:@..\..\os\win32\BaseAddr.ref,mod_alias.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_alias.so" /base:@..\..\os\win32\BaseAddr.ref,mod_alias.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_alias - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_alias_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_alias.so" /base:@..\..\os\win32\BaseAddr.ref,mod_alias.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_alias.so" /base:@..\..\os\win32\BaseAddr.ref,mod_alias.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_alias - Win32 Release"
+# Name "mod_alias - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_alias.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_alias.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_alias - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_alias.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_alias.so "alias_module for Apache" ../../include/ap_release.h > .\mod_alias.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_alias - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_alias.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_alias.so "alias_module for Apache" ../../include/ap_release.h > .\mod_alias.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.exp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.exp
new file mode 100644
index 00000000..ac386ec3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.exp
@@ -0,0 +1 @@
+alias_module
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.la b/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.la
new file mode 100644
index 00000000..f008aff6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.la
@@ -0,0 +1,35 @@
+# mod_alias.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_alias.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_alias.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.lo b/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.lo
new file mode 100644
index 00000000..bf4ea8f5
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.lo
@@ -0,0 +1,12 @@
+# mod_alias.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_alias.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_alias.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.o b/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.o
new file mode 100644
index 00000000..178414cd
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_alias.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.c b/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.c
new file mode 100644
index 00000000..9191b7f0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.c
@@ -0,0 +1,247 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_dir.c: handle default index files, and trailing-/ redirects
+ */
+
+#include "apr_strings.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_request.h"
+#include "http_protocol.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "util_script.h"
+
+module AP_MODULE_DECLARE_DATA dir_module;
+
+typedef enum {
+ SLASH_OFF = 0,
+ SLASH_ON,
+ SLASH_UNSET
+} slash_cfg;
+
+typedef struct dir_config_struct {
+ apr_array_header_t *index_names;
+ slash_cfg do_slash;
+} dir_config_rec;
+
+#define DIR_CMD_PERMS OR_INDEXES
+
+static const char *add_index(cmd_parms *cmd, void *dummy, const char *arg)
+{
+ dir_config_rec *d = dummy;
+
+ if (!d->index_names) {
+ d->index_names = apr_array_make(cmd->pool, 2, sizeof(char *));
+ }
+ *(const char **)apr_array_push(d->index_names) = arg;
+ return NULL;
+}
+
+static const char *configure_slash(cmd_parms *cmd, void *d_, int arg)
+{
+ dir_config_rec *d = d_;
+
+ d->do_slash = arg ? SLASH_ON : SLASH_OFF;
+ return NULL;
+}
+
+static const command_rec dir_cmds[] =
+{
+ AP_INIT_ITERATE("DirectoryIndex", add_index, NULL, DIR_CMD_PERMS,
+ "a list of file names"),
+ AP_INIT_FLAG("DirectorySlash", configure_slash, NULL, DIR_CMD_PERMS,
+ "On or Off"),
+ {NULL}
+};
+
+static void *create_dir_config(apr_pool_t *p, char *dummy)
+{
+ dir_config_rec *new = apr_pcalloc(p, sizeof(dir_config_rec));
+
+ new->index_names = NULL;
+ new->do_slash = SLASH_UNSET;
+ return (void *) new;
+}
+
+static void *merge_dir_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ dir_config_rec *new = apr_pcalloc(p, sizeof(dir_config_rec));
+ dir_config_rec *base = (dir_config_rec *)basev;
+ dir_config_rec *add = (dir_config_rec *)addv;
+
+ new->index_names = add->index_names ? add->index_names : base->index_names;
+ new->do_slash =
+ (add->do_slash == SLASH_UNSET) ? base->do_slash : add->do_slash;
+ return new;
+}
+
+static int fixup_dir(request_rec *r)
+{
+ dir_config_rec *d;
+ char *dummy_ptr[1];
+ char **names_ptr;
+ int num_names;
+ int error_notfound = 0;
+
+ /* only handle requests against directories */
+ if (r->finfo.filetype != APR_DIR) {
+ return DECLINED;
+ }
+
+ /* In case mod_mime wasn't present, and no handler was assigned. */
+ if (!r->handler) {
+ r->handler = DIR_MAGIC_TYPE;
+ }
+
+ /* Never tolerate path_info on dir requests */
+ if (r->path_info && *r->path_info) {
+ return DECLINED;
+ }
+
+ d = (dir_config_rec *)ap_get_module_config(r->per_dir_config,
+ &dir_module);
+
+ /* Redirect requests that are not '/' terminated */
+ if (r->uri[0] == '\0' || r->uri[strlen(r->uri) - 1] != '/')
+ {
+ char *ifile;
+
+ if (!d->do_slash) {
+ return DECLINED;
+ }
+
+ /* Only redirect non-get requests if we have no note to warn
+ * that this browser cannot handle redirs on non-GET requests
+ * (such as Microsoft's WebFolders).
+ */
+ if ((r->method_number != M_GET)
+ && apr_table_get(r->subprocess_env, "redirect-carefully")) {
+ return DECLINED;
+ }
+
+ if (r->args != NULL) {
+ ifile = apr_pstrcat(r->pool, ap_escape_uri(r->pool, r->uri),
+ "/", "?", r->args, NULL);
+ }
+ else {
+ ifile = apr_pstrcat(r->pool, ap_escape_uri(r->pool, r->uri),
+ "/", NULL);
+ }
+
+ apr_table_setn(r->headers_out, "Location",
+ ap_construct_url(r->pool, ifile, r));
+ return HTTP_MOVED_PERMANENTLY;
+ }
+
+ if (strcmp(r->handler, DIR_MAGIC_TYPE)) {
+ return DECLINED;
+ }
+
+ if (d->index_names) {
+ names_ptr = (char **)d->index_names->elts;
+ num_names = d->index_names->nelts;
+ }
+ else {
+ dummy_ptr[0] = AP_DEFAULT_INDEX;
+ names_ptr = dummy_ptr;
+ num_names = 1;
+ }
+
+ for (; num_names; ++names_ptr, --num_names) {
+ /* XXX: Is this name_ptr considered escaped yet, or not??? */
+ char *name_ptr = *names_ptr;
+ request_rec *rr;
+
+ /* Once upon a time args were handled _after_ the successful redirect.
+ * But that redirect might then _refuse_ the given r->args, creating
+ * a nasty tangle. It seems safer to consider the r->args while we
+ * determine if name_ptr is our viable index, and therefore set them
+ * up correctly on redirect.
+ */
+ if (r->args != NULL) {
+ name_ptr = apr_pstrcat(r->pool, name_ptr, "?", r->args, NULL);
+ }
+
+ rr = ap_sub_req_lookup_uri(name_ptr, r, NULL);
+
+ /* XXX: (filetype == APR_REG) - we can't use a non-file index??? */
+ if ( rr->status == HTTP_OK
+ && ( (rr->handler && !strcmp(rr->handler, "proxy-server"))
+ || rr->finfo.filetype == APR_REG)) {
+ ap_internal_fast_redirect(rr, r);
+ return OK;
+ }
+
+ /* If the request returned a redirect, propagate it to the client */
+
+ if (ap_is_HTTP_REDIRECT(rr->status)
+ || (rr->status == HTTP_NOT_ACCEPTABLE && num_names == 1)
+ || (rr->status == HTTP_UNAUTHORIZED && num_names == 1)) {
+
+ apr_pool_join(r->pool, rr->pool);
+ error_notfound = rr->status;
+ r->notes = apr_table_overlay(r->pool, r->notes, rr->notes);
+ r->headers_out = apr_table_overlay(r->pool, r->headers_out,
+ rr->headers_out);
+ r->err_headers_out = apr_table_overlay(r->pool, r->err_headers_out,
+ rr->err_headers_out);
+ return error_notfound;
+ }
+
+ /* If the request returned something other than 404 (or 200),
+ * it means the module encountered some sort of problem. To be
+ * secure, we should return the error, rather than allow autoindex
+ * to create a (possibly unsafe) directory index.
+ *
+ * So we store the error, and if none of the listed files
+ * exist, we return the last error response we got, instead
+ * of a directory listing.
+ */
+ if (rr->status && rr->status != HTTP_NOT_FOUND
+ && rr->status != HTTP_OK) {
+ error_notfound = rr->status;
+ }
+
+ ap_destroy_sub_req(rr);
+ }
+
+ if (error_notfound) {
+ return error_notfound;
+ }
+
+ /* nothing for us to do, pass on through */
+ return DECLINED;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_fixups(fixup_dir,NULL,NULL,APR_HOOK_LAST);
+}
+
+module AP_MODULE_DECLARE_DATA dir_module = {
+ STANDARD20_MODULE_STUFF,
+ create_dir_config, /* create per-directory config structure */
+ merge_dir_configs, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ dir_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
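
[Editor's note -- illustration only, not part of the patch above. The following is a minimal standalone sketch of the two decisions fixup_dir() makes: redirect a directory URI that lacks a trailing slash, otherwise try each DirectoryIndex candidate in configuration order. The names index_names and needs_slash_redirect are hypothetical stand-ins, not identifiers from the module.]

/* Illustration only: mirrors the control flow of fixup_dir(), outside
 * of the server. */
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for dir_config_rec->index_names. */
static const char *index_names[] = { "index.html", "index.cgi", NULL };

/* Mirrors the r->uri check guarding the HTTP_MOVED_PERMANENTLY branch. */
static int needs_slash_redirect(const char *uri)
{
    size_t len = strlen(uri);
    return len == 0 || uri[len - 1] != '/';
}

int main(void)
{
    const char *uri = "/docs";
    int i;

    if (needs_slash_redirect(uri)) {
        printf("301 redirect to %s/\n", uri);
        return 0;
    }

    /* Otherwise walk the index list in order, as the for loop over
     * names_ptr does with ap_sub_req_lookup_uri() in the module. */
    for (i = 0; index_names[i] != NULL; i++) {
        printf("would look up %s%s\n", uri, index_names[i]);
    }
    return 0;
}
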
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.dsp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.dsp
new file mode 100644
index 00000000..5863d90f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_dir" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_dir - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_dir.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_dir.mak" CFG="mod_dir - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_dir - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_dir - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_dir - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_dir_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_dir.so" /base:@..\..\os\win32\BaseAddr.ref,mod_dir.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_dir.so" /base:@..\..\os\win32\BaseAddr.ref,mod_dir.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_dir - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_dir_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_dir.so" /base:@..\..\os\win32\BaseAddr.ref,mod_dir.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_dir.so" /base:@..\..\os\win32\BaseAddr.ref,mod_dir.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_dir - Win32 Release"
+# Name "mod_dir - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_dir.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_dir.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_dir - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_dir.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_dir.so "dir_module for Apache" ../../include/ap_release.h > .\mod_dir.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_dir - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_dir.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_dir.so "dir_module for Apache" ../../include/ap_release.h > .\mod_dir.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.exp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.exp
new file mode 100644
index 00000000..5fbf7729
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.exp
@@ -0,0 +1 @@
+dir_module
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.la b/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.la
new file mode 100644
index 00000000..9fd727d8
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.la
@@ -0,0 +1,35 @@
+# mod_dir.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_dir.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_dir.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.lo b/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.lo
new file mode 100644
index 00000000..471329d6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.lo
@@ -0,0 +1,12 @@
+# mod_dir.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_dir.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_dir.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.o b/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.o
new file mode 100644
index 00000000..40dbfa1e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_dir.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.c b/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.c
new file mode 100644
index 00000000..f7745c65
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.c
@@ -0,0 +1,897 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This imagemap module started as a port of the original imagemap.c
+ * written by Rob McCool (11/13/93 robm@ncsa.uiuc.edu).
+ * This version includes the mapping algorithms found in version 1.3
+ * of imagemap.c.
+ *
+ * Contributors to this code include:
+ *
+ * Kevin Hughes, kevinh@pulua.hcc.hawaii.edu
+ *
+ * Eric Haines, erich@eye.com
+ * "macmartinized" polygon code copyright 1992 by Eric Haines, erich@eye.com
+ *
+ * Randy Terbush, randy@zyzzyva.com
+ * port to Apache module format, "base_uri" and support for relative URLs
+ *
+ * James H. Cloos, Jr., cloos@jhcloos.com
+ * Added point datatype, using code in NCSA's version 1.8 imagemap.c
+ * program, as distributed with version 1.4.1 of their server.
+ * The point code was originally added by Craig Milo Rogers, Rogers@ISI.Edu
+ *
+ * Nathan Kurz, nate@tripod.com
+ * Rewrite/reorganization. New handling of default, base and relative URLs.
+ * New Configuration directives:
+ * ImapMenu {none, formatted, semiformatted, unformatted}
+ * ImapDefault {error, nocontent, referer, menu, URL}
+ * ImapBase {map, referer, URL}
+ * Support for creating non-graphical menu added. (backwards compatible):
+ * Old: directive URL [x,y ...]
+ * New: directive URL "Menu text" [x,y ...]
+ * or: directive URL x,y ... "Menu text"
+ * Map format and menu concept courtesy Joshua Bell, jsbell@acs.ucalgary.ca.
+ *
+ * Mark Cox, mark@ukweb.com, Allow relative URLs even when no base specified
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+
+#define APR_WANT_STDIO /* for sscanf() */
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_log.h"
+#include "util_script.h"
+#include "mod_core.h"
+
+
+#define IMAP_MAGIC_TYPE "application/x-httpd-imap"
+#define MAXVERTS 100
+#define X 0
+#define Y 1
+
+#define IMAP_MENU_DEFAULT "formatted"
+#define IMAP_DEFAULT_DEFAULT "nocontent"
+#define IMAP_BASE_DEFAULT "map"
+
+#ifdef SUNOS4
+double strtod(); /* SunOS needed this */
+#endif
+
+module AP_MODULE_DECLARE_DATA imap_module;
+
+typedef struct {
+ char *imap_menu;
+ char *imap_default;
+ char *imap_base;
+} imap_conf_rec;
+
+static void *create_imap_dir_config(apr_pool_t *p, char *dummy)
+{
+ imap_conf_rec *icr =
+ (imap_conf_rec *) apr_palloc(p, sizeof(imap_conf_rec));
+
+ icr->imap_menu = NULL;
+ icr->imap_default = NULL;
+ icr->imap_base = NULL;
+
+ return icr;
+}
+
+static void *merge_imap_dir_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ imap_conf_rec *new = (imap_conf_rec *) apr_pcalloc(p, sizeof(imap_conf_rec));
+ imap_conf_rec *base = (imap_conf_rec *) basev;
+ imap_conf_rec *add = (imap_conf_rec *) addv;
+
+ new->imap_menu = add->imap_menu ? add->imap_menu : base->imap_menu;
+ new->imap_default = add->imap_default ? add->imap_default
+ : base->imap_default;
+ new->imap_base = add->imap_base ? add->imap_base : base->imap_base;
+
+ return new;
+}
+
+
+static const command_rec imap_cmds[] =
+{
+ AP_INIT_TAKE1("ImapMenu", ap_set_string_slot,
+ (void *)APR_OFFSETOF(imap_conf_rec, imap_menu), OR_INDEXES,
+ "the type of menu generated: none, formatted, semiformatted, "
+ "unformatted"),
+ AP_INIT_TAKE1("ImapDefault", ap_set_string_slot,
+ (void *)APR_OFFSETOF(imap_conf_rec, imap_default), OR_INDEXES,
+ "the action taken if no match: error, nocontent, referer, "
+ "menu, URL"),
+ AP_INIT_TAKE1("ImapBase", ap_set_string_slot,
+ (void *)APR_OFFSETOF(imap_conf_rec, imap_base), OR_INDEXES,
+ "the base for all URL's: map, referer, URL (or start of)"),
+ {NULL}
+};
+
+static int pointinrect(const double point[2], double coords[MAXVERTS][2])
+{
+ double max[2], min[2];
+ if (coords[0][X] > coords[1][X]) {
+ max[0] = coords[0][X];
+ min[0] = coords[1][X];
+ }
+ else {
+ max[0] = coords[1][X];
+ min[0] = coords[0][X];
+ }
+
+ if (coords[0][Y] > coords[1][Y]) {
+ max[1] = coords[0][Y];
+ min[1] = coords[1][Y];
+ }
+ else {
+ max[1] = coords[1][Y];
+ min[1] = coords[0][Y];
+ }
+
+ return ((point[X] >= min[0] && point[X] <= max[0]) &&
+ (point[Y] >= min[1] && point[Y] <= max[1]));
+}
+
+static int pointincircle(const double point[2], double coords[MAXVERTS][2])
+{
+ double radius1, radius2;
+
+ radius1 = ((coords[0][Y] - coords[1][Y]) * (coords[0][Y] - coords[1][Y]))
+ + ((coords[0][X] - coords[1][X]) * (coords[0][X] - coords[1][X]));
+
+ radius2 = ((coords[0][Y] - point[Y]) * (coords[0][Y] - point[Y]))
+ + ((coords[0][X] - point[X]) * (coords[0][X] - point[X]));
+
+ return (radius2 <= radius1);
+}
+
+#define fmin(a,b) (((a)>(b))?(b):(a))
+#define fmax(a,b) (((a)>(b))?(a):(b))
+
+static int pointinpoly(const double point[2], double pgon[MAXVERTS][2])
+{
+ int i, numverts, crossings = 0;
+ double x = point[X], y = point[Y];
+
+ for (numverts = 0; pgon[numverts][X] != -1 && numverts < MAXVERTS;
+ numverts++) {
+ /* just counting the vertexes */
+ }
+
+ for (i = 0; i < numverts; i++) {
+ double x1=pgon[i][X];
+ double y1=pgon[i][Y];
+ double x2=pgon[(i + 1) % numverts][X];
+ double y2=pgon[(i + 1) % numverts][Y];
+ double d=(y - y1) * (x2 - x1) - (x - x1) * (y2 - y1);
+
+ if ((y1 >= y) != (y2 >= y)) {
+ crossings +=y2 - y1 >= 0 ? d >= 0 : d <= 0;
+ }
+ if (!d && fmin(x1,x2) <= x && x <= fmax(x1,x2)
+ && fmin(y1,y2) <= y && y <= fmax(y1,y2)) {
+ return 1;
+ }
+ }
+ return crossings & 0x01;
+}
+
+
+static int is_closer(const double point[2], double coords[MAXVERTS][2],
+ double *closest)
+{
+ double dist_squared = ((point[X] - coords[0][X])
+ * (point[X] - coords[0][X]))
+ + ((point[Y] - coords[0][Y])
+ * (point[Y] - coords[0][Y]));
+
+ if (point[X] < 0 || point[Y] < 0) {
+ return (0); /* don't mess around with negative coordinates */
+ }
+
+ if (*closest < 0 || dist_squared < *closest) {
+ *closest = dist_squared;
+ return (1); /* if this is the first point or is the closest yet
+ set 'closest' equal to this distance^2 */
+ }
+
+ return (0); /* if it's not the first or closest */
+
+}
+
+static double get_x_coord(const char *args)
+{
+ char *endptr; /* we want it non-null */
+ double x_coord = -1; /* -1 is returned if no coordinate is given */
+
+ if (args == NULL) {
+ return (-1); /* in case we aren't passed anything */
+ }
+
+ while (*args && !apr_isdigit(*args) && *args != ',') {
+ args++; /* jump to the first digit, but not past
+ a comma or end */
+ }
+
+ x_coord = strtod(args, &endptr);
+
+ if (endptr > args) { /* if a conversion was made */
+ return (x_coord);
+ }
+
+ return (-1); /* else if no conversion was made,
+ or if no args was given */
+}
+
+static double get_y_coord(const char *args)
+{
+ char *endptr; /* we want it non-null */
+ const char *start_of_y = NULL;
+ double y_coord = -1; /* -1 is returned on error */
+
+ if (args == NULL) {
+ return (-1); /* in case we aren't passed anything */
+ }
+
+ start_of_y = ap_strchr_c(args, ','); /* the comma */
+
+ if (start_of_y) {
+
+ start_of_y++; /* start looking at the character after
+ the comma */
+
+ while (*start_of_y && !apr_isdigit(*start_of_y)) {
+ start_of_y++; /* jump to the first digit, but not
+ past the end */
+ }
+
+ y_coord = strtod(start_of_y, &endptr);
+
+ if (endptr > start_of_y) {
+ return (y_coord);
+ }
+ }
+
+ return (-1); /* if no conversion was made, or
+ no comma was found in args */
+}
+
+
+/* See if string has a "quoted part", and if so set *quoted_part to
+ * the first character of the quoted part, then hammer a \0 onto the
+ * trailing quote, and set *string to point at the first character
+ * past the second quote.
+ *
+ * Otherwise set *quoted_part to NULL, and leave *string alone.
+ */
+static void read_quoted(char **string, char **quoted_part)
+{
+ char *strp = *string;
+
+ /* assume there's no quoted part */
+ *quoted_part = NULL;
+
+ while (apr_isspace(*strp)) {
+ strp++; /* go along string until non-whitespace */
+ }
+
+ if (*strp == '"') { /* if that character is a double quote */
+ strp++; /* step over it */
+ *quoted_part = strp; /* note where the quoted part begins */
+
+ while (*strp && *strp != '"') {
+ ++strp; /* skip the quoted portion */
+ }
+
+ *strp = '\0'; /* end the string with a NUL */
+
+ strp++; /* step over the last double quote */
+ *string = strp;
+ }
+}
+
+/*
+ * returns the mapped URL or NULL.
+ */
+static char *imap_url(request_rec *r, const char *base, const char *value)
+{
+/* translates a value into a URL. */
+ int slen, clen;
+ char *string_pos = NULL;
+ const char *string_pos_const = NULL;
+ char *directory = NULL;
+ const char *referer = NULL;
+ char *my_base;
+
+ if (!strcasecmp(value, "map") || !strcasecmp(value, "menu")) {
+ return ap_construct_url(r->pool, r->uri, r);
+ }
+
+ if (!strcasecmp(value, "nocontent") || !strcasecmp(value, "error")) {
+ return apr_pstrdup(r->pool, value); /* these are handled elsewhere,
+ so just copy them */
+ }
+
+ if (!strcasecmp(value, "referer")) {
+ referer = apr_table_get(r->headers_in, "Referer");
+ if (referer && *referer) {
+ return ap_escape_html(r->pool, referer);
+ }
+ else {
+ /* XXX: This used to do *value = '\0'; ... which is totally bogus
+ * because it hammers the passed in value, which can be a string
+ * constant, or part of a config, or whatever. Total garbage.
+ * This works around that without changing the rest of this
+ * code much
+ */
+ value = ""; /* if 'referer' but no referring page,
+ null the value */
+ }
+ }
+
+ string_pos_const = value;
+ while (apr_isalpha(*string_pos_const)) {
+ string_pos_const++; /* go along the URL from the map
+ until a non-letter */
+ }
+ if (*string_pos_const == ':') {
+ /* if letters and then a colon (like http:) */
+ /* it's an absolute URL, so use it! */
+ return apr_pstrdup(r->pool, value);
+ }
+
+ if (!base || !*base) {
+ if (value && *value) {
+ return apr_pstrdup(r->pool, value); /* no base: use what is given */
+ }
+ /* no base, no value: pick a simple default */
+ return ap_construct_url(r->pool, "/", r);
+ }
+
+ /* must be a relative URL to be combined with base */
+ if (ap_strchr_c(base, '/') == NULL && (!strncmp(value, "../", 3)
+ || !strcmp(value, ".."))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "invalid base directive in map file: %s", r->uri);
+ return NULL;
+ }
+ my_base = apr_pstrdup(r->pool, base);
+ string_pos = my_base;
+ while (*string_pos) {
+ if (*string_pos == '/' && *(string_pos + 1) == '/') {
+ string_pos += 2; /* if there are two slashes, jump over them */
+ continue;
+ }
+ if (*string_pos == '/') { /* the first single slash */
+ if (value[0] == '/') {
+ *string_pos = '\0';
+ } /* if the URL from the map starts from root,
+ end the base URL string at the first single
+ slash */
+ else {
+ directory = string_pos; /* save the start of
+ the directory portion */
+
+ string_pos = strrchr(string_pos, '/'); /* now reuse
+ string_pos */
+ string_pos++; /* step over that last slash */
+ *string_pos = '\0';
+ } /* but if the map url is relative, leave the
+ slash on the base (if there is one) */
+ break;
+ }
+ string_pos++; /* until we get to the end of my_base without
+ finding a slash by itself */
+ }
+
+ while (!strncmp(value, "../", 3) || !strcmp(value, "..")) {
+
+ if (directory && (slen = strlen(directory))) {
+
+ /* for each '..', knock a directory off the end
+ by ending the string right at the last slash.
+ But only consider the directory portion: don't eat
+ into the server name. And only try if a directory
+ portion was found */
+
+ clen = slen - 1;
+
+ while ((slen - clen) == 1) {
+
+ if ((string_pos = strrchr(directory, '/'))) {
+ *string_pos = '\0';
+ }
+ clen = strlen(directory);
+ if (clen == 0) {
+ break;
+ }
+ }
+
+ value += 2; /* jump over the '..' that we found in the
+ value */
+ }
+ else if (directory) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "invalid directory name in map file: %s", r->uri);
+ return NULL;
+ }
+
+ if (!strncmp(value, "/../", 4) || !strcmp(value, "/..")) {
+ value++; /* step over the '/' if there are more '..'
+ to do. This way, we leave the starting
+ '/' on value after the last '..', but get
+ rid of it otherwise */
+ }
+
+ } /* by this point, value does not start
+ with '..' */
+
+ if (value && *value) {
+ return apr_pstrcat(r->pool, my_base, value, NULL);
+ }
+ return my_base;
+}
+
+static int imap_reply(request_rec *r, char *redirect)
+{
+ if (!strcasecmp(redirect, "error")) {
+ /* they actually requested an error! */
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ if (!strcasecmp(redirect, "nocontent")) {
+ /* tell the client to keep the page it has */
+ return HTTP_NO_CONTENT;
+ }
+ if (redirect && *redirect) {
+ /* must be a URL, so redirect to it */
+ apr_table_setn(r->headers_out, "Location", redirect);
+ return HTTP_MOVED_TEMPORARILY;
+ }
+ return HTTP_INTERNAL_SERVER_ERROR;
+}
+
+static void menu_header(request_rec *r, char *menu)
+{
+ ap_set_content_type(r, "text/html; charset=ISO-8859-1");
+
+ ap_rvputs(r, DOCTYPE_HTML_3_2, "<html><head>\n<title>Menu for ",
+ ap_escape_html(r->pool, r->uri),
+ "</title>\n</head><body>\n", NULL);
+
+ if (!strcasecmp(menu, "formatted")) {
+ ap_rvputs(r, "<h1>Menu for ",
+ ap_escape_html(r->pool, r->uri),
+ "</h1>\n<hr />\n\n", NULL);
+ }
+
+ return;
+}
+
+static void menu_blank(request_rec *r, char *menu)
+{
+ if (!strcasecmp(menu, "formatted")) {
+ ap_rputs("\n", r);
+ }
+ if (!strcasecmp(menu, "semiformatted")) {
+ ap_rputs("<br />\n", r);
+ }
+ if (!strcasecmp(menu, "unformatted")) {
+ ap_rputs("\n", r);
+ }
+ return;
+}
+
+static void menu_comment(request_rec *r, char *menu, char *comment)
+{
+ if (!strcasecmp(menu, "formatted")) {
+ ap_rputs("\n", r); /* print just a newline if 'formatted' */
+ }
+ if (!strcasecmp(menu, "semiformatted") && *comment) {
+ ap_rvputs(r, comment, "\n", NULL);
+ }
+ if (!strcasecmp(menu, "unformatted") && *comment) {
+ ap_rvputs(r, comment, "\n", NULL);
+ }
+ return; /* comments are ignored in the
+ 'formatted' form */
+}
+
+static void menu_default(request_rec *r, char *menu, char *href, char *text)
+{
+ if (!strcasecmp(href, "error") || !strcasecmp(href, "nocontent")) {
+ return; /* don't print such lines, these aren't
+ really href's */
+ }
+ if (!strcasecmp(menu, "formatted")) {
+ ap_rvputs(r, "<pre>(Default) <a href=\"", href, "\">", text,
+ "</a></pre>\n", NULL);
+ }
+ if (!strcasecmp(menu, "semiformatted")) {
+ ap_rvputs(r, "<pre>(Default) <a href=\"", href, "\">", text,
+ "</a></pre>\n", NULL);
+ }
+ if (!strcasecmp(menu, "unformatted")) {
+ ap_rvputs(r, "<a href=\"", href, "\">", text, "</a>", NULL);
+ }
+ return;
+}
+
+static void menu_directive(request_rec *r, char *menu, char *href, char *text)
+{
+ if (!strcasecmp(href, "error") || !strcasecmp(href, "nocontent")) {
+ return; /* don't print such lines, as this isn't
+ really an href */
+ }
+ if (!strcasecmp(menu, "formatted")) {
+ ap_rvputs(r, "<pre> <a href=\"", href, "\">", text,
+ "</a></pre>\n", NULL);
+ }
+ if (!strcasecmp(menu, "semiformatted")) {
+ ap_rvputs(r, "<pre> <a href=\"", href, "\">", text,
+ "</a></pre>\n", NULL);
+ }
+ if (!strcasecmp(menu, "unformatted")) {
+ ap_rvputs(r, "<a href=\"", href, "\">", text, "</a>", NULL);
+ }
+ return;
+}
+
+static void menu_footer(request_rec *r)
+{
+ ap_rputs("\n\n</body>\n</html>\n", r); /* finish the menu */
+}
+
+static int imap_handler_internal(request_rec *r)
+{
+ char input[MAX_STRING_LEN];
+ char *directive;
+ char *value;
+ char *href_text;
+ char *base;
+ char *redirect;
+ char *mapdflt;
+ char *closest = NULL;
+ double closest_yet = -1;
+ apr_status_t status;
+
+ double testpoint[2];
+ double pointarray[MAXVERTS + 1][2];
+ int vertex;
+
+ char *string_pos;
+ int showmenu = 0;
+
+ imap_conf_rec *icr;
+
+ char *imap_menu;
+ char *imap_default;
+ char *imap_base;
+
+ ap_configfile_t *imap;
+
+ icr = ap_get_module_config(r->per_dir_config, &imap_module);
+
+ imap_menu = icr->imap_menu ? icr->imap_menu : IMAP_MENU_DEFAULT;
+ imap_default = icr->imap_default
+ ? icr->imap_default : IMAP_DEFAULT_DEFAULT;
+ imap_base = icr->imap_base ? icr->imap_base : IMAP_BASE_DEFAULT;
+
+ status = ap_pcfg_openfile(&imap, r->pool, r->filename);
+
+ if (status != APR_SUCCESS) {
+ return HTTP_NOT_FOUND;
+ }
+
+ base = imap_url(r, NULL, imap_base); /* set base according
+ to default */
+ if (!base) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ mapdflt = imap_url(r, NULL, imap_default); /* and default to
+ global default */
+ if (!mapdflt) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ testpoint[X] = get_x_coord(r->args);
+ testpoint[Y] = get_y_coord(r->args);
+
+ if ((testpoint[X] == -1 || testpoint[Y] == -1) ||
+ (testpoint[X] == 0 && testpoint[Y] == 0)) {
+ /* if either is -1 or if both are zero (new Lynx) */
+ /* we don't have valid coordinates */
+ testpoint[X] = -1;
+ testpoint[Y] = -1;
+ if (strncasecmp(imap_menu, "none", 2)) {
+ showmenu = 1; /* show the menu _unless_ ImapMenu is
+ 'none' or 'no' */
+ }
+ }
+
+ if (showmenu) { /* send start of imagemap menu if
+ we're going to */
+ menu_header(r, imap_menu);
+ }
+
+ while (!ap_cfg_getline(input, sizeof(input), imap)) {
+ if (!input[0]) {
+ if (showmenu) {
+ menu_blank(r, imap_menu);
+ }
+ continue;
+ }
+
+ if (input[0] == '#') {
+ if (showmenu) {
+ menu_comment(r, imap_menu, input + 1);
+ }
+ continue;
+ } /* blank lines and comments are ignored
+ if we aren't printing a menu */
+
+ /* find the first two space delimited fields, recall that
+ * ap_cfg_getline has removed leading/trailing whitespace.
+ *
+ * note that we're tokenizing as we go... if we were to use the
+ * ap_getword() class of functions we would end up allocating extra
+ * memory for every line of the map file
+ */
+ string_pos = input;
+ if (!*string_pos) { /* need at least two fields */
+ goto need_2_fields;
+ }
+
+ directive = string_pos;
+ while (*string_pos && !apr_isspace(*string_pos)) { /* past directive */
+ ++string_pos;
+ }
+ if (!*string_pos) { /* need at least two fields */
+ goto need_2_fields;
+ }
+ *string_pos++ = '\0';
+
+ if (!*string_pos) { /* need at least two fields */
+ goto need_2_fields;
+ }
+ while(*string_pos && apr_isspace(*string_pos)) { /* past whitespace */
+ ++string_pos;
+ }
+
+ value = string_pos;
+ while (*string_pos && !apr_isspace(*string_pos)) { /* past value */
+ ++string_pos;
+ }
+ if (apr_isspace(*string_pos)) {
+ *string_pos++ = '\0';
+ }
+ else {
+ /* end of input, don't advance past it */
+ *string_pos = '\0';
+ }
+
+ if (!strncasecmp(directive, "base", 4)) { /* base, base_uri */
+ base = imap_url(r, NULL, value);
+ if (!base) {
+ goto menu_bail;
+ }
+ continue; /* base is never printed to a menu */
+ }
+
+ read_quoted(&string_pos, &href_text);
+
+ if (!strcasecmp(directive, "default")) { /* default */
+ mapdflt = imap_url(r, NULL, value);
+ if (!mapdflt) {
+ goto menu_bail;
+ }
+ if (showmenu) { /* print the default if there's a menu */
+ redirect = imap_url(r, base, mapdflt);
+ if (!redirect) {
+ goto menu_bail;
+ }
+ menu_default(r, imap_menu, redirect,
+ href_text ? href_text : mapdflt);
+ }
+ continue;
+ }
+
+ vertex = 0;
+ while (vertex < MAXVERTS &&
+ sscanf(string_pos, "%lf%*[, ]%lf",
+ &pointarray[vertex][X], &pointarray[vertex][Y]) == 2) {
+ /* Now skip what we just read... we can't use ANSIism %n */
+ while (apr_isspace(*string_pos)) { /* past whitespace */
+ string_pos++;
+ }
+ while (apr_isdigit(*string_pos)) { /* and the 1st number */
+ string_pos++;
+ }
+ string_pos++; /* skip the ',' */
+ while (apr_isspace(*string_pos)) { /* past any more whitespace */
+ string_pos++;
+ }
+ while (apr_isdigit(*string_pos)) { /* 2nd number */
+ string_pos++;
+ }
+ vertex++;
+ } /* so long as there are more vertices to
+ read, and we have room, read them in.
+ We start where we left off of the last
+ sscanf, not at the beginning. */
+
+ pointarray[vertex][X] = -1; /* signals the end of vertices */
+
+ if (showmenu) {
+ if (!href_text) {
+ read_quoted(&string_pos, &href_text); /* href text could
+ be here instead */
+ }
+ redirect = imap_url(r, base, value);
+ if (!redirect) {
+ goto menu_bail;
+ }
+ menu_directive(r, imap_menu, redirect,
+ href_text ? href_text : value);
+ continue;
+ }
+ /* note that we don't make it past here if we are making a menu */
+
+ if (testpoint[X] == -1 || pointarray[0][X] == -1) {
+ continue; /* don't try the following tests if testpoints
+ are invalid, or if there are no
+ coordinates */
+ }
+
+ if (!strcasecmp(directive, "poly")) { /* poly */
+
+ if (pointinpoly(testpoint, pointarray)) {
+ ap_cfg_closefile(imap);
+ redirect = imap_url(r, base, value);
+ if (!redirect) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ return (imap_reply(r, redirect));
+ }
+ continue;
+ }
+
+ if (!strcasecmp(directive, "circle")) { /* circle */
+
+ if (pointincircle(testpoint, pointarray)) {
+ ap_cfg_closefile(imap);
+ redirect = imap_url(r, base, value);
+ if (!redirect) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ return (imap_reply(r, redirect));
+ }
+ continue;
+ }
+
+ if (!strcasecmp(directive, "rect")) { /* rect */
+
+ if (pointinrect(testpoint, pointarray)) {
+ ap_cfg_closefile(imap);
+ redirect = imap_url(r, base, value);
+ if (!redirect) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ return (imap_reply(r, redirect));
+ }
+ continue;
+ }
+
+ if (!strcasecmp(directive, "point")) { /* point */
+
+ if (is_closer(testpoint, pointarray, &closest_yet)) {
+ closest = apr_pstrdup(r->pool, value);
+ }
+
+ continue;
+ } /* move on to next line whether it's
+ closest or not */
+
+ } /* nothing matched, so we get another line! */
+
+ ap_cfg_closefile(imap); /* we are done with the map file; close it */
+
+ if (showmenu) {
+ menu_footer(r); /* finish the menu and we are done */
+ return OK;
+ }
+
+ if (closest) { /* if a 'point' directive has been seen */
+ redirect = imap_url(r, base, closest);
+ if (!redirect) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ return (imap_reply(r, redirect));
+ }
+
+ if (mapdflt) { /* a default should be defined, even if
+ only 'nocontent' */
+ redirect = imap_url(r, base, mapdflt);
+ if (!redirect) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ return (imap_reply(r, redirect));
+ }
+
+ return HTTP_INTERNAL_SERVER_ERROR; /* If we make it this far,
+ we failed. They lose! */
+
+need_2_fields:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "map file %s, line %d syntax error: requires at "
+ "least two fields", r->uri, imap->line_number);
+ /* fall through */
+menu_bail:
+ ap_cfg_closefile(imap);
+ if (showmenu) {
+ /* There's not much else we can do ... we've already sent the headers
+ * to the client.
+ */
+ ap_rputs("\n\n[an internal server error occured]\n", r);
+ menu_footer(r);
+ return OK;
+ }
+ return HTTP_INTERNAL_SERVER_ERROR;
+}
+
+static int imap_handler(request_rec *r)
+{
+ /* Optimization: skip the allocation of large local variables on the
+ * stack (in imap_handler_internal()) on requests that aren't using
+ * imagemaps
+ */
+ if (r->method_number != M_GET || (strcmp(r->handler,IMAP_MAGIC_TYPE)
+ && strcmp(r->handler, "imap-file"))) {
+ return DECLINED;
+ }
+ else {
+ return imap_handler_internal(r);
+ }
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_handler(imap_handler,NULL,NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA imap_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_imap_dir_config, /* dir config creator */
+ merge_imap_dir_configs, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ imap_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
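
[Editor's note -- illustration only, not part of the patch above. A self-contained sketch of the rect and circle hit tests used by the imagemap handler, following the same coordinate conventions the module reads from a map file: two opposite corners for "rect", a center plus one edge point for "circle". The names in_rect and in_circle are made up for this example.]

/* Illustration only: exercises the same geometry as pointinrect()
 * and pointincircle() in mod_imap.c, outside of the server. */
#include <stdio.h>

#define X 0
#define Y 1

/* rect: c[0] and c[1] are two opposite corners. */
static int in_rect(const double p[2], const double c[2][2])
{
    double xmin = c[0][X] < c[1][X] ? c[0][X] : c[1][X];
    double xmax = c[0][X] < c[1][X] ? c[1][X] : c[0][X];
    double ymin = c[0][Y] < c[1][Y] ? c[0][Y] : c[1][Y];
    double ymax = c[0][Y] < c[1][Y] ? c[1][Y] : c[0][Y];
    return p[X] >= xmin && p[X] <= xmax && p[Y] >= ymin && p[Y] <= ymax;
}

/* circle: c[0] is the center, c[1] a point on the edge; compare
 * squared distances, exactly as pointincircle() does. */
static int in_circle(const double p[2], const double c[2][2])
{
    double r2 = (c[0][X] - c[1][X]) * (c[0][X] - c[1][X])
              + (c[0][Y] - c[1][Y]) * (c[0][Y] - c[1][Y]);
    double d2 = (c[0][X] - p[X]) * (c[0][X] - p[X])
              + (c[0][Y] - p[Y]) * (c[0][Y] - p[Y]);
    return d2 <= r2;
}

int main(void)
{
    const double click[2] = { 30.0, 40.0 };
    const double rect[2][2] = { { 10.0, 10.0 }, { 50.0, 60.0 } };
    const double circ[2][2] = { { 30.0, 30.0 }, { 30.0, 45.0 } }; /* radius 15 */

    printf("rect hit: %d\n", in_rect(click, rect));     /* prints 1 */
    printf("circle hit: %d\n", in_circle(click, circ)); /* distance 10 <= 15, prints 1 */
    return 0;
}
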
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.dsp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.dsp
new file mode 100644
index 00000000..19e15bf3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_imap" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_imap - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_imap.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_imap.mak" CFG="mod_imap - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_imap - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_imap - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_imap - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_imap_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_imap.so" /base:@..\..\os\win32\BaseAddr.ref,mod_imap.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_imap.so" /base:@..\..\os\win32\BaseAddr.ref,mod_imap.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_imap - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_imap_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_imap.so" /base:@..\..\os\win32\BaseAddr.ref,mod_imap.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_imap.so" /base:@..\..\os\win32\BaseAddr.ref,mod_imap.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_imap - Win32 Release"
+# Name "mod_imap - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_imap.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_imap.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_imap - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_imap.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_imap.so "imap_module for Apache" ../../include/ap_release.h > .\mod_imap.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_imap - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_imap.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_imap.so "imap_module for Apache" ../../include/ap_release.h > .\mod_imap.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.exp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.exp
new file mode 100644
index 00000000..1e0e0b83
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.exp
@@ -0,0 +1 @@
+imap_module
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.la b/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.la
new file mode 100644
index 00000000..a51962d8
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.la
@@ -0,0 +1,35 @@
+# mod_imap.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_imap.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_imap.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.lo b/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.lo
new file mode 100644
index 00000000..4cfb46d6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.lo
@@ -0,0 +1,12 @@
+# mod_imap.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_imap.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_imap.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.o b/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.o
new file mode 100644
index 00000000..e824bbdd
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_imap.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.c b/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.c
new file mode 100644
index 00000000..f8214cb4
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.c
@@ -0,0 +1,3096 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_negotiation.c: keeps track of MIME types the client is willing to
+ * accept, and contains code to handle type arbitration.
+ *
+ * rst
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_file_io.h"
+#include "apr_lib.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_protocol.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "util_script.h"
+
+
+#define MAP_FILE_MAGIC_TYPE "application/x-type-map"
+
+/* Commands --- configuring document caching on a per (virtual?)
+ * server basis...
+ */
+
+typedef struct {
+ int forcelangpriority;
+ apr_array_header_t *language_priority;
+} neg_dir_config;
+
+/* forcelangpriority flags
+ */
+#define FLP_UNDEF 0 /* Same as FLP_DEFAULT, but base overrides */
+#define FLP_NONE 1 /* Return 406, HTTP_NOT_ACCEPTABLE */
+#define FLP_PREFER 2 /* Use language_priority rather than MC */
+#define FLP_FALLBACK 4 /* Use language_priority rather than NA */
+
+#define FLP_DEFAULT FLP_PREFER
+
+module AP_MODULE_DECLARE_DATA negotiation_module;
+
+static void *create_neg_dir_config(apr_pool_t *p, char *dummy)
+{
+ neg_dir_config *new = (neg_dir_config *) apr_palloc(p,
+ sizeof(neg_dir_config));
+
+ new->forcelangpriority = FLP_UNDEF;
+ new->language_priority = NULL;
+ return new;
+}
+
+static void *merge_neg_dir_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ neg_dir_config *base = (neg_dir_config *) basev;
+ neg_dir_config *add = (neg_dir_config *) addv;
+ neg_dir_config *new = (neg_dir_config *) apr_palloc(p,
+ sizeof(neg_dir_config));
+
+ /* give priority to the config in the subdirectory */
+ new->forcelangpriority = (add->forcelangpriority != FLP_UNDEF)
+ ? add->forcelangpriority
+ : base->forcelangpriority;
+ new->language_priority = add->language_priority
+ ? add->language_priority
+ : base->language_priority;
+ return new;
+}
+
+static const char *set_language_priority(cmd_parms *cmd, void *n_,
+ const char *lang)
+{
+ neg_dir_config *n = n_;
+ const char **langp;
+
+ if (!n->language_priority)
+ n->language_priority = apr_array_make(cmd->pool, 4, sizeof(char *));
+
+ langp = (const char **) apr_array_push(n->language_priority);
+ *langp = lang;
+ return NULL;
+}
+
+static const char *set_force_priority(cmd_parms *cmd, void *n_, const char *w)
+{
+ neg_dir_config *n = n_;
+
+ if (!strcasecmp(w, "None")) {
+ if (n->forcelangpriority & ~FLP_NONE) {
+ return "Cannot combine ForceLanguagePriority options with None";
+ }
+ n->forcelangpriority = FLP_NONE;
+ }
+ else if (!strcasecmp(w, "Prefer")) {
+ if (n->forcelangpriority & FLP_NONE) {
+ return "Cannot combine ForceLanguagePriority options None and "
+ "Prefer";
+ }
+ n->forcelangpriority |= FLP_PREFER;
+ }
+ else if (!strcasecmp(w, "Fallback")) {
+ if (n->forcelangpriority & FLP_NONE) {
+ return "Cannot combine ForceLanguagePriority options None and "
+ "Fallback";
+ }
+ n->forcelangpriority |= FLP_FALLBACK;
+ }
+ else {
+ return apr_pstrcat(cmd->pool, "Invalid ForceLanguagePriority option ",
+ w, NULL);
+ }
+
+ return NULL;
+}
+
+static const char *cache_negotiated_docs(cmd_parms *cmd, void *dummy,
+ int arg)
+{
+ ap_set_module_config(cmd->server->module_config, &negotiation_module,
+ (arg ? "Cache" : NULL));
+ return NULL;
+}
+
+static int do_cache_negotiated_docs(server_rec *s)
+{
+ return (ap_get_module_config(s->module_config,
+ &negotiation_module) != NULL);
+}
+
+static const command_rec negotiation_cmds[] =
+{
+ AP_INIT_FLAG("CacheNegotiatedDocs", cache_negotiated_docs, NULL, RSRC_CONF,
+ "Either 'on' or 'off' (default)"),
+ AP_INIT_ITERATE("LanguagePriority", set_language_priority, NULL,
+ OR_FILEINFO,
+ "space-delimited list of MIME language abbreviations"),
+ AP_INIT_ITERATE("ForceLanguagePriority", set_force_priority, NULL,
+ OR_FILEINFO,
+ "Force LanguagePriority elections, either None, or "
+ "Fallback and/or Prefer"),
+ {NULL}
+};
+
+/*
+ * Record of available info on a media type specified by the client
+ * (we also use 'em for encodings and languages)
+ */
+
+typedef struct accept_rec {
+ char *name; /* MUST be lowercase */
+ float quality;
+ float level;
+ char *charset; /* for content-type only */
+} accept_rec;
+
+/*
+ * Record of available info on a particular variant
+ *
+ * Note that a few of these fields are updated by the actual negotiation
+ * code. These are:
+ *
+ * level_matched --- initialized to zero. Set to the value of level
+ * if the client actually accepts this media type at that
+ * level (and *not* if it got in on a wildcard). See level_cmp
+ * below.
+ * mime_stars -- initialized to zero. Set to the number of stars
+ * present in the best matching Accept header element.
+ * 1 for star/star, 2 for type/star and 3 for
+ * type/subtype.
+ *
+ * definite -- initialized to 1. Set to 0 if there is a match which
+ * makes the variant non-definite according to the rules
+ * in rfc2296.
+ */
+
+typedef struct var_rec {
+ request_rec *sub_req; /* May be NULL (is, for map files) */
+ const char *mime_type; /* MUST be lowercase */
+ const char *file_name; /* Set to 'this' (for map file body content) */
+ apr_off_t body; /* Only for map file body content */
+ const char *content_encoding;
+ apr_array_header_t *content_languages; /* list of lang. for this variant */
+ const char *content_charset;
+ const char *description;
+
+ /* The next five items give the quality values for the dimensions
+ * of negotiation for this variant. They are obtained from the
+ * appropriate header lines, except for source_quality, which
+ * is obtained from the variant itself (the 'qs' parameter value
+ * from the variant's mime-type). Apart from source_quality,
+ * these values are set when we find the quality for each variant
+ * (see best_match()). source_quality is set from the 'qs' parameter
+ * of the variant description or mime type: see set_mime_fields().
+ */
+ float lang_quality; /* quality of this variant's language */
+ float encoding_quality; /* ditto encoding */
+ float charset_quality; /* ditto charset */
+ float mime_type_quality; /* ditto media type */
+ float source_quality; /* source quality for this variant */
+
+ /* Now some special values */
+ float level; /* Auxiliary to content-type... */
+ apr_off_t bytes; /* content length, if known */
+ int lang_index; /* Index into LanguagePriority list */
+ int is_pseudo_html; /* text/html, *or* the INCLUDES_MAGIC_TYPEs */
+
+ /* Above are all written-once properties of the variant. The
+ * three fields below are changed during negotiation:
+ */
+
+ float level_matched;
+ int mime_stars;
+ int definite;
+} var_rec;
+
+/* Something to carry around the state of negotiation (and to keep
+ * all of this thread-safe)...
+ */
+
+typedef struct {
+ apr_pool_t *pool;
+ request_rec *r;
+ neg_dir_config *conf;
+ char *dir_name;
+ int accept_q; /* 1 if an Accept item has a q= param */
+ float default_lang_quality; /* fiddle lang q for variants with no lang */
+
+ /* the array pointers below are NULL if the corresponding accept
+ * headers are not present
+ */
+ apr_array_header_t *accepts; /* accept_recs */
+ apr_array_header_t *accept_encodings; /* accept_recs */
+ apr_array_header_t *accept_charsets; /* accept_recs */
+ apr_array_header_t *accept_langs; /* accept_recs */
+
+ apr_array_header_t *avail_vars; /* available variants */
+
+ int count_multiviews_variants; /* number of variants found on disk */
+
+ int is_transparent; /* 1 if this resource is trans. negotiable */
+
+ int dont_fiddle_headers; /* 1 if we may not fiddle with accept hdrs */
+ int ua_supports_trans; /* 1 if ua supports trans negotiation */
+ int send_alternates; /* 1 if we want to send an Alternates header */
+ int may_choose; /* 1 if we may choose a variant for the client */
+ int use_rvsa; /* 1 if we must use RVSA/1.0 negotiation algo */
+} negotiation_state;
+
+/* A few functions to manipulate var_recs.
+ * Cleaning out the fields...
+ */
+
+static void clean_var_rec(var_rec *mime_info)
+{
+ mime_info->sub_req = NULL;
+ mime_info->mime_type = "";
+ mime_info->file_name = "";
+ mime_info->body = 0;
+ mime_info->content_encoding = NULL;
+ mime_info->content_languages = NULL;
+ mime_info->content_charset = "";
+ mime_info->description = "";
+
+ mime_info->is_pseudo_html = 0;
+ mime_info->level = 0.0f;
+ mime_info->level_matched = 0.0f;
+ mime_info->bytes = -1;
+ mime_info->lang_index = -1;
+ mime_info->mime_stars = 0;
+ mime_info->definite = 1;
+
+ mime_info->charset_quality = 1.0f;
+ mime_info->encoding_quality = 1.0f;
+ mime_info->lang_quality = 1.0f;
+ mime_info->mime_type_quality = 1.0f;
+ mime_info->source_quality = 0.0f;
+}
+
+/* Initializing the relevant fields of a variant record from the
+ * accept_info read out of its content-type, one way or another.
+ */
+
+static void set_mime_fields(var_rec *var, accept_rec *mime_info)
+{
+ var->mime_type = mime_info->name;
+ var->source_quality = mime_info->quality;
+ var->level = mime_info->level;
+ var->content_charset = mime_info->charset;
+
+ var->is_pseudo_html = (!strcmp(var->mime_type, "text/html")
+ || !strcmp(var->mime_type, INCLUDES_MAGIC_TYPE)
+ || !strcmp(var->mime_type, INCLUDES_MAGIC_TYPE3));
+}
+
+/* Create a variant list validator in r using info from vlistr. */
+
+static void set_vlist_validator(request_rec *r, request_rec *vlistr)
+{
+ /* Calculating the variant list validator is similar to
+ * calculating an etag for the source of the variant list
+ * information, so we use ap_make_etag(). Note that this
+ * validator can be 'weak' in extreme cases.
+ */
+ ap_update_mtime(vlistr, vlistr->finfo.mtime);
+ r->vlist_validator = ap_make_etag(vlistr, 0);
+
+ /* ap_set_etag will later take r->vlist_validator into account
+ * when creating the etag header
+ */
+}
+
+
+/*****************************************************************
+ *
+ * Parsing (lists of) media types and their parameters, as seen in
+ * HTTPD header lines and elsewhere.
+ */
+
+/*
+ * Get a single mime type entry --- one media type and parameters;
+ * enter the values we recognize into the argument accept_rec
+ */
+
+static const char *get_entry(apr_pool_t *p, accept_rec *result,
+ const char *accept_line)
+{
+ result->quality = 1.0f;
+ result->level = 0.0f;
+ result->charset = "";
+
+ /*
+ * Note that this handles what I gather is the "old format",
+ *
+ * Accept: text/html text/plain moo/zot
+ *
+ * without any compatibility kludges --- if the token after the
+ * MIME type begins with a semicolon, we know we're looking at parms,
+ * otherwise, we know we aren't. (So why all the pissing and moaning
+ * in the CERN server code? I must be missing something).
+ */
+
+ result->name = ap_get_token(p, &accept_line, 0);
+ ap_str_tolower(result->name); /* You want case insensitive,
+ * you'll *get* case insensitive.
+ */
+
+ /* KLUDGE!!! Default HTML to level 2.0 unless the browser
+ * *explicitly* says something else.
+ */
+
+ if (!strcmp(result->name, "text/html") && (result->level == 0.0)) {
+ result->level = 2.0f;
+ }
+ else if (!strcmp(result->name, INCLUDES_MAGIC_TYPE)) {
+ result->level = 2.0f;
+ }
+ else if (!strcmp(result->name, INCLUDES_MAGIC_TYPE3)) {
+ result->level = 3.0f;
+ }
+
+ while (*accept_line == ';') {
+ /* Parameters ... */
+
+ char *parm;
+ char *cp;
+ char *end;
+
+ ++accept_line;
+ parm = ap_get_token(p, &accept_line, 1);
+
+ /* Look for 'var = value' --- and make sure the var is in lcase. */
+
+ for (cp = parm; (*cp && !apr_isspace(*cp) && *cp != '='); ++cp) {
+ *cp = apr_tolower(*cp);
+ }
+
+ if (!*cp) {
+ continue; /* No '='; just ignore it. */
+ }
+
+ *cp++ = '\0'; /* Delimit var */
+ while (*cp && (apr_isspace(*cp) || *cp == '=')) {
+ ++cp;
+ }
+
+ if (*cp == '"') {
+ ++cp;
+ for (end = cp;
+ (*end && *end != '\n' && *end != '\r' && *end != '\"');
+ end++);
+ }
+ else {
+ for (end = cp; (*end && !apr_isspace(*end)); end++);
+ }
+ if (*end) {
+ *end = '\0'; /* strip ending quote or return */
+ }
+ ap_str_tolower(cp);
+
+ if (parm[0] == 'q'
+ && (parm[1] == '\0' || (parm[1] == 's' && parm[2] == '\0'))) {
+ result->quality = (float)atof(cp);
+ }
+ else if (parm[0] == 'l' && !strcmp(&parm[1], "evel")) {
+ result->level = (float)atof(cp);
+ }
+ else if (!strcmp(parm, "charset")) {
+ result->charset = cp;
+ }
+ }
+
+ if (*accept_line == ',') {
+ ++accept_line;
+ }
+
+ return accept_line;
+}
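+
+/* To illustrate the parameter handling above (the header below is just
+ * an example, not anything the code depends on): given the input
+ *
+ *    text/html; level=3; q=0.7, text/plain
+ *
+ * one call to get_entry() fills the accept_rec with name "text/html",
+ * level 3.0, quality 0.7 and an empty charset, and returns a pointer
+ * just past the comma so the caller can carry on with "text/plain".
+ */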
+
+/*****************************************************************
+ *
+ * Dealing with header lines ...
+ *
+ * Accept, Accept-Charset, Accept-Language and Accept-Encoding
+ * are handled by do_header_line() - they all have the same
+ * basic structure of a list of items of the format
+ * name; q=N; charset=TEXT
+ *
+ * where charset is only valid in Accept.
+ */
+
+static apr_array_header_t *do_header_line(apr_pool_t *p,
+ const char *accept_line)
+{
+ apr_array_header_t *accept_recs;
+
+ if (!accept_line) {
+ return NULL;
+ }
+
+ accept_recs = apr_array_make(p, 40, sizeof(accept_rec));
+
+ while (*accept_line) {
+ accept_rec *new = (accept_rec *) apr_array_push(accept_recs);
+ accept_line = get_entry(p, new, accept_line);
+ }
+
+ return accept_recs;
+}
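+
+/* A minimal sketch of driving this parser by hand, given some
+ * request_rec *r (the module itself only ever calls it through
+ * parse_accept_headers() below; the logging here is purely
+ * illustrative):
+ *
+ *    apr_array_header_t *recs =
+ *        do_header_line(r->pool, apr_table_get(r->headers_in, "Accept"));
+ *    if (recs) {
+ *        accept_rec *e = (accept_rec *) recs->elts;
+ *        int i;
+ *        for (i = 0; i < recs->nelts; ++i)
+ *            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ *                          "range %s q=%1.3f", e[i].name, e[i].quality);
+ *    }
+ */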
+
+/* Given the text of the Content-Languages: line from the var map file,
+ * return an array containing the languages of this variant
+ */
+
+static apr_array_header_t *do_languages_line(apr_pool_t *p,
+ const char **lang_line)
+{
+ apr_array_header_t *lang_recs = apr_array_make(p, 2, sizeof(char *));
+
+ if (!lang_line) {
+ return lang_recs;
+ }
+
+ while (**lang_line) {
+ char **new = (char **) apr_array_push(lang_recs);
+ *new = ap_get_token(p, lang_line, 0);
+ ap_str_tolower(*new);
+ if (**lang_line == ',' || **lang_line == ';') {
+ ++(*lang_line);
+ }
+ }
+
+ return lang_recs;
+}
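+
+/* For example (an illustrative line only), the type-map entry
+ *
+ *    Content-language: en, fr-ca
+ *
+ * yields a two-element array holding "en" and "fr-ca", both already
+ * folded to lowercase for the comparisons made later on.
+ */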
+
+/*****************************************************************
+ *
+ * Handling header lines from clients...
+ */
+
+static negotiation_state *parse_accept_headers(request_rec *r)
+{
+ negotiation_state *new =
+ (negotiation_state *) apr_pcalloc(r->pool, sizeof(negotiation_state));
+ accept_rec *elts;
+ apr_table_t *hdrs = r->headers_in;
+ int i;
+
+ new->pool = r->pool;
+ new->r = r;
+ new->conf = (neg_dir_config *)ap_get_module_config(r->per_dir_config,
+ &negotiation_module);
+
+ new->dir_name = ap_make_dirstr_parent(r->pool, r->filename);
+
+ new->accepts = do_header_line(r->pool, apr_table_get(hdrs, "Accept"));
+
+ /* calculate new->accept_q value */
+ if (new->accepts) {
+ elts = (accept_rec *) new->accepts->elts;
+
+ for (i = 0; i < new->accepts->nelts; ++i) {
+ if (elts[i].quality < 1.0) {
+ new->accept_q = 1;
+ }
+ }
+ }
+
+ new->accept_encodings =
+ do_header_line(r->pool, apr_table_get(hdrs, "Accept-Encoding"));
+ new->accept_langs =
+ do_header_line(r->pool, apr_table_get(hdrs, "Accept-Language"));
+ new->accept_charsets =
+ do_header_line(r->pool, apr_table_get(hdrs, "Accept-Charset"));
+
+ /* This is possibly overkill for some servers, heck, we have
+ * only 33 index.html variants in docs/docroot (today).
+ * Make this configurable?
+ */
+ new->avail_vars = apr_array_make(r->pool, 40, sizeof(var_rec));
+
+ return new;
+}
+
+
+static void parse_negotiate_header(request_rec *r, negotiation_state *neg)
+{
+ const char *negotiate = apr_table_get(r->headers_in, "Negotiate");
+ char *tok;
+
+ /* First, default to no TCN, no Alternates, and the original Apache
+ * negotiation algorithm with fiddles for broken browser configs.
+ *
+ * To save network bandwidth, we do not configure to send an
+ * Alternates header to the user agent by default. User
+ * agents that want an Alternates header for agent-driven
+ * negotiation will have to request it by sending an
+ * appropriate Negotiate header.
+ */
+ neg->ua_supports_trans = 0;
+ neg->send_alternates = 0;
+ neg->may_choose = 1;
+ neg->use_rvsa = 0;
+ neg->dont_fiddle_headers = 0;
+
+ if (!negotiate)
+ return;
+
+ if (strcmp(negotiate, "trans") == 0) {
+ /* Lynx 2.7 and 2.8 send 'negotiate: trans' even though they
+ * do not support transparent content negotiation, so for Lynx we
+ * ignore the negotiate header when its contents are exactly "trans".
+ * If future versions of Lynx ever need to say 'negotiate: trans',
+ * they can send the equivalent 'negotiate: trans, trans' instead
+ * to avoid triggering the workaround below.
+ */
+ const char *ua = apr_table_get(r->headers_in, "User-Agent");
+
+ if (ua && (strncmp(ua, "Lynx", 4) == 0))
+ return;
+ }
+
+ neg->may_choose = 0; /* An empty Negotiate would require 300 response */
+
+ while ((tok = ap_get_list_item(neg->pool, &negotiate)) != NULL) {
+
+ if (strcmp(tok, "trans") == 0 ||
+ strcmp(tok, "vlist") == 0 ||
+ strcmp(tok, "guess-small") == 0 ||
+ apr_isdigit(tok[0]) ||
+ strcmp(tok, "*") == 0) {
+
+ /* The user agent supports transparent negotiation */
+ neg->ua_supports_trans = 1;
+
+ /* Send-alternates could be configurable, but note
+ * that it must be 1 if we have 'vlist' in the
+ * negotiate header.
+ */
+ neg->send_alternates = 1;
+
+ if (strcmp(tok, "1.0") == 0) {
+ /* we may use the RVSA/1.0 algorithm, configure for it */
+ neg->may_choose = 1;
+ neg->use_rvsa = 1;
+ neg->dont_fiddle_headers = 1;
+ }
+ else if (tok[0] == '*') {
+ /* we may use any variant selection algorithm, configure
+ * to use the Apache algorithm
+ */
+ neg->may_choose = 1;
+
+ /* We disable header fiddles on the assumption that a
+ * client sending Negotiate knows how to send correct
+ * headers which don't need fiddling.
+ */
+ neg->dont_fiddle_headers = 1;
+ }
+ }
+ }
+
+#ifdef NEG_DEBUG
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "dont_fiddle_headers=%d use_rvsa=%d ua_supports_trans=%d "
+ "send_alternates=%d, may_choose=%d",
+ neg->dont_fiddle_headers, neg->use_rvsa,
+ neg->ua_supports_trans, neg->send_alternates, neg->may_choose);
+#endif
+
+}
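+
+/* To illustrate the flag settings above (the header is an example, not
+ * a requirement): a client sending
+ *
+ *    Negotiate: trans, vlist, 1.0
+ *
+ * ends up with ua_supports_trans, send_alternates, may_choose, use_rvsa
+ * and dont_fiddle_headers all set to 1, while a bare "Negotiate: trans"
+ * from a Lynx User-Agent is ignored entirely by the workaround above.
+ */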
+
+/* Sometimes clients will give us no Accept info at all; this routine sets
+ * up the standard default for that case, and also arranges for us to be
+ * willing to run a CGI script if we find one. (In fact, we set up to
+ * dramatically prefer CGI scripts in cases where that's appropriate,
+ * e.g., POST or when URI includes query args or extra path info).
+ */
+static void maybe_add_default_accepts(negotiation_state *neg,
+ int prefer_scripts)
+{
+ accept_rec *new_accept;
+
+ if (!neg->accepts) {
+ neg->accepts = apr_array_make(neg->pool, 4, sizeof(accept_rec));
+
+ new_accept = (accept_rec *) apr_array_push(neg->accepts);
+
+ new_accept->name = "*/*";
+ new_accept->quality = 1.0f;
+ new_accept->level = 0.0f;
+ }
+
+ new_accept = (accept_rec *) apr_array_push(neg->accepts);
+
+ new_accept->name = CGI_MAGIC_TYPE;
+ if (neg->use_rvsa) {
+ new_accept->quality = 0;
+ }
+ else {
+ new_accept->quality = prefer_scripts ? 2.0f : 0.001f;
+ }
+ new_accept->level = 0.0f;
+}
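+
+/* A quick sketch of the outcome (purely illustrative): for a POST with
+ * no Accept header and prefer_scripts set, neg->accepts ends up holding
+ *
+ *    { name "* / *", quality 1.0 }   (spaces added to keep the comment legal)
+ *    { name CGI_MAGIC_TYPE, quality 2.0 }
+ *
+ * so a CGI variant, if one exists, will normally beat any static variant.
+ */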
+
+/*****************************************************************
+ *
+ * Parsing type-map files, in Roy's meta/http format augmented with
+ * #-comments.
+ */
+
+/* Reading RFC822-style header lines, ignoring #-comments and
+ * handling continuations.
+ */
+
+enum header_state {
+ header_eof, header_seen, header_sep
+};
+
+static enum header_state get_header_line(char *buffer, int len, apr_file_t *map)
+{
+ char *buf_end = buffer + len;
+ char *cp;
+ char c;
+
+ /* Get a noncommented line */
+
+ do {
+ if (apr_file_gets(buffer, len, map) != APR_SUCCESS) {
+ return header_eof;
+ }
+ } while (buffer[0] == '#');
+
+ /* If blank, just return it --- this ends information on this variant */
+
+ for (cp = buffer; (*cp && apr_isspace(*cp)); ++cp) {
+ continue;
+ }
+
+ if (*cp == '\0') {
+ return header_sep;
+ }
+
+ /* If non-blank, go looking for header lines, but note that we still
+ * have to treat comments specially...
+ */
+
+ cp += strlen(cp);
+
+ /* We need to shortcut the rest of this block following the Body:
+ * tag - we will not look for continuation after this line.
+ */
+ if (!strncasecmp(buffer, "Body:", 5))
+ return header_seen;
+
+ while (apr_file_getc(&c, map) != APR_EOF) {
+ if (c == '#') {
+ /* Comment line */
+ while (apr_file_getc(&c, map) != APR_EOF && c != '\n') {
+ continue;
+ }
+ }
+ else if (apr_isspace(c)) {
+ /* Leading whitespace. POSSIBLE continuation line
+ * Also, possibly blank --- if so, we ungetc() the final newline
+ * so that we will pick up the blank line the next time 'round.
+ */
+
+ while (c != '\n' && apr_isspace(c)) {
+ if (apr_file_getc(&c, map) != APR_SUCCESS)
+ break;
+ }
+
+ apr_file_ungetc(c, map);
+
+ if (c == '\n') {
+ return header_seen; /* Blank line */
+ }
+
+ /* Continuation */
+
+ while ( cp < buf_end - 2
+ && (apr_file_getc(&c, map)) != APR_EOF
+ && c != '\n') {
+ *cp++ = c;
+ }
+
+ *cp++ = '\n';
+ *cp = '\0';
+ }
+ else {
+
+ /* Line beginning with something other than whitespace */
+
+ apr_file_ungetc(c, map);
+ return header_seen;
+ }
+ }
+
+ return header_seen;
+}
+
+static apr_off_t get_body(char *buffer, apr_size_t *len, const char *tag,
+ apr_file_t *map)
+{
+ char *endbody;
+ int bodylen;
+ int taglen;
+ apr_off_t pos;
+
+ taglen = strlen(tag);
+ *len -= taglen;
+
+ /* We are at the first character following a body:tag\n entry
+ * Suck in the body, then backspace to the first char after the
+ * closing tag entry. If we fail to read, to find the tag, or to
+ * back up, then we have a hosed file, so give up already.
+ */
+ if (apr_file_read(map, buffer, len) != APR_SUCCESS) {
+ return -1;
+ }
+
+ /* put a copy of the tag *after* the data read from the file
+ * so that strstr() will find something with no reliance on
+ * terminating '\0'
+ */
+ memcpy(buffer + *len, tag, taglen);
+ endbody = strstr(buffer, tag);
+ if (endbody == buffer + *len) {
+ return -1;
+ }
+ bodylen = endbody - buffer;
+ endbody += strlen(tag);
+ /* Skip all the trailing cruft after the end tag to the next line */
+ while (*endbody) {
+ if (*endbody == '\n') {
+ ++endbody;
+ break;
+ }
+ ++endbody;
+ }
+
+ pos = -(apr_off_t)(*len - (endbody - buffer));
+ if (apr_file_seek(map, APR_CUR, &pos) != APR_SUCCESS) {
+ return -1;
+ }
+
+ /* Give the caller back the actual body's file offset and length */
+ *len = bodylen;
+ return pos - (endbody - buffer);
+}
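+
+/* For reference, a Body: section in a type map looks roughly like this
+ * (the delimiter string is arbitrary; this one is only an example):
+ *
+ *    Body:----xyzzy----
+ *    <html>
+ *    <body><p>Inline variant content.</p></body>
+ *    </html>
+ *    ----xyzzy----
+ *
+ * get_body() records the file offset and length of everything between
+ * the "Body:" line and the closing delimiter, so the content can later
+ * be sent from the map file itself.
+ */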
+
+
+/* Stripping out RFC822 comments */
+
+static void strip_paren_comments(char *hdr)
+{
+ /* Hmmm... is this correct? In Roy's latest draft, (comments) can nest! */
+ /* Nope, it isn't correct. Fails to handle backslash escape as well. */
+
+ while (*hdr) {
+ if (*hdr == '"') {
+ hdr = strchr(hdr, '"');
+ if (hdr == NULL) {
+ return;
+ }
+ ++hdr;
+ }
+ else if (*hdr == '(') {
+ while (*hdr && *hdr != ')') {
+ *hdr++ = ' ';
+ }
+
+ if (*hdr) {
+ *hdr++ = ' ';
+ }
+ }
+ else {
+ ++hdr;
+ }
+ }
+}
+
+/* Getting to a header body from the header */
+
+static char *lcase_header_name_return_body(char *header, request_rec *r)
+{
+ char *cp = header;
+
+ for ( ; *cp && *cp != ':' ; ++cp) {
+ *cp = apr_tolower(*cp);
+ }
+
+ if (!*cp) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Syntax error in type map, no ':' in %s for header %s",
+ r->filename, header);
+ return NULL;
+ }
+
+ do {
+ ++cp;
+ } while (*cp && apr_isspace(*cp));
+
+ if (!*cp) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Syntax error in type map --- no header body: %s for %s",
+ r->filename, header);
+ return NULL;
+ }
+
+ return cp;
+}
+
+static int read_type_map(apr_file_t **map, negotiation_state *neg,
+ request_rec *rr)
+{
+ request_rec *r = neg->r;
+ apr_file_t *map_ = NULL;
+ apr_status_t status;
+ char buffer[MAX_STRING_LEN];
+ enum header_state hstate;
+ struct var_rec mime_info;
+ int has_content;
+
+ if (!map)
+ map = &map_;
+
+ /* We are not using multiviews */
+ neg->count_multiviews_variants = 0;
+
+ if ((status = apr_file_open(map, rr->filename, APR_READ | APR_BUFFERED,
+ APR_OS_DEFAULT, neg->pool)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r,
+ "cannot access type map file: %s", rr->filename);
+ return HTTP_FORBIDDEN;
+ }
+
+ clean_var_rec(&mime_info);
+ has_content = 0;
+
+ do {
+ hstate = get_header_line(buffer, MAX_STRING_LEN, *map);
+
+ if (hstate == header_seen) {
+ char *body1 = lcase_header_name_return_body(buffer, neg->r);
+ const char *body;
+
+ if (body1 == NULL) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ strip_paren_comments(body1);
+ body = body1;
+
+ if (!strncmp(buffer, "uri:", 4)) {
+ mime_info.file_name = ap_get_token(neg->pool, &body, 0);
+ }
+ else if (!strncmp(buffer, "content-type:", 13)) {
+ struct accept_rec accept_info;
+
+ get_entry(neg->pool, &accept_info, body);
+ set_mime_fields(&mime_info, &accept_info);
+ has_content = 1;
+ }
+ else if (!strncmp(buffer, "content-length:", 15)) {
+ mime_info.bytes = apr_atoi64((char *)body);
+ has_content = 1;
+ }
+ else if (!strncmp(buffer, "content-language:", 17)) {
+ mime_info.content_languages = do_languages_line(neg->pool,
+ &body);
+ has_content = 1;
+ }
+ else if (!strncmp(buffer, "content-encoding:", 17)) {
+ mime_info.content_encoding = ap_get_token(neg->pool, &body, 0);
+ has_content = 1;
+ }
+ else if (!strncmp(buffer, "description:", 12)) {
+ char *desc = apr_pstrdup(neg->pool, body);
+ char *cp;
+
+ for (cp = desc; *cp; ++cp) {
+ if (*cp == '\n') *cp = ' ';
+ }
+ if (cp > desc) *(cp - 1) = '\0';
+ mime_info.description = desc;
+ }
+ else if (!strncmp(buffer, "body:", 5)) {
+ char *tag = apr_pstrdup(neg->pool, body);
+ char *eol = strchr(tag, '\0');
+ apr_size_t len = MAX_STRING_LEN;
+ while (--eol >= tag && apr_isspace(*eol))
+ *eol = '\0';
+ if ((mime_info.body = get_body(buffer, &len, tag, *map)) < 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Syntax error in type map, no end tag '%s'"
+ "found in %s for Body: content.",
+ tag, r->filename);
+ break;
+ }
+ mime_info.bytes = len;
+ mime_info.file_name = apr_filename_of_pathname(rr->filename);
+ }
+ }
+ else {
+ if (*mime_info.file_name && has_content) {
+ void *new_var = apr_array_push(neg->avail_vars);
+
+ memcpy(new_var, (void *) &mime_info, sizeof(var_rec));
+ }
+
+ clean_var_rec(&mime_info);
+ has_content = 0;
+ }
+ } while (hstate != header_eof);
+
+ if (map_)
+ apr_file_close(map_);
+
+ set_vlist_validator(r, rr);
+
+ return OK;
+}
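+
+/* A small example of the map format this routine consumes (the file
+ * names and languages are only illustrative):
+ *
+ *    URI: document.html.en
+ *    Content-type: text/html
+ *    Content-language: en
+ *
+ *    URI: document.html.fr
+ *    Content-type: text/html; charset=iso-8859-1
+ *    Content-language: fr
+ *
+ * Each blank-line-separated record that supplies both a uri: and some
+ * content information becomes one var_rec on neg->avail_vars.
+ */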
+
+
+/* Sort function used by read_types_multi. */
+static int variantsortf(var_rec *a, var_rec *b) {
+
+ /* First key is the source quality, sort in descending order. */
+
+ /* XXX: note that we currently implement no method of setting the
+ * source quality for multiviews variants, so we are always comparing
+ * 1.0 to 1.0 for now
+ */
+ if (a->source_quality < b->source_quality)
+ return 1;
+ if (a->source_quality > b->source_quality)
+ return -1;
+
+ /* Second key is the variant name */
+ return strcmp(a->file_name, b->file_name);
+}
+
+/*****************************************************************
+ *
+ * Same as read_type_map, except we use a filtered directory listing
+ * as the map...
+ */
+
+static int read_types_multi(negotiation_state *neg)
+{
+ request_rec *r = neg->r;
+
+ char *filp;
+ int prefix_len;
+ apr_dir_t *dirp;
+ apr_finfo_t dirent;
+ apr_status_t status;
+ struct var_rec mime_info;
+ struct accept_rec accept_info;
+ void *new_var;
+ int anymatch = 0;
+
+ clean_var_rec(&mime_info);
+
+ if (r->proxyreq || !r->filename
+ || !ap_os_is_path_absolute(neg->pool, r->filename)) {
+ return DECLINED;
+ }
+
+ /* Only absolute paths here */
+ if (!(filp = strrchr(r->filename, '/'))) {
+ return DECLINED;
+ }
+ ++filp;
+ prefix_len = strlen(filp);
+
+ if ((status = apr_dir_open(&dirp, neg->dir_name,
+ neg->pool)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r,
+ "cannot read directory for multi: %s", neg->dir_name);
+ return HTTP_FORBIDDEN;
+ }
+
+ while (apr_dir_read(&dirent, APR_FINFO_DIRENT, dirp) == APR_SUCCESS) {
+ apr_array_header_t *exception_list;
+ request_rec *sub_req;
+
+ /* Do we have a match? */
+#ifdef CASE_BLIND_FILESYSTEM
+ if (strncasecmp(dirent.name, filp, prefix_len)) {
+#else
+ if (strncmp(dirent.name, filp, prefix_len)) {
+#endif
+ continue;
+ }
+ if (dirent.name[prefix_len] != '.') {
+ continue;
+ }
+
+ /* Don't negotiate directories and other unusual files
+ * Really shouldn't see anything but DIR/LNK/REG here,
+ * and we ought to discover if the LNK was interesting.
+ *
+ * Of course, this only helps platforms that capture the
+ * filetype in apr_dir_read(), which most can once
+ * they are optimized with some magic [it's known to the
+ * dirent, not associated to the inode, on most FS's.]
+ */
+ if ((dirent.valid & APR_FINFO_TYPE) && (dirent.filetype == APR_DIR))
+ continue;
+
+ /* Ok, something's here. Maybe nothing useful. Remember that
+ * we tried, so that if we completely fail we can reject the request!
+ */
+ anymatch = 1;
+
+ /* See if it's something which we have access to, and which
+ * has a known type and encoding (as opposed to something
+ * which we'll be slapping default_type on later).
+ */
+ sub_req = ap_sub_req_lookup_dirent(&dirent, r, AP_SUBREQ_MERGE_ARGS,
+ NULL);
+
+ /* Double check, we still don't multi-resolve non-ordinary files
+ */
+ if (sub_req->finfo.filetype != APR_REG)
+ continue;
+
+ /* If it has a handler, we'll pretend it's a CGI script,
+ * since that's a good indication of the sort of thing it
+ * might be doing.
+ */
+ if (sub_req->handler && !sub_req->content_type) {
+ ap_set_content_type(sub_req, CGI_MAGIC_TYPE);
+ }
+
+ /*
+ * mod_mime will _always_ provide us the base name in the
+ * ap-mime-exceptions-list note, if it processed anything. If
+ * this list is empty, give up immediately, there was
+ * nothing interesting. For example, looking at the files
+ * readme.txt and readme.foo, we will throw away .foo if
+ * it's an insignificant file (e.g. it did not identify a
+ * language, charset, encoding, content type or handler).
+ */
+ exception_list =
+ (apr_array_header_t *)apr_table_get(sub_req->notes,
+ "ap-mime-exceptions-list");
+
+ if (!exception_list) {
+ ap_destroy_sub_req(sub_req);
+ continue;
+ }
+
+ /* Each unrecognized bit had better match our base name, in sequence.
+ * A test of index.html.foo will match index.foo or index.html.foo,
+ * but it will never transpose the segments and allow index.foo.html
+ * because that would introduce too much CPU consumption. Better that
+ * we don't attempt a many-to-many match here.
+ */
+ {
+ int nexcept = exception_list->nelts;
+ char **cur_except = (char**)exception_list->elts;
+ char *segstart = filp, *segend, saveend;
+
+ while (*segstart && nexcept) {
+ if (!(segend = strchr(segstart, '.')))
+ segend = strchr(segstart, '\0');
+ saveend = *segend;
+ *segend = '\0';
+
+#ifdef CASE_BLIND_FILESYSTEM
+ if (strcasecmp(segstart, *cur_except) == 0) {
+#else
+ if (strcmp(segstart, *cur_except) == 0) {
+#endif
+ --nexcept;
+ ++cur_except;
+ }
+
+ if (!saveend)
+ break;
+
+ *segend = saveend;
+ segstart = segend + 1;
+ }
+
+ if (nexcept) {
+ /* Something you don't know is, something you don't know...
+ */
+ ap_destroy_sub_req(sub_req);
+ continue;
+ }
+ }
+
+ /*
+ * ###: be warned, the _default_ content type is already
+ * picked up here! If we failed the subrequest, or don't
+ * know what we are serving, then continue.
+ */
+ if (sub_req->status != HTTP_OK || (!sub_req->content_type)) {
+ ap_destroy_sub_req(sub_req);
+ continue;
+ }
+
+ /* If it's a map file, we use that instead of the map
+ * we're building...
+ */
+ if (((sub_req->content_type) &&
+ !strcmp(sub_req->content_type, MAP_FILE_MAGIC_TYPE)) ||
+ ((sub_req->handler) &&
+ !strcmp(sub_req->handler, "type-map"))) {
+
+ apr_dir_close(dirp);
+ neg->avail_vars->nelts = 0;
+ if (sub_req->status != HTTP_OK) {
+ return sub_req->status;
+ }
+ return read_type_map(NULL, neg, sub_req);
+ }
+
+ /* Have reasonable variant --- gather notes. */
+
+ mime_info.sub_req = sub_req;
+ mime_info.file_name = apr_pstrdup(neg->pool, dirent.name);
+ if (sub_req->content_encoding) {
+ mime_info.content_encoding = sub_req->content_encoding;
+ }
+ if (sub_req->content_languages) {
+ mime_info.content_languages = sub_req->content_languages;
+ }
+
+ get_entry(neg->pool, &accept_info, sub_req->content_type);
+ set_mime_fields(&mime_info, &accept_info);
+
+ new_var = apr_array_push(neg->avail_vars);
+ memcpy(new_var, (void *) &mime_info, sizeof(var_rec));
+
+ neg->count_multiviews_variants++;
+
+ clean_var_rec(&mime_info);
+ }
+
+ apr_dir_close(dirp);
+
+ /* We found some file names that matched. None could be served.
+ * Rather than fall out to autoindex or some other mapper, this
+ * request must die.
+ */
+ if (anymatch && !neg->avail_vars->nelts) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Negotiation: discovered file(s) matching request: %s"
+ " (None could be negotiated).",
+ r->filename);
+ return HTTP_NOT_FOUND;
+ }
+
+ set_vlist_validator(r, r);
+
+ /* Sort the variants into a canonical order. The negotiation
+ * result sometimes depends on the order of the variants. By
+ * sorting the variants into a canonical order, rather than using
+ * the order in which readdir() happens to return them, we ensure
+ * that the negotiation result will be consistent over filesystem
+ * backup/restores and over all mirror sites.
+ */
+
+ qsort((void *) neg->avail_vars->elts, neg->avail_vars->nelts,
+ sizeof(var_rec), (int (*)(const void *, const void *)) variantsortf);
+
+ return OK;
+}
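+
+/* By way of example (nothing below is required by the code): a request
+ * for /dir/document with MultiViews enabled and a directory holding
+ *
+ *    document.html.en  document.html.fr  document.txt  document.jpg.bak
+ *
+ * produces variants for the first three, since each sub-request yields
+ * a content type and/or language, while document.jpg.bak is dropped as
+ * soon as its ".bak" segment fails to match mod_mime's exception list.
+ */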
+
+
+/*****************************************************************
+ * And now for the code you've been waiting for... actually
+ * finding a match to the client's requirements.
+ */
+
+/* Matching MIME types ... the star/star and foo/star matching conventions
+ * are implemented here. (You know what I mean by star/star, but just
+ * try mentioning those three characters in a C comment). Using strcmp()
+ * is legit, because everything has already been smashed to lowercase.
+ *
+ * Note also that if we get an exact match on the media type, we update
+ * level_matched for use in level_cmp below...
+ *
+ * We also give a value for mime_stars, which is used later. It should
+ * be 1 for star/star, 2 for type/star and 3 for type/subtype.
+ */
+
+static int mime_match(accept_rec *accept_r, var_rec *avail)
+{
+ const char *accept_type = accept_r->name;
+ const char *avail_type = avail->mime_type;
+ int len = strlen(accept_type);
+
+ if (accept_type[0] == '*') { /* Anything matches star/star */
+ if (avail->mime_stars < 1) {
+ avail->mime_stars = 1;
+ }
+ return 1;
+ }
+ else if ((accept_type[len - 1] == '*') &&
+ !strncmp(accept_type, avail_type, len - 2)) {
+ if (avail->mime_stars < 2) {
+ avail->mime_stars = 2;
+ }
+ return 1;
+ }
+ else if (!strcmp(accept_type, avail_type)
+ || (!strcmp(accept_type, "text/html")
+ && (!strcmp(avail_type, INCLUDES_MAGIC_TYPE)
+ || !strcmp(avail_type, INCLUDES_MAGIC_TYPE3)))) {
+ if (accept_r->level >= avail->level) {
+ avail->level_matched = avail->level;
+ avail->mime_stars = 3;
+ return 1;
+ }
+ }
+
+ return 0; /* no match */
+}
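+
+/* A few illustrative matches against a variant of type text/html
+ * (wildcards spaced out to keep this comment a comment):
+ *
+ *    Accept item "* / *"      matches with mime_stars = 1
+ *    Accept item "text/ *"    matches with mime_stars = 2
+ *    Accept item "text/html"  matches with mime_stars = 3, and also
+ *                             records level_matched
+ *
+ * so later code can tell how specific the best matching Accept item was.
+ */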
+
+/* This code implements a piece of the tie-breaking algorithm between
+ * variants of equal quality. This piece is the treatment of variants
+ * of the same base media type, but different levels. What we want to
+ * return is the variant at the highest level that the client explicitly
+ * claimed to accept.
+ *
+ * If all the variants available are at a higher level than that, or if
+ * the client didn't say anything specific about this media type at all
+ * and these variants just got in on a wildcard, we prefer the lowest
+ * level, on grounds that that's the one that the client is least likely
+ * to choke on.
+ *
+ * (This is all motivated by treatment of levels in HTML --- we only
+ * want to give level 3 to browsers that explicitly ask for it; browsers
+ * that don't, including HTTP/0.9 browsers that only get the implicit
+ * "Accept: * / *" [space added to avoid confusing cpp --- no, that
+ * syntax doesn't really work] should get HTML2 if available).
+ *
+ * (Note that this code only comes into play when we are choosing among
+ * variants of equal quality, where the draft standard gives us a fair
+ * bit of leeway about what to do. It ain't specified by the standard;
+ * rather, it is a choice made by this server about what to do in cases
+ * where the standard does not specify a unique course of action).
+ */
+
+static int level_cmp(var_rec *var1, var_rec *var2)
+{
+ /* Levels are only comparable between matching media types */
+
+ if (var1->is_pseudo_html && !var2->is_pseudo_html) {
+ return 0;
+ }
+
+ if (!var1->is_pseudo_html && strcmp(var1->mime_type, var2->mime_type)) {
+ return 0;
+ }
+ /* The result of the above if statements is that, if we get to
+ * here, both variants have the same mime_type or both are
+ * pseudo-html.
+ */
+
+ /* Take highest level that matched, if either did match. */
+
+ if (var1->level_matched > var2->level_matched) {
+ return 1;
+ }
+ if (var1->level_matched < var2->level_matched) {
+ return -1;
+ }
+
+ /* Neither matched. Take lowest level, if there's a difference. */
+
+ if (var1->level < var2->level) {
+ return 1;
+ }
+ if (var1->level > var2->level) {
+ return -1;
+ }
+
+ /* Tied */
+
+ return 0;
+}
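+
+/* Concretely (an illustrative pair of variants, not a special case in
+ * the code): with two text/html variants at level 2 and level 3, a
+ * client sending "Accept: text/html; level=3" matches both and the
+ * level 3 variant is preferred, while a client that only reached them
+ * through a wildcard such as "* / *" leaves level_matched at zero for
+ * both, so the safer level 2 variant wins instead.
+ */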
+
+/* Finding languages. The main entry point is set_language_quality()
+ * which is called for each variant. It sets two elements in the
+ * variant record:
+ * lang_quality - the 'q' value of the 'best' matching language
+ * from Accept-Language: header (HTTP/1.1)
+ * lang_index - Non-negotiated language priority, using
+ * position of language on the Accept-Language:
+ * header, if present, else LanguagePriority
+ * directive order.
+ *
+ * When we do the variant checking for best variant, we use language
+ * quality first, and if a tie, lang_index next (this only applies
+ * when _not_ using the RVSA/1.0 algorithm). If using the RVSA/1.0
+ * algorithm, lang_index is never used.
+ *
+ * set_language_quality() calls find_lang_index() and find_default_index()
+ * to set lang_index.
+ */
+
+static int find_lang_index(apr_array_header_t *accept_langs, char *lang)
+{
+ const char **alang;
+ int i;
+
+ if (!lang || !accept_langs) {
+ return -1;
+ }
+
+ alang = (const char **) accept_langs->elts;
+
+ for (i = 0; i < accept_langs->nelts; ++i) {
+ if (!strncmp(lang, *alang, strlen(*alang))) {
+ return i;
+ }
+ alang += (accept_langs->elt_size / sizeof(char*));
+ }
+
+ return -1;
+}
+
+/* set_default_lang_quality() sets the quality we apply to variants
+ * which have no language assigned to them. If none of the variants
+ * have a language, we are not negotiating on language, so all are
+ * acceptable, and we set the default q value to 1.0. However if
+ * some of the variants have languages, we set this default to 0.0001.
+ * The value of this default will be applied to all variants with
+ * no explicit language -- which will have the effect of making them
+ * acceptable, but only if no variants with an explicit language
+ * are acceptable. The default q value set here is assigned to variants
+ * with no language type in set_language_quality().
+ *
+ * Note that if using the RVSA/1.0 algorithm, we don't use this
+ * fiddle.
+ */
+
+static void set_default_lang_quality(negotiation_state *neg)
+{
+ var_rec *avail_recs = (var_rec *) neg->avail_vars->elts;
+ int j;
+
+ if (!neg->dont_fiddle_headers) {
+ for (j = 0; j < neg->avail_vars->nelts; ++j) {
+ var_rec *variant = &avail_recs[j];
+ if (variant->content_languages &&
+ variant->content_languages->nelts) {
+ neg->default_lang_quality = 0.0001f;
+ return;
+ }
+ }
+ }
+
+ neg->default_lang_quality = 1.0f;
+}
+
+/* Set the lang_quality value in the variant record. Also
+ * assigns lang_index for ForceLanguagePriority.
+ *
+ * To find the lang_quality value, we look for the 'q' value
+ * of the 'best' matching language on the Accept-Language
+ * header. The 'best' match is the language on Accept-Language
+ * header which matches the language of this variant either fully,
+ * or as far as the prefix marker (-). If two or more languages
+ * match, use the longest string from the Accept-Language header
+ * (see HTTP/1.1 [14.4])
+ *
+ * When a variant has multiple languages, we find the 'best'
+ * match for each variant language tag as above, then select the
+ * one with the highest q value. Because both the accept-header
+ * and variant can have multiple languages, we now have a hairy
+ * loop-within-a-loop here.
+ *
+ * If the variant has no language and we have no Accept-Language
+ * items, leave the quality at 1.0 and return.
+ *
+ * If the variant has no language, we use the default as set by
+ * set_default_lang_quality() (1.0 if we are not negotiating on
+ * language, 0.0001 if we are).
+ *
+ * Following the setting of the language quality, we drop through to
+ * set the old 'lang_index'. This is set based on either the order
+ * of the languages on the Accept-Language header, or the
+ * order on the LanguagePriority directive. This is only used
+ * in the negotiation if the language qualities tie.
+ */
+
+static void set_language_quality(negotiation_state *neg, var_rec *variant)
+{
+ int forcepriority = neg->conf->forcelangpriority;
+ if (forcepriority == FLP_UNDEF) {
+ forcepriority = FLP_DEFAULT;
+ }
+
+ if (!variant->content_languages || !variant->content_languages->nelts) {
+ /* This variant has no content-language, so use the default
+ * quality factor for variants with no content-language
+ * (previously set by set_default_lang_quality()).
+ * Leave the factor alone (it remains at 1.0) when we may not fiddle
+ * with the headers.
+ */
+ if (!neg->dont_fiddle_headers) {
+ variant->lang_quality = neg->default_lang_quality;
+ }
+ /* Nothing further to do: this variant has no language to
+ * match, whether or not an Accept-Language header was sent.
+ */
+ return;
+ }
+ else {
+ /* Variant has one (or more) languages. Look for the best
+ * match. We do this by going through each language on the
+ * variant description looking for a match on the
+ * Accept-Language header. The best match is the longest
+ * matching language on the header. The final result is the
+ * best q value from all the languages on the variant
+ * description.
+ */
+
+ if (!neg->accept_langs) {
+ /* no accept-language header makes the variant indefinite */
+ variant->definite = 0;
+ }
+ else { /* There is an accept-language with 0 or more items */
+ accept_rec *accs = (accept_rec *) neg->accept_langs->elts;
+ accept_rec *best = NULL, *star = NULL;
+ accept_rec *bestthistag;
+ char *lang, *p;
+ float fiddle_q = 0.0f;
+ int any_match_on_star = 0;
+ int i, j;
+ apr_size_t alen, longest_lang_range_len;
+
+ for (j = 0; j < variant->content_languages->nelts; ++j) {
+ p = NULL;
+ bestthistag = NULL;
+ longest_lang_range_len = 0;
+ alen = 0;
+
+ /* lang is the variant's language-tag, which is the one
+ * we are allowed to use the prefix of in HTTP/1.1
+ */
+ lang = ((char **) (variant->content_languages->elts))[j];
+
+ /* now find the best (i.e. longest) matching
+ * Accept-Language header language. We put the best match
+ * for this tag in bestthistag. We cannot update the
+ * overall best (based on q value) because the best match
+ * for this tag is the longest language item on the accept
+ * header, not necessarily the highest q.
+ */
+ for (i = 0; i < neg->accept_langs->nelts; ++i) {
+ if (!strcmp(accs[i].name, "*")) {
+ if (!star) {
+ star = &accs[i];
+ }
+ continue;
+ }
+ /* Find language. We match if either the variant
+ * language tag exactly matches the language range
+ * from the accept header, or a prefix of the variant
+ * language tag up to a '-' character matches the
+ * whole of the language range in the Accept-Language
+ * header. Note that HTTP/1.x allows any number of
+ * '-' characters in a tag or range, currently only
+ * tags with zero or one '-' characters are defined
+ * for general use (see rfc1766).
+ *
+ * We only take a language range in the Accept-Language
+ * header as the best match for the variant language tag
+ * if it is longer than the previous best match.
+ */
+
+ alen = strlen(accs[i].name);
+
+ if ((strlen(lang) >= alen) &&
+ !strncmp(lang, accs[i].name, alen) &&
+ ((lang[alen] == 0) || (lang[alen] == '-')) ) {
+
+ if (alen > longest_lang_range_len) {
+ longest_lang_range_len = alen;
+ bestthistag = &accs[i];
+ }
+ }
+
+ if (!bestthistag && !neg->dont_fiddle_headers) {
+ /* The next bit is a fiddle. Some browsers might
+ * be configured to send more specific language
+ * ranges than desirable. For example, an
+ * Accept-Language of en-US should never match
+ * variants with languages en or en-GB. But US
+ * English speakers might pick en-US as their
+ * language choice. So this fiddle checks if the
+ * language range has a prefix, and if so, it
+ * matches variants which match that prefix with a
+ * priority of 0.001. So a request for en-US would
+ * match variants of types en and en-GB, but at
+ * much lower priority than matches of en-US
+ * directly, or of any other language listed on
+ * the Accept-Language header. Note that this
+ * fiddle does not handle multi-level prefixes.
+ */
+ if ((p = strchr(accs[i].name, '-'))) {
+ int plen = p - accs[i].name;
+
+ if (!strncmp(lang, accs[i].name, plen)) {
+ fiddle_q = 0.001f;
+ }
+ }
+ }
+ }
+ /* Finished looking at Accept-Language headers, the best
+ * (longest) match is in bestthistag, or NULL if no match
+ */
+ if (!best ||
+ (bestthistag && bestthistag->quality > best->quality)) {
+ best = bestthistag;
+ }
+
+ /* See if the tag matches on a * in the Accept-Language
+ * header. If so, record this fact for later use
+ */
+ if (!bestthistag && star) {
+ any_match_on_star = 1;
+ }
+ }
+
+ /* If one of the language tags of the variant matched on *, we
+ * need to see if its q is better than that of any non-* match
+ * on any other tag of the variant. If so the * match takes
+ * precedence and the overall match is not definite.
+ */
+ if ( any_match_on_star &&
+ ((best && star->quality > best->quality) ||
+ (!best)) ) {
+ best = star;
+ variant->definite = 0;
+ }
+
+ variant->lang_quality = best ? best->quality : fiddle_q;
+ }
+ }
+
+ /* Handle the ForceLanguagePriority overrides, based on the best match
+ * to LanguagePriority order. The best match is the lowest index of
+ * any LanguagePriority match.
+ */
+ if (((forcepriority & FLP_PREFER)
+ && (variant->lang_index < 0))
+ || ((forcepriority & FLP_FALLBACK)
+ && !variant->lang_quality))
+ {
+ int bestidx = -1;
+ int j;
+
+ for (j = 0; j < variant->content_languages->nelts; ++j)
+ {
+ /* lang is the variant's language-tag, which is the one
+ * we are allowed to use the prefix of in HTTP/1.1
+ */
+ char *lang = ((char **) (variant->content_languages->elts))[j];
+ int idx = -1;
+
+ /* Whether we are preferring or falling back, the index
+ * comes from our own LanguagePriority order.
+ */
+ idx = find_lang_index(neg->conf->language_priority, lang);
+ if ((idx >= 0) && ((bestidx == -1) || (idx < bestidx))) {
+ bestidx = idx;
+ }
+ }
+
+ if (bestidx >= 0) {
+ if (variant->lang_quality) {
+ if (forcepriority & FLP_PREFER) {
+ variant->lang_index = bestidx;
+ }
+ }
+ else {
+ if (forcepriority & FLP_FALLBACK) {
+ variant->lang_index = bestidx;
+ variant->lang_quality = 0.0001f;
+ variant->definite = 0;
+ }
+ }
+ }
+ }
+ return;
+}
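+
+/* A worked example of the matching above (the tags are only an
+ * example): given
+ *
+ *    Accept-Language: en-gb, en;q=0.8, fr;q=0.5
+ *
+ * a variant in "en-gb" matches both the range "en" and the longer
+ * range "en-gb", so the latter wins and lang_quality becomes 1.0; a
+ * variant in "en-us" only matches the range "en" and gets 0.8; and a
+ * variant in "de" matches nothing, so its lang_quality drops to zero
+ * and it is effectively out of the running.
+ */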
+
+/* Determining the content length --- if the map didn't tell us,
+ * we have to do a stat() and remember for next time.
+ */
+
+static apr_off_t find_content_length(negotiation_state *neg, var_rec *variant)
+{
+ apr_finfo_t statb;
+
+ if (variant->bytes < 0) {
+ if ( variant->sub_req
+ && (variant->sub_req->finfo.valid & APR_FINFO_SIZE)) {
+ variant->bytes = variant->sub_req->finfo.size;
+ }
+ else {
+ char *fullname = ap_make_full_path(neg->pool, neg->dir_name,
+ variant->file_name);
+
+ if (apr_stat(&statb, fullname,
+ APR_FINFO_SIZE, neg->pool) == APR_SUCCESS) {
+ variant->bytes = statb.size;
+ }
+ }
+ }
+
+ return variant->bytes;
+}
+
+/* For a given variant, find the best matching Accept: header
+ * and assign the Accept: header's quality value to the
+ * mime_type_quality field of the variant, for later use in
+ * determining the best matching variant.
+ */
+
+static void set_accept_quality(negotiation_state *neg, var_rec *variant)
+{
+ int i;
+ accept_rec *accept_recs;
+ float q = 0.0f;
+ int q_definite = 1;
+
+ /* if no Accept: header, leave quality alone (will
+ * remain at the default value of 1)
+ *
+ * XXX: This if is currently never true because of the effect of
+ * maybe_add_default_accepts().
+ */
+ if (!neg->accepts) {
+ if (variant->mime_type && *variant->mime_type)
+ variant->definite = 0;
+ return;
+ }
+
+ accept_recs = (accept_rec *) neg->accepts->elts;
+
+ /*
+ * Go through each of the ranges on the Accept: header,
+ * looking for the 'best' match with this variant's
+ * content-type. We use the best match's quality
+ * value (from the Accept: header) for this variant's
+ * mime_type_quality field.
+ *
+ * The best match is determined like this:
+ * type/type is better than type/ * is better than * / *
+ * if match is type/type, use the level mime param if available
+ */
+ for (i = 0; i < neg->accepts->nelts; ++i) {
+
+ accept_rec *type = &accept_recs[i];
+ int prev_mime_stars;
+
+ prev_mime_stars = variant->mime_stars;
+
+ if (!mime_match(type, variant)) {
+ continue; /* didn't match the content type at all */
+ }
+ else {
+ /* did match - see if the match was more specific than the
+ * previous best (i.e. whether mime_stars went up)
+ */
+ if (prev_mime_stars == variant->mime_stars) {
+ continue; /* stars unchanged => not a better match */
+ }
+ }
+
+ /* If we are allowed to mess with the q-values
+ * and have no explicit q= parameters in the accept header,
+ * make wildcards very low, so we have a low chance
+ * of ending up with them if there's something better.
+ */
+
+ if (!neg->dont_fiddle_headers && !neg->accept_q &&
+ variant->mime_stars == 1) {
+ q = 0.01f;
+ }
+ else if (!neg->dont_fiddle_headers && !neg->accept_q &&
+ variant->mime_stars == 2) {
+ q = 0.02f;
+ }
+ else {
+ q = type->quality;
+ }
+
+ q_definite = (variant->mime_stars == 3);
+ }
+ variant->mime_type_quality = q;
+ variant->definite = variant->definite && q_definite;
+
+}
+
+/* For a given variant, find the 'q' value of the charset given
+ * on the Accept-Charset line. If no charsets are listed,
+ * assume value of '1'.
+ */
+static void set_charset_quality(negotiation_state *neg, var_rec *variant)
+{
+ int i;
+ accept_rec *accept_recs;
+ const char *charset = variant->content_charset;
+ accept_rec *star = NULL;
+
+ /* if no Accept-Charset: header, leave quality alone (will
+ * remain at the default value of 1)
+ */
+ if (!neg->accept_charsets) {
+ if (charset && *charset)
+ variant->definite = 0;
+ return;
+ }
+
+ accept_recs = (accept_rec *) neg->accept_charsets->elts;
+
+ if (charset == NULL || !*charset) {
+ /* Charset of variant not known */
+
+ /* if not a text / * type, leave quality alone */
+ if (!(!strncmp(variant->mime_type, "text/", 5)
+ || !strcmp(variant->mime_type, INCLUDES_MAGIC_TYPE)
+ || !strcmp(variant->mime_type, INCLUDES_MAGIC_TYPE3)
+ ))
+ return;
+
+ /* Don't go guessing if we are in strict header mode,
+ * e.g. when running the rvsa, as any guess won't be reflected
+ * in the variant list or content-location headers.
+ */
+ if (neg->dont_fiddle_headers)
+ return;
+
+ charset = "iso-8859-1"; /* The default charset for HTTP text types */
+ }
+
+ /*
+ * Go through each of the items on the Accept-Charset header,
+ * looking for a match with this variant's charset. If none
+ * match, charset is unacceptable, so set quality to 0.
+ */
+ for (i = 0; i < neg->accept_charsets->nelts; ++i) {
+
+ accept_rec *type = &accept_recs[i];
+
+ if (!strcmp(type->name, charset)) {
+ variant->charset_quality = type->quality;
+ return;
+ }
+ else if (strcmp(type->name, "*") == 0) {
+ star = type;
+ }
+ }
+ /* No explicit match */
+ if (star) {
+ variant->charset_quality = star->quality;
+ variant->definite = 0;
+ return;
+ }
+ /* If this variant is in charset iso-8859-1, the default is 1.0 */
+ if (strcmp(charset, "iso-8859-1") == 0) {
+ variant->charset_quality = 1.0f;
+ }
+ else {
+ variant->charset_quality = 0.0f;
+ }
+}
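+
+/* For instance (an illustrative header only): with
+ *
+ *    Accept-Charset: utf-8, iso-8859-5;q=0.8
+ *
+ * a text/html variant with no charset of its own is assumed to be in
+ * iso-8859-1 (unless we are in strict header mode) and, since neither
+ * listed charset matches and there is no "*" item, it picks up the
+ * iso-8859-1 default of 1.0 at the bottom of this routine, while a
+ * variant explicitly tagged as, say, koi8-r gets a quality of 0.
+ */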
+
+
+/* is_identity_encoding is included for back-compat, but does anyone
+ * use 7bit, 8bit or binary in their var files??
+ */
+
+static int is_identity_encoding(const char *enc)
+{
+ return (!enc || !enc[0] || !strcmp(enc, "7bit") || !strcmp(enc, "8bit")
+ || !strcmp(enc, "binary"));
+}
+
+/*
+ * set_encoding_quality determines whether the encoding for a particular
+ * variant is acceptable for the user-agent.
+ *
+ * The rules for encoding are that if the user-agent does not supply
+ * any Accept-Encoding header, then all encodings are allowed but a
+ * variant with no encoding should be preferred.
+ * If there is an empty Accept-Encoding header, then no encodings are
+ * acceptable. If there is a non-empty Accept-Encoding header, then
+ * any of the listed encodings are acceptable, as well as no encoding
+ * unless the "identity" encoding is specifically excluded.
+ */
+static void set_encoding_quality(negotiation_state *neg, var_rec *variant)
+{
+ accept_rec *accept_recs;
+ const char *enc = variant->content_encoding;
+ accept_rec *star = NULL;
+ float value_if_not_found = 0.0f;
+ int i;
+
+ if (!neg->accept_encodings) {
+ /* We had no Accept-Encoding header, assume that all
+ * encodings are acceptable with a low quality,
+ * but we prefer no encoding if available.
+ */
+ if (!enc || is_identity_encoding(enc))
+ variant->encoding_quality = 1.0f;
+ else
+ variant->encoding_quality = 0.5f;
+
+ return;
+ }
+
+ if (!enc || is_identity_encoding(enc)) {
+ enc = "identity";
+ value_if_not_found = 0.0001f;
+ }
+
+ accept_recs = (accept_rec *) neg->accept_encodings->elts;
+
+ /* Go through each of the encodings on the Accept-Encoding: header,
+ * looking for a match with our encoding. x- prefixes are ignored.
+ */
+ if (enc[0] == 'x' && enc[1] == '-') {
+ enc += 2;
+ }
+ for (i = 0; i < neg->accept_encodings->nelts; ++i) {
+
+ char *name = accept_recs[i].name;
+
+ if (name[0] == 'x' && name[1] == '-') {
+ name += 2;
+ }
+
+ if (!strcmp(name, enc)) {
+ variant->encoding_quality = accept_recs[i].quality;
+ return;
+ }
+
+ if (strcmp(name, "*") == 0) {
+ star = &accept_recs[i];
+ }
+
+ }
+ /* No explicit match */
+ if (star) {
+ variant->encoding_quality = star->quality;
+ return;
+ }
+
+ /* Encoding not found on Accept-Encoding: header, so it is
+ * _not_ acceptable unless it is the identity (no encoding)
+ */
+ variant->encoding_quality = value_if_not_found;
+}
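+
+/* To make the rules above concrete (the file names are only an
+ * example): for the variants x.html (no encoding) and x.html.gz (gzip),
+ *
+ *    no Accept-Encoding header:   1.0    vs 0.5  (plain file preferred)
+ *    "Accept-Encoding: gzip":     0.0001 vs 1.0  (gzip preferred)
+ *    empty Accept-Encoding:       0.0001 vs 0.0  (gzip excluded)
+ *
+ * The identity fallback of 0.0001 keeps the unencoded variant usable
+ * unless the client explicitly rules out "identity".
+ */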
+
+/*************************************************************
+ * Possible results of the variant selection algorithm
+ */
+enum algorithm_results {
+ alg_choice = 1, /* choose variant */
+ alg_list /* list variants */
+};
+
+/* Below is the 'best_match' function. It returns an int, which has
+ * one of the two values alg_choice or alg_list, which give the result
+ * of the variant selection algorithm. alg_list means that no best
+ * variant was found by the algorithm, alg_choice means that a best
+ * variant was found and should be returned. The list/choice
+ * terminology comes from TCN (rfc2295), but is used in a more generic
+ * way here. The best variant is returned in *pbest. best_match has
+ * two possible algorithms for determining the best variant: the
+ * RVSA/1.0 algorithm (from RFC2296), and the standard Apache
+ * algorithm. These are split out into separate functions
+ * (is_variant_better_rvsa() and is_variant_better()). Selection of
+ * one is through the neg->use_rvsa flag.
+ *
+ * The call to best_match also creates full information, including
+ * language, charset, etc quality for _every_ variant. This is needed
+ * for generating a correct Vary header, and can be used for the
+ * Alternates header, the human-readable list responses and 406 errors.
+ */
+
+/* Firstly, the RVSA/1.0 (HTTP Remote Variant Selection Algorithm
+ * v1.0) from rfc2296. This is the algorithm that goes together with
+ * transparent content negotiation (TCN).
+ */
+static int is_variant_better_rvsa(negotiation_state *neg, var_rec *variant,
+ var_rec *best, float *p_bestq)
+{
+ float bestq = *p_bestq, q;
+
+ /* TCN does not cover negotiation on content-encoding. For now,
+ * we ignore the encoding unless it was explicitly excluded.
+ */
+ if (variant->encoding_quality == 0.0f)
+ return 0;
+
+ q = variant->mime_type_quality *
+ variant->source_quality *
+ variant->charset_quality *
+ variant->lang_quality;
+
+ /* RFC 2296 calls for the result to be rounded to 5 decimal places,
+ * but we don't do that because it serves no useful purpose other
+ * than to ensure that a remote algorithm operates on the same
+ * precision as ours. That is silly, since what we obviously want
+ * is for the algorithm to operate on the best available precision
+ * regardless of who runs it. Since the above calculation may
+ * result in significant variance at 1e-12, rounding would be bogus.
+ */
+
+#ifdef NEG_DEBUG
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "Variant: file=%s type=%s lang=%s sourceq=%1.3f "
+ "mimeq=%1.3f langq=%1.3f charq=%1.3f encq=%1.3f "
+ "q=%1.5f definite=%d",
+ (variant->file_name ? variant->file_name : ""),
+ (variant->mime_type ? variant->mime_type : ""),
+ (variant->content_languages
+ ? apr_array_pstrcat(neg->pool, variant->content_languages, ',')
+ : ""),
+ variant->source_quality,
+ variant->mime_type_quality,
+ variant->lang_quality,
+ variant->charset_quality,
+ variant->encoding_quality,
+ q,
+ variant->definite);
+#endif
+
+ if (q <= 0.0f) {
+ return 0;
+ }
+ if (q > bestq) {
+ *p_bestq = q;
+ return 1;
+ }
+ if (q == bestq) {
+ /* If the best variant's encoding is of lesser quality than
+ * this variant, then we prefer this variant
+ */
+ if (variant->encoding_quality > best->encoding_quality) {
+ *p_bestq = q;
+ return 1;
+ }
+ }
+ return 0;
+}
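+
+/* The arithmetic, with made-up numbers: a variant with source quality
+ * 1.000, mime type quality 1.0, charset quality 1.0 and language
+ * quality 0.8 scores q = 1.0 * 1.0 * 1.0 * 0.8 = 0.8 and so replaces a
+ * current best of, say, 0.5. Encoding quality never enters the
+ * product; it only excludes refused encodings and breaks exact ties.
+ */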
+
+/* Negotiation algorithm as used by previous versions of Apache
+ * (just about).
+ */
+
+static int is_variant_better(negotiation_state *neg, var_rec *variant,
+ var_rec *best, float *p_bestq)
+{
+ float bestq = *p_bestq, q;
+ int levcmp;
+
+ /* For non-transparent negotiation, server can choose how
+ * to handle the negotiation. We'll use the following in
+ * order: content-type, language, content-type level, charset,
+ * content encoding, content length.
+ *
+ * For each check, we have three possible outcomes:
+ * This variant is worse than current best: return 0
+ * This variant is better than the current best:
+ * assign this variant's q to *p_bestq, and return 1
+ * This variant is just as desirable as the current best:
+ * drop through to the next test.
+ *
+ * This code is written in this long-winded way to allow future
+ * customisation, either by the addition of additional
+ * checks, or to allow the order of the checks to be determined
+ * by configuration options (e.g. we might prefer to check
+ * language quality _before_ content type).
+ */
+
+ /* First though, eliminate this variant if it is not
+ * acceptable by type, charset, encoding or language.
+ */
+
+#ifdef NEG_DEBUG
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "Variant: file=%s type=%s lang=%s sourceq=%1.3f "
+ "mimeq=%1.3f langq=%1.3f langidx=%d charq=%1.3f encq=%1.3f ",
+ (variant->file_name ? variant->file_name : ""),
+ (variant->mime_type ? variant->mime_type : ""),
+ (variant->content_languages
+ ? apr_array_pstrcat(neg->pool, variant->content_languages, ',')
+ : ""),
+ variant->source_quality,
+ variant->mime_type_quality,
+ variant->lang_quality,
+ variant->lang_index,
+ variant->charset_quality,
+ variant->encoding_quality);
+#endif
+
+ if (variant->encoding_quality == 0.0f ||
+ variant->lang_quality == 0.0f ||
+ variant->source_quality == 0.0f ||
+ variant->charset_quality == 0.0f ||
+ variant->mime_type_quality == 0.0f) {
+ return 0; /* don't consider unacceptables */
+ }
+
+ q = variant->mime_type_quality * variant->source_quality;
+ if (q == 0.0 || q < bestq) {
+ return 0;
+ }
+ if (q > bestq || !best) {
+ *p_bestq = q;
+ return 1;
+ }
+
+ /* language */
+ if (variant->lang_quality < best->lang_quality) {
+ return 0;
+ }
+ if (variant->lang_quality > best->lang_quality) {
+ *p_bestq = q;
+ return 1;
+ }
+
+ /* if language qualities were equal, try the LanguagePriority stuff */
+ if (best->lang_index != -1 &&
+ (variant->lang_index == -1 || variant->lang_index > best->lang_index)) {
+ return 0;
+ }
+ if (variant->lang_index != -1 &&
+ (best->lang_index == -1 || variant->lang_index < best->lang_index)) {
+ *p_bestq = q;
+ return 1;
+ }
+
+ /* content-type level (sometimes used with text/html, though we
+ * support it on other types too)
+ */
+ levcmp = level_cmp(variant, best);
+ if (levcmp == -1) {
+ return 0;
+ }
+ if (levcmp == 1) {
+ *p_bestq = q;
+ return 1;
+ }
+
+ /* charset */
+ if (variant->charset_quality < best->charset_quality) {
+ return 0;
+ }
+ /* If the best variant's charset is ISO-8859-1 and this variant has
+ * the same charset quality, then we prefer this variant
+ */
+
+ if (variant->charset_quality > best->charset_quality ||
+ ((variant->content_charset != NULL &&
+ *variant->content_charset != '\0' &&
+ strcmp(variant->content_charset, "iso-8859-1") != 0) &&
+ (best->content_charset == NULL ||
+ *best->content_charset == '\0' ||
+ strcmp(best->content_charset, "iso-8859-1") == 0))) {
+ *p_bestq = q;
+ return 1;
+ }
+
+ /* Prefer the highest value for encoding_quality.
+ */
+ if (variant->encoding_quality < best->encoding_quality) {
+ return 0;
+ }
+ if (variant->encoding_quality > best->encoding_quality) {
+ *p_bestq = q;
+ return 1;
+ }
+
+ /* content length if all else equal */
+ if (find_content_length(neg, variant) >= find_content_length(neg, best)) {
+ return 0;
+ }
+
+ /* ok, to get here means everything turned out equal, except
+ * we have a shorter content length, so use this variant
+ */
+ *p_bestq = q;
+ return 1;
+}
+
+/* Figure out whether a variant is in a specific language.
+ * It also returns false if the variant has no language at all.
+ */
+static int variant_has_language(var_rec *variant, const char *lang)
+{
+ int j, max;
+
+ /* fast exit */
+ if ( !lang
+ || !variant->content_languages
+ || !(max = variant->content_languages->nelts)) {
+ return 0;
+ }
+
+ for (j = 0; j < max; ++j) {
+ if (!strcmp(lang,
+ ((char **) (variant->content_languages->elts))[j])) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int best_match(negotiation_state *neg, var_rec **pbest)
+{
+ int j;
+ var_rec *best;
+ float bestq = 0.0f;
+ enum algorithm_results algorithm_result;
+
+ var_rec *avail_recs = (var_rec *) neg->avail_vars->elts;
+
+ const char *preferred_language = apr_table_get(neg->r->subprocess_env,
+ "prefer-language");
+
+ set_default_lang_quality(neg);
+
+ /*
+ * Find the 'best' variant
+ * We run the loop possibly twice: if "prefer-language"
+ * environment variable is set but we did not find an appropriate
+ * best variant. In that case forget the preferred language and
+ * negotiate over all variants.
+ */
+
+ do {
+ best = NULL;
+
+ for (j = 0; j < neg->avail_vars->nelts; ++j) {
+ var_rec *variant = &avail_recs[j];
+
+ /* if a language is preferred, but the current variant
+ * is not in that language, then drop it for now
+ */
+ if ( preferred_language
+ && !variant_has_language(variant, preferred_language)) {
+ continue;
+ }
+
+ /* Find all the relevant 'quality' values from the
+ * Accept... headers, and store in the variant. This also
+ * prepares for sending an Alternates header etc so we need to
+ * do it even if we do not actually plan to find a best
+ * variant.
+ */
+ set_accept_quality(neg, variant);
+ /* accept the preferred language, even when it's not listed within
+ * the Accept-Language header
+ */
+ if (preferred_language) {
+ variant->lang_quality = 1.0f;
+ variant->definite = 1;
+ }
+ else {
+ set_language_quality(neg, variant);
+ }
+ set_encoding_quality(neg, variant);
+ set_charset_quality(neg, variant);
+
+ /* Only do variant selection if we may actually choose a
+ * variant for the client
+ */
+ if (neg->may_choose) {
+
+ /* Now find out if this variant is better than the current
+ * best, either using the RVSA/1.0 algorithm, or Apache's
+ * internal server-driven algorithm. Presumably other
+ * server-driven algorithms are possible, and could be
+ * implemented here.
+ */
+
+ if (neg->use_rvsa) {
+ if (is_variant_better_rvsa(neg, variant, best, &bestq)) {
+ best = variant;
+ }
+ }
+ else {
+ if (is_variant_better(neg, variant, best, &bestq)) {
+ best = variant;
+ }
+ }
+ }
+ }
+
+ /* We now either have a best variant, or no best variant */
+
+ if (neg->use_rvsa) {
+ /* calculate result for RVSA/1.0 algorithm:
+ * only a choice response if the best variant has q>0
+ * and is definite
+ */
+ algorithm_result = (best && best->definite) && (bestq > 0) ?
+ alg_choice : alg_list;
+ }
+ else {
+ /* calculate result for Apache negotiation algorithm */
+ algorithm_result = bestq > 0 ? alg_choice : alg_list;
+ }
+
+ /* run the loop again, if the "prefer-language" got no clear result */
+ if (preferred_language && (!best || algorithm_result != alg_choice)) {
+ preferred_language = NULL;
+ continue;
+ }
+
+ break;
+ } while (1);
+
+ /* Returning a choice response with a non-neighboring variant is a
+ * protocol security error in TCN (see rfc2295). We do *not*
+ * verify here that the variant and URI are neighbors, even though
+ * we may return alg_choice. We depend on the environment (the
+ * caller) to only declare the resource transparently negotiable if
+ * all variants are neighbors.
+ */
+ *pbest = best;
+ return algorithm_result;
+}
+
+/* Sets response headers for a negotiated response.
+ * neg->is_transparent determines whether a transparently negotiated
+ * response or a plain `server driven negotiation' response is
+ * created. Applicable headers are Alternates, Vary, and TCN.
+ *
+ * The Vary header we create is sometimes longer than is required for
+ * the correct caching of negotiated results by HTTP/1.1 caches. For
+ * example if we have 3 variants x.html, x.ps.en and x.ps.nl, and if
+ * the Accept: header assigns a 0 quality to .ps, then the results of
+ * the two server-side negotiation algorithms we currently implement
+ * will never depend on Accept-Language so we could return `Vary:
+ * negotiate, accept' instead of the longer 'Vary: negotiate, accept,
+ * accept-language' which the code below will return. A routine for
+ * computing the exact minimal Vary header would be a huge pain to code
+ * and maintain though, especially because we need to take all possible
+ * twiddles in the server-side negotiation algorithms into account.
+ */
+static void set_neg_headers(request_rec *r, negotiation_state *neg,
+ int alg_result)
+{
+ apr_table_t *hdrs;
+ var_rec *avail_recs = (var_rec *) neg->avail_vars->elts;
+ const char *sample_type = NULL;
+ const char *sample_language = NULL;
+ const char *sample_encoding = NULL;
+ const char *sample_charset = NULL;
+ char *lang;
+ char *qstr;
+ char *lenstr;
+ apr_off_t len;
+ apr_array_header_t *arr;
+ int max_vlist_array = (neg->avail_vars->nelts * 21);
+ int first_variant = 1;
+ int vary_by_type = 0;
+ int vary_by_language = 0;
+ int vary_by_charset = 0;
+ int vary_by_encoding = 0;
+ int j;
+
+ /* In order to avoid O(n^2) memory copies in building Alternates,
+ * we preallocate an array with the maximum number of substrings possible,
+ * fill it with the variant list, and then concatenate the entire array.
+ * Note that if you change the number of substrings pushed, you also
+ * need to change the calculation of max_vlist_array above.
+ */
+ if (neg->send_alternates && neg->avail_vars->nelts)
+ arr = apr_array_make(r->pool, max_vlist_array, sizeof(char *));
+ else
+ arr = NULL;
+
+ /* Put headers into err_headers_out, since send_http_header()
+ * outputs both headers_out and err_headers_out.
+ */
+ hdrs = r->err_headers_out;
+
+ for (j = 0; j < neg->avail_vars->nelts; ++j) {
+ var_rec *variant = &avail_recs[j];
+
+ if (variant->content_languages && variant->content_languages->nelts) {
+ lang = apr_array_pstrcat(r->pool, variant->content_languages, ',');
+ }
+ else {
+ lang = NULL;
+ }
+
+ /* Calculate Vary by looking for any difference between variants */
+
+ if (first_variant) {
+ sample_type = variant->mime_type;
+ sample_charset = variant->content_charset;
+ sample_language = lang;
+ sample_encoding = variant->content_encoding;
+ }
+ else {
+ if (!vary_by_type &&
+ strcmp(sample_type ? sample_type : "",
+ variant->mime_type ? variant->mime_type : "")) {
+ vary_by_type = 1;
+ }
+ if (!vary_by_charset &&
+ strcmp(sample_charset ? sample_charset : "",
+ variant->content_charset ?
+ variant->content_charset : "")) {
+ vary_by_charset = 1;
+ }
+ if (!vary_by_language &&
+ strcmp(sample_language ? sample_language : "",
+ lang ? lang : "")) {
+ vary_by_language = 1;
+ }
+ if (!vary_by_encoding &&
+ strcmp(sample_encoding ? sample_encoding : "",
+ variant->content_encoding ?
+ variant->content_encoding : "")) {
+ vary_by_encoding = 1;
+ }
+ }
+ first_variant = 0;
+
+ if (!neg->send_alternates)
+ continue;
+
+ /* Generate the string components for this Alternates entry */
+
+ *((const char **) apr_array_push(arr)) = "{\"";
+ *((const char **) apr_array_push(arr)) = variant->file_name;
+ *((const char **) apr_array_push(arr)) = "\" ";
+
+ qstr = (char *) apr_palloc(r->pool, 6);
+ apr_snprintf(qstr, 6, "%1.3f", variant->source_quality);
+
+ /* Strip trailing zeros (saves those valuable network bytes) */
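+        /* e.g. "0.500" becomes "0.5", and "1.000" collapses to "1"
+         * (the decimal point is dropped as well in that case)
+         */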
+ if (qstr[4] == '0') {
+ qstr[4] = '\0';
+ if (qstr[3] == '0') {
+ qstr[3] = '\0';
+ if (qstr[2] == '0') {
+ qstr[1] = '\0';
+ }
+ }
+ }
+ *((const char **) apr_array_push(arr)) = qstr;
+
+ if (variant->mime_type && *variant->mime_type) {
+ *((const char **) apr_array_push(arr)) = " {type ";
+ *((const char **) apr_array_push(arr)) = variant->mime_type;
+ *((const char **) apr_array_push(arr)) = "}";
+ }
+ if (variant->content_charset && *variant->content_charset) {
+ *((const char **) apr_array_push(arr)) = " {charset ";
+ *((const char **) apr_array_push(arr)) = variant->content_charset;
+ *((const char **) apr_array_push(arr)) = "}";
+ }
+ if (lang) {
+ *((const char **) apr_array_push(arr)) = " {language ";
+ *((const char **) apr_array_push(arr)) = lang;
+ *((const char **) apr_array_push(arr)) = "}";
+ }
+ if (variant->content_encoding && *variant->content_encoding) {
+ /* Strictly speaking, this is non-standard, but so is TCN */
+
+ *((const char **) apr_array_push(arr)) = " {encoding ";
+ *((const char **) apr_array_push(arr)) = variant->content_encoding;
+ *((const char **) apr_array_push(arr)) = "}";
+ }
+
+ /* Note that the Alternates specification (in rfc2295) does
+ * not require that we include {length x}, so we could omit it
+ * if determining the length is too expensive. We currently
+ * always include it though. 22 bytes is enough for 2^64.
+ *
+ * If the variant is a CGI script, find_content_length would
+ * return the length of the script, not the output it
+ * produces, so we check for the presence of a handler and if
+ * there is one we don't add a length.
+ *
+ * XXX: TODO: This check does not detect a CGI script if we
+ * get the variant from a type map. This needs to be fixed
+ * (without breaking things if the type map specifies a
+ * content-length, which currently leads to the correct result).
+ */
+ if (!(variant->sub_req && variant->sub_req->handler)
+ && (len = find_content_length(neg, variant)) >= 0) {
+
+ lenstr = (char *) apr_palloc(r->pool, 22);
+ apr_snprintf(lenstr, 22, "%" APR_OFF_T_FMT, len);
+ *((const char **) apr_array_push(arr)) = " {length ";
+ *((const char **) apr_array_push(arr)) = lenstr;
+ *((const char **) apr_array_push(arr)) = "}";
+ }
+
+ *((const char **) apr_array_push(arr)) = "}";
+ *((const char **) apr_array_push(arr)) = ", "; /* trimmed below */
+ }
+
+ if (neg->send_alternates && neg->avail_vars->nelts) {
+ arr->nelts--; /* remove last comma */
+ apr_table_mergen(hdrs, "Alternates",
+ apr_array_pstrcat(r->pool, arr, '\0'));
+ }
+
+ if (neg->is_transparent || vary_by_type || vary_by_language ||
+        vary_by_charset || vary_by_encoding) {
+
+ apr_table_mergen(hdrs, "Vary", 2 + apr_pstrcat(r->pool,
+ neg->is_transparent ? ", negotiate" : "",
+ vary_by_type ? ", accept" : "",
+ vary_by_language ? ", accept-language" : "",
+ vary_by_charset ? ", accept-charset" : "",
+ vary_by_encoding ? ", accept-encoding" : "", NULL));
+ }
+
+ if (neg->is_transparent) { /* Create TCN response header */
+ apr_table_setn(hdrs, "TCN",
+ alg_result == alg_list ? "list" : "choice");
+ }
+}
+
+/**********************************************************************
+ *
+ * Return an HTML list of variants. This is output as part of the
+ * choice response or 406 status body.
+ */
+
+static char *make_variant_list(request_rec *r, negotiation_state *neg)
+{
+ apr_array_header_t *arr;
+ int i;
+ int max_vlist_array = (neg->avail_vars->nelts * 15) + 2;
+
+ /* In order to avoid O(n^2) memory copies in building the list,
+     * we preallocate an apr_array_header_t with the maximum number of
+     * substrings possible, fill it with the variant list, and then
+     * concatenate the entire array.
+ */
+ arr = apr_array_make(r->pool, max_vlist_array, sizeof(char *));
+
+ *((const char **) apr_array_push(arr)) = "Available variants:\n<ul>\n";
+
+ for (i = 0; i < neg->avail_vars->nelts; ++i) {
+ var_rec *variant = &((var_rec *) neg->avail_vars->elts)[i];
+ const char *filename = variant->file_name ? variant->file_name : "";
+ apr_array_header_t *languages = variant->content_languages;
+ const char *description = variant->description
+ ? variant->description
+ : "";
+
+ /* The format isn't very neat, and it would be nice to make
+ * the tags human readable (eg replace 'language en' with 'English').
+ * Note that if you change the number of substrings pushed, you also
+ * need to change the calculation of max_vlist_array above.
+ */
+ *((const char **) apr_array_push(arr)) = "<li><a href=\"";
+ *((const char **) apr_array_push(arr)) = filename;
+ *((const char **) apr_array_push(arr)) = "\">";
+ *((const char **) apr_array_push(arr)) = filename;
+ *((const char **) apr_array_push(arr)) = "</a> ";
+ *((const char **) apr_array_push(arr)) = description;
+
+ if (variant->mime_type && *variant->mime_type) {
+ *((const char **) apr_array_push(arr)) = ", type ";
+ *((const char **) apr_array_push(arr)) = variant->mime_type;
+ }
+ if (languages && languages->nelts) {
+ *((const char **) apr_array_push(arr)) = ", language ";
+ *((const char **) apr_array_push(arr)) = apr_array_pstrcat(r->pool,
+ languages, ',');
+ }
+ if (variant->content_charset && *variant->content_charset) {
+ *((const char **) apr_array_push(arr)) = ", charset ";
+ *((const char **) apr_array_push(arr)) = variant->content_charset;
+ }
+ if (variant->content_encoding) {
+ *((const char **) apr_array_push(arr)) = ", encoding ";
+ *((const char **) apr_array_push(arr)) = variant->content_encoding;
+ }
+ *((const char **) apr_array_push(arr)) = "</li>\n";
+ }
+ *((const char **) apr_array_push(arr)) = "</ul>\n";
+
+ return apr_array_pstrcat(r->pool, arr, '\0');
+}
+
+static void store_variant_list(request_rec *r, negotiation_state *neg)
+{
+ if (r->main == NULL) {
+ apr_table_setn(r->notes, "variant-list", make_variant_list(r, neg));
+ }
+ else {
+ apr_table_setn(r->main->notes, "variant-list",
+ make_variant_list(r->main, neg));
+ }
+}
+
+/* Called if we got a "Choice" response from the variant selection algorithm.
+ * It checks the result of the chosen variant to see if it
+ * is itself negotiated (if so, return error HTTP_VARIANT_ALSO_VARIES).
+ * Otherwise, add the appropriate headers to the current response.
+ */
+
+static int setup_choice_response(request_rec *r, negotiation_state *neg,
+ var_rec *variant)
+{
+ request_rec *sub_req;
+ const char *sub_vary;
+
+ if (!variant->sub_req) {
+ int status;
+
+ sub_req = ap_sub_req_lookup_file(variant->file_name, r, NULL);
+ status = sub_req->status;
+
+ if (status != HTTP_OK &&
+ !apr_table_get(sub_req->err_headers_out, "TCN")) {
+ ap_destroy_sub_req(sub_req);
+ return status;
+ }
+ variant->sub_req = sub_req;
+ }
+ else {
+ sub_req = variant->sub_req;
+ }
+
+ /* The variant selection algorithm told us to return a "Choice"
+ * response. This is the normal variant response, with
+     * some extra headers. First, ensure that the chosen variant
+     * did not and will not itself engage in transparent negotiation.
+     * If it does, return an error; otherwise set the appropriate
+     * headers and fall through to the normal variant handling.
+     */
+
+ /* This catches the error that a transparent type map selects a
+ * transparent multiviews resource as the best variant.
+ *
+ * XXX: We do not signal an error if a transparent type map
+ * selects a _non_transparent multiviews resource as the best
+ * variant, because we can generate a legal negotiation response
+ * in this case. In this case, the vlist_validator of the
+ * nontransparent subrequest will be lost however. This could
+ * lead to cases in which a change in the set of variants or the
+ * negotiation algorithm of the nontransparent resource is never
+ * propagated up to a HTTP/1.1 cache which interprets Vary. To be
+ * completely on the safe side we should return HTTP_VARIANT_ALSO_VARIES
+ * for this type of recursive negotiation too.
+ */
+ if (neg->is_transparent &&
+ apr_table_get(sub_req->err_headers_out, "TCN")) {
+ return HTTP_VARIANT_ALSO_VARIES;
+ }
+
+ /* This catches the error that a transparent type map recursively
+ * selects, as the best variant, another type map which itself
+ * causes transparent negotiation to be done.
+ *
+ * XXX: Actually, we catch this error by catching all cases of
+ * type map recursion. There are some borderline recursive type
+ * map arrangements which would not produce transparent
+ * negotiation protocol errors or lack of cache propagation
+ * problems, but such arrangements are very hard to detect at this
+ * point in the control flow, so we do not bother to single them
+ * out.
+ *
+ * Recursive type maps imply a recursive arrangement of negotiated
+ * resources which is visible to outside clients, and this is not
+ * supported by the transparent negotiation caching protocols, so
+ * if we are to have generic support for recursive type maps, we
+ * have to create some configuration setting which makes all type
+ * maps non-transparent when recursion is enabled. Also, if we
+ * want recursive type map support which ensures propagation of
+ * type map changes into HTTP/1.1 caches that handle Vary, we
+ * would have to extend the current mechanism for generating
+ * variant list validators.
+ */
+ if (sub_req->handler && strcmp(sub_req->handler, "type-map") == 0) {
+ return HTTP_VARIANT_ALSO_VARIES;
+ }
+
+ /* This adds an appropriate Variant-Vary header if the subrequest
+ * is a multiviews resource.
+ *
+ * XXX: TODO: Note that this does _not_ handle any Vary header
+ * returned by a CGI if sub_req is a CGI script, because we don't
+ * see that Vary header yet at this point in the control flow.
+ * This won't cause any cache consistency problems _unless_ the
+ * CGI script also returns a Cache-Control header marking the
+     * response as cacheable. This needs to be fixed; there are also
+     * problems if a CGI returns an ETag header, which likewise need
+     * to be fixed.
+ */
+ if ((sub_vary = apr_table_get(sub_req->err_headers_out, "Vary")) != NULL) {
+ apr_table_setn(r->err_headers_out, "Variant-Vary", sub_vary);
+
+ /* Move the subreq Vary header into the main request to
+ * prevent having two Vary headers in the response, which
+ * would be legal but strange.
+ */
+ apr_table_setn(r->err_headers_out, "Vary", sub_vary);
+ apr_table_unset(sub_req->err_headers_out, "Vary");
+ }
+
+ apr_table_setn(r->err_headers_out, "Content-Location",
+ apr_pstrdup(r->pool, variant->file_name));
+
+ set_neg_headers(r, neg, alg_choice); /* add Alternates and Vary */
+
+ /* Still to do by caller: add Expires */
+
+ return 0;
+}
+
+/****************************************************************
+ *
+ * Executive...
+ */
+
+static int do_negotiation(request_rec *r, negotiation_state *neg,
+ var_rec **bestp, int prefer_scripts)
+{
+ var_rec *avail_recs = (var_rec *) neg->avail_vars->elts;
+ int alg_result; /* result of variant selection algorithm */
+ int res;
+ int j;
+
+ /* Decide if resource is transparently negotiable */
+
+ /* GET or HEAD? (HEAD has same method number as GET) */
+ if (r->method_number == M_GET) {
+
+ /* maybe this should be configurable, see also the comment
+ * about recursive type maps in setup_choice_response()
+ */
+ neg->is_transparent = 1;
+
+ /* We can't be transparent if we are a map file in the middle
+ * of the request URI.
+ */
+ if (r->path_info && *r->path_info)
+ neg->is_transparent = 0;
+
+ for (j = 0; j < neg->avail_vars->nelts; ++j) {
+ var_rec *variant = &avail_recs[j];
+
+ /* We can't be transparent, because of internal
+ * assumptions in best_match(), if there is a
+ * non-neighboring variant. We can have a non-neighboring
+ * variant when processing a type map.
+ */
+ if (ap_strchr_c(variant->file_name, '/'))
+ neg->is_transparent = 0;
+
+ /* We can't be transparent, because of the behavior
+ * of variant typemap bodies.
+ */
+ if (variant->body) {
+ neg->is_transparent = 0;
+ }
+ }
+ }
+
+ if (neg->is_transparent) {
+ parse_negotiate_header(r, neg);
+ }
+ else { /* configure negotiation on non-transparent resource */
+ neg->may_choose = 1;
+ }
+
+ maybe_add_default_accepts(neg, prefer_scripts);
+
+ alg_result = best_match(neg, bestp);
+
+ /* alg_result is one of
+ * alg_choice: a best variant is chosen
+ * alg_list: no best variant is chosen
+ */
+
+ if (alg_result == alg_list) {
+ /* send a list response or HTTP_NOT_ACCEPTABLE error response */
+
+ neg->send_alternates = 1; /* always include Alternates header */
+ set_neg_headers(r, neg, alg_result);
+ store_variant_list(r, neg);
+
+ if (neg->is_transparent && neg->ua_supports_trans) {
+ /* XXX todo: expires? cachability? */
+
+ /* Some HTTP/1.0 clients are known to choke when they get
+ * a 300 (multiple choices) response without a Location
+             * header. However, the 300 code response we are about
+ * to generate will only reach 1.0 clients which support
+ * transparent negotiation, and they should be OK. The
+ * response should never reach older 1.0 clients, even if
+ * we have CacheNegotiatedDocs enabled, because no 1.0
+ * proxy cache (we know of) will cache and return 300
+ * responses (they certainly won't if they conform to the
+ * HTTP/1.0 specification).
+ */
+ return HTTP_MULTIPLE_CHOICES;
+ }
+
+ if (!*bestp) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "no acceptable variant: %s", r->filename);
+ return HTTP_NOT_ACCEPTABLE;
+ }
+ }
+
+ /* Variant selection chose a variant */
+
+ /* XXX todo: merge the two cases in the if statement below */
+ if (neg->is_transparent) {
+
+ if ((res = setup_choice_response(r, neg, *bestp)) != 0) {
+ return res; /* return if error */
+ }
+ }
+ else {
+ set_neg_headers(r, neg, alg_result);
+ }
+
+ /* Make sure caching works - Vary should handle HTTP/1.1, but for
+ * HTTP/1.0, we can't allow caching at all.
+ */
+
+ /* XXX: Note that we only set r->no_cache to 1, which causes
+ * Expires: <now> to be added, when responding to a HTTP/1.0
+ * client. If we return the response to a 1.1 client, we do not
+ * add Expires <now>, because doing so would degrade 1.1 cache
+ * performance by preventing re-use of the response without prior
+ * revalidation. On the other hand, if the 1.1 client is a proxy
+ * which was itself contacted by a 1.0 client, or a proxy cache
+ * which can be contacted later by 1.0 clients, then we currently
+ * rely on this 1.1 proxy to add the Expires: <now> when it
+ * forwards the response.
+ *
+ * XXX: TODO: Find out if the 1.1 spec requires proxies and
+ * tunnels to add Expires: <now> when forwarding the response to
+ * 1.0 clients. I (kh) recall it is rather vague on this point.
+ * Testing actual 1.1 proxy implementations would also be nice. If
+ * Expires: <now> is not added by proxies then we need to always
+ * include Expires: <now> ourselves to ensure correct caching, but
+ * this would degrade HTTP/1.1 cache efficiency unless we also add
+ * Cache-Control: max-age=N, which we currently don't.
+ *
+ * Roy: No, we are not going to screw over HTTP future just to
+ * ensure that people who can't be bothered to upgrade their
+ * clients will always receive perfect server-side negotiation.
+ * Hell, those clients are sending bogus accept headers anyway.
+ *
+ * Manual setting of cache-control/expires always overrides this
+ * automated kluge, on purpose.
+ */
+
+ if ((!do_cache_negotiated_docs(r->server)
+ && (r->proto_num < HTTP_VERSION(1,1)))
+ && neg->count_multiviews_variants != 1) {
+ r->no_cache = 1;
+ }
+
+ return OK;
+}
+
+static int handle_map_file(request_rec *r)
+{
+ negotiation_state *neg;
+ apr_file_t *map;
+ var_rec *best;
+ int res;
+ char *udir;
+
+    if (strcmp(r->handler, MAP_FILE_MAGIC_TYPE)
+        && strcmp(r->handler, "type-map")) {
+        return DECLINED;
+    }
+
+ neg = parse_accept_headers(r);
+ if ((res = read_type_map(&map, neg, r))) {
+ return res;
+ }
+
+ res = do_negotiation(r, neg, &best, 0);
+ if (res != 0) return res;
+
+ if (best->body)
+ {
+ conn_rec *c = r->connection;
+ apr_bucket_brigade *bb;
+ apr_bucket *e;
+
+ ap_allow_standard_methods(r, REPLACE_ALLOW, M_GET, M_OPTIONS,
+ M_POST, -1);
+ /* XXX: ?
+ * if (r->method_number == M_OPTIONS) {
+ * return ap_send_http_options(r);
+ *}
+ */
+ if (r->method_number != M_GET && r->method_number != M_POST) {
+ return HTTP_METHOD_NOT_ALLOWED;
+ }
+
+ /* ### These may be implemented by adding some 'extra' info
+ * of the file offset onto the etag
+ * ap_update_mtime(r, r->finfo.mtime);
+ * ap_set_last_modified(r);
+ * ap_set_etag(r);
+ */
+ apr_table_setn(r->headers_out, "Accept-Ranges", "bytes");
+ ap_set_content_length(r, best->bytes);
+
+ /* set MIME type and charset as negotiated */
+ if (best->mime_type && *best->mime_type) {
+ if (best->content_charset && *best->content_charset) {
+ ap_set_content_type(r, apr_pstrcat(r->pool,
+ best->mime_type,
+ "; charset=",
+ best->content_charset,
+ NULL));
+ }
+ else {
+ ap_set_content_type(r, apr_pstrdup(r->pool, best->mime_type));
+ }
+ }
+
+ /* set Content-language(s) as negotiated */
+ if (best->content_languages && best->content_languages->nelts) {
+ r->content_languages = apr_array_copy(r->pool,
+ best->content_languages);
+ }
+
+ /* set Content-Encoding as negotiated */
+ if (best->content_encoding && *best->content_encoding) {
+ r->content_encoding = apr_pstrdup(r->pool,
+ best->content_encoding);
+ }
+
+ if ((res = ap_meets_conditions(r)) != OK) {
+ return res;
+ }
+
+ if ((res = ap_discard_request_body(r)) != OK) {
+ return res;
+ }
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ e = apr_bucket_file_create(map, best->body,
+ (apr_size_t)best->bytes, r->pool,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+
+ return ap_pass_brigade(r->output_filters, bb);
+ }
+
+ if (r->path_info && *r->path_info) {
+ /* remove any path_info from the end of the uri before trying
+ * to change the filename. r->path_info from the original
+ * request is passed along on the redirect.
+ */
+ r->uri[ap_find_path_info(r->uri, r->path_info)] = '\0';
+ }
+ udir = ap_make_dirstr_parent(r->pool, r->uri);
+ udir = ap_escape_uri(r->pool, udir);
+ ap_internal_redirect(apr_pstrcat(r->pool, udir, best->file_name,
+ r->path_info, NULL), r);
+ return OK;
+}
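+/* An illustrative type map (hypothetical file foo.var) as handled above;
+ * each record describes one variant, and records are separated by blank
+ * lines:
+ *
+ *   URI: foo
+ *
+ *   URI: foo.en.html
+ *   Content-type: text/html
+ *   Content-language: en
+ *
+ *   URI: foo.de.html
+ *   Content-type: text/html; qs=0.8
+ *   Content-language: de
+ */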
+
+static int handle_multi(request_rec *r)
+{
+ negotiation_state *neg;
+ var_rec *best, *avail_recs;
+ request_rec *sub_req;
+ int res;
+ int j;
+
+ if (r->finfo.filetype != APR_NOFILE
+ || !(ap_allow_options(r) & OPT_MULTI)) {
+ return DECLINED;
+ }
+
+ neg = parse_accept_headers(r);
+
+ if ((res = read_types_multi(neg))) {
+ return_from_multi:
+ /* free all allocated memory from subrequests */
+ avail_recs = (var_rec *) neg->avail_vars->elts;
+ for (j = 0; j < neg->avail_vars->nelts; ++j) {
+ var_rec *variant = &avail_recs[j];
+ if (variant->sub_req) {
+ ap_destroy_sub_req(variant->sub_req);
+ }
+ }
+ return res;
+ }
+ if (neg->avail_vars->nelts == 0) {
+ return DECLINED;
+ }
+
+ res = do_negotiation(r, neg, &best,
+ (r->method_number != M_GET) || r->args ||
+ (r->path_info && *r->path_info));
+ if (res != 0)
+ goto return_from_multi;
+
+ if (!(sub_req = best->sub_req)) {
+ /* We got this out of a map file, so we don't actually have
+ * a sub_req structure yet. Get one now.
+ */
+
+ sub_req = ap_sub_req_lookup_file(best->file_name, r, NULL);
+ if (sub_req->status != HTTP_OK) {
+ res = sub_req->status;
+ ap_destroy_sub_req(sub_req);
+ goto return_from_multi;
+ }
+ }
+ if (sub_req->args == NULL) {
+ sub_req->args = r->args;
+ }
+
+ /* now do a "fast redirect" ... promotes the sub_req into the main req */
+ ap_internal_fast_redirect(sub_req, r);
+
+    /* give no advice about the time of this subrequest. Perhaps we
+     * should tally the last mtime among all variants, and date
+ * the most recent, but that could confuse the proxies.
+ */
+ r->mtime = 0;
+
+ /* clean up all but our favorite variant, since that sub_req
+ * is now merged into the main request!
+ */
+ avail_recs = (var_rec *) neg->avail_vars->elts;
+ for (j = 0; j < neg->avail_vars->nelts; ++j) {
+ var_rec *variant = &avail_recs[j];
+ if (variant != best && variant->sub_req) {
+ ap_destroy_sub_req(variant->sub_req);
+ }
+ }
+ return OK;
+}
+
+/**********************************************************************
+ * There is a problem with content-encoding, as some clients send and
+ * expect an x- token (e.g. x-gzip) while others expect the plain token
+ * (i.e. gzip). To try and deal with this as best as possible we do
+ * the following: if the client sent an Accept-Encoding header and it
+ * contains a plain token corresponding to the content encoding of the
+ * response, then set content encoding using the plain token. Else if
+ * the A-E header contains the x- token use the x- token in the C-E
+ * header. Else don't do anything.
+ *
+ * Note that if no A-E header was sent, or it does not contain a token
+ * compatible with the final content encoding, then the token in the
+ * C-E header will be whatever was specified in the AddEncoding
+ * directive.
+ */
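+/* A worked example (hypothetical request): if AddEncoding left the
+ * content encoding as "x-gzip" and the client sent
+ * "Accept-Encoding: gzip", the plain token wins and the response goes
+ * out with "Content-Encoding: gzip"; had the client sent only
+ * "Accept-Encoding: x-gzip", the x- form would be kept instead.
+ */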
+static int fix_encoding(request_rec *r)
+{
+ const char *enc = r->content_encoding;
+ char *x_enc = NULL;
+ apr_array_header_t *accept_encodings;
+ accept_rec *accept_recs;
+ int i;
+
+ if (!enc || !*enc) {
+ return DECLINED;
+ }
+
+ if (enc[0] == 'x' && enc[1] == '-') {
+ enc += 2;
+ }
+
+ if ((accept_encodings = do_header_line(r->pool,
+ apr_table_get(r->headers_in, "Accept-Encoding"))) == NULL) {
+ return DECLINED;
+ }
+
+ accept_recs = (accept_rec *) accept_encodings->elts;
+
+ for (i = 0; i < accept_encodings->nelts; ++i) {
+ char *name = accept_recs[i].name;
+
+ if (!strcmp(name, enc)) {
+ r->content_encoding = name;
+ return OK;
+ }
+
+ if (name[0] == 'x' && name[1] == '-' && !strcmp(name+2, enc)) {
+ x_enc = name;
+ }
+ }
+
+ if (x_enc) {
+ r->content_encoding = x_enc;
+ return OK;
+ }
+
+ return DECLINED;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_fixups(fix_encoding,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_type_checker(handle_multi,NULL,NULL,APR_HOOK_FIRST);
+ ap_hook_handler(handle_map_file,NULL,NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA negotiation_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_neg_dir_config, /* dir config creator */
+ merge_neg_dir_configs, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server config */
+ negotiation_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.dsp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.dsp
new file mode 100644
index 00000000..fea69f66
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_negotiation" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_negotiation - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_negotiation.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_negotiation.mak" CFG="mod_negotiation - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_negotiation - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_negotiation - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_negotiation - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_negotiation_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_negotiation.so" /base:@..\..\os\win32\BaseAddr.ref,mod_negotiation.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_negotiation.so" /base:@..\..\os\win32\BaseAddr.ref,mod_negotiation.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_negotiation - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_negotiation_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_negotiation.so" /base:@..\..\os\win32\BaseAddr.ref,mod_negotiation.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_negotiation.so" /base:@..\..\os\win32\BaseAddr.ref,mod_negotiation.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_negotiation - Win32 Release"
+# Name "mod_negotiation - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_negotiation.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_negotiation.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_negotiation - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_negotiation.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_negotiation.so "negotiation_module for Apache" ../../include/ap_release.h > .\mod_negotiation.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_negotiation - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_negotiation.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_negotiation.so "negotiation_module for Apache" ../../include/ap_release.h > .\mod_negotiation.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.exp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.exp
new file mode 100644
index 00000000..a7c18da1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.exp
@@ -0,0 +1 @@
+negotiation_module
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.la b/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.la
new file mode 100644
index 00000000..7b42e96c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.la
@@ -0,0 +1,35 @@
+# mod_negotiation.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_negotiation.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_negotiation.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.lo b/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.lo
new file mode 100644
index 00000000..c2852784
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.lo
@@ -0,0 +1,12 @@
+# mod_negotiation.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_negotiation.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_negotiation.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.o b/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.o
new file mode 100644
index 00000000..38f00426
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_negotiation.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.c b/rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.c
new file mode 100644
index 00000000..dcbac539
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.c
@@ -0,0 +1,4670 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _ _
+** _ __ ___ ___ __| | _ __ _____ ___ __(_) |_ ___
+** | '_ ` _ \ / _ \ / _` | | '__/ _ \ \ /\ / / '__| | __/ _ \
+** | | | | | | (_) | (_| | | | | __/\ V V /| | | | || __/
+** |_| |_| |_|\___/ \__,_|___|_| \___| \_/\_/ |_| |_|\__\___|
+** |_____|
+**
+** URL Rewriting Module
+**
+** This module uses a rule-based rewriting engine (based on a
+** regular-expression parser) to rewrite requested URLs on the fly.
+**
+** It supports an unlimited number of additional rule conditions (which can
+** operate on a lot of variables, even on HTTP headers) for granular
+** matching and even external database lookups (either via plain text
+** tables, DBM hash files or even external processes) for advanced URL
+** substitution.
+**
+** It operates on the full URLs (including the PATH_INFO part) both in
+** per-server context (httpd.conf) and per-dir context (.htaccess) and even
+** can generate QUERY_STRING parts on result. The rewriting result finally
+** can lead to internal subprocessing, external request redirection or even
+** to internal proxy throughput.
+**
+** This module was originally written in April 1996 and
+** gifted exclusively to The Apache Software Foundation in July 1997 by
+**
+** Ralf S. Engelschall
+** rse engelschall.com
+** www.engelschall.com
+*/
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_hash.h"
+#include "apr_user.h"
+#include "apr_lib.h"
+#include "apr_signal.h"
+#include "apr_global_mutex.h"
+
+#define APR_WANT_STRFUNC
+#define APR_WANT_IOVEC
+#include "apr_want.h"
+
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#if APR_HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_protocol.h"
+#include "mod_rewrite.h"
+
+/* mod_ssl.h is not safe for inclusion in 2.0, so duplicate the
+ * optional function declarations. */
+APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup,
+ (apr_pool_t *, server_rec *,
+ conn_rec *, request_rec *,
+ char *));
+APR_DECLARE_OPTIONAL_FN(int, ssl_is_https, (conn_rec *));
+
+#if !defined(OS2) && !defined(WIN32) && !defined(BEOS) && !defined(NETWARE)
+#include "unixd.h"
+#define MOD_REWRITE_SET_MUTEX_PERMS /* XXX Apache should define something */
+#endif
+
+/*
+** +-------------------------------------------------------+
+** | |
+** | static module configuration
+** | |
+** +-------------------------------------------------------+
+*/
+
+
+/*
+** Our interface to the Apache server kernel:
+**
+** o Runtime logic of a request is as following:
+** while(request or subrequest)
+** foreach(stage #0...#9)
+** foreach(module) (**)
+** try to run hook
+**
+** o the order of modules at (**) is the inverted order as
+** given in the "Configuration" file, i.e. the last module
+** specified is the first one called for each hook!
+** The core module is always the last!
+**
+** o there are two different types of result checking and
+** continue processing:
+** for hook #0,#1,#4,#5,#6,#8:
+**          the hook run loop stops at the first module which gives
+**          back a result != DECLINED, i.e. it usually returns OK,
+**          which says "OK, module has handled this _stage_"; for #1
+**          this does not have to mean "OK, the filename is now valid".
+** for hook #2,#3,#7,#9:
+**          all hooks are run, independent of the result
+**
+** o at the last stage, the core module always
+** - says "HTTP_BAD_REQUEST" if r->filename does not begin with "/"
+**      - prefixes the URL with document_root (or replaces server_root
+**        with document_root) and sets r->filename
+**      - always returns "OK", independent of whether the file really
+**        exists or not!
+*/
+
+ /* the module (predeclaration) */
+module AP_MODULE_DECLARE_DATA rewrite_module;
+
+ /* rewritemap int: handler function registry */
+static apr_hash_t *mapfunc_hash;
+
+ /* the cache */
+static cache *cachep;
+
+ /* whether proxy module is available or not */
+static int proxy_available;
+
+static const char *lockname;
+static apr_global_mutex_t *rewrite_mapr_lock_acquire = NULL;
+static apr_global_mutex_t *rewrite_log_lock = NULL;
+
+/* Optional functions imported from mod_ssl when loaded: */
+static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *rewrite_ssl_lookup = NULL;
+static APR_OPTIONAL_FN_TYPE(ssl_is_https) *rewrite_is_https = NULL;
+
+/*
+** +-------------------------------------------------------+
+** | |
+** | configuration directive handling
+** | |
+** +-------------------------------------------------------+
+*/
+
+/*
+**
+** per-server configuration structure handling
+**
+*/
+
+static void *config_server_create(apr_pool_t *p, server_rec *s)
+{
+ rewrite_server_conf *a;
+
+ a = (rewrite_server_conf *)apr_pcalloc(p, sizeof(rewrite_server_conf));
+
+ a->state = ENGINE_DISABLED;
+ a->options = OPTION_NONE;
+ a->rewritelogfile = NULL;
+ a->rewritelogfp = NULL;
+ a->rewriteloglevel = 0;
+ a->rewritemaps = apr_array_make(p, 2, sizeof(rewritemap_entry));
+ a->rewriteconds = apr_array_make(p, 2, sizeof(rewritecond_entry));
+ a->rewriterules = apr_array_make(p, 2, sizeof(rewriterule_entry));
+ a->server = s;
+ a->redirect_limit = 0; /* unset (use default) */
+
+ return (void *)a;
+}
+
+static void *config_server_merge(apr_pool_t *p, void *basev, void *overridesv)
+{
+ rewrite_server_conf *a, *base, *overrides;
+
+ a = (rewrite_server_conf *)apr_pcalloc(p,
+ sizeof(rewrite_server_conf));
+ base = (rewrite_server_conf *)basev;
+ overrides = (rewrite_server_conf *)overridesv;
+
+ a->state = overrides->state;
+ a->options = overrides->options;
+ a->server = overrides->server;
+ a->redirect_limit = overrides->redirect_limit
+ ? overrides->redirect_limit
+ : base->redirect_limit;
+
+ if (a->options & OPTION_INHERIT) {
+ /*
+ * local directives override
+ * and anything else is inherited
+ */
+ a->rewriteloglevel = overrides->rewriteloglevel != 0
+ ? overrides->rewriteloglevel
+ : base->rewriteloglevel;
+ a->rewritelogfile = overrides->rewritelogfile != NULL
+ ? overrides->rewritelogfile
+ : base->rewritelogfile;
+ a->rewritelogfp = overrides->rewritelogfp != NULL
+ ? overrides->rewritelogfp
+ : base->rewritelogfp;
+ a->rewritemaps = apr_array_append(p, overrides->rewritemaps,
+ base->rewritemaps);
+ a->rewriteconds = apr_array_append(p, overrides->rewriteconds,
+ base->rewriteconds);
+ a->rewriterules = apr_array_append(p, overrides->rewriterules,
+ base->rewriterules);
+ }
+ else {
+ /*
+ * local directives override
+ * and anything else gets defaults
+ */
+ a->rewriteloglevel = overrides->rewriteloglevel;
+ a->rewritelogfile = overrides->rewritelogfile;
+ a->rewritelogfp = overrides->rewritelogfp;
+ a->rewritemaps = overrides->rewritemaps;
+ a->rewriteconds = overrides->rewriteconds;
+ a->rewriterules = overrides->rewriterules;
+ }
+
+ return (void *)a;
+}
+
+
+/*
+**
+** per-directory configuration structure handling
+**
+*/
+
+static void *config_perdir_create(apr_pool_t *p, char *path)
+{
+ rewrite_perdir_conf *a;
+
+ a = (rewrite_perdir_conf *)apr_pcalloc(p, sizeof(rewrite_perdir_conf));
+
+ a->state = ENGINE_DISABLED;
+ a->options = OPTION_NONE;
+ a->baseurl = NULL;
+ a->rewriteconds = apr_array_make(p, 2, sizeof(rewritecond_entry));
+ a->rewriterules = apr_array_make(p, 2, sizeof(rewriterule_entry));
+ a->redirect_limit = 0; /* unset (use server config) */
+
+ if (path == NULL) {
+ a->directory = NULL;
+ }
+ else {
+ /* make sure it has a trailing slash */
+ if (path[strlen(path)-1] == '/') {
+ a->directory = apr_pstrdup(p, path);
+ }
+ else {
+ a->directory = apr_pstrcat(p, path, "/", NULL);
+ }
+ }
+
+ return (void *)a;
+}
+
+static void *config_perdir_merge(apr_pool_t *p, void *basev, void *overridesv)
+{
+ rewrite_perdir_conf *a, *base, *overrides;
+
+ a = (rewrite_perdir_conf *)apr_pcalloc(p,
+ sizeof(rewrite_perdir_conf));
+ base = (rewrite_perdir_conf *)basev;
+ overrides = (rewrite_perdir_conf *)overridesv;
+
+ a->state = overrides->state;
+ a->options = overrides->options;
+ a->directory = overrides->directory;
+ a->baseurl = overrides->baseurl;
+ a->redirect_limit = overrides->redirect_limit
+ ? overrides->redirect_limit
+ : base->redirect_limit;
+
+ if (a->options & OPTION_INHERIT) {
+ a->rewriteconds = apr_array_append(p, overrides->rewriteconds,
+ base->rewriteconds);
+ a->rewriterules = apr_array_append(p, overrides->rewriterules,
+ base->rewriterules);
+ }
+ else {
+ a->rewriteconds = overrides->rewriteconds;
+ a->rewriterules = overrides->rewriterules;
+ }
+
+ return (void *)a;
+}
+
+
+/*
+**
+** the configuration commands
+**
+*/
+
+static const char *cmd_rewriteengine(cmd_parms *cmd,
+ void *in_dconf, int flag)
+{
+ rewrite_perdir_conf *dconf = in_dconf;
+ rewrite_server_conf *sconf;
+
+ sconf = ap_get_module_config(cmd->server->module_config, &rewrite_module);
+
+ if (cmd->path == NULL) { /* is server command */
+ sconf->state = (flag ? ENGINE_ENABLED : ENGINE_DISABLED);
+ }
+ else /* is per-directory command */ {
+ dconf->state = (flag ? ENGINE_ENABLED : ENGINE_DISABLED);
+ }
+
+ return NULL;
+}
+
+static const char *cmd_rewriteoptions(cmd_parms *cmd,
+ void *in_dconf, const char *option)
+{
+ int options = 0, limit = 0;
+ char *w;
+
+ while (*option) {
+ w = ap_getword_conf(cmd->pool, &option);
+
+ if (!strcasecmp(w, "inherit")) {
+ options |= OPTION_INHERIT;
+ }
+ else if (!strncasecmp(w, "MaxRedirects=", 13)) {
+ limit = atoi(&w[13]);
+ if (limit <= 0) {
+ return "RewriteOptions: MaxRedirects takes a number greater "
+ "than zero.";
+ }
+ }
+ else if (!strcasecmp(w, "MaxRedirects")) { /* be nice */
+ return "RewriteOptions: MaxRedirects has the format MaxRedirects"
+ "=n.";
+ }
+ else {
+ return apr_pstrcat(cmd->pool, "RewriteOptions: unknown option '",
+ w, "'", NULL);
+ }
+ }
+
+ /* put it into the appropriate config */
+ if (cmd->path == NULL) { /* is server command */
+ rewrite_server_conf *conf =
+ ap_get_module_config(cmd->server->module_config,
+ &rewrite_module);
+
+ conf->options |= options;
+ conf->redirect_limit = limit;
+ }
+ else { /* is per-directory command */
+ rewrite_perdir_conf *conf = in_dconf;
+
+ conf->options |= options;
+ conf->redirect_limit = limit;
+ }
+
+ return NULL;
+}
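+/* For example (hypothetical configuration), a line such as
+ *
+ *   RewriteOptions Inherit MaxRedirects=10
+ *
+ * sets OPTION_INHERIT and a redirect_limit of 10 in the enclosing
+ * server or per-directory configuration.
+ */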
+
+static const char *cmd_rewritelog(cmd_parms *cmd, void *dconf, const char *a1)
+{
+ rewrite_server_conf *sconf;
+
+ sconf = ap_get_module_config(cmd->server->module_config, &rewrite_module);
+
+ sconf->rewritelogfile = a1;
+
+ return NULL;
+}
+
+static const char *cmd_rewriteloglevel(cmd_parms *cmd, void *dconf,
+ const char *a1)
+{
+ rewrite_server_conf *sconf;
+
+ sconf = ap_get_module_config(cmd->server->module_config, &rewrite_module);
+
+ sconf->rewriteloglevel = atoi(a1);
+
+ return NULL;
+}
+
+static const char *cmd_rewritemap(cmd_parms *cmd, void *dconf, const char *a1,
+ const char *a2)
+{
+ rewrite_server_conf *sconf;
+ rewritemap_entry *newmap;
+ apr_finfo_t st;
+
+ sconf = ap_get_module_config(cmd->server->module_config, &rewrite_module);
+
+ newmap = apr_array_push(sconf->rewritemaps);
+
+ newmap->name = a1;
+ newmap->func = NULL;
+ if (strncmp(a2, "txt:", 4) == 0) {
+ newmap->type = MAPTYPE_TXT;
+ newmap->datafile = a2+4;
+ newmap->checkfile = a2+4;
+ newmap->cachename = apr_psprintf(cmd->pool, "%pp:%s",
+ (void *)cmd->server, a1);
+ }
+ else if (strncmp(a2, "rnd:", 4) == 0) {
+ newmap->type = MAPTYPE_RND;
+ newmap->datafile = a2+4;
+ newmap->checkfile = a2+4;
+ newmap->cachename = apr_psprintf(cmd->pool, "%pp:%s",
+ (void *)cmd->server, a1);
+ }
+ else if (strncmp(a2, "dbm", 3) == 0) {
+ const char *ignored_fname;
+ int bad = 0;
+ apr_status_t rv;
+
+ newmap->type = MAPTYPE_DBM;
+ newmap->cachename = apr_psprintf(cmd->pool, "%pp:%s",
+ (void *)cmd->server, a1);
+
+ if (a2[3] == ':') {
+ newmap->dbmtype = "default";
+ newmap->datafile = a2+4;
+ }
+ else if (a2[3] == '=') {
+ const char *colon = ap_strchr_c(a2 + 4, ':');
+
+ if (colon) {
+ newmap->dbmtype = apr_pstrndup(cmd->pool, a2 + 4,
+ colon - (a2 + 3) - 1);
+ newmap->datafile = colon + 1;
+ }
+ else {
+ ++bad;
+ }
+ }
+ else {
+ ++bad;
+ }
+
+ if (bad) {
+ return apr_pstrcat(cmd->pool, "RewriteMap: bad map:",
+ a2, NULL);
+ }
+
+ rv = apr_dbm_get_usednames_ex(cmd->pool, newmap->dbmtype,
+ newmap->datafile, &newmap->checkfile,
+ &ignored_fname);
+ if (rv != APR_SUCCESS) {
+ return apr_pstrcat(cmd->pool, "RewriteMap: dbm type ",
+ newmap->dbmtype, " is invalid", NULL);
+ }
+ }
+ else if (strncmp(a2, "prg:", 4) == 0) {
+ newmap->type = MAPTYPE_PRG;
+ apr_tokenize_to_argv(a2 + 4, &newmap->argv, cmd->pool);
+ newmap->datafile = NULL;
+ newmap->checkfile = newmap->argv[0];
+ newmap->cachename = NULL;
+
+ }
+ else if (strncmp(a2, "int:", 4) == 0) {
+ newmap->type = MAPTYPE_INT;
+ newmap->datafile = NULL;
+ newmap->checkfile = NULL;
+ newmap->cachename = NULL;
+ newmap->func = (char *(*)(request_rec *,char *))
+ apr_hash_get(mapfunc_hash, a2+4, strlen(a2+4));
+ if ((sconf->state == ENGINE_ENABLED) && (newmap->func == NULL)) {
+ return apr_pstrcat(cmd->pool, "RewriteMap: internal map not found:",
+ a2+4, NULL);
+ }
+ }
+ else {
+ newmap->type = MAPTYPE_TXT;
+ newmap->datafile = a2;
+ newmap->checkfile = a2;
+ newmap->cachename = apr_psprintf(cmd->pool, "%pp:%s",
+ (void *)cmd->server, a1);
+ }
+ newmap->fpin = NULL;
+ newmap->fpout = NULL;
+
+ if (newmap->checkfile && (sconf->state == ENGINE_ENABLED)
+ && (apr_stat(&st, newmap->checkfile, APR_FINFO_MIN,
+ cmd->pool) != APR_SUCCESS)) {
+ return apr_pstrcat(cmd->pool,
+ "RewriteMap: file for map ", newmap->name,
+ " not found:", newmap->checkfile, NULL);
+ }
+
+ return NULL;
+}
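+/* Illustrative RewriteMap directives for the map types parsed above
+ * (map names and paths are hypothetical):
+ *
+ *   RewriteMap lowercase int:tolower
+ *   RewriteMap hostmap   txt:/path/to/hosts.map
+ *   RewriteMap servers   rnd:/path/to/servers.map
+ *   RewriteMap quotes    dbm=sdbm:/path/to/quotes.map
+ *   RewriteMap extmap    prg:/path/to/mapprog
+ */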
+
+static const char *cmd_rewritelock(cmd_parms *cmd, void *dconf, const char *a1)
+{
+ const char *error;
+
+ if ((error = ap_check_cmd_context(cmd, GLOBAL_ONLY)) != NULL)
+ return error;
+
+ /* fixup the path, especially for rewritelock_remove() */
+ lockname = ap_server_root_relative(cmd->pool, a1);
+
+ if (!lockname) {
+ return apr_pstrcat(cmd->pool, "Invalid RewriteLock path ", a1);
+ }
+
+ return NULL;
+}
+
+static const char *cmd_rewritebase(cmd_parms *cmd, void *in_dconf,
+ const char *a1)
+{
+ rewrite_perdir_conf *dconf = in_dconf;
+
+ if (cmd->path == NULL || dconf == NULL) {
+ return "RewriteBase: only valid in per-directory config files";
+ }
+ if (a1[0] == '\0') {
+ return "RewriteBase: empty URL not allowed";
+ }
+ if (a1[0] != '/') {
+ return "RewriteBase: argument is not a valid URL";
+ }
+
+ dconf->baseurl = a1;
+
+ return NULL;
+}
+
+static const char *cmd_rewritecond(cmd_parms *cmd, void *in_dconf,
+ const char *in_str)
+{
+ rewrite_perdir_conf *dconf = in_dconf;
+ char *str = apr_pstrdup(cmd->pool, in_str);
+ rewrite_server_conf *sconf;
+ rewritecond_entry *newcond;
+ regex_t *regexp;
+ char *a1;
+ char *a2;
+ char *a3;
+ char *cp;
+ const char *err;
+ int rc;
+
+ sconf = ap_get_module_config(cmd->server->module_config, &rewrite_module);
+
+ /* make a new entry in the internal temporary rewrite rule list */
+ if (cmd->path == NULL) { /* is server command */
+ newcond = apr_array_push(sconf->rewriteconds);
+ }
+ else { /* is per-directory command */
+ newcond = apr_array_push(dconf->rewriteconds);
+ }
+
+ /* parse the argument line ourself */
+ if (parseargline(str, &a1, &a2, &a3)) {
+ return apr_pstrcat(cmd->pool, "RewriteCond: bad argument line '", str,
+ "'", NULL);
+ }
+
+ /* arg1: the input string */
+ newcond->input = apr_pstrdup(cmd->pool, a1);
+
+ /* arg3: optional flags field
+       (this has to be parsed first, because we need to
+       know whether the regex should be compiled with ICASE!) */
+ newcond->flags = CONDFLAG_NONE;
+ if (a3 != NULL) {
+ if ((err = cmd_rewritecond_parseflagfield(cmd->pool, newcond,
+ a3)) != NULL) {
+ return err;
+ }
+ }
+
+ /* arg2: the pattern
+       try to compile the regexp to test if it is ok */
+ cp = a2;
+ if (cp[0] == '!') {
+ newcond->flags |= CONDFLAG_NOTMATCH;
+ cp++;
+ }
+
+ /* now be careful: Under the POSIX regex library
+       we can compile the pattern for case-insensitive matching;
+       under the old V8 library we have to do it ourselves via a hack */
+ if (newcond->flags & CONDFLAG_NOCASE) {
+ rc = ((regexp = ap_pregcomp(cmd->pool, cp, REG_EXTENDED|REG_ICASE))
+ == NULL);
+ }
+ else {
+ rc = ((regexp = ap_pregcomp(cmd->pool, cp, REG_EXTENDED)) == NULL);
+ }
+ if (rc) {
+ return apr_pstrcat(cmd->pool,
+ "RewriteCond: cannot compile regular expression '",
+ a2, "'", NULL);
+ }
+
+ newcond->pattern = apr_pstrdup(cmd->pool, cp);
+ newcond->regexp = regexp;
+
+ return NULL;
+}
+
+static const char *cmd_rewritecond_parseflagfield(apr_pool_t *p,
+ rewritecond_entry *cfg,
+ char *str)
+{
+ char *cp;
+ char *cp1;
+ char *cp2;
+ char *cp3;
+ char *key;
+ char *val;
+ const char *err;
+
+ if (str[0] != '[' || str[strlen(str)-1] != ']') {
+ return "RewriteCond: bad flag delimiters";
+ }
+
+ cp = str+1;
+ str[strlen(str)-1] = ','; /* for simpler parsing */
+ for ( ; *cp != '\0'; ) {
+ /* skip whitespaces */
+ for ( ; (*cp == ' ' || *cp == '\t') && *cp != '\0'; cp++)
+ ;
+ if (*cp == '\0') {
+ break;
+ }
+ cp1 = cp;
+ if ((cp2 = strchr(cp, ',')) != NULL) {
+ cp = cp2+1;
+ for ( ; (*(cp2-1) == ' ' || *(cp2-1) == '\t'); cp2--)
+ ;
+ *cp2 = '\0';
+ if ((cp3 = strchr(cp1, '=')) != NULL) {
+ *cp3 = '\0';
+ key = cp1;
+ val = cp3+1;
+ }
+ else {
+ key = cp1;
+ val = "";
+ }
+ if ((err = cmd_rewritecond_setflag(p, cfg, key, val)) != NULL) {
+ return err;
+ }
+ }
+ else {
+ break;
+ }
+ }
+
+ return NULL;
+}
+
+static const char *cmd_rewritecond_setflag(apr_pool_t *p,
+ rewritecond_entry *cfg,
+ char *key, char *val)
+{
+ if ( strcasecmp(key, "nocase") == 0
+ || strcasecmp(key, "NC") == 0 ) {
+ cfg->flags |= CONDFLAG_NOCASE;
+ }
+ else if ( strcasecmp(key, "ornext") == 0
+ || strcasecmp(key, "OR") == 0 ) {
+ cfg->flags |= CONDFLAG_ORNEXT;
+ }
+ else {
+ return apr_pstrcat(p, "RewriteCond: unknown flag '", key, "'", NULL);
+ }
+ return NULL;
+}
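+/* For example (hypothetical condition), a directive such as
+ *
+ *   RewriteCond %{HTTP_USER_AGENT} ^Mozilla [NC,OR]
+ *
+ * is parsed into input "%{HTTP_USER_AGENT}" and pattern "^Mozilla"
+ * (compiled with REG_ICASE), with the flags CONDFLAG_NOCASE and
+ * CONDFLAG_ORNEXT set.
+ */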
+
+static const char *cmd_rewriterule(cmd_parms *cmd, void *in_dconf,
+ const char *in_str)
+{
+ rewrite_perdir_conf *dconf = in_dconf;
+ char *str = apr_pstrdup(cmd->pool, in_str);
+ rewrite_server_conf *sconf;
+ rewriterule_entry *newrule;
+ regex_t *regexp;
+ char *a1;
+ char *a2;
+ char *a3;
+ char *cp;
+ const char *err;
+ int mode;
+
+ sconf = ap_get_module_config(cmd->server->module_config, &rewrite_module);
+
+ /* make a new entry in the internal rewrite rule list */
+ if (cmd->path == NULL) { /* is server command */
+ newrule = apr_array_push(sconf->rewriterules);
+ }
+ else { /* is per-directory command */
+ newrule = apr_array_push(dconf->rewriterules);
+ }
+
+ /* parse the argument line ourself */
+ if (parseargline(str, &a1, &a2, &a3)) {
+ return apr_pstrcat(cmd->pool, "RewriteRule: bad argument line '", str,
+ "'", NULL);
+ }
+
+ /* arg3: optional flags field */
+ newrule->forced_mimetype = NULL;
+ newrule->forced_responsecode = HTTP_MOVED_TEMPORARILY;
+ newrule->flags = RULEFLAG_NONE;
+ newrule->env[0] = NULL;
+ newrule->cookie[0] = NULL;
+ newrule->skip = 0;
+ if (a3 != NULL) {
+ if ((err = cmd_rewriterule_parseflagfield(cmd->pool, newrule,
+ a3)) != NULL) {
+ return err;
+ }
+ }
+
+ /* arg1: the pattern
+     * try to compile the regexp to test if it is ok
+ */
+ cp = a1;
+ if (cp[0] == '!') {
+ newrule->flags |= RULEFLAG_NOTMATCH;
+ cp++;
+ }
+ mode = REG_EXTENDED;
+ if (newrule->flags & RULEFLAG_NOCASE) {
+ mode |= REG_ICASE;
+ }
+ if ((regexp = ap_pregcomp(cmd->pool, cp, mode)) == NULL) {
+ return apr_pstrcat(cmd->pool,
+ "RewriteRule: cannot compile regular expression '",
+ a1, "'", NULL);
+ }
+ newrule->pattern = apr_pstrdup(cmd->pool, cp);
+ newrule->regexp = regexp;
+
+ /* arg2: the output string
+ * replace the $<N> by \<n> which is needed by the currently
+ * used Regular Expression library
+ *
+ * TODO: Is this still required for PCRE? If not, does it *work* with PCRE?
+ */
+ newrule->output = apr_pstrdup(cmd->pool, a2);
+
+ /* now, if the server or per-dir config holds an
+     * array of RewriteCond entries, we take it over for this rule
+     * and reset the array
+ */
+ if (cmd->path == NULL) { /* is server command */
+ newrule->rewriteconds = sconf->rewriteconds;
+ sconf->rewriteconds = apr_array_make(cmd->pool, 2,
+ sizeof(rewritecond_entry));
+ }
+ else { /* is per-directory command */
+ newrule->rewriteconds = dconf->rewriteconds;
+ dconf->rewriteconds = apr_array_make(cmd->pool, 2,
+ sizeof(rewritecond_entry));
+ }
+
+ return NULL;
+}
+
+static const char *cmd_rewriterule_parseflagfield(apr_pool_t *p,
+ rewriterule_entry *cfg,
+ char *str)
+{
+ char *cp;
+ char *cp1;
+ char *cp2;
+ char *cp3;
+ char *key;
+ char *val;
+ const char *err;
+
+ if (str[0] != '[' || str[strlen(str)-1] != ']') {
+ return "RewriteRule: bad flag delimiters";
+ }
+
+ cp = str+1;
+ str[strlen(str)-1] = ','; /* for simpler parsing */
+ for ( ; *cp != '\0'; ) {
+ /* skip whitespaces */
+ for ( ; (*cp == ' ' || *cp == '\t') && *cp != '\0'; cp++)
+ ;
+ if (*cp == '\0') {
+ break;
+ }
+ cp1 = cp;
+ if ((cp2 = strchr(cp, ',')) != NULL) {
+ cp = cp2+1;
+ for ( ; (*(cp2-1) == ' ' || *(cp2-1) == '\t'); cp2--)
+ ;
+ *cp2 = '\0';
+ if ((cp3 = strchr(cp1, '=')) != NULL) {
+ *cp3 = '\0';
+ key = cp1;
+ val = cp3+1;
+ }
+ else {
+ key = cp1;
+ val = "";
+ }
+ if ((err = cmd_rewriterule_setflag(p, cfg, key, val)) != NULL) {
+ return err;
+ }
+ }
+ else {
+ break;
+ }
+ }
+
+ return NULL;
+}
+
+static const char *cmd_rewriterule_setflag(apr_pool_t *p,
+ rewriterule_entry *cfg,
+ char *key, char *val)
+{
+ int status = 0;
+ int i;
+
+ if ( strcasecmp(key, "redirect") == 0
+ || strcasecmp(key, "R") == 0 ) {
+ cfg->flags |= RULEFLAG_FORCEREDIRECT;
+ if (strlen(val) > 0) {
+ if (strcasecmp(val, "permanent") == 0) {
+ status = HTTP_MOVED_PERMANENTLY;
+ }
+ else if (strcasecmp(val, "temp") == 0) {
+ status = HTTP_MOVED_TEMPORARILY;
+ }
+ else if (strcasecmp(val, "seeother") == 0) {
+ status = HTTP_SEE_OTHER;
+ }
+ else if (apr_isdigit(*val)) {
+ status = atoi(val);
+ }
+ if (!ap_is_HTTP_REDIRECT(status)) {
+ return "RewriteRule: invalid HTTP response code "
+ "for flag 'R'";
+ }
+ cfg->forced_responsecode = status;
+ }
+ }
+ else if ( strcasecmp(key, "noescape") == 0
+ || strcasecmp(key, "NE") == 0 ) {
+ cfg->flags |= RULEFLAG_NOESCAPE;
+ }
+ else if ( strcasecmp(key, "last") == 0
+ || strcasecmp(key, "L") == 0 ) {
+ cfg->flags |= RULEFLAG_LASTRULE;
+ }
+ else if ( strcasecmp(key, "next") == 0
+ || strcasecmp(key, "N") == 0 ) {
+ cfg->flags |= RULEFLAG_NEWROUND;
+ }
+ else if ( strcasecmp(key, "chain") == 0
+ || strcasecmp(key, "C") == 0 ) {
+ cfg->flags |= RULEFLAG_CHAIN;
+ }
+ else if ( strcasecmp(key, "type") == 0
+ || strcasecmp(key, "T") == 0 ) {
+ cfg->forced_mimetype = apr_pstrdup(p, val);
+ ap_str_tolower(cfg->forced_mimetype);
+ }
+ else if ( strcasecmp(key, "env") == 0
+ || strcasecmp(key, "E") == 0 ) {
+ for (i = 0; (cfg->env[i] != NULL) && (i < MAX_ENV_FLAGS); i++)
+ ;
+ if (i < MAX_ENV_FLAGS) {
+ cfg->env[i] = apr_pstrdup(p, val);
+ cfg->env[i+1] = NULL;
+ }
+ else {
+ return "RewriteRule: too many environment flags 'E'";
+ }
+ }
+ else if ( strcasecmp(key, "cookie") == 0 || strcasecmp(key, "CO") == 0) {
+ for (i = 0; (cfg->cookie[i] != NULL) && (i < MAX_COOKIE_FLAGS); i++)
+ ;
+ if (i < MAX_COOKIE_FLAGS) {
+ cfg->cookie[i] = apr_pstrdup(p, val);
+ cfg->cookie[i+1] = NULL;
+ }
+ else {
+ return "RewriteRule: too many cookie flags 'CO'";
+ }
+ }
+ else if ( strcasecmp(key, "nosubreq") == 0
+ || strcasecmp(key, "NS") == 0 ) {
+ cfg->flags |= RULEFLAG_IGNOREONSUBREQ;
+ }
+ else if ( strcasecmp(key, "proxy") == 0
+ || strcasecmp(key, "P") == 0 ) {
+ cfg->flags |= RULEFLAG_PROXY;
+ }
+ else if ( strcasecmp(key, "passthrough") == 0
+ || strcasecmp(key, "PT") == 0 ) {
+ cfg->flags |= RULEFLAG_PASSTHROUGH;
+ }
+ else if ( strcasecmp(key, "skip") == 0
+ || strcasecmp(key, "S") == 0 ) {
+ cfg->skip = atoi(val);
+ }
+ else if ( strcasecmp(key, "forbidden") == 0
+ || strcasecmp(key, "F") == 0 ) {
+ cfg->flags |= RULEFLAG_FORBIDDEN;
+ }
+ else if ( strcasecmp(key, "gone") == 0
+ || strcasecmp(key, "G") == 0 ) {
+ cfg->flags |= RULEFLAG_GONE;
+ }
+ else if ( strcasecmp(key, "qsappend") == 0
+ || strcasecmp(key, "QSA") == 0 ) {
+ cfg->flags |= RULEFLAG_QSAPPEND;
+ }
+ else if ( strcasecmp(key, "nocase") == 0
+ || strcasecmp(key, "NC") == 0 ) {
+ cfg->flags |= RULEFLAG_NOCASE;
+ }
+ else {
+ return apr_pstrcat(p, "RewriteRule: unknown flag '", key, "'", NULL);
+ }
+ return NULL;
+}
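+/* For example (hypothetical rule), a directive such as
+ *
+ *   RewriteRule ^/old/(.*)$ /new/$1 [R=301,L]
+ *
+ * sets RULEFLAG_FORCEREDIRECT with forced_responsecode 301
+ * (HTTP_MOVED_PERMANENTLY) and RULEFLAG_LASTRULE.
+ */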
+
+
+/*
+**
+** Global Module Initialization
+**
+*/
+
+static int pre_config(apr_pool_t *pconf,
+ apr_pool_t *plog,
+ apr_pool_t *ptemp)
+{
+ APR_OPTIONAL_FN_TYPE(ap_register_rewrite_mapfunc) *map_pfn_register;
+
+ /* register int: rewritemap handlers */
+ mapfunc_hash = apr_hash_make(pconf);
+ map_pfn_register = APR_RETRIEVE_OPTIONAL_FN(ap_register_rewrite_mapfunc);
+ if (map_pfn_register) {
+ map_pfn_register("tolower", rewrite_mapfunc_tolower);
+ map_pfn_register("toupper", rewrite_mapfunc_toupper);
+ map_pfn_register("escape", rewrite_mapfunc_escape);
+ map_pfn_register("unescape", rewrite_mapfunc_unescape);
+ }
+ return OK;
+}
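+
+/*
+ * Usage sketch for the internal map functions registered above
+ * (the map name and rule are illustrative, not from a real
+ * configuration):
+ *
+ *   RewriteMap  lc  int:tolower
+ *   RewriteRule ^/catalog/(.*)$  /catalog/${lc:$1}  [PT]
+ */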
+
+static int post_config(apr_pool_t *p,
+ apr_pool_t *plog,
+ apr_pool_t *ptemp,
+ server_rec *s)
+{
+ apr_status_t rv;
+ void *data;
+ int first_time = 0;
+ const char *userdata_key = "rewrite_init_module";
+
+ apr_pool_userdata_get(&data, userdata_key, s->process->pool);
+ if (!data) {
+ first_time = 1;
+ apr_pool_userdata_set((const void *)1, userdata_key,
+ apr_pool_cleanup_null, s->process->pool);
+ }
+
+ /* check if proxy module is available */
+ proxy_available = (ap_find_linked_module("mod_proxy.c") != NULL);
+
+ /* create the rewriting lockfiles in the parent */
+ if ((rv = apr_global_mutex_create(&rewrite_log_lock, NULL,
+ APR_LOCK_DEFAULT, p)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s,
+ "mod_rewrite: could not create rewrite_log_lock");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+#ifdef MOD_REWRITE_SET_MUTEX_PERMS
+ rv = unixd_set_global_mutex_perms(rewrite_log_lock);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s,
+ "mod_rewrite: Could not set permissions on "
+ "rewrite_log_lock; check User and Group directives");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+#endif
+
+ rv = rewritelock_create(s, p);
+ if (rv != APR_SUCCESS) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ apr_pool_cleanup_register(p, (void *)s, rewritelock_remove,
+ apr_pool_cleanup_null);
+
+ /* step through the servers and
+ * - open each rewriting logfile
+ * - open the RewriteMap prg:xxx programs
+ */
+ for (; s; s = s->next) {
+ if (!open_rewritelog(s, p)) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ if (!first_time) {
+ if (run_rewritemap_programs(s, p) != APR_SUCCESS) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+ }
+
+ rewrite_ssl_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup);
+ rewrite_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https);
+
+ return OK;
+}
+
+
+/*
+**
+** Per-Child Module Initialization
+** [called after a child process is spawned]
+**
+*/
+
+static void init_child(apr_pool_t *p, server_rec *s)
+{
+ apr_status_t rv;
+
+ if (lockname != NULL && *(lockname) != '\0') {
+ rv = apr_global_mutex_child_init(&rewrite_mapr_lock_acquire,
+ lockname, p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s,
+ "mod_rewrite: could not init rewrite_mapr_lock_acquire"
+ " in child");
+ }
+ }
+
+ rv = apr_global_mutex_child_init(&rewrite_log_lock, NULL, p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s,
+ "mod_rewrite: could not init rewrite log lock in child");
+ }
+
+ /* create the lookup cache */
+ cachep = init_cache(p);
+}
+
+
+/*
+** +-------------------------------------------------------+
+** | |
+** | runtime hooks
+** | |
+** +-------------------------------------------------------+
+*/
+
+/*
+**
+** URI-to-filename hook
+**
+** [used for the rewriting engine triggered by
+** the per-server 'RewriteRule' directives]
+**
+*/
+
+static int hook_uri2file(request_rec *r)
+{
+ rewrite_server_conf *conf;
+ const char *saved_rulestatus;
+ const char *var;
+ const char *thisserver;
+ char *thisport;
+ const char *thisurl;
+ char buf[512];
+ char docroot[512];
+ const char *ccp;
+ unsigned int port;
+ int rulestatus;
+ int n;
+ int l;
+
+ /*
+ * retrieve the config structures
+ */
+ conf = ap_get_module_config(r->server->module_config, &rewrite_module);
+
+ /*
+ * only do something under runtime if the engine is really enabled,
+ * else return immediately!
+ */
+ if (conf->state == ENGINE_DISABLED) {
+ return DECLINED;
+ }
+
+ /*
+ * check for the ugly API case of a virtual host section in which no
+ * mod_rewrite directives exist. In this situation the API gives us no
+ * chance to set up our default per-server config, so we have to assume
+ * the default config on the fly. But because the default config has a
+ * disabled rewriting engine we are lucky and can just stop operating now.
+ */
+ if (conf->server != r->server) {
+ return DECLINED;
+ }
+
+ /*
+ * add the SCRIPT_URL variable to the env. this is a bit complicated
+ * due to the fact that apache uses subrequests and internal redirects
+ */
+
+ if (r->main == NULL) {
+ var = apr_pstrcat(r->pool, "REDIRECT_", ENVVAR_SCRIPT_URL, NULL);
+ var = apr_table_get(r->subprocess_env, var);
+ if (var == NULL) {
+ apr_table_setn(r->subprocess_env, ENVVAR_SCRIPT_URL, r->uri);
+ }
+ else {
+ apr_table_setn(r->subprocess_env, ENVVAR_SCRIPT_URL, var);
+ }
+ }
+ else {
+ var = apr_table_get(r->main->subprocess_env, ENVVAR_SCRIPT_URL);
+ apr_table_setn(r->subprocess_env, ENVVAR_SCRIPT_URL, var);
+ }
+
+ /*
+ * create the SCRIPT_URI variable for the env
+ */
+
+ /* add the canonical URI of this URL */
+ thisserver = ap_get_server_name(r);
+ port = ap_get_server_port(r);
+ if (ap_is_default_port(port, r)) {
+ thisport = "";
+ }
+ else {
+ apr_snprintf(buf, sizeof(buf), ":%u", port);
+ thisport = buf;
+ }
+ thisurl = apr_table_get(r->subprocess_env, ENVVAR_SCRIPT_URL);
+
+ /* set the variable */
+ var = apr_pstrcat(r->pool, ap_http_method(r), "://", thisserver, thisport,
+ thisurl, NULL);
+ apr_table_setn(r->subprocess_env, ENVVAR_SCRIPT_URI, var);
+
+ if (!(saved_rulestatus = apr_table_get(r->notes,"mod_rewrite_rewritten"))) {
+ /* if filename was not initially set,
+ * we start with the requested URI
+ */
+ if (r->filename == NULL) {
+ r->filename = apr_pstrdup(r->pool, r->uri);
+ rewritelog(r, 2, "init rewrite engine with requested uri %s",
+ r->filename);
+ }
+ else {
+ rewritelog(r, 2, "init rewrite engine with passed filename %s."
+ " Original uri = %s", r->filename, r->uri);
+ }
+
+ /*
+ * now apply the rules ...
+ */
+ rulestatus = apply_rewrite_list(r, conf->rewriterules, NULL);
+ apr_table_set(r->notes,"mod_rewrite_rewritten",
+ apr_psprintf(r->pool,"%d",rulestatus));
+ }
+ else {
+ rewritelog(r, 2,
+ "uri already rewritten. Status %s, Uri %s, r->filename %s",
+ saved_rulestatus, r->uri, r->filename);
+ rulestatus = atoi(saved_rulestatus);
+ }
+
+ if (rulestatus) {
+ unsigned skip;
+
+ if (strlen(r->filename) > 6 &&
+ strncmp(r->filename, "proxy:", 6) == 0) {
+ /* it should go on as an internal proxy request */
+
+ /* check if the proxy module is enabled, so
+ * we can actually use it!
+ */
+ if (!proxy_available) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "attempt to make remote request from mod_rewrite "
+ "without proxy enabled: %s", r->filename);
+ return HTTP_FORBIDDEN;
+ }
+
+ /* make sure the QUERY_STRING and
+ * PATH_INFO parts get incorporated
+ */
+ if (r->path_info != NULL) {
+ r->filename = apr_pstrcat(r->pool, r->filename,
+ r->path_info, NULL);
+ }
+ if (r->args != NULL &&
+ r->uri == r->unparsed_uri) {
+ /* see proxy_http:proxy_http_canon() */
+ r->filename = apr_pstrcat(r->pool, r->filename,
+ "?", r->args, NULL);
+ }
+
+ /* now make sure the request gets handled by the proxy handler */
+ if (PROXYREQ_NONE == r->proxyreq) {
+ r->proxyreq = PROXYREQ_REVERSE;
+ }
+ r->handler = "proxy-server";
+
+ rewritelog(r, 1, "go-ahead with proxy request %s [OK]",
+ r->filename);
+ return OK;
+ }
+ else if ((skip = is_absolute_uri(r->filename)) > 0) {
+ /* it was finally rewritten to a remote URL */
+
+ if (rulestatus != ACTION_NOESCAPE) {
+ rewritelog(r, 1, "escaping %s for redirect", r->filename);
+ r->filename = escape_absolute_uri(r->pool, r->filename, skip);
+ }
+
+ /* append the QUERY_STRING part */
+ if (r->args) {
+ r->filename = apr_pstrcat(r->pool, r->filename, "?",
+ (rulestatus == ACTION_NOESCAPE)
+ ? r->args
+ : ap_escape_uri(r->pool, r->args),
+ NULL);
+ }
+
+ /* determine HTTP redirect response code */
+ if (ap_is_HTTP_REDIRECT(r->status)) {
+ n = r->status;
+ r->status = HTTP_OK; /* make Apache kernel happy */
+ }
+ else {
+ n = HTTP_MOVED_TEMPORARILY;
+ }
+
+ /* now do the redirection */
+ apr_table_setn(r->headers_out, "Location", r->filename);
+ rewritelog(r, 1, "redirect to %s [REDIRECT/%d]", r->filename, n);
+ return n;
+ }
+ else if (strlen(r->filename) > 10 &&
+ strncmp(r->filename, "forbidden:", 10) == 0) {
+ /* This URL is forced to be forbidden for the requester */
+ return HTTP_FORBIDDEN;
+ }
+ else if (strlen(r->filename) > 5 &&
+ strncmp(r->filename, "gone:", 5) == 0) {
+ /* This URL is forced to be gone */
+ return HTTP_GONE;
+ }
+ else if (strlen(r->filename) > 12 &&
+ strncmp(r->filename, "passthrough:", 12) == 0) {
+ /*
+ * Hack because of underpowered API: pass the current
+ * rewritten filename through to the other URL-to-filename handlers
+ * just as if it were the requested URL. This is to enable
+ * post-processing by mod_alias, etc., which always act on
+ * r->uri! The difference here is: we do not try to
+ * add the document root.
+ */
+ r->uri = apr_pstrdup(r->pool, r->filename+12);
+ return DECLINED;
+ }
+ else {
+ /* it was finally rewritten to a local path */
+
+ /* expand "/~user" prefix */
+#if APR_HAS_USER
+ r->filename = expand_tildepaths(r, r->filename);
+#endif
+ rewritelog(r, 2, "local path result: %s", r->filename);
+
+ /* the filename must be either an absolute local path or an
+ * absolute local URL.
+ */
+ if ( *r->filename != '/'
+ && !ap_os_is_path_absolute(r->pool, r->filename)) {
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* if there is no valid prefix, we have
+ * to emulate the translator from the core and
+ * prefix the filename with document_root
+ *
+ * NOTICE:
+ * We cannot leave out the prefix_stat because
+ * - if we always prefixed with document_root
+ * then no absolute path could be created, e.g. via
+ * emulating a ScriptAlias directive, etc.
+ * - if we never prefixed with document_root
+ * then the files under document_root would have to
+ * be referenced directly, document_root would never
+ * be used and would be a dummy parameter -
+ * this is also bad
+ *
+ * BUT:
+ * Under real Unix systems this is no problem,
+ * because we only do stat() on the first directory
+ * and this gets cached by the kernel for a long time!
+ */
+ n = prefix_stat(r->filename, r->pool);
+ if (n == 0) {
+ if ((ccp = ap_document_root(r)) != NULL) {
+ l = apr_cpystrn(docroot, ccp, sizeof(docroot)) - docroot;
+
+ /* make sure it does NOT have a trailing slash */
+ if (docroot[l-1] == '/') {
+ docroot[l-1] = '\0';
+ }
+ if (r->server->path
+ && !strncmp(r->filename, r->server->path,
+ r->server->pathlen)) {
+ r->filename = apr_pstrcat(r->pool, docroot,
+ (r->filename +
+ r->server->pathlen), NULL);
+ }
+ else {
+ r->filename = apr_pstrcat(r->pool, docroot,
+ r->filename, NULL);
+ }
+ rewritelog(r, 2, "prefixed with document_root to %s",
+ r->filename);
+ }
+ }
+
+ rewritelog(r, 1, "go-ahead with %s [OK]", r->filename);
+ return OK;
+ }
+ }
+ else {
+ rewritelog(r, 1, "pass through %s", r->filename);
+ return DECLINED;
+ }
+}
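+
+/*
+ * Illustrative per-server rule exercising the proxy branch above
+ * (host names and paths are placeholders):
+ *
+ *   RewriteRule  ^/img/(.*)$  http://backend.example.com/images/$1  [P]
+ *
+ * The substitution is turned into a "proxy:..." filename by
+ * apply_rewrite_rule() and then handed to mod_proxy here.
+ */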
+
+
+/*
+**
+** MIME-type hook
+**
+** [used to support the forced-MIME-type feature]
+**
+*/
+
+static int hook_mimetype(request_rec *r)
+{
+ const char *t;
+
+ /* now check if we have to force a MIME-type */
+ t = apr_table_get(r->notes, REWRITE_FORCED_MIMETYPE_NOTEVAR);
+ if (t == NULL) {
+ return DECLINED;
+ }
+ else {
+ rewritelog(r, 1, "force filename %s to have MIME-type '%s'",
+ r->filename, t);
+ ap_set_content_type(r, t);
+ return OK;
+ }
+}
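+
+/*
+ * Illustrative use of the forced-MIME-type feature handled above
+ * (per-server context; the type is just an example):
+ *
+ *   RewriteRule  \.phps$  -  [T=application/x-httpd-php-source]
+ */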
+
+
+/*
+**
+** Fixup hook
+**
+** [used for the rewriting engine triggered by
+** the per-directory 'RewriteRule' directives]
+**
+*/
+
+static int hook_fixup(request_rec *r)
+{
+ rewrite_perdir_conf *dconf;
+ char *cp;
+ char *cp2;
+ const char *ccp;
+ char *prefix;
+ apr_size_t l;
+ int rulestatus;
+ int n;
+ char *ofilename;
+ int is_proxyreq;
+
+ dconf = (rewrite_perdir_conf *)ap_get_module_config(r->per_dir_config,
+ &rewrite_module);
+
+ /* if there is no per-dir config we return immediately */
+ if (dconf == NULL) {
+ return DECLINED;
+ }
+
+ /* we shouldn't do anything in subrequests */
+ if (r->main != NULL) {
+ return DECLINED;
+ }
+
+ /* if there is no real per-dir config for us
+ (i.e. no RewriteRule directives!), we also return immediately */
+ if (dconf->directory == NULL) {
+ return DECLINED;
+ }
+
+ /*
+ * Proxy request?
+ */
+ is_proxyreq = ( r->proxyreq && r->filename
+ && !strncmp(r->filename, "proxy:", 6));
+
+ /*
+ * The .htaccess file is consulted before really entering the directory,
+ * e.g. URL http://localhost/foo while .htaccess is located in the foo
+ * directory. Ignore such attempts, since they may lead to undefined
+ * behaviour.
+ */
+ if (is_proxyreq) {
+ l = strlen(dconf->directory) - 1;
+ if (r->filename && strlen(r->filename) == l &&
+ (dconf->directory)[l] == '/' &&
+ !strncmp(r->filename, dconf->directory, l)) {
+ return DECLINED;
+ }
+ }
+
+ /*
+ * only do something under runtime if the engine is really enabled,
+ * for this directory, else return immediately!
+ */
+ if (dconf->state == ENGINE_DISABLED) {
+ return DECLINED;
+ }
+
+ /*
+ * Do the Options check after the engine check, so
+ * the user is able to explicitly turn RewriteEngine Off.
+ */
+ if (!(ap_allow_options(r) & (OPT_SYM_LINKS | OPT_SYM_OWNER))) {
+ /* FollowSymLinks is mandatory! */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Options FollowSymLinks or SymLinksIfOwnerMatch is off "
+ "which implies that RewriteRule directive is forbidden: "
+ "%s", r->filename);
+ return HTTP_FORBIDDEN;
+ }
+
+ /*
+ * remember the current filename before rewriting for a later check
+ * to prevent deadlooping caused by internal redirects
+ * on a final URL/filename which can be equal to the initial one.
+ * also, we'll restore the original r->filename if we decline this
+ * request
+ */
+ ofilename = r->filename;
+
+ if (r->filename == NULL) {
+ r->filename = apr_pstrdup(r->pool, r->uri);
+ rewritelog(r, 2, "init rewrite engine with requested uri %s",
+ r->filename);
+ }
+
+ /*
+ * now apply the rules ...
+ */
+ rulestatus = apply_rewrite_list(r, dconf->rewriterules, dconf->directory);
+ if (rulestatus) {
+ unsigned skip;
+
+ if (strlen(r->filename) > 6 &&
+ strncmp(r->filename, "proxy:", 6) == 0) {
+ /* it should go on as an internal proxy request */
+
+ /* make sure the QUERY_STRING and
+ * PATH_INFO parts get incorporated
+ * (r->path_info was already appended by the
+ * rewriting engine because of the per-dir context!)
+ */
+ if (r->args != NULL) {
+ r->filename = apr_pstrcat(r->pool, r->filename,
+ "?", r->args, NULL);
+ }
+
+ /* now make sure the request gets handled by the proxy handler */
+ if (PROXYREQ_NONE == r->proxyreq) {
+ r->proxyreq = PROXYREQ_REVERSE;
+ }
+ r->handler = "proxy-server";
+
+ rewritelog(r, 1, "[per-dir %s] go-ahead with proxy request "
+ "%s [OK]", dconf->directory, r->filename);
+ return OK;
+ }
+ else if ((skip = is_absolute_uri(r->filename)) > 0) {
+ /* it was finally rewritten to a remote URL */
+
+ /* because we are in a per-dir context
+ * first try to replace the directory with its base-URL
+ * if there is a base-URL available
+ */
+ if (dconf->baseurl != NULL) {
+ /* skip 'scheme://' */
+ cp = r->filename + skip;
+
+ if ((cp = ap_strchr(cp, '/')) != NULL && *(++cp)) {
+ rewritelog(r, 2,
+ "[per-dir %s] trying to replace "
+ "prefix %s with %s",
+ dconf->directory, dconf->directory,
+ dconf->baseurl);
+
+ /* I think that hack needs an explanation,
+ * so here it is:
+ * mod_rewrite was written for unix systems, where
+ * absolute file-system paths start with a slash.
+ * URL-paths _also_ start with slashes, so they
+ * can be easily compared with system paths.
+ *
+ * the following assumes that the actual url-path
+ * may be prefixed by the current directory path and
+ * tries to replace the system path with the RewriteBase
+ * URL.
+ * That assumption is true if we use a RewriteRule like
+ *
+ * RewriteRule ^foo bar [R]
+ *
+ * (see the apply_rewrite_rule function)
+ * However, on systems that don't have / as the system
+ * root this will never match, so we skip the / after the
+ * hostname and compare/substitute only the stuff after it.
+ *
+ * (note that cp was already advanced to the right value)
+ */
+ cp2 = subst_prefix_path(r, cp, (*dconf->directory == '/')
+ ? dconf->directory + 1
+ : dconf->directory,
+ dconf->baseurl + 1);
+ if (strcmp(cp2, cp) != 0) {
+ *cp = '\0';
+ r->filename = apr_pstrcat(r->pool, r->filename,
+ cp2, NULL);
+ }
+ }
+ }
+
+ /* now prepare the redirect... */
+ if (rulestatus != ACTION_NOESCAPE) {
+ rewritelog(r, 1, "[per-dir %s] escaping %s for redirect",
+ dconf->directory, r->filename);
+ r->filename = escape_absolute_uri(r->pool, r->filename, skip);
+ }
+
+ /* append the QUERY_STRING part */
+ if (r->args) {
+ r->filename = apr_pstrcat(r->pool, r->filename, "?",
+ (rulestatus == ACTION_NOESCAPE)
+ ? r->args
+ : ap_escape_uri(r->pool, r->args),
+ NULL);
+ }
+
+ /* determine HTTP redirect response code */
+ if (ap_is_HTTP_REDIRECT(r->status)) {
+ n = r->status;
+ r->status = HTTP_OK; /* make Apache kernel happy */
+ }
+ else {
+ n = HTTP_MOVED_TEMPORARILY;
+ }
+
+ /* now do the redirection */
+ apr_table_setn(r->headers_out, "Location", r->filename);
+ rewritelog(r, 1, "[per-dir %s] redirect to %s [REDIRECT/%d]",
+ dconf->directory, r->filename, n);
+ return n;
+ }
+ else if (strlen(r->filename) > 10 &&
+ strncmp(r->filename, "forbidden:", 10) == 0) {
+ /* This URL is forced to be forbidden for the requester */
+ return HTTP_FORBIDDEN;
+ }
+ else if (strlen(r->filename) > 5 &&
+ strncmp(r->filename, "gone:", 5) == 0) {
+ /* This URL is forced to be gone */
+ return HTTP_GONE;
+ }
+ else {
+ /* it was finally rewritten to a local path */
+
+ /* if someone used the PASSTHROUGH flag in per-dir
+ * context we just ignore it. It is only useful
+ * in per-server context
+ */
+ if (strlen(r->filename) > 12 &&
+ strncmp(r->filename, "passthrough:", 12) == 0) {
+ r->filename = apr_pstrdup(r->pool, r->filename+12);
+ }
+
+ /* the filename must be either an absolute local path or an
+ * absolute local URL.
+ */
+ if ( *r->filename != '/'
+ && !ap_os_is_path_absolute(r->pool, r->filename)) {
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* Check for deadlooping:
+ * At this point we KNOW that at least one rewriting
+ * rule was applied, but when the resulting URL is
+ * the same as the initial URL, we are not allowed to
+ * use the following internal redirection stuff because
+ * this would lead to a deadloop.
+ */
+ if (ofilename != NULL && strcmp(r->filename, ofilename) == 0) {
+ rewritelog(r, 1, "[per-dir %s] initial URL equals rewritten "
+ "URL: %s [IGNORING REWRITE]",
+ dconf->directory, r->filename);
+ return OK;
+ }
+
+ /* if there is a valid base-URL then substitute
+ * the per-dir prefix with this base-URL if the
+ * current filename still is inside this per-dir
+ * context. If not then treat the result as a
+ * plain URL
+ */
+ if (dconf->baseurl != NULL) {
+ rewritelog(r, 2,
+ "[per-dir %s] trying to replace prefix %s with %s",
+ dconf->directory, dconf->directory, dconf->baseurl);
+ r->filename = subst_prefix_path(r, r->filename,
+ dconf->directory,
+ dconf->baseurl);
+ }
+ else {
+ /* if no explicit base-URL exists we assume
+ * that the directory prefix is also a valid URL
+ * for this webserver and only try to remove the
+ * document_root if it is a prefix
+ */
+ if ((ccp = ap_document_root(r)) != NULL) {
+ prefix = apr_pstrdup(r->pool, ccp);
+ /* make sure it does NOT have a trailing slash */
+ l = strlen(prefix);
+ if (prefix[l-1] == '/') {
+ prefix[l-1] = '\0';
+ l--;
+ }
+ if (strncmp(r->filename, prefix, l) == 0) {
+ rewritelog(r, 2,
+ "[per-dir %s] strip document_root "
+ "prefix: %s -> %s",
+ dconf->directory, r->filename,
+ r->filename+l);
+ r->filename = apr_pstrdup(r->pool, r->filename+l);
+ }
+ }
+ }
+
+ /* now initiate the internal redirect */
+ rewritelog(r, 1, "[per-dir %s] internal redirect with %s "
+ "[INTERNAL REDIRECT]", dconf->directory, r->filename);
+ r->filename = apr_pstrcat(r->pool, "redirect:", r->filename, NULL);
+ r->handler = "redirect-handler";
+ return OK;
+ }
+ }
+ else {
+ rewritelog(r, 1, "[per-dir %s] pass through %s",
+ dconf->directory, r->filename);
+ r->filename = ofilename;
+ return DECLINED;
+ }
+}
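+
+/*
+ * Illustrative per-directory (.htaccess) setup for the code path
+ * above; the directory and rule are placeholders:
+ *
+ *   RewriteEngine On
+ *   RewriteBase   /myapp
+ *   RewriteRule   ^index\.html$  welcome.html
+ *
+ * The per-dir prefix is stripped before matching and the RewriteBase
+ * URL is substituted back in when building the final URL.
+ */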
+
+
+/*
+**
+** Content-Handlers
+**
+** [used for redirect support]
+**
+*/
+
+static int handler_redirect(request_rec *r)
+{
+ if (strcmp(r->handler, "redirect-handler")) {
+ return DECLINED;
+ }
+
+ /* just make sure that we are really meant! */
+ if (strncmp(r->filename, "redirect:", 9) != 0) {
+ return DECLINED;
+ }
+
+ if (is_redirect_limit_exceeded(r)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "mod_rewrite: maximum number of internal redirects "
+ "reached. Assuming configuration error. Use "
+ "'RewriteOptions MaxRedirects' to increase the limit "
+ "if neccessary.");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /* now do the internal redirect */
+ ap_internal_redirect(apr_pstrcat(r->pool, r->filename+9,
+ r->args ? "?" : NULL, r->args, NULL), r);
+
+ /* and return gracefully */
+ return OK;
+}
+
+/*
+ * check whether redirect limit is reached
+ */
+static int is_redirect_limit_exceeded(request_rec *r)
+{
+ request_rec *top = r;
+ rewrite_request_conf *reqc;
+ rewrite_perdir_conf *dconf;
+
+ /* we store it in the top request */
+ while (top->main) {
+ top = top->main;
+ }
+ while (top->prev) {
+ top = top->prev;
+ }
+
+ /* fetch our config */
+ reqc = (rewrite_request_conf *) ap_get_module_config(top->request_config,
+ &rewrite_module);
+
+ /* no config there? create one. */
+ if (!reqc) {
+ rewrite_server_conf *sconf;
+
+ reqc = apr_palloc(top->pool, sizeof(rewrite_request_conf));
+ sconf = ap_get_module_config(r->server->module_config, &rewrite_module);
+
+ reqc->redirects = 0;
+ reqc->redirect_limit = sconf->redirect_limit
+ ? sconf->redirect_limit
+ : REWRITE_REDIRECT_LIMIT;
+
+ /* associate it with this request */
+ ap_set_module_config(top->request_config, &rewrite_module, reqc);
+ }
+
+ /* allow the limit to be changed during redirects. */
+ dconf = (rewrite_perdir_conf *)ap_get_module_config(r->per_dir_config,
+ &rewrite_module);
+
+ /* 0 == unset; take server conf ... */
+ if (dconf->redirect_limit) {
+ reqc->redirect_limit = dconf->redirect_limit;
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "mod_rewrite's internal redirect status: %d/%d.",
+ reqc->redirects, reqc->redirect_limit);
+
+ /* and now give the caller a hint */
+ return (reqc->redirects++ >= reqc->redirect_limit);
+}
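+
+/*
+ * The limit checked above can be raised per server or per directory,
+ * e.g. (the value is only an example):
+ *
+ *   RewriteOptions MaxRedirects=13
+ */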
+
+
+/*
+** +-------------------------------------------------------+
+** | |
+** | the rewriting engine
+** | |
+** +-------------------------------------------------------+
+*/
+
+/*
+ * Apply a complete rule set,
+ * i.e. a list of rewrite rules
+ */
+static int apply_rewrite_list(request_rec *r, apr_array_header_t *rewriterules,
+ char *perdir)
+{
+ rewriterule_entry *entries;
+ rewriterule_entry *p;
+ int i;
+ int changed;
+ int rc;
+ int s;
+
+ /*
+ * Iterate over all existing rules
+ */
+ entries = (rewriterule_entry *)rewriterules->elts;
+ changed = 0;
+ loop:
+ for (i = 0; i < rewriterules->nelts; i++) {
+ p = &entries[i];
+
+ /*
+ * Ignore this rule on subrequests if we are explicitly
+ * asked to do so or this is a proxy-throughput or a
+ * forced redirect rule.
+ */
+ if (r->main != NULL &&
+ (p->flags & RULEFLAG_IGNOREONSUBREQ ||
+ p->flags & RULEFLAG_PROXY ||
+ p->flags & RULEFLAG_FORCEREDIRECT )) {
+ continue;
+ }
+
+ /*
+ * Apply the current rule.
+ */
+ rc = apply_rewrite_rule(r, p, perdir);
+ if (rc) {
+ /*
+ * Indicate a change if this was not a match-only rule.
+ */
+ if (rc != 2) {
+ changed = ((p->flags & RULEFLAG_NOESCAPE)
+ ? ACTION_NOESCAPE : ACTION_NORMAL);
+ }
+
+ /*
+ * Pass-Through Feature (`RewriteRule .. .. [PT]'):
+ * Because the Apache 1.x API is very limited we
+ * need this hack to pass the rewritten URL to other
+ * modules like mod_alias, mod_userdir, etc.
+ */
+ if (p->flags & RULEFLAG_PASSTHROUGH) {
+ rewritelog(r, 2, "forcing '%s' to get passed through "
+ "to next API URI-to-filename handler", r->filename);
+ r->filename = apr_pstrcat(r->pool, "passthrough:",
+ r->filename, NULL);
+ changed = ACTION_NORMAL;
+ break;
+ }
+
+ /*
+ * Rule has the "forbidden" flag set which means that
+ * we stop processing and indicate this to the caller.
+ */
+ if (p->flags & RULEFLAG_FORBIDDEN) {
+ rewritelog(r, 2, "forcing '%s' to be forbidden", r->filename);
+ r->filename = apr_pstrcat(r->pool, "forbidden:",
+ r->filename, NULL);
+ changed = ACTION_NORMAL;
+ break;
+ }
+
+ /*
+ * Rule has the "gone" flag set which means that
+ * we stop processing and indicate this to the caller.
+ */
+ if (p->flags & RULEFLAG_GONE) {
+ rewritelog(r, 2, "forcing '%s' to be gone", r->filename);
+ r->filename = apr_pstrcat(r->pool, "gone:", r->filename, NULL);
+ changed = ACTION_NORMAL;
+ break;
+ }
+
+ /*
+ * Stop processing also on proxy pass-through and
+ * last-rule and new-round flags.
+ */
+ if (p->flags & RULEFLAG_PROXY) {
+ break;
+ }
+ if (p->flags & RULEFLAG_LASTRULE) {
+ break;
+ }
+
+ /*
+ * On "new-round" flag we just start from the top of
+ * the rewriting ruleset again.
+ */
+ if (p->flags & RULEFLAG_NEWROUND) {
+ goto loop;
+ }
+
+ /*
+ * If we are forced to skip N next rules, do it now.
+ */
+ if (p->skip > 0) {
+ s = p->skip;
+ while ( i < rewriterules->nelts
+ && s > 0) {
+ i++;
+ p = &entries[i];
+ s--;
+ }
+ }
+ }
+ else {
+ /*
+ * If current rule is chained with next rule(s),
+ * skip all this next rule(s)
+ */
+ while ( i < rewriterules->nelts
+ && p->flags & RULEFLAG_CHAIN) {
+ i++;
+ p = &entries[i];
+ }
+ }
+ }
+ return changed;
+}
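+
+/*
+ * Illustrative ruleset sketch for the control flags handled above
+ * (all patterns and targets are placeholders):
+ *
+ *   RewriteRule  ^/a  /b  [C]    # if this rule fails, the chained rule is skipped
+ *   RewriteRule  ^/b  /c
+ *   RewriteRule  ^/x  /y  [S=1]  # on a match, skip the next rule
+ *   RewriteRule  ^/y  /z
+ *   RewriteRule  ^/v  /w  [L]    # stop processing the ruleset
+ */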
+
+/*
+ * Apply a single(!) rewrite rule
+ */
+static int apply_rewrite_rule(request_rec *r, rewriterule_entry *p,
+ char *perdir)
+{
+ char *uri;
+ char *output;
+ const char *vary;
+ char newuri[MAX_STRING_LEN];
+ regex_t *regexp;
+ regmatch_t regmatch[AP_MAX_REG_MATCH];
+ backrefinfo *briRR = NULL;
+ backrefinfo *briRC = NULL;
+ int failed;
+ apr_array_header_t *rewriteconds;
+ rewritecond_entry *conds;
+ rewritecond_entry *c;
+ int i;
+ int rc;
+ int is_proxyreq = 0;
+
+ /*
+ * Initialisation
+ */
+ uri = r->filename;
+ regexp = p->regexp;
+ output = p->output;
+
+ /*
+ * Add the (perhaps split-away) PATH_INFO postfix to the URL to
+ * make sure we really match against the complete URL.
+ */
+ if (perdir != NULL && r->path_info != NULL && r->path_info[0] != '\0') {
+ rewritelog(r, 3, "[per-dir %s] add path info postfix: %s -> %s%s",
+ perdir, uri, uri, r->path_info);
+ uri = apr_pstrcat(r->pool, uri, r->path_info, NULL);
+ }
+
+ /*
+ * In per-directory context (.htaccess) strip the location
+ * prefix from the URL to make sure patterns apply only to
+ * the local part. Additionally indicate this special
+ * treatment in the logfile.
+ */
+ if (perdir) {
+ /*
+ * Proxy request?
+ */
+ is_proxyreq = ( r->proxyreq && r->filename
+ && !strncmp(r->filename, "proxy:", 6));
+
+ if ( !is_proxyreq && strlen(uri) >= strlen(perdir)
+ && strncmp(uri, perdir, strlen(perdir)) == 0) {
+ rewritelog(r, 3, "[per-dir %s] strip per-dir prefix: %s -> %s",
+ perdir, uri, uri+strlen(perdir));
+ uri = uri+strlen(perdir);
+ }
+ }
+
+ /*
+ * Try to match the URI against the RewriteRule pattern
+ * and exit immediately if it doesn't apply.
+ */
+ if (perdir == NULL) {
+ rewritelog(r, 3, "applying pattern '%s' to uri '%s'",
+ p->pattern, uri);
+ }
+ else {
+ rewritelog(r, 3, "[per-dir %s] applying pattern '%s' to uri '%s'",
+ perdir, p->pattern, uri);
+ }
+ rc = (ap_regexec(regexp, uri, AP_MAX_REG_MATCH, regmatch, 0) == 0);
+ if (! (( rc && !(p->flags & RULEFLAG_NOTMATCH)) ||
+ (!rc && (p->flags & RULEFLAG_NOTMATCH)) ) ) {
+ return 0;
+ }
+
+ /*
+ * Else create the RewriteRule `regsubinfo' structure which
+ * holds the substitution information.
+ */
+ briRR = (backrefinfo *)apr_palloc(r->pool, sizeof(backrefinfo));
+ if (!rc && (p->flags & RULEFLAG_NOTMATCH)) {
+ /* empty info on negative patterns */
+ briRR->source = "";
+ briRR->nsub = 0;
+ }
+ else {
+ briRR->source = apr_pstrdup(r->pool, uri);
+ briRR->nsub = regexp->re_nsub;
+ memcpy((void *)(briRR->regmatch), (void *)(regmatch),
+ sizeof(regmatch));
+ }
+
+ /*
+ * Initially create the RewriteCond backrefinfo with an
+ * empty backrefinfo, i.e. no subst parts
+ * (this one is adjusted inside apply_rewrite_cond() later!!)
+ */
+ briRC = (backrefinfo *)apr_pcalloc(r->pool, sizeof(backrefinfo));
+ briRC->source = "";
+ briRC->nsub = 0;
+
+ /*
+ * Ok, we already know the pattern has matched, but we now
+ * additionally have to check all existing preconditions
+ * (RewriteCond), which also have to be true. We do this at
+ * this very late stage to avoid unnecessary checks which
+ * would slow down the rewriting engine!!
+ */
+ rewriteconds = p->rewriteconds;
+ conds = (rewritecond_entry *)rewriteconds->elts;
+ failed = 0;
+ for (i = 0; i < rewriteconds->nelts; i++) {
+ c = &conds[i];
+ rc = apply_rewrite_cond(r, c, perdir, briRR, briRC);
+ if (c->flags & CONDFLAG_ORNEXT) {
+ /*
+ * The "OR" case
+ */
+ if (rc == 0) {
+ /* One condition is false, but another can be
+ * still true, so we have to continue...
+ */
+ apr_table_unset(r->notes, VARY_KEY_THIS);
+ continue;
+ }
+ else {
+ /* One true condition is enough in "or" case, so
+ * skip the other conditions which are "ornext"
+ * chained
+ */
+ while ( i < rewriteconds->nelts
+ && c->flags & CONDFLAG_ORNEXT) {
+ i++;
+ c = &conds[i];
+ }
+ continue;
+ }
+ }
+ else {
+ /*
+ * The "AND" case, i.e. no "or" flag,
+ * so a single failure means total failure.
+ */
+ if (rc == 0) {
+ failed = 1;
+ break;
+ }
+ }
+ vary = apr_table_get(r->notes, VARY_KEY_THIS);
+ if (vary != NULL) {
+ apr_table_merge(r->notes, VARY_KEY, vary);
+ apr_table_unset(r->notes, VARY_KEY_THIS);
+ }
+ }
+ /* if any condition fails the complete rule fails */
+ if (failed) {
+ apr_table_unset(r->notes, VARY_KEY);
+ apr_table_unset(r->notes, VARY_KEY_THIS);
+ return 0;
+ }
+
+ /*
+ * Regardless of what we do next, we've found a match. Check to see
+ * if any of the request header fields were involved, and add them
+ * to the Vary field of the response.
+ */
+ if ((vary = apr_table_get(r->notes, VARY_KEY)) != NULL) {
+ apr_table_merge(r->headers_out, "Vary", vary);
+ apr_table_unset(r->notes, VARY_KEY);
+ }
+
+ /*
+ * If this is a pure matching rule (`RewriteRule <pat> -')
+ * we stop processing and return immediately. The only things
+ * we must not forget are the environment variables and
+ * cookies:
+ * (`RewriteRule <pat> - [E=...,CO=...]')
+ */
+ if (strcmp(output, "-") == 0) {
+ do_expand_env(r, p->env, briRR, briRC);
+ do_expand_cookie(r, p->cookie, briRR, briRC);
+ if (p->forced_mimetype != NULL) {
+ if (perdir == NULL) {
+ /* In the per-server context we can force the MIME-type
+ * the correct way by notifying our MIME-type hook handler
+ * to do the job when the MIME-type API stage is reached.
+ */
+ rewritelog(r, 2, "remember %s to have MIME-type '%s'",
+ r->filename, p->forced_mimetype);
+ apr_table_setn(r->notes, REWRITE_FORCED_MIMETYPE_NOTEVAR,
+ p->forced_mimetype);
+ }
+ else {
+ /* In per-directory context we operate in the Fixup API hook,
+ * which is after the MIME-type hook, so our MIME-type handler
+ * has no chance to set r->content_type. And because we are
+ * in the situation where no substitution takes place, no
+ * sub-request will happen (which could remove the
+ * restriction). As a workaround we do it ourselves now,
+ * immediately, although this is not strictly API-conforming.
+ * But it's the only chance we have...
+ */
+ rewritelog(r, 1, "[per-dir %s] force %s to have MIME-type "
+ "'%s'", perdir, r->filename, p->forced_mimetype);
+ ap_set_content_type(r, p->forced_mimetype);
+ }
+ }
+ return 2;
+ }
+
+ /*
+ * Ok, now we finally know all patterns have matched and
+ * that there is something to replace, so we create the
+ * substitution URL string in `newuri'.
+ */
+ do_expand(r, output, newuri, sizeof(newuri), briRR, briRC);
+ if (perdir == NULL) {
+ rewritelog(r, 2, "rewrite %s -> %s", uri, newuri);
+ }
+ else {
+ rewritelog(r, 2, "[per-dir %s] rewrite %s -> %s", perdir, uri, newuri);
+ }
+
+ /*
+ * Additionally do expansion for the environment variable
+ * strings (`RewriteRule .. .. [E=<string>]').
+ */
+ do_expand_env(r, p->env, briRR, briRC);
+
+ /*
+ * Also set cookies for any cookie strings
+ * (`RewriteRule .. .. [CO=<string>]').
+ */
+ do_expand_cookie(r, p->cookie, briRR, briRC);
+
+ /*
+ * Now replace API's knowledge of the current URI:
+ * Replace r->filename with the new URI string and split out
+ * an on-the-fly generated QUERY_STRING part into r->args
+ */
+ r->filename = apr_pstrdup(r->pool, newuri);
+ splitout_queryargs(r, p->flags & RULEFLAG_QSAPPEND);
+
+ /*
+ * Add the previously stripped per-directory location
+ * prefix if the new URI is not a new one for this
+ * location, i.e. if it is neither an absolute (!) URL path
+ * nor a fully qualified URL with a scheme.
+ */
+ if ( perdir && !is_proxyreq && *r->filename != '/'
+ && !is_absolute_uri(r->filename)) {
+ rewritelog(r, 3, "[per-dir %s] add per-dir prefix: %s -> %s%s",
+ perdir, r->filename, perdir, r->filename);
+ r->filename = apr_pstrcat(r->pool, perdir, r->filename, NULL);
+ }
+
+ /*
+ * If this rule is forced for proxy throughput
+ * (`RewriteRule ... ... [P]') then emulate mod_proxy's
+ * URL-to-filename handler to be sure mod_proxy is triggered
+ * for this URL later in the Apache API. But make sure it is
+ * a fully-qualified URL. (If it is not, it is qualified
+ * with our own server name and port.)
+ */
+ if (p->flags & RULEFLAG_PROXY) {
+ fully_qualify_uri(r);
+ if (perdir == NULL) {
+ rewritelog(r, 2, "forcing proxy-throughput with %s", r->filename);
+ }
+ else {
+ rewritelog(r, 2, "[per-dir %s] forcing proxy-throughput with %s",
+ perdir, r->filename);
+ }
+ r->filename = apr_pstrcat(r->pool, "proxy:", r->filename, NULL);
+ return 1;
+ }
+
+ /*
+ * If this rule is explicitly forced for HTTP redirection
+ * (`RewriteRule .. .. [R]') then force an external HTTP
+ * redirect. But make sure it is a fully-qualified URL. (If
+ * it is not, it is qualified with our own server name and port.)
+ */
+ if (p->flags & RULEFLAG_FORCEREDIRECT) {
+ fully_qualify_uri(r);
+ if (perdir == NULL) {
+ rewritelog(r, 2,
+ "explicitly forcing redirect with %s", r->filename);
+ }
+ else {
+ rewritelog(r, 2,
+ "[per-dir %s] explicitly forcing redirect with %s",
+ perdir, r->filename);
+ }
+ r->status = p->forced_responsecode;
+ return 1;
+ }
+
+ /*
+ * Special Rewriting Feature: Self-Reduction
+ * We reduce the URL by stripping a possible
+ * http[s]://<ourhost>[:<port>] prefix, i.e. a prefix which
+ * corresponds to our own server. This is to simplify rewrite
+ * maps and to avoid recursion, etc. If this prefix is no
+ * coincidence and an external redirect is really wanted, the
+ * user has to use [R] explicitly (see above).
+ */
+ reduce_uri(r);
+
+ /*
+ * If this rule is still implicitly forced for HTTP
+ * redirection (`RewriteRule .. <scheme>://...') then
+ * directly force an external HTTP redirect.
+ */
+ if (is_absolute_uri(r->filename)) {
+ if (perdir == NULL) {
+ rewritelog(r, 2,
+ "implicitly forcing redirect (rc=%d) with %s",
+ p->forced_responsecode, r->filename);
+ }
+ else {
+ rewritelog(r, 2, "[per-dir %s] implicitly forcing redirect "
+ "(rc=%d) with %s", perdir, p->forced_responsecode,
+ r->filename);
+ }
+ r->status = p->forced_responsecode;
+ return 1;
+ }
+
+ /*
+ * Finally we have to remember whether a MIME-type should be
+ * forced for this URL (`RewriteRule .. .. [T=<type>]').
+ * Later in the API processing phase this is enforced by our
+ * MIME API-hook function. This time it's no problem even in
+ * the per-directory context (where the MIME-type hook was
+ * already processed) because a sub-request happens ;-)
+ */
+ if (p->forced_mimetype != NULL) {
+ apr_table_setn(r->notes, REWRITE_FORCED_MIMETYPE_NOTEVAR,
+ p->forced_mimetype);
+ if (perdir == NULL) {
+ rewritelog(r, 2, "remember %s to have MIME-type '%s'",
+ r->filename, p->forced_mimetype);
+ }
+ else {
+ rewritelog(r, 2,
+ "[per-dir %s] remember %s to have MIME-type '%s'",
+ perdir, r->filename, p->forced_mimetype);
+ }
+ }
+
+ /*
+ * Puuhhhhhhhh... WHAT COMPLICATED STUFF ;_)
+ * But now we're done for this particular rule.
+ */
+ return 1;
+}
+
+static int apply_rewrite_cond(request_rec *r, rewritecond_entry *p,
+ char *perdir, backrefinfo *briRR,
+ backrefinfo *briRC)
+{
+ char input[MAX_STRING_LEN];
+ apr_finfo_t sb;
+ request_rec *rsub;
+ regmatch_t regmatch[AP_MAX_REG_MATCH];
+ int rc;
+
+ /*
+ * Construct the string we match against
+ */
+
+ do_expand(r, p->input, input, sizeof(input), briRR, briRC);
+
+ /*
+ * Apply the patterns
+ */
+
+ rc = 0;
+ if (strcmp(p->pattern, "-f") == 0) {
+ if (apr_stat(&sb, input, APR_FINFO_MIN, r->pool) == APR_SUCCESS) {
+ if (sb.filetype == APR_REG) {
+ rc = 1;
+ }
+ }
+ }
+ else if (strcmp(p->pattern, "-s") == 0) {
+ if (apr_stat(&sb, input, APR_FINFO_MIN, r->pool) == APR_SUCCESS) {
+ if ((sb.filetype == APR_REG) && sb.size > 0) {
+ rc = 1;
+ }
+ }
+ }
+ else if (strcmp(p->pattern, "-l") == 0) {
+#if !defined(OS2)
+ if (apr_lstat(&sb, input, APR_FINFO_MIN, r->pool) == APR_SUCCESS) {
+ if (sb.filetype == APR_LNK) {
+ rc = 1;
+ }
+ }
+#endif
+ }
+ else if (strcmp(p->pattern, "-d") == 0) {
+ if (apr_stat(&sb, input, APR_FINFO_MIN, r->pool) == APR_SUCCESS) {
+ if (sb.filetype == APR_DIR) {
+ rc = 1;
+ }
+ }
+ }
+ else if (strcmp(p->pattern, "-U") == 0) {
+ /* avoid infinite subrequest recursion */
+ if (strlen(input) > 0 && subreq_ok(r)) {
+
+ /* run a URI-based subrequest */
+ rsub = ap_sub_req_lookup_uri(input, r, NULL);
+
+ /* URI exists for any result up to 3xx, redirects allowed */
+ if (rsub->status < 400)
+ rc = 1;
+
+ /* log it */
+ rewritelog(r, 5, "RewriteCond URI (-U) check: "
+ "path=%s -> status=%d", input, rsub->status);
+
+ /* cleanup by destroying the subrequest */
+ ap_destroy_sub_req(rsub);
+ }
+ }
+ else if (strcmp(p->pattern, "-F") == 0) {
+ /* avoid infinite subrequest recursion */
+ if (strlen(input) > 0 && subreq_ok(r)) {
+
+ /* process a file-based subrequest:
+ * this differs from -U in that no path translation is done.
+ */
+ rsub = ap_sub_req_lookup_file(input, r, NULL);
+
+ /* file exists for any result up to 2xx, no redirects */
+ if (rsub->status < 300 &&
+ /* double-check that file exists since default result is 200 */
+ apr_stat(&sb, rsub->filename, APR_FINFO_MIN,
+ r->pool) == APR_SUCCESS) {
+ rc = 1;
+ }
+
+ /* log it */
+ rewritelog(r, 5, "RewriteCond file (-F) check: path=%s "
+ "-> file=%s status=%d", input, rsub->filename,
+ rsub->status);
+
+ /* cleanup by destroying the subrequest */
+ ap_destroy_sub_req(rsub);
+ }
+ }
+ else if (strlen(p->pattern) > 1 && *(p->pattern) == '>') {
+ rc = (compare_lexicography(input, p->pattern+1) == 1 ? 1 : 0);
+ }
+ else if (strlen(p->pattern) > 1 && *(p->pattern) == '<') {
+ rc = (compare_lexicography(input, p->pattern+1) == -1 ? 1 : 0);
+ }
+ else if (strlen(p->pattern) > 1 && *(p->pattern) == '=') {
+ if (strcmp(p->pattern+1, "\"\"") == 0) {
+ rc = (*input == '\0');
+ }
+ else {
+ rc = (strcmp(input, p->pattern+1) == 0 ? 1 : 0);
+ }
+ }
+ else {
+ /* it is really a regexp pattern, so apply it */
+ rc = (ap_regexec(p->regexp, input, AP_MAX_REG_MATCH, regmatch,0) == 0);
+
+ /* if it isn't a negated pattern and really matched
+ we update the passed-through regex subst info structure */
+ if (rc && !(p->flags & CONDFLAG_NOTMATCH)) {
+ briRC->source = apr_pstrdup(r->pool, input);
+ briRC->nsub = p->regexp->re_nsub;
+ memcpy((void *)(briRC->regmatch), (void *)(regmatch),
+ sizeof(regmatch));
+ }
+ }
+
+ /* if this is a non-matching regexp, just negate the result */
+ if (p->flags & CONDFLAG_NOTMATCH) {
+ rc = !rc;
+ }
+
+ rewritelog(r, 4, "RewriteCond: input='%s' pattern='%s%s' => %s",
+ input, (p->flags & CONDFLAG_NOTMATCH ? "!" : ""),
+ p->pattern, rc ? "matched" : "not-matched");
+
+ /* and just return the result */
+ return rc;
+}
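+
+/*
+ * Illustrative condition setup for the checks above (the paths and
+ * front-controller name are placeholders): serve existing files and
+ * directories directly, rewrite everything else:
+ *
+ *   RewriteCond %{REQUEST_FILENAME} !-f
+ *   RewriteCond %{REQUEST_FILENAME} !-d
+ *   RewriteRule .  /index.php  [PT]
+ */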
+
+
+/*
+** +-------------------------------------------------------+
+** | |
+** | URL transformation functions
+** | |
+** +-------------------------------------------------------+
+*/
+
+
+/*
+**
+** perform all the expansions on the input string
+** leaving the result in the supplied buffer
+**
+*/
+
+static void do_expand(request_rec *r, char *input, char *buffer, int nbuf,
+ backrefinfo *briRR, backrefinfo *briRC)
+{
+ char *inp, *outp;
+ apr_size_t span, space;
+
+ /*
+ * for security reasons this expansion must be performed in a
+ * single pass, otherwise an attacker can arrange for the result
+ * of an earlier expansion to include expansion specifiers that
+ * are interpreted by a later expansion, producing results that
+ * were not intended by the administrator.
+ */
+
+ inp = input;
+ outp = buffer;
+ space = nbuf - 1; /* room for '\0' */
+
+ for (;;) {
+ span = strcspn(inp, "\\$%");
+ if (span > space) {
+ span = space;
+ }
+ memcpy(outp, inp, span);
+ inp += span;
+ outp += span;
+ space -= span;
+ if (space == 0 || *inp == '\0') {
+ break;
+ }
+ /* now we have a '\', '$', or '%' */
+ if (inp[0] == '\\') {
+ if (inp[1] != '\0') {
+ inp++;
+ goto skip;
+ }
+ }
+ else if (inp[1] == '{') {
+ char *endp;
+ endp = find_closing_bracket(inp+2, '{', '}');
+ if (endp == NULL) {
+ goto skip;
+ }
+ /*
+ * These lookups may be recursive in a very convoluted
+ * fashion -- see the LA-U and LA-F variable expansion
+ * prefixes -- so we copy lookup keys to a separate buffer
+ * rather than adding zero bytes in order to use them in
+ * place.
+ */
+ if (inp[0] == '$') {
+ /* ${...} map lookup expansion */
+ /*
+ * To make rewrite maps useful the lookup key and
+ * default values must be expanded, so we make
+ * recursive calls to do the work. For security
+ * reasons we must never expand a string that includes
+ * verbatim data from the network. The recursion here
+ * isn't a problem because the result of expansion is
+ * only passed to lookup_map() so it cannot be
+ * re-expanded, only re-looked-up. Another way of
+ * looking at it is that the recursion is entirely
+ * driven by the syntax of the nested curly brackets.
+ */
+ char *map, *key, *dflt, *result;
+ char xkey[MAX_STRING_LEN];
+ char xdflt[MAX_STRING_LEN];
+ key = find_char_in_brackets(inp+2, ':', '{', '}');
+ if (key == NULL) {
+ goto skip;
+ }
+ map = apr_pstrndup(r->pool, inp+2, key-inp-2);
+ dflt = find_char_in_brackets(key+1, '|', '{', '}');
+ if (dflt == NULL) {
+ key = apr_pstrndup(r->pool, key+1, endp-key-1);
+ dflt = "";
+ }
+ else {
+ key = apr_pstrndup(r->pool, key+1, dflt-key-1);
+ dflt = apr_pstrndup(r->pool, dflt+1, endp-dflt-1);
+ }
+ do_expand(r, key, xkey, sizeof(xkey), briRR, briRC);
+ result = lookup_map(r, map, xkey);
+ if (result) {
+ span = apr_cpystrn(outp, result, space) - outp;
+ }
+ else {
+ do_expand(r, dflt, xdflt, sizeof(xdflt), briRR, briRC);
+ span = apr_cpystrn(outp, xdflt, space) - outp;
+ }
+ }
+ else if (inp[0] == '%') {
+ /* %{...} variable lookup expansion */
+ char *var;
+ var = apr_pstrndup(r->pool, inp+2, endp-inp-2);
+ span = apr_cpystrn(outp, lookup_variable(r, var), space) - outp;
+ }
+ else {
+ span = 0;
+ }
+ inp = endp+1;
+ outp += span;
+ space -= span;
+ continue;
+ }
+ else if (apr_isdigit(inp[1])) {
+ int n = inp[1] - '0';
+ backrefinfo *bri = NULL;
+ if (inp[0] == '$') {
+ /* $N RewriteRule regexp backref expansion */
+ bri = briRR;
+ }
+ else if (inp[0] == '%') {
+ /* %N RewriteCond regexp backref expansion */
+ bri = briRC;
+ }
+ /* see ap_pregsub() in src/main/util.c */
+ if (bri && n < AP_MAX_REG_MATCH
+ && bri->regmatch[n].rm_eo > bri->regmatch[n].rm_so) {
+ span = bri->regmatch[n].rm_eo - bri->regmatch[n].rm_so;
+ if (span > space) {
+ span = space;
+ }
+ memcpy(outp, bri->source + bri->regmatch[n].rm_so, span);
+ outp += span;
+ space -= span;
+ }
+ inp += 2;
+ continue;
+ }
+ skip:
+ *outp++ = *inp++;
+ space--;
+ }
+ *outp++ = '\0';
+}
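+
+/*
+ * Expansion forms handled above, with illustrative inputs
+ * (map and variable names are placeholders):
+ *
+ *   $1 .. $9              RewriteRule backreferences (briRR)
+ *   %1 .. %9              RewriteCond backreferences (briRC)
+ *   %{HTTP_HOST}          server/request variable lookup
+ *   ${examplemap:$1|def}  map lookup with expanded key and default
+ */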
+
+
+/*
+**
+** perform all the expansions on the environment variables
+**
+*/
+
+static void do_expand_env(request_rec *r, char *env[],
+ backrefinfo *briRR, backrefinfo *briRC)
+{
+ int i;
+ char buf[MAX_STRING_LEN];
+
+ for (i = 0; env[i] != NULL; i++) {
+ do_expand(r, env[i], buf, sizeof(buf), briRR, briRC);
+ add_env_variable(r, buf);
+ }
+}
+
+static void do_expand_cookie( request_rec *r, char *cookie[],
+ backrefinfo *briRR, backrefinfo *briRC)
+{
+ int i;
+ char buf[MAX_STRING_LEN];
+
+ for (i = 0; cookie[i] != NULL; i++) {
+ do_expand(r, cookie[i], buf, sizeof(buf), briRR, briRC);
+ add_cookie(r, buf);
+ }
+}
+
+
+/*
+**
+** split out a QUERY_STRING part from
+** the current URI string
+**
+*/
+
+static void splitout_queryargs(request_rec *r, int qsappend)
+{
+ char *q;
+ char *olduri;
+
+ /* don't touch, unless it's an http or mailto URL.
+ * See RFC 1738 and RFC 2368.
+ */
+ if ( is_absolute_uri(r->filename)
+ && strncasecmp(r->filename, "http", 4)
+ && strncasecmp(r->filename, "mailto", 6)) {
+ r->args = NULL; /* forget the query that's still flying around */
+ return;
+ }
+
+ q = strchr(r->filename, '?');
+ if (q != NULL) {
+ olduri = apr_pstrdup(r->pool, r->filename);
+ *q++ = '\0';
+ if (qsappend) {
+ r->args = apr_pstrcat(r->pool, q, "&", r->args, NULL);
+ }
+ else {
+ r->args = apr_pstrdup(r->pool, q);
+ }
+ if (strlen(r->args) == 0) {
+ r->args = NULL;
+ rewritelog(r, 3, "split uri=%s -> uri=%s, args=<none>", olduri,
+ r->filename);
+ }
+ else {
+ if (r->args[strlen(r->args)-1] == '&') {
+ r->args[strlen(r->args)-1] = '\0';
+ }
+ rewritelog(r, 3, "split uri=%s -> uri=%s, args=%s", olduri,
+ r->filename, r->args);
+ }
+ }
+
+ return;
+}
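+
+/*
+ * Illustrative effect of the QSA handling above (URLs are
+ * placeholders):
+ *
+ *   RewriteRule ^/download/(.*)$ /fetch.cgi?file=$1        # new query replaces r->args
+ *   RewriteRule ^/download/(.*)$ /fetch.cgi?file=$1 [QSA]  # new query is prepended to r->args
+ */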
+
+
+/*
+**
+** strip 'http[s]://ourhost/' from URI
+**
+*/
+
+static void reduce_uri(request_rec *r)
+{
+ char *cp;
+ unsigned short port;
+ char *portp;
+ char *hostp;
+ char *url;
+ char c;
+ char host[LONG_STRING_LEN];
+ char buf[MAX_STRING_LEN];
+ char *olduri;
+ apr_size_t l;
+
+ cp = (char *)ap_http_method(r);
+ l = strlen(cp);
+ if ( strlen(r->filename) > l+3
+ && strncasecmp(r->filename, cp, l) == 0
+ && r->filename[l] == ':'
+ && r->filename[l+1] == '/'
+ && r->filename[l+2] == '/' ) {
+ /* there was really a rewrite to a remote path */
+
+ olduri = apr_pstrdup(r->pool, r->filename); /* save for logging */
+
+ /* cut the hostname and port out of the URI */
+ apr_cpystrn(buf, r->filename+(l+3), sizeof(buf));
+ hostp = buf;
+ for (cp = hostp; *cp != '\0' && *cp != '/' && *cp != ':'; cp++)
+ ;
+ if (*cp == ':') {
+ /* set host */
+ *cp++ = '\0';
+ apr_cpystrn(host, hostp, sizeof(host));
+ /* set port */
+ portp = cp;
+ for (; *cp != '\0' && *cp != '/'; cp++)
+ ;
+ c = *cp;
+ *cp = '\0';
+ port = atoi(portp);
+ *cp = c;
+ /* set remaining url */
+ url = cp;
+ }
+ else if (*cp == '/') {
+ /* set host */
+ *cp = '\0';
+ apr_cpystrn(host, hostp, sizeof(host));
+ *cp = '/';
+ /* set port */
+ port = ap_default_port(r);
+ /* set remaining url */
+ url = cp;
+ }
+ else {
+ /* set host */
+ apr_cpystrn(host, hostp, sizeof(host));
+ /* set port */
+ port = ap_default_port(r);
+ /* set remaining url */
+ url = "/";
+ }
+
+ /* now check whether we could reduce it to a local path... */
+ if (ap_matches_request_vhost(r, host, port)) {
+ /* this is our host, so only the URL remains */
+ r->filename = apr_pstrdup(r->pool, url);
+ rewritelog(r, 3, "reduce %s -> %s", olduri, r->filename);
+ }
+ }
+ return;
+}
+
+
+/*
+**
+** add 'http[s]://ourhost[:ourport]/' to URI
+** if URI is still not fully qualified
+**
+*/
+
+static void fully_qualify_uri(request_rec *r)
+{
+ char buf[32];
+ const char *thisserver;
+ char *thisport;
+ int port;
+
+ if (!is_absolute_uri(r->filename)) {
+
+ thisserver = ap_get_server_name(r);
+ port = ap_get_server_port(r);
+ if (ap_is_default_port(port,r)) {
+ thisport = "";
+ }
+ else {
+ apr_snprintf(buf, sizeof(buf), ":%u", port);
+ thisport = buf;
+ }
+
+ if (r->filename[0] == '/') {
+ r->filename = apr_psprintf(r->pool, "%s://%s%s%s",
+ ap_http_method(r), thisserver,
+ thisport, r->filename);
+ }
+ else {
+ r->filename = apr_psprintf(r->pool, "%s://%s%s/%s",
+ ap_http_method(r), thisserver,
+ thisport, r->filename);
+ }
+ }
+ return;
+}
+
+
+/* return number of chars of the scheme (incl. '://')
+ * if the URI is absolute (includes a scheme etc.)
+ * otherwise 0.
+ *
+ * NOTE: If you add new schemes here, please have a
+ * look at escape_absolute_uri and splitout_queryargs.
+ * Not every scheme takes query strings and some schemes
+ * may be handled in a special way.
+ *
+ * XXX: we should consider a scheme registry, perhaps with
+ * appropriate escape callbacks to allow other modules
+ * to extend mod_rewrite at runtime.
+ */
+static unsigned is_absolute_uri(char *uri)
+{
+ /* fast exit */
+ if (*uri == '/' || strlen(uri) <= 5) {
+ return 0;
+ }
+
+ switch (*uri++) {
+ case 'f':
+ case 'F':
+ if (!strncasecmp(uri, "tp://", 5)) { /* ftp:// */
+ return 6;
+ }
+ break;
+
+ case 'g':
+ case 'G':
+ if (!strncasecmp(uri, "opher://", 8)) { /* gopher:// */
+ return 9;
+ }
+ break;
+
+ case 'h':
+ case 'H':
+ if (!strncasecmp(uri, "ttp://", 6)) { /* http:// */
+ return 7;
+ }
+ else if (!strncasecmp(uri, "ttps://", 7)) { /* https:// */
+ return 8;
+ }
+ break;
+
+ case 'l':
+ case 'L':
+ if (!strncasecmp(uri, "dap://", 6)) { /* ldap:// */
+ return 7;
+ }
+ break;
+
+ case 'm':
+ case 'M':
+ if (!strncasecmp(uri, "ailto:", 6)) { /* mailto: */
+ return 7;
+ }
+ break;
+
+ case 'n':
+ case 'N':
+ if (!strncasecmp(uri, "ews:", 4)) { /* news: */
+ return 5;
+ }
+ else if (!strncasecmp(uri, "ntp://", 6)) { /* nntp:// */
+ return 7;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+
+/* escape an absolute uri, which may or may not be path oriented.
+ * So let's handle the two cases differently.
+ */
+static char *escape_absolute_uri(apr_pool_t *p, char *uri, unsigned scheme)
+{
+ char *cp;
+
+ /* be safe.
+ * A NULL return indicates to the caller that something's wrong
+ */
+ if (!scheme || strlen(uri) < scheme) {
+ return NULL;
+ }
+
+ cp = uri + scheme;
+
+ /* scheme with authority part? */
+ if (cp[-1] == '/') {
+ /* skip host part */
+ while (*cp && *cp != '/') {
+ ++cp;
+ }
+
+ /* nothing after the hostpart. ready! */
+ if (!*cp || !*++cp) {
+ return apr_pstrdup(p, uri);
+ }
+
+ /* remember the hostname stuff */
+ scheme = cp - uri;
+
+ /* special thing for ldap.
+ * The parts are separated by question marks. From RFC 2255:
+ * ldapurl = scheme "://" [hostport] ["/"
+ * [dn ["?" [attributes] ["?" [scope]
+ * ["?" [filter] ["?" extensions]]]]]]
+ */
+ if (!strncasecmp(uri, "ldap", 4)) {
+ char *token[5];
+ int c = 0;
+
+ token[0] = cp = apr_pstrdup(p, cp);
+ while (*cp && c < 4) {
+ if (*cp == '?') {
+ token[++c] = cp + 1;
+ *cp = '\0';
+ }
+ ++cp;
+ }
+
+ return apr_pstrcat(p, apr_pstrndup(p, uri, scheme),
+ ap_escape_uri(p, token[0]),
+ (c >= 1) ? "?" : NULL,
+ (c >= 1) ? ap_escape_uri(p, token[1]) : NULL,
+ (c >= 2) ? "?" : NULL,
+ (c >= 2) ? ap_escape_uri(p, token[2]) : NULL,
+ (c >= 3) ? "?" : NULL,
+ (c >= 3) ? ap_escape_uri(p, token[3]) : NULL,
+ (c >= 4) ? "?" : NULL,
+ (c >= 4) ? ap_escape_uri(p, token[4]) : NULL,
+ NULL);
+ }
+ }
+
+ /* Nothing special here. Apply normal escaping. */
+ return apr_pstrcat(p, apr_pstrndup(p, uri, scheme),
+ ap_escape_uri(p, cp), NULL);
+}
+
+
+/*
+**
+** Expand tilde-paths (/~user) through Unix /etc/passwd
+** database information (or other OS-specific database)
+**
+*/
+#if APR_HAS_USER
+static char *expand_tildepaths(request_rec *r, char *uri)
+{
+ char user[LONG_STRING_LEN];
+ char *newuri;
+ int i, j;
+ char *homedir;
+
+ newuri = uri;
+ if (uri != NULL && strlen(uri) > 2 && uri[0] == '/' && uri[1] == '~') {
+ /* cut out the username */
+ for (j = 0, i = 2; j < sizeof(user)-1
+ && uri[i] != '\0'
+ && uri[i] != '/' ; ) {
+ user[j++] = uri[i++];
+ }
+ user[j] = '\0';
+
+ /* look up the username in the system's passwd file */
+ if (apr_get_home_directory(&homedir, user, r->pool) == APR_SUCCESS) {
+ /* ok, user was found, so expand the ~user string */
+ if (uri[i] != '\0') {
+ /* ~user/anything... has to be expanded */
+ if (homedir[strlen(homedir)-1] == '/') {
+ homedir[strlen(homedir)-1] = '\0';
+ }
+ newuri = apr_pstrcat(r->pool, homedir, uri+i, NULL);
+ }
+ else {
+ /* only ~user has to be expanded */
+ newuri = homedir;
+ }
+ }
+ }
+ return newuri;
+}
+#endif /* if APR_HAS_USER */
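+
+/*
+ * Illustrative expansion performed above (the user name and home
+ * directory are placeholders):
+ *
+ *   /~alice/pics/cat.png  ->  /home/alice/pics/cat.png
+ */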
+
+
+
+/*
+** +-------------------------------------------------------+
+** | |
+** | DBM hashfile support
+** | |
+** +-------------------------------------------------------+
+*/
+
+
+static char *lookup_map(request_rec *r, char *name, char *key)
+{
+ rewrite_server_conf *conf;
+ apr_array_header_t *rewritemaps;
+ rewritemap_entry *entries;
+ rewritemap_entry *s;
+ char *value;
+ apr_finfo_t st;
+ apr_status_t rv;
+ int i;
+
+ /* get map configuration */
+ conf = ap_get_module_config(r->server->module_config, &rewrite_module);
+ rewritemaps = conf->rewritemaps;
+
+ entries = (rewritemap_entry *)rewritemaps->elts;
+ for (i = 0; i < rewritemaps->nelts; i++) {
+ s = &entries[i];
+ if (strcmp(s->name, name) == 0) {
+ if (s->type == MAPTYPE_TXT) {
+ if ((rv = apr_stat(&st, s->checkfile,
+ APR_FINFO_MIN, r->pool)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "mod_rewrite: can't access text RewriteMap "
+ "file %s", s->checkfile);
+ rewritelog(r, 1, "can't open RewriteMap file, "
+ "see error log");
+ return NULL;
+ }
+ value = get_cache_string(cachep, s->cachename, CACHEMODE_TS,
+ st.mtime, key);
+ if (value == NULL) {
+ rewritelog(r, 6, "cache lookup FAILED, forcing new "
+ "map lookup");
+ if ((value =
+ lookup_map_txtfile(r, s->datafile, key)) != NULL) {
+ rewritelog(r, 5, "map lookup OK: map=%s key=%s[txt] "
+ "-> val=%s", s->name, key, value);
+ set_cache_string(cachep, s->cachename, CACHEMODE_TS,
+ st.mtime, key, value);
+ return value;
+ }
+ else {
+ rewritelog(r, 5, "map lookup FAILED: map=%s[txt] "
+ "key=%s", s->name, key);
+ set_cache_string(cachep, s->cachename, CACHEMODE_TS,
+ st.mtime, key, "");
+ return NULL;
+ }
+ }
+ else {
+ rewritelog(r, 5, "cache lookup OK: map=%s[txt] key=%s "
+ "-> val=%s", s->name, key, value);
+ return value[0] != '\0' ? value : NULL;
+ }
+ }
+ else if (s->type == MAPTYPE_DBM) {
+ if ((rv = apr_stat(&st, s->checkfile,
+ APR_FINFO_MIN, r->pool)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "mod_rewrite: can't access DBM RewriteMap "
+ "file %s", s->checkfile);
+ rewritelog(r, 1, "can't open DBM RewriteMap file, "
+ "see error log");
+ return NULL;
+ }
+ value = get_cache_string(cachep, s->cachename, CACHEMODE_TS,
+ st.mtime, key);
+ if (value == NULL) {
+ rewritelog(r, 6,
+ "cache lookup FAILED, forcing new map lookup");
+ if ((value =
+ lookup_map_dbmfile(r, s->datafile, s->dbmtype, key)) != NULL) {
+ rewritelog(r, 5, "map lookup OK: map=%s[dbm] key=%s "
+ "-> val=%s", s->name, key, value);
+ set_cache_string(cachep, s->cachename, CACHEMODE_TS,
+ st.mtime, key, value);
+ return value;
+ }
+ else {
+ rewritelog(r, 5, "map lookup FAILED: map=%s[dbm] "
+ "key=%s", s->name, key);
+ set_cache_string(cachep, s->cachename, CACHEMODE_TS,
+ st.mtime, key, "");
+ return NULL;
+ }
+ }
+ else {
+ rewritelog(r, 5, "cache lookup OK: map=%s[dbm] key=%s "
+ "-> val=%s", s->name, key, value);
+ return value[0] != '\0' ? value : NULL;
+ }
+ }
+ else if (s->type == MAPTYPE_PRG) {
+ if ((value =
+ lookup_map_program(r, s->fpin, s->fpout, key)) != NULL) {
+ rewritelog(r, 5, "map lookup OK: map=%s key=%s -> val=%s",
+ s->name, key, value);
+ return value;
+ }
+ else {
+ rewritelog(r, 5, "map lookup FAILED: map=%s key=%s",
+ s->name, key);
+ }
+ }
+ else if (s->type == MAPTYPE_INT) {
+ if ((value = s->func(r, key)) != NULL) {
+ rewritelog(r, 5, "map lookup OK: map=%s key=%s -> val=%s",
+ s->name, key, value);
+ return value;
+ }
+ else {
+ rewritelog(r, 5, "map lookup FAILED: map=%s key=%s",
+ s->name, key);
+ }
+ }
+ else if (s->type == MAPTYPE_RND) {
+ if ((rv = apr_stat(&st, s->checkfile,
+ APR_FINFO_MIN, r->pool)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "mod_rewrite: can't access text RewriteMap "
+ "file %s", s->checkfile);
+ rewritelog(r, 1, "can't open RewriteMap file, "
+ "see error log");
+ return NULL;
+ }
+ value = get_cache_string(cachep, s->cachename, CACHEMODE_TS,
+ st.mtime, key);
+ if (value == NULL) {
+ rewritelog(r, 6, "cache lookup FAILED, forcing new "
+ "map lookup");
+ if ((value =
+ lookup_map_txtfile(r, s->datafile, key)) != NULL) {
+ rewritelog(r, 5, "map lookup OK: map=%s key=%s[txt] "
+ "-> val=%s", s->name, key, value);
+ set_cache_string(cachep, s->cachename, CACHEMODE_TS,
+ st.mtime, key, value);
+ }
+ else {
+ rewritelog(r, 5, "map lookup FAILED: map=%s[txt] "
+ "key=%s", s->name, key);
+ set_cache_string(cachep, s->cachename, CACHEMODE_TS,
+ st.mtime, key, "");
+ return NULL;
+ }
+ }
+ else {
+ rewritelog(r, 5, "cache lookup OK: map=%s[txt] key=%s "
+ "-> val=%s", s->name, key, value);
+ }
+ if (value[0] != '\0') {
+ value = select_random_value_part(r, value);
+ rewritelog(r, 5, "randomly chose the subvalue `%s'",
+ value);
+ }
+ else {
+ value = NULL;
+ }
+ return value;
+ }
+ }
+ }
+ return NULL;
+}
+
+static char *lookup_map_txtfile(request_rec *r, const char *file, char *key)
+{
+ apr_file_t *fp = NULL;
+ apr_status_t rc;
+ char line[1024];
+ char *value = NULL;
+ char *cpT;
+ apr_size_t skip;
+ char *curkey;
+ char *curval;
+
+ rc = apr_file_open(&fp, file, APR_READ|APR_BUFFERED, APR_OS_DEFAULT, r->pool);
+ if (rc != APR_SUCCESS) {
+ return NULL;
+ }
+
+ while (apr_file_gets(line, sizeof(line), fp) == APR_SUCCESS) {
+ if (line[0] == '#') {
+ continue; /* ignore comments */
+ }
+ cpT = line;
+ curkey = cpT;
+ skip = strcspn(cpT," \t\r\n");
+ if (skip == 0) {
+ continue; /* ignore lines that start with a space, tab, CR, or LF */
+ }
+ cpT += skip;
+ *cpT = '\0';
+ if (strcmp(curkey, key) != 0) {
+ continue; /* key does not match... */
+ }
+
+ /* found a matching key; now extract and return the value */
+ ++cpT;
+ skip = strspn(cpT, " \t\r\n");
+ cpT += skip;
+ curval = cpT;
+ skip = strcspn(cpT, " \t\r\n");
+ if (skip == 0) {
+ continue; /* no value... */
+ }
+ cpT += skip;
+ *cpT = '\0';
+ value = apr_pstrdup(r->pool, curval);
+ break;
+ }
+ apr_file_close(fp);
+ return value;
+}
+
+static char *lookup_map_dbmfile(request_rec *r, const char *file,
+ const char *dbmtype, char *key)
+{
+ apr_dbm_t *dbmfp = NULL;
+ apr_datum_t dbmkey;
+ apr_datum_t dbmval;
+ char *value;
+
+ if (apr_dbm_open_ex(&dbmfp, dbmtype, file, APR_DBM_READONLY, APR_OS_DEFAULT,
+ r->pool) != APR_SUCCESS) {
+ return NULL;
+ }
+
+ dbmkey.dptr = key;
+ dbmkey.dsize = strlen(key);
+
+ if (apr_dbm_fetch(dbmfp, dbmkey, &dbmval) == APR_SUCCESS && dbmval.dptr) {
+ value = apr_pstrmemdup(r->pool, dbmval.dptr, dbmval.dsize);
+ }
+ else {
+ value = NULL;
+ }
+
+ apr_dbm_close(dbmfp);
+
+ return value;
+}
+
+static char *lookup_map_program(request_rec *r, apr_file_t *fpin,
+ apr_file_t *fpout, char *key)
+{
+ char buf[LONG_STRING_LEN];
+ char c;
+ int i;
+ apr_size_t nbytes;
+ apr_status_t rv;
+
+#ifndef NO_WRITEV
+ struct iovec iova[2];
+ apr_size_t niov;
+#endif
+
+    /* When `RewriteEngine off' is used in the per-server context,
+     * the RewriteMap programs are never spawned. In that case using
+     * such a map (usually from a per-dir context) is pointless,
+     * because the program simply is not available.
+     *
+     * Newlines in the key would leave stray bytes in the pipe: the
+     * next lookup would read the characters following the \n instead
+     * of its own key, i.e. the RewriteMap program falls out of sync
+     * with the requests. Such keys are therefore rejected here too.
+     */
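+    /* The protocol with the child is strictly line based: one key per
+     * line on its stdin, exactly one answer line per key on its stdout,
+     * where the (case-insensitive) literal string "NULL" means
+     * `no value found'.
+     */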
+ if (fpin == NULL || fpout == NULL || ap_strchr(key, '\n')) {
+ return NULL;
+ }
+
+ /* take the lock */
+
+ if (rewrite_mapr_lock_acquire) {
+ rv = apr_global_mutex_lock(rewrite_mapr_lock_acquire);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "apr_global_mutex_lock(rewrite_mapr_lock_acquire) "
+ "failed");
+ return NULL; /* Maybe this should be fatal? */
+ }
+ }
+
+ /* write out the request key */
+#ifdef NO_WRITEV
+ nbytes = strlen(key);
+ apr_file_write(fpin, key, &nbytes);
+ nbytes = 1;
+ apr_file_write(fpin, "\n", &nbytes);
+#else
+ iova[0].iov_base = key;
+ iova[0].iov_len = strlen(key);
+ iova[1].iov_base = "\n";
+ iova[1].iov_len = 1;
+
+ niov = 2;
+ apr_file_writev(fpin, iova, niov, &nbytes);
+#endif
+
+ /* read in the response value */
+ i = 0;
+ nbytes = 1;
+ apr_file_read(fpout, &c, &nbytes);
+ while (nbytes == 1 && (i < LONG_STRING_LEN-1)) {
+ if (c == '\n') {
+ break;
+ }
+ buf[i++] = c;
+
+ apr_file_read(fpout, &c, &nbytes);
+ }
+ buf[i] = '\0';
+
+ /* give the lock back */
+ if (rewrite_mapr_lock_acquire) {
+ rv = apr_global_mutex_unlock(rewrite_mapr_lock_acquire);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "apr_global_mutex_unlock(rewrite_mapr_lock_acquire) "
+ "failed");
+ return NULL; /* Maybe this should be fatal? */
+ }
+ }
+
+ if (strcasecmp(buf, "NULL") == 0) {
+ return NULL;
+ }
+ else {
+ return apr_pstrdup(r->pool, buf);
+ }
+}
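+
+/* For illustration only -- this sketch is not part of mod_rewrite and the
+ * key/value strings in it are made up: a conforming external RewriteMap
+ * program answers every stdin line with exactly one stdout line, prints
+ * the literal string "NULL" when it has no value, and keeps its stdout
+ * unbuffered so the parent never blocks waiting for the reply.
+ *
+ *     #include <stdio.h>
+ *     #include <string.h>
+ *
+ *     int main(void)
+ *     {
+ *         char key[2048];
+ *         setvbuf(stdout, NULL, _IONBF, 0);
+ *         while (fgets(key, sizeof(key), stdin) != NULL) {
+ *             key[strcspn(key, "\n")] = '\0';
+ *             puts(strcmp(key, "www") == 0 ? "www.example.com" : "NULL");
+ *         }
+ *         return 0;
+ *     }
+ */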
+
+static void ap_register_rewrite_mapfunc(char *name, rewrite_mapfunc_t *func)
+{
+ apr_hash_set(mapfunc_hash, name, strlen(name), (const void *)func);
+}
+
+static char *rewrite_mapfunc_toupper(request_rec *r, char *key)
+{
+ char *value, *cp;
+
+ for (cp = value = apr_pstrdup(r->pool, key); cp != NULL && *cp != '\0';
+ cp++) {
+ *cp = apr_toupper(*cp);
+ }
+ return value;
+}
+
+static char *rewrite_mapfunc_tolower(request_rec *r, char *key)
+{
+ char *value, *cp;
+
+ for (cp = value = apr_pstrdup(r->pool, key); cp != NULL && *cp != '\0';
+ cp++) {
+ *cp = apr_tolower(*cp);
+ }
+ return value;
+}
+
+static char *rewrite_mapfunc_escape(request_rec *r, char *key)
+{
+ char *value;
+
+ value = ap_escape_uri(r->pool, key);
+ return value;
+}
+
+static char *rewrite_mapfunc_unescape(request_rec *r, char *key)
+{
+ char *value;
+
+ value = apr_pstrdup(r->pool, key);
+ ap_unescape_url(value);
+ return value;
+}
+
+static int rewrite_rand_init_done = 0;
+
+static void rewrite_rand_init(void)
+{
+ if (!rewrite_rand_init_done) {
+ srand((unsigned)(getpid()));
+ rewrite_rand_init_done = 1;
+ }
+ return;
+}
+
+static int rewrite_rand(int l, int h)
+{
+ rewrite_rand_init();
+
+    /* Scale a pseudo-random value into the range [l, h]. Using a
+     * floating point factor keeps all bits of the rand() result in
+     * play; a plain integer modulus would only use the lower-order
+     * bits, which may not be as uniformly random. The "% RAND_MAX"
+     * merely folds the single value RAND_MAX back to 0, so the scaled
+     * result never exceeds h.
+     */
+ return (int)(((double)(rand() % RAND_MAX) / RAND_MAX) * (h - l + 1) + l);
+}
+
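+/* Pick one element of a `|'-separated value at random, e.g. (purely as
+ * an illustration) "www1|www2|www3" may yield "www2". A value without
+ * any `|' separator is returned unchanged.
+ */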
+static char *select_random_value_part(request_rec *r, char *value)
+{
+ char *buf;
+ int n, i, k;
+
+ /* count number of distinct values */
+ for (n = 1, i = 0; value[i] != '\0'; i++) {
+ if (value[i] == '|') {
+ n++;
+ }
+ }
+
+ /* when only one value we have no option to choose */
+ if (n == 1) {
+ return value;
+ }
+
+ /* else randomly select one */
+ k = rewrite_rand(1, n);
+
+ /* and grep it out */
+ for (n = 1, i = 0; value[i] != '\0'; i++) {
+ if (n == k) {
+ break;
+ }
+ if (value[i] == '|') {
+ n++;
+ }
+ }
+ buf = apr_pstrdup(r->pool, &value[i]);
+ for (i = 0; buf[i] != '\0' && buf[i] != '|'; i++)
+ ;
+ buf[i] = '\0';
+ return buf;
+}
+
+
+/*
+** +-------------------------------------------------------+
+** | |
+** | rewriting logfile support
+** | |
+** +-------------------------------------------------------+
+*/
+
+
+static int open_rewritelog(server_rec *s, apr_pool_t *p)
+{
+ rewrite_server_conf *conf;
+ const char *fname;
+ apr_status_t rc;
+ piped_log *pl;
+ int rewritelog_flags = ( APR_WRITE | APR_APPEND | APR_CREATE );
+ apr_fileperms_t rewritelog_mode = ( APR_UREAD | APR_UWRITE |
+ APR_GREAD | APR_WREAD );
+
+ conf = ap_get_module_config(s->module_config, &rewrite_module);
+
+ /* - no logfile configured
+ * - logfilename empty
+ * - virtual log shared w/ main server
+ */
+ if (!conf->rewritelogfile || !*conf->rewritelogfile || conf->rewritelogfp) {
+ return 1;
+ }
+
+ if (*conf->rewritelogfile == '|') {
+ if ((pl = ap_open_piped_log(p, conf->rewritelogfile+1)) == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "mod_rewrite: could not open reliable pipe "
+ "to RewriteLog filter %s", conf->rewritelogfile+1);
+ return 0;
+ }
+ conf->rewritelogfp = ap_piped_log_write_fd(pl);
+ }
+ else if (*conf->rewritelogfile != '\0') {
+ fname = ap_server_root_relative(p, conf->rewritelogfile);
+ if (!fname) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, APR_EBADPATH, s,
+ "mod_rewrite: Invalid RewriteLog "
+ "path %s", conf->rewritelogfile);
+ return 0;
+ }
+ if ((rc = apr_file_open(&conf->rewritelogfp, fname,
+ rewritelog_flags, rewritelog_mode, p))
+ != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rc, s,
+ "mod_rewrite: could not open RewriteLog "
+ "file %s", fname);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
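+/* Write one entry to the rewrite logfile. Schematically (wrapped here
+ * for readability; the real entry is a single line) an entry looks like
+ *
+ *   <remote-host> <remote-logname> <user> [<date>]
+ *   [<server-name>/sid#<server-id>][rid#<request-id>/<initial|subreq><redir>]
+ *   (<level>) <message>
+ */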
+static void rewritelog(request_rec *r, int level, const char *text, ...)
+{
+ rewrite_server_conf *conf;
+ conn_rec *conn;
+ char *str1;
+ char str2[512];
+ char str3[1024];
+ const char *type;
+ char redir[20]; /* enough for "/redir#%d" if int is 32 bit */
+ va_list ap;
+ int i;
+ apr_size_t nbytes;
+ request_rec *req;
+ char *ruser;
+ const char *rhost;
+ apr_status_t rv;
+
+    conf = ap_get_module_config(r->server->module_config, &rewrite_module);
+    conn = r->connection;
+
+    if (conf->rewritelogfp == NULL) {
+        return;
+    }
+    if (conf->rewritelogfile == NULL) {
+        return;
+    }
+    if (*(conf->rewritelogfile) == '\0') {
+        return;
+    }
+
+    if (level > conf->rewriteloglevel) {
+        return;
+    }
+
+    /* start the varargs traversal only after the early returns above,
+     * so every va_start() is matched by the va_end() at the end */
+    va_start(ap, text);
+
+ if (r->user == NULL) {
+ ruser = "-";
+ }
+ else if (strlen(r->user) != 0) {
+ ruser = r->user;
+ }
+ else {
+ ruser = "\"\"";
+ }
+
+ rhost = ap_get_remote_host(conn, r->per_dir_config,
+ REMOTE_NOLOOKUP, NULL);
+ if (rhost == NULL) {
+ rhost = "UNKNOWN-HOST";
+ }
+
+ str1 = apr_pstrcat(r->pool, rhost, " ",
+ (conn->remote_logname != NULL ?
+ conn->remote_logname : "-"), " ",
+ ruser, NULL);
+ apr_vsnprintf(str2, sizeof(str2), text, ap);
+
+ if (r->main == NULL) {
+ type = "initial";
+ }
+ else {
+ type = "subreq";
+ }
+
+ for (i = 0, req = r; req->prev != NULL; req = req->prev) {
+ i++;
+ }
+ if (i == 0) {
+ redir[0] = '\0';
+ }
+ else {
+ apr_snprintf(redir, sizeof(redir), "/redir#%d", i);
+ }
+
+ apr_snprintf(str3, sizeof(str3),
+ "%s %s [%s/sid#%lx][rid#%lx/%s%s] (%d) %s" APR_EOL_STR, str1,
+ current_logtime(r), ap_get_server_name(r),
+ (unsigned long)(r->server), (unsigned long)r,
+ type, redir, level, str2);
+
+ rv = apr_global_mutex_lock(rewrite_log_lock);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "apr_global_mutex_lock(rewrite_log_lock) failed");
+ /* XXX: Maybe this should be fatal? */
+ }
+ nbytes = strlen(str3);
+ apr_file_write(conf->rewritelogfp, str3, &nbytes);
+ rv = apr_global_mutex_unlock(rewrite_log_lock);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "apr_global_mutex_unlock(rewrite_log_lock) failed");
+ /* XXX: Maybe this should be fatal? */
+ }
+
+ va_end(ap);
+ return;
+}
+
+static char *current_logtime(request_rec *r)
+{
+ apr_time_exp_t t;
+ char tstr[80];
+ apr_size_t len;
+
+ apr_time_exp_lt(&t, apr_time_now());
+
+ apr_strftime(tstr, &len, 80, "[%d/%b/%Y:%H:%M:%S ", &t);
+ apr_snprintf(tstr + strlen(tstr), 80-strlen(tstr), "%c%.2d%.2d]",
+ t.tm_gmtoff < 0 ? '-' : '+',
+ t.tm_gmtoff / (60*60), t.tm_gmtoff % (60*60));
+ return apr_pstrdup(r->pool, tstr);
+}
+
+
+
+
+/*
+** +-------------------------------------------------------+
+** | |
+** | rewriting lockfile support
+** | |
+** +-------------------------------------------------------+
+*/
+
+#define REWRITELOCK_MODE ( APR_UREAD | APR_UWRITE | APR_GREAD | APR_WREAD )
+
+static apr_status_t rewritelock_create(server_rec *s, apr_pool_t *p)
+{
+ apr_status_t rc;
+
+ /* only operate if a lockfile is used */
+ if (lockname == NULL || *(lockname) == '\0') {
+ return APR_SUCCESS;
+ }
+
+ /* create the lockfile */
+ rc = apr_global_mutex_create(&rewrite_mapr_lock_acquire, lockname,
+ APR_LOCK_DEFAULT, p);
+ if (rc != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rc, s,
+ "mod_rewrite: Parent could not create RewriteLock "
+ "file %s", lockname);
+ return rc;
+ }
+
+#ifdef MOD_REWRITE_SET_MUTEX_PERMS
+ rc = unixd_set_global_mutex_perms(rewrite_mapr_lock_acquire);
+ if (rc != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rc, s,
+ "mod_rewrite: Parent could not set permissions "
+ "on RewriteLock; check User and Group directives");
+ return rc;
+ }
+#endif
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t rewritelock_remove(void *data)
+{
+ /* only operate if a lockfile is used */
+ if (lockname == NULL || *(lockname) == '\0') {
+ return APR_SUCCESS;
+ }
+
+ /* destroy the rewritelock */
+ apr_global_mutex_destroy (rewrite_mapr_lock_acquire);
+ rewrite_mapr_lock_acquire = NULL;
+ lockname = NULL;
+ return(0);
+}
+
+
+/*
+** +-------------------------------------------------------+
+** | |
+** | program map support
+** | |
+** +-------------------------------------------------------+
+*/
+
+static apr_status_t run_rewritemap_programs(server_rec *s, apr_pool_t *p)
+{
+ rewrite_server_conf *conf;
+ apr_array_header_t *rewritemaps;
+ rewritemap_entry *entries;
+ int i;
+ apr_status_t rc;
+
+ conf = ap_get_module_config(s->module_config, &rewrite_module);
+
+ /* If the engine isn't turned on,
+ * don't even try to do anything.
+ */
+ if (conf->state == ENGINE_DISABLED) {
+ return APR_SUCCESS;
+ }
+
+ rewritemaps = conf->rewritemaps;
+ entries = (rewritemap_entry *)rewritemaps->elts;
+ for (i = 0; i < rewritemaps->nelts; i++) {
+ apr_file_t *fpin = NULL;
+ apr_file_t *fpout = NULL;
+ rewritemap_entry *map = &entries[i];
+
+ if (map->type != MAPTYPE_PRG) {
+ continue;
+ }
+ if (map->argv[0] == NULL
+ || *(map->argv[0]) == '\0'
+ || map->fpin != NULL
+ || map->fpout != NULL ) {
+ continue;
+ }
+ rc = rewritemap_program_child(p, map->argv[0], map->argv,
+ &fpout, &fpin);
+ if (rc != APR_SUCCESS || fpin == NULL || fpout == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rc, s,
+ "mod_rewrite: could not startup RewriteMap "
+ "program %s", map->datafile);
+ return rc;
+ }
+ map->fpin = fpin;
+ map->fpout = fpout;
+ }
+ return APR_SUCCESS;
+}
+
+/* child process code */
+static apr_status_t rewritemap_program_child(apr_pool_t *p,
+ const char *progname, char **argv,
+ apr_file_t **fpout,
+ apr_file_t **fpin)
+{
+ apr_status_t rc;
+ apr_procattr_t *procattr;
+ apr_proc_t *procnew;
+
+ if (((rc = apr_procattr_create(&procattr, p)) != APR_SUCCESS) ||
+ ((rc = apr_procattr_io_set(procattr, APR_FULL_BLOCK, APR_FULL_BLOCK,
+ APR_NO_PIPE)) != APR_SUCCESS) ||
+ ((rc = apr_procattr_dir_set(procattr,
+ ap_make_dirstr_parent(p, argv[0])))
+ != APR_SUCCESS) ||
+ ((rc = apr_procattr_cmdtype_set(procattr, APR_PROGRAM))
+ != APR_SUCCESS)) {
+ /* Something bad happened, give up and go away. */
+ }
+ else {
+ procnew = apr_pcalloc(p, sizeof(*procnew));
+ rc = apr_proc_create(procnew, argv[0], (const char **)argv, NULL,
+ procattr, p);
+
+ if (rc == APR_SUCCESS) {
+ apr_pool_note_subprocess(p, procnew, APR_KILL_AFTER_TIMEOUT);
+
+ if (fpin) {
+ (*fpin) = procnew->in;
+ }
+
+ if (fpout) {
+ (*fpout) = procnew->out;
+ }
+ }
+ }
+
+ return (rc);
+}
+
+
+
+
+/*
+** +-------------------------------------------------------+
+** | |
+** | environment variable support
+** | |
+** +-------------------------------------------------------+
+*/
+
+
+static char *lookup_variable(request_rec *r, char *var)
+{
+ const char *result;
+ char resultbuf[LONG_STRING_LEN];
+ apr_time_exp_t tm;
+ request_rec *rsub;
+
+ result = NULL;
+
+ /* HTTP headers */
+ if (strcasecmp(var, "HTTP_USER_AGENT") == 0) {
+ result = lookup_header(r, "User-Agent");
+ }
+ else if (strcasecmp(var, "HTTP_REFERER") == 0) {
+ result = lookup_header(r, "Referer");
+ }
+ else if (strcasecmp(var, "HTTP_COOKIE") == 0) {
+ result = lookup_header(r, "Cookie");
+ }
+ else if (strcasecmp(var, "HTTP_FORWARDED") == 0) {
+ result = lookup_header(r, "Forwarded");
+ }
+ else if (strcasecmp(var, "HTTP_HOST") == 0) {
+ result = lookup_header(r, "Host");
+ }
+ else if (strcasecmp(var, "HTTP_PROXY_CONNECTION") == 0) {
+ result = lookup_header(r, "Proxy-Connection");
+ }
+ else if (strcasecmp(var, "HTTP_ACCEPT") == 0) {
+ result = lookup_header(r, "Accept");
+ }
+    /* all other HTTP headers that we do not know about explicitly */
+ else if (strlen(var) > 5 && strncasecmp(var, "HTTP:", 5) == 0) {
+ result = lookup_header(r, var+5);
+ }
+
+ /* connection stuff */
+ else if (strcasecmp(var, "REMOTE_ADDR") == 0) {
+ result = r->connection->remote_ip;
+ }
+ else if (strcasecmp(var, "REMOTE_PORT") == 0) {
+ return apr_itoa(r->pool, r->connection->remote_addr->port);
+ }
+ else if (strcasecmp(var, "REMOTE_HOST") == 0) {
+ result = (char *)ap_get_remote_host(r->connection,
+ r->per_dir_config, REMOTE_NAME, NULL);
+ }
+ else if (strcasecmp(var, "REMOTE_USER") == 0) {
+ result = r->user;
+ }
+ else if (strcasecmp(var, "REMOTE_IDENT") == 0) {
+ result = (char *)ap_get_remote_logname(r);
+ }
+
+ /* request stuff */
+ else if (strcasecmp(var, "THE_REQUEST") == 0) { /* non-standard */
+ result = r->the_request;
+ }
+ else if (strcasecmp(var, "REQUEST_METHOD") == 0) {
+ result = r->method;
+ }
+ else if (strcasecmp(var, "REQUEST_URI") == 0) { /* non-standard */
+ result = r->uri;
+ }
+ else if (strcasecmp(var, "SCRIPT_FILENAME") == 0 ||
+ strcasecmp(var, "REQUEST_FILENAME") == 0 ) {
+ result = r->filename;
+ }
+ else if (strcasecmp(var, "PATH_INFO") == 0) {
+ result = r->path_info;
+ }
+ else if (strcasecmp(var, "QUERY_STRING") == 0) {
+ result = r->args;
+ }
+ else if (strcasecmp(var, "AUTH_TYPE") == 0) {
+ result = r->ap_auth_type;
+ }
+ else if (strcasecmp(var, "IS_SUBREQ") == 0) { /* non-standard */
+ result = (r->main != NULL ? "true" : "false");
+ }
+
+ /* internal server stuff */
+ else if (strcasecmp(var, "DOCUMENT_ROOT") == 0) {
+ result = ap_document_root(r);
+ }
+ else if (strcasecmp(var, "SERVER_ADMIN") == 0) {
+ result = r->server->server_admin;
+ }
+ else if (strcasecmp(var, "SERVER_NAME") == 0) {
+ result = ap_get_server_name(r);
+ }
+ else if (strcasecmp(var, "SERVER_ADDR") == 0) { /* non-standard */
+ result = r->connection->local_ip;
+ }
+ else if (strcasecmp(var, "SERVER_PORT") == 0) {
+ apr_snprintf(resultbuf, sizeof(resultbuf), "%u", ap_get_server_port(r));
+ result = resultbuf;
+ }
+ else if (strcasecmp(var, "SERVER_PROTOCOL") == 0) {
+ result = r->protocol;
+ }
+ else if (strcasecmp(var, "SERVER_SOFTWARE") == 0) {
+ result = ap_get_server_version();
+ }
+ else if (strcasecmp(var, "API_VERSION") == 0) { /* non-standard */
+ apr_snprintf(resultbuf, sizeof(resultbuf), "%d:%d",
+ MODULE_MAGIC_NUMBER_MAJOR, MODULE_MAGIC_NUMBER_MINOR);
+ result = resultbuf;
+ }
+
+/* XXX: this can get slow when used heavily, because the exploded local
+ * time is recalculated for every single TIME_* variable lookup */
+    /* underlying Unix system stuff */
+ else if (strcasecmp(var, "TIME_YEAR") == 0) {
+ apr_time_exp_lt(&tm, apr_time_now());
+ apr_snprintf(resultbuf, sizeof(resultbuf), "%04d", tm.tm_year + 1900);
+ result = resultbuf;
+ }
+#define MKTIMESTR(format, tmfield) \
+ apr_time_exp_lt(&tm, apr_time_now()); \
+ apr_snprintf(resultbuf, sizeof(resultbuf), format, tm.tmfield); \
+ result = resultbuf;
+ else if (strcasecmp(var, "TIME_MON") == 0) {
+ MKTIMESTR("%02d", tm_mon+1)
+ }
+ else if (strcasecmp(var, "TIME_DAY") == 0) {
+ MKTIMESTR("%02d", tm_mday)
+ }
+ else if (strcasecmp(var, "TIME_HOUR") == 0) {
+ MKTIMESTR("%02d", tm_hour)
+ }
+ else if (strcasecmp(var, "TIME_MIN") == 0) {
+ MKTIMESTR("%02d", tm_min)
+ }
+ else if (strcasecmp(var, "TIME_SEC") == 0) {
+ MKTIMESTR("%02d", tm_sec)
+ }
+ else if (strcasecmp(var, "TIME_WDAY") == 0) {
+ MKTIMESTR("%d", tm_wday)
+ }
+ else if (strcasecmp(var, "TIME") == 0) {
+ apr_time_exp_lt(&tm, apr_time_now());
+ apr_snprintf(resultbuf, sizeof(resultbuf),
+ "%04d%02d%02d%02d%02d%02d", tm.tm_year + 1900,
+ tm.tm_mon+1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+ result = resultbuf;
+ rewritelog(r, 1, "RESULT='%s'", result);
+ }
+
+ /* all other env-variables from the parent Apache process */
+ else if (strlen(var) > 4 && strncasecmp(var, "ENV:", 4) == 0) {
+ /* first try the internal Apache notes structure */
+ result = apr_table_get(r->notes, var+4);
+ /* second try the internal Apache env structure */
+ if (result == NULL) {
+ result = apr_table_get(r->subprocess_env, var+4);
+ }
+ /* third try the external OS env */
+ if (result == NULL) {
+ result = getenv(var+4);
+ }
+ }
+ else if (strlen(var) > 4 && !strncasecmp(var, "SSL:", 4)
+ && rewrite_ssl_lookup) {
+ result = rewrite_ssl_lookup(r->pool, r->server, r->connection, r,
+ var + 4);
+ }
+
+#define LOOKAHEAD(subrecfunc) \
+ if ( \
+ /* filename is safe to use */ \
+ r->filename != NULL \
+ /* - and we're either not in a subrequest */ \
+ && ( r->main == NULL \
+ /* - or in a subrequest where paths are non-NULL... */ \
+ || ( r->main->uri != NULL && r->uri != NULL \
+ /* ...and sub and main paths differ */ \
+ && strcmp(r->main->uri, r->uri) != 0))) { \
+ /* process a file-based subrequest */ \
+ rsub = subrecfunc(r->filename, r, NULL); \
+ /* now recursively lookup the variable in the sub_req */ \
+ result = lookup_variable(rsub, var+5); \
+ /* copy it up to our scope before we destroy sub_req's apr_pool_t */ \
+ result = apr_pstrdup(r->pool, result); \
+ /* cleanup by destroying the subrequest */ \
+ ap_destroy_sub_req(rsub); \
+ /* log it */ \
+ rewritelog(r, 5, "lookahead: path=%s var=%s -> val=%s", \
+ r->filename, var+5, result); \
+ /* return ourself to prevent re-pstrdup */ \
+ return (char *)result; \
+ }
+
+ /* look-ahead for parameter through URI-based sub-request */
+ else if (strlen(var) > 5 && strncasecmp(var, "LA-U:", 5) == 0) {
+ LOOKAHEAD(ap_sub_req_lookup_uri)
+ }
+ /* look-ahead for parameter through file-based sub-request */
+ else if (strlen(var) > 5 && strncasecmp(var, "LA-F:", 5) == 0) {
+ LOOKAHEAD(ap_sub_req_lookup_file)
+ }
+
+ /* file stuff */
+ else if (strcasecmp(var, "SCRIPT_USER") == 0) {
+ result = "<unknown>";
+ if (r->finfo.valid & APR_FINFO_USER) {
+ apr_get_username((char **)&result, r->finfo.user, r->pool);
+ }
+ }
+ else if (strcasecmp(var, "SCRIPT_GROUP") == 0) {
+ result = "<unknown>";
+ if (r->finfo.valid & APR_FINFO_GROUP) {
+ apr_group_name_get((char **)&result, r->finfo.group, r->pool);
+ }
+ } else if (strcasecmp(var, "HTTPS") == 0) {
+ int flag = rewrite_is_https && rewrite_is_https(r->connection);
+ result = flag ? "on" : "off";
+ }
+
+ if (result == NULL) {
+ return apr_pstrdup(r->pool, "");
+ }
+ else {
+ return apr_pstrdup(r->pool, result);
+ }
+}
+
+static char *lookup_header(request_rec *r, const char *name)
+{
+ const apr_array_header_t *hdrs_arr;
+ const apr_table_entry_t *hdrs;
+ int i;
+
+ hdrs_arr = apr_table_elts(r->headers_in);
+ hdrs = (const apr_table_entry_t *)hdrs_arr->elts;
+ for (i = 0; i < hdrs_arr->nelts; ++i) {
+ if (hdrs[i].key == NULL) {
+ continue;
+ }
+ if (strcasecmp(hdrs[i].key, name) == 0) {
+ apr_table_merge(r->notes, VARY_KEY_THIS, name);
+ return hdrs[i].val;
+ }
+ }
+ return NULL;
+}
+
+
+
+
+/*
+** +-------------------------------------------------------+
+** | |
+** | caching support
+** | |
+** +-------------------------------------------------------+
+*/
+
+
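+/* The cache is organised as one `cachelist' per resource (i.e. per map
+ * cachename), each holding a growing array of key/value `cacheentry'
+ * records plus a small translation buffer (`tlb'): CACHE_TLB_ROWS hash
+ * buckets of CACHE_TLB_COLS entry indices each, so recently used keys
+ * are found without scanning the whole entry array.
+ */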
+static cache *init_cache(apr_pool_t *p)
+{
+ cache *c;
+
+ c = (cache *)apr_palloc(p, sizeof(cache));
+ if (apr_pool_create(&c->pool, p) != APR_SUCCESS) {
+ return NULL;
+ }
+ c->lists = apr_array_make(c->pool, 2, sizeof(cachelist));
+#if APR_HAS_THREADS
+ (void)apr_thread_mutex_create(&(c->lock), APR_THREAD_MUTEX_DEFAULT, p);
+#endif
+ return c;
+}
+
+static void set_cache_string(cache *c, const char *res, int mode, apr_time_t t,
+ char *key, char *value)
+{
+ cacheentry ce;
+
+ ce.time = t;
+ ce.key = key;
+ ce.value = value;
+ store_cache_string(c, res, &ce);
+ return;
+}
+
+static char *get_cache_string(cache *c, const char *res, int mode,
+ apr_time_t t, char *key)
+{
+ cacheentry *ce;
+
+ ce = retrieve_cache_string(c, res, key);
+ if (ce == NULL) {
+ return NULL;
+ }
+ if (mode & CACHEMODE_TS) {
+ if (t != ce->time) {
+ return NULL;
+ }
+ }
+ else if (mode & CACHEMODE_TTL) {
+ if (t > ce->time) {
+ return NULL;
+ }
+ }
+ return ce->value;
+}
+
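+/* Hash a key into a TLB row using the classic h = h*33 ^ c scheme,
+ * folded modulo CACHE_TLB_ROWS.
+ */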
+static int cache_tlb_hash(char *key)
+{
+ unsigned long n;
+ char *p;
+
+ n = 0;
+ for (p = key; *p != '\0'; p++) {
+        n = ((n << 5) + n) ^ (unsigned long)(*p);
+ }
+
+ return n % CACHE_TLB_ROWS;
+}
+
+static cacheentry *cache_tlb_lookup(cachetlbentry *tlb, cacheentry *elt,
+ char *key)
+{
+ int ix = cache_tlb_hash(key);
+ int i;
+ int j;
+
+ for (i=0; i < CACHE_TLB_COLS; ++i) {
+ j = tlb[ix].t[i];
+ if (j < 0)
+ return NULL;
+ if (strcmp(elt[j].key, key) == 0)
+ return &elt[j];
+ }
+ return NULL;
+}
+
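+/* Record `e' as the most recently used entry of its TLB row: the older
+ * slot indices are shifted down by one and the new index goes into
+ * slot 0.
+ */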
+static void cache_tlb_replace(cachetlbentry *tlb, cacheentry *elt,
+ cacheentry *e)
+{
+ int ix = cache_tlb_hash(e->key);
+ int i;
+
+ tlb = &tlb[ix];
+
+    for (i = CACHE_TLB_COLS-1; i > 0; --i)
+        tlb->t[i] = tlb->t[i-1];
+
+ tlb->t[0] = e - elt;
+}
+
+static void store_cache_string(cache *c, const char *res, cacheentry *ce)
+{
+ int i;
+ int j;
+ cachelist *l;
+ cacheentry *e;
+ cachetlbentry *t;
+ int found_list;
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(c->lock);
+#endif
+
+ found_list = 0;
+ /* first try to edit an existing entry */
+ for (i = 0; i < c->lists->nelts; i++) {
+ l = &(((cachelist *)c->lists->elts)[i]);
+ if (strcmp(l->resource, res) == 0) {
+ found_list = 1;
+
+ e = cache_tlb_lookup((cachetlbentry *)l->tlb->elts,
+ (cacheentry *)l->entries->elts, ce->key);
+ if (e != NULL) {
+ e->time = ce->time;
+ e->value = apr_pstrdup(c->pool, ce->value);
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(c->lock);
+#endif
+ return;
+ }
+
+ for (j = 0; j < l->entries->nelts; j++) {
+ e = &(((cacheentry *)l->entries->elts)[j]);
+ if (strcmp(e->key, ce->key) == 0) {
+ e->time = ce->time;
+ e->value = apr_pstrdup(c->pool, ce->value);
+ cache_tlb_replace((cachetlbentry *)l->tlb->elts,
+ (cacheentry *)l->entries->elts, e);
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(c->lock);
+#endif
+ return;
+ }
+ }
+ }
+ }
+
+ /* create a needed new list */
+ if (!found_list) {
+ l = apr_array_push(c->lists);
+ l->resource = apr_pstrdup(c->pool, res);
+ l->entries = apr_array_make(c->pool, 2, sizeof(cacheentry));
+ l->tlb = apr_array_make(c->pool, CACHE_TLB_ROWS,
+ sizeof(cachetlbentry));
+ for (i=0; i<CACHE_TLB_ROWS; ++i) {
+ t = &((cachetlbentry *)l->tlb->elts)[i];
+ for (j=0; j<CACHE_TLB_COLS; ++j)
+ t->t[j] = -1;
+ }
+ }
+
+ /* create the new entry */
+ for (i = 0; i < c->lists->nelts; i++) {
+ l = &(((cachelist *)c->lists->elts)[i]);
+ if (strcmp(l->resource, res) == 0) {
+ e = apr_array_push(l->entries);
+ e->time = ce->time;
+ e->key = apr_pstrdup(c->pool, ce->key);
+ e->value = apr_pstrdup(c->pool, ce->value);
+ cache_tlb_replace((cachetlbentry *)l->tlb->elts,
+ (cacheentry *)l->entries->elts, e);
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(c->lock);
+#endif
+ return;
+ }
+ }
+
+    /* not reached, but if it ever is, it does no harm... */
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(c->lock);
+#endif
+ return;
+}
+
+static cacheentry *retrieve_cache_string(cache *c, const char *res, char *key)
+{
+ int i;
+ int j;
+ cachelist *l;
+ cacheentry *e;
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(c->lock);
+#endif
+
+ for (i = 0; i < c->lists->nelts; i++) {
+ l = &(((cachelist *)c->lists->elts)[i]);
+ if (strcmp(l->resource, res) == 0) {
+
+ e = cache_tlb_lookup((cachetlbentry *)l->tlb->elts,
+ (cacheentry *)l->entries->elts, key);
+ if (e != NULL) {
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(c->lock);
+#endif
+ return e;
+ }
+
+ for (j = 0; j < l->entries->nelts; j++) {
+ e = &(((cacheentry *)l->entries->elts)[j]);
+ if (strcmp(e->key, key) == 0) {
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(c->lock);
+#endif
+ return e;
+ }
+ }
+ }
+ }
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(c->lock);
+#endif
+ return NULL;
+}
+
+
+
+
+/*
+** +-------------------------------------------------------+
+** | |
+** | misc functions
+** | |
+** +-------------------------------------------------------+
+*/
+
+/*
+ * substitute the prefix path 'match' in 'input' with 'subst'
+ * (think of RewriteBase which substitutes the physical path with
+ * the virtual path)
+ */
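+/*
+ * Illustrative example (the paths are made up): with input
+ * "/usr/local/apache/htdocs/sub/page.html", match "/usr/local/apache/htdocs"
+ * and subst "/base/", the function returns "/base/sub/page.html"; an input
+ * that does not start with `match' is returned unchanged.
+ */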
+
+static char *subst_prefix_path(request_rec *r, char *input, char *match,
+ const char *subst)
+{
+ apr_size_t len = strlen(match);
+
+ if (len && match[len - 1] == '/') {
+ --len;
+ }
+
+ if (!strncmp(input, match, len) && input[len++] == '/') {
+ apr_size_t slen, outlen;
+ char *output;
+
+ rewritelog(r, 5, "strip matching prefix: %s -> %s", input, input+len);
+
+ slen = strlen(subst);
+ if (slen && subst[slen - 1] != '/') {
+ ++slen;
+ }
+
+ outlen = strlen(input) + slen - len;
+ output = apr_palloc(r->pool, outlen + 1); /* don't forget the \0 */
+
+ memcpy(output, subst, slen);
+ if (slen && !output[slen-1]) {
+ output[slen-1] = '/';
+ }
+ memcpy(output+slen, input+len, outlen - slen);
+ output[outlen] = '\0';
+
+ rewritelog(r, 4, "add subst prefix: %s -> %s", input+len, output);
+
+ return output;
+ }
+
+ /* prefix didn't match */
+ return input;
+}
+
+
+/*
+**
+** our own command line parser, which does not have the '\\' problem
+**
+*/
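+
+/* As an illustration (the pattern below is made up), the raw argument
+ * line
+ *
+ *     %{HTTP_HOST}  "^www\.example\.com$"  [NC]
+ *
+ * is split into a1="%{HTTP_HOST}", a2="^www\.example\.com$" (the quotes
+ * are stripped) and a3="[NC]".
+ */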
+
+static int parseargline(char *str, char **a1, char **a2, char **a3)
+{
+ char *cp;
+ int isquoted;
+
+#define SKIP_WHITESPACE(cp) \
+ for ( ; *cp == ' ' || *cp == '\t'; ) { \
+ cp++; \
+ };
+
+#define CHECK_QUOTATION(cp,isquoted) \
+ isquoted = 0; \
+ if (*cp == '"') { \
+ isquoted = 1; \
+ cp++; \
+ }
+
+#define DETERMINE_NEXTSTRING(cp,isquoted) \
+ for ( ; *cp != '\0'; cp++) { \
+ if ( (isquoted && (*cp == ' ' || *cp == '\t')) \
+ || (*cp == '\\' && (*(cp+1) == ' ' || *(cp+1) == '\t'))) { \
+ cp++; \
+ continue; \
+ } \
+ if ( (!isquoted && (*cp == ' ' || *cp == '\t')) \
+ || (isquoted && *cp == '"') ) { \
+ break; \
+ } \
+ }
+
+ cp = str;
+ SKIP_WHITESPACE(cp);
+
+ /* determine first argument */
+ CHECK_QUOTATION(cp, isquoted);
+ *a1 = cp;
+ DETERMINE_NEXTSTRING(cp, isquoted);
+ if (*cp == '\0') {
+ return 1;
+ }
+ *cp++ = '\0';
+
+ SKIP_WHITESPACE(cp);
+
+ /* determine second argument */
+ CHECK_QUOTATION(cp, isquoted);
+ *a2 = cp;
+ DETERMINE_NEXTSTRING(cp, isquoted);
+ if (*cp == '\0') {
+ *cp++ = '\0';
+ *a3 = NULL;
+ return 0;
+ }
+ *cp++ = '\0';
+
+ SKIP_WHITESPACE(cp);
+
+ /* again check if there are only two arguments */
+ if (*cp == '\0') {
+ *cp++ = '\0';
+ *a3 = NULL;
+ return 0;
+ }
+
+    /* determine third argument */
+ CHECK_QUOTATION(cp, isquoted);
+ *a3 = cp;
+ DETERMINE_NEXTSTRING(cp, isquoted);
+ *cp++ = '\0';
+
+ return 0;
+}
+
+
+static void add_env_variable(request_rec *r, char *s)
+{
+ char var[MAX_STRING_LEN];
+ char val[MAX_STRING_LEN];
+ char *cp;
+ int n;
+
+ if ((cp = strchr(s, ':')) != NULL) {
+ n = ((cp-s) > MAX_STRING_LEN-1 ? MAX_STRING_LEN-1 : (cp-s));
+ memcpy(var, s, n);
+ var[n] = '\0';
+ apr_cpystrn(val, cp+1, sizeof(val));
+ apr_table_set(r->subprocess_env, var, val);
+ rewritelog(r, 5, "setting env variable '%s' to '%s'", var, val);
+ }
+}
+
+static void add_cookie(request_rec *r, char *s)
+{
+ char *var;
+ char *val;
+ char *domain;
+ char *expires;
+ char *path;
+
+ char *tok_cntx;
+ char *cookie;
+
+ if (s) {
+ var = apr_strtok(s, ":", &tok_cntx);
+ val = apr_strtok(NULL, ":", &tok_cntx);
+ domain = apr_strtok(NULL, ":", &tok_cntx);
+        /* expires and path are optional; apr_strtok() returns NULL
+         * for fields that are not present */
+ expires = apr_strtok(NULL, ":", &tok_cntx);
+ if (expires) {
+ path = apr_strtok(NULL,":", &tok_cntx);
+ }
+ else {
+ path = NULL;
+ }
+
+ if (var && val && domain) {
+ /* FIX: use cached time similar to how logging does it */
+ request_rec *rmain = r;
+ char *notename;
+ void *data;
+ while (rmain->main) {
+ rmain = rmain->main;
+ }
+
+ notename = apr_pstrcat(rmain->pool, var, "_rewrite", NULL);
+ apr_pool_userdata_get(&data, notename, rmain->pool);
+ if (data == NULL) {
+ cookie = apr_pstrcat(rmain->pool,
+ var, "=", val,
+ "; path=", (path)? path : "/",
+ "; domain=", domain,
+ (expires)? "; expires=" : NULL,
+ (expires)?
+ ap_ht_time(r->pool,
+ r->request_time +
+ apr_time_from_sec((60 *
+ atol(expires))),
+ "%a, %d-%b-%Y %T GMT", 1)
+ : NULL,
+ NULL);
+ /*
+ * XXX: should we add it to err_headers_out as well ?
+ * if we do we need to be careful that only ONE gets sent out
+ */
+ apr_table_add(rmain->err_headers_out, "Set-Cookie", cookie);
+ apr_pool_userdata_set("set", notename, NULL, rmain->pool);
+ rewritelog(rmain, 5, "setting cookie '%s'", cookie);
+ }
+ else {
+ rewritelog(rmain, 5, "skipping already set cookie '%s'", var);
+ }
+ }
+ }
+}
+
+
+/*
+**
+** check that a subrequest won't cause infinite recursion
+**
+*/
+
+static int subreq_ok(request_rec *r)
+{
+ /*
+ * either not in a subrequest, or in a subrequest
+ * and URIs aren't NULL and sub/main URIs differ
+ */
+ return (r->main == NULL
+ || (r->main->uri != NULL
+ && r->uri != NULL
+ && strcmp(r->main->uri, r->uri) != 0));
+}
+
+
+/*
+**
+** stat() for only the prefix of a path
+**
+*/
+
+static int prefix_stat(const char *path, apr_pool_t *pool)
+{
+ const char *curpath = path;
+ const char *root;
+ const char *slash;
+ char *statpath;
+ apr_status_t rv;
+
+ rv = apr_filepath_root(&root, &curpath, APR_FILEPATH_TRUENAME, pool);
+
+ if (rv != APR_SUCCESS) {
+ return 0;
+ }
+
+ /* let's recognize slashes only, the mod_rewrite semantics are opaque
+ * enough.
+ */
+ if ((slash = ap_strchr_c(curpath, '/')) != NULL) {
+ rv = apr_filepath_merge(&statpath, root,
+ apr_pstrndup(pool, curpath,
+ (apr_size_t)(slash - curpath)),
+ APR_FILEPATH_NOTABOVEROOT |
+ APR_FILEPATH_NOTRELATIVE, pool);
+ }
+ else {
+ rv = apr_filepath_merge(&statpath, root, curpath,
+ APR_FILEPATH_NOTABOVEROOT |
+ APR_FILEPATH_NOTRELATIVE, pool);
+ }
+
+ if (rv == APR_SUCCESS) {
+ apr_finfo_t sb;
+
+ if (apr_stat(&sb, statpath, APR_FINFO_MIN, pool) == APR_SUCCESS) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+**
+** Lexicographic Compare
+**
+*/
+
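+/* Compare two strings by length first (the longer string is considered
+ * the greater one), then character by character for equal lengths.
+ * For example, "9" compares less than "10", and "123" greater than "122".
+ */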
+static int compare_lexicography(char *cpNum1, char *cpNum2)
+{
+ int i;
+ int n1, n2;
+
+ n1 = strlen(cpNum1);
+ n2 = strlen(cpNum2);
+ if (n1 > n2) {
+ return 1;
+ }
+ if (n1 < n2) {
+ return -1;
+ }
+ for (i = 0; i < n1; i++) {
+ if (cpNum1[i] > cpNum2[i]) {
+ return 1;
+ }
+ if (cpNum1[i] < cpNum2[i]) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*
+**
+** Bracketed expression handling
+** s points after the opening bracket
+**
+*/
+
+static char *find_closing_bracket(char *s, int left, int right)
+{
+ int depth;
+
+ for (depth = 1; *s; ++s) {
+ if (*s == right && --depth == 0) {
+ return s;
+ }
+ else if (*s == left) {
+ ++depth;
+ }
+ }
+ return NULL;
+}
+
+static char *find_char_in_brackets(char *s, int c, int left, int right)
+{
+ int depth;
+
+ for (depth = 1; *s; ++s) {
+ if (*s == c && depth == 1) {
+ return s;
+ }
+ else if (*s == right && --depth == 0) {
+ return NULL;
+ }
+ else if (*s == left) {
+ ++depth;
+ }
+ }
+ return NULL;
+}
+
+/*
+**
+** Module paraphernalia
+**
+*/
+
+ /* the apr_table_t of commands we provide */
+static const command_rec command_table[] = {
+ AP_INIT_FLAG( "RewriteEngine", cmd_rewriteengine, NULL, OR_FILEINFO,
+ "On or Off to enable or disable (default) the whole "
+ "rewriting engine"),
+ AP_INIT_ITERATE( "RewriteOptions", cmd_rewriteoptions, NULL, OR_FILEINFO,
+ "List of option strings to set"),
+ AP_INIT_TAKE1( "RewriteBase", cmd_rewritebase, NULL, OR_FILEINFO,
+ "the base URL of the per-directory context"),
+ AP_INIT_RAW_ARGS("RewriteCond", cmd_rewritecond, NULL, OR_FILEINFO,
+ "an input string and a to be applied regexp-pattern"),
+ AP_INIT_RAW_ARGS("RewriteRule", cmd_rewriterule, NULL, OR_FILEINFO,
+                     "a URL-applied regexp-pattern and a substitution URL"),
+ AP_INIT_TAKE2( "RewriteMap", cmd_rewritemap, NULL, RSRC_CONF,
+ "a mapname and a filename"),
+ AP_INIT_TAKE1( "RewriteLock", cmd_rewritelock, NULL, RSRC_CONF,
+ "the filename of a lockfile used for inter-process "
+ "synchronization"),
+ AP_INIT_TAKE1( "RewriteLog", cmd_rewritelog, NULL, RSRC_CONF,
+ "the filename of the rewriting logfile"),
+ AP_INIT_TAKE1( "RewriteLogLevel", cmd_rewriteloglevel, NULL, RSRC_CONF,
+ "the level of the rewriting logfile verbosity "
+ "(0=none, 1=std, .., 9=max)"),
+ { NULL }
+};
+
+static void register_hooks(apr_pool_t *p)
+{
+    /* fixup after mod_proxy, so that the proxied URL will not be
+     * escaped accidentally by mod_proxy's fixup.
+ */
+ static const char * const aszPre[]={ "mod_proxy.c", NULL };
+
+ /* check type before mod_mime, so that [T=foo/bar] will not be
+ * overridden by AddType definitions.
+ */
+ static const char * const ct_aszSucc[]={ "mod_mime.c", NULL };
+
+ APR_REGISTER_OPTIONAL_FN(ap_register_rewrite_mapfunc);
+
+ ap_hook_handler(handler_redirect, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_pre_config(pre_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_config(post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_child_init(init_child, NULL, NULL, APR_HOOK_MIDDLE);
+
+ ap_hook_fixups(hook_fixup, aszPre, NULL, APR_HOOK_FIRST);
+ ap_hook_translate_name(hook_uri2file, NULL, NULL, APR_HOOK_FIRST);
+ ap_hook_type_checker(hook_mimetype, NULL, ct_aszSucc, APR_HOOK_MIDDLE);
+}
+
+ /* the main config structure */
+module AP_MODULE_DECLARE_DATA rewrite_module = {
+ STANDARD20_MODULE_STUFF,
+ config_perdir_create, /* create per-dir config structures */
+ config_perdir_merge, /* merge per-dir config structures */
+ config_server_create, /* create per-server config structures */
+ config_server_merge, /* merge per-server config structures */
+ command_table, /* table of config file commands */
+ register_hooks /* register hooks */
+};
+
+/*EOF*/
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.dsp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.dsp
new file mode 100644
index 00000000..9f8bee18
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_rewrite" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_rewrite - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_rewrite.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_rewrite.mak" CFG="mod_rewrite - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_rewrite - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_rewrite - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_rewrite - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_rewrite_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_rewrite.so" /base:@..\..\os\win32\BaseAddr.ref,mod_rewrite.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_rewrite.so" /base:@..\..\os\win32\BaseAddr.ref,mod_rewrite.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_rewrite - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_rewrite_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_rewrite.so" /base:@..\..\os\win32\BaseAddr.ref,mod_rewrite.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_rewrite.so" /base:@..\..\os\win32\BaseAddr.ref,mod_rewrite.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_rewrite - Win32 Release"
+# Name "mod_rewrite - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_rewrite.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_rewrite.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_rewrite - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_rewrite.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_rewrite.so "rewrite_module for Apache" ../../include/ap_release.h > .\mod_rewrite.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_rewrite - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_rewrite.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_rewrite.so "rewrite_module for Apache" ../../include/ap_release.h > .\mod_rewrite.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.exp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.exp
new file mode 100644
index 00000000..8f2165bf
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.exp
@@ -0,0 +1 @@
+rewrite_module
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.h b/rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.h
new file mode 100644
index 00000000..e648da98
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_rewrite.h
@@ -0,0 +1,446 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MOD_REWRITE_H
+#define MOD_REWRITE_H 1
+
+/*
+** _ _ _
+** _ __ ___ ___ __| | _ __ _____ ___ __(_) |_ ___
+** | '_ ` _ \ / _ \ / _` | | '__/ _ \ \ /\ / / '__| | __/ _ \
+** | | | | | | (_) | (_| | | | | __/\ V V /| | | | || __/
+** |_| |_| |_|\___/ \__,_|___|_| \___| \_/\_/ |_| |_|\__\___|
+** |_____|
+**
+** URL Rewriting Module
+**
+** This module uses a rule-based rewriting engine (based on a
+** regular-expression parser) to rewrite requested URLs on the fly.
+**
+** It supports an unlimited number of additional rule conditions (which can
+** operate on a lot of variables, even on HTTP headers) for granular
+** matching and even external database lookups (either via plain text
+** tables, DBM hash files or even external processes) for advanced URL
+** substitution.
+**
+** It operates on the full URLs (including the PATH_INFO part) both in
+** per-server context (httpd.conf) and per-dir context (.htaccess) and even
+** can generate QUERY_STRING parts on result. The rewriting result finally
+** can lead to internal subprocessing, external request redirection or even
+** to internal proxy throughput.
+**
+** This module was originally written in April 1996 and
+** gifted exclusively to The Apache Software Foundation in July 1997 by
+**
+** Ralf S. Engelschall
+** rse@engelschall.com
+** www.engelschall.com
+*/
+
+#include "apr.h"
+
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+ /* Include from the underlying Unix system ... */
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#if APR_HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#if APR_HAVE_CTYPE_H
+#include <ctype.h>
+#endif
+#if APR_HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#if APR_HAS_THREADS
+#include "apr_thread_mutex.h"
+#endif
+#include "apr_optional.h"
+#include "apr_dbm.h"
+#include "ap_config.h"
+
+ /* Include from the Apache server ... */
+#define CORE_PRIVATE
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_vhost.h"
+
+ /*
+ * The key in the r->notes apr_table_t wherein we store our accumulated
+ * Vary values, and the one used for per-condition checks in a chain.
+ */
+#define VARY_KEY "rewrite-Vary"
+#define VARY_KEY_THIS "rewrite-Vary-this"
+
+/*
+**
+** Some defines
+**
+*/
+
+#define ENVVAR_SCRIPT_URL "SCRIPT_URL"
+#define ENVVAR_SCRIPT_URI "SCRIPT_URI"
+
+#define REWRITE_FORCED_MIMETYPE_NOTEVAR "rewrite-forced-mimetype"
+
+#define CONDFLAG_NONE 1<<0
+#define CONDFLAG_NOCASE 1<<1
+#define CONDFLAG_NOTMATCH 1<<2
+#define CONDFLAG_ORNEXT 1<<3
+
+#define RULEFLAG_NONE 1<<0
+#define RULEFLAG_FORCEREDIRECT 1<<1
+#define RULEFLAG_LASTRULE 1<<2
+#define RULEFLAG_NEWROUND 1<<3
+#define RULEFLAG_CHAIN 1<<4
+#define RULEFLAG_IGNOREONSUBREQ 1<<5
+#define RULEFLAG_NOTMATCH 1<<6
+#define RULEFLAG_PROXY 1<<7
+#define RULEFLAG_PASSTHROUGH 1<<8
+#define RULEFLAG_FORBIDDEN 1<<9
+#define RULEFLAG_GONE 1<<10
+#define RULEFLAG_QSAPPEND 1<<11
+#define RULEFLAG_NOCASE 1<<12
+#define RULEFLAG_NOESCAPE 1<<13
+
+#define ACTION_NORMAL 1<<0
+#define ACTION_NOESCAPE 1<<1
+
+#define MAPTYPE_TXT 1<<0
+#define MAPTYPE_DBM 1<<1
+#define MAPTYPE_PRG 1<<2
+#define MAPTYPE_INT 1<<3
+#define MAPTYPE_RND 1<<4
+
+#define ENGINE_DISABLED 1<<0
+#define ENGINE_ENABLED 1<<1
+
+#define OPTION_NONE 1<<0
+#define OPTION_INHERIT 1<<1
+
+#define CACHEMODE_TS 1<<0
+#define CACHEMODE_TTL 1<<1
+
+#define CACHE_TLB_ROWS 1024
+#define CACHE_TLB_COLS 4
+
+#ifndef FALSE
+#define FALSE 0
+#define TRUE !FALSE
+#endif
+
+#ifndef NO
+#define NO FALSE
+#define YES TRUE
+#endif
+
+#ifndef RAND_MAX
+#define RAND_MAX 32767
+#endif
+
+#ifndef LONG_STRING_LEN
+#define LONG_STRING_LEN 2048
+#endif
+
+#define MAX_ENV_FLAGS 15
+#define MAX_COOKIE_FLAGS 15
+/*** max cookie size in rfc 2109 ***/
+#define MAX_COOKIE_LEN 4096
+
+/* default maximum number of internal redirects */
+#define REWRITE_REDIRECT_LIMIT 10
+
+
+/*
+**
+** our private data structures we handle with
+**
+*/
+
+ /* the list structures for holding the mapfile information
+ * and the rewrite rules
+ */
+typedef struct {
+ const char *name; /* the name of the map */
+ const char *datafile; /* filename for map data files */
+ const char *dbmtype; /* dbm type for dbm map data files */
+ const char *checkfile; /* filename to check for map existence */
+ int type; /* the type of the map */
+ apr_file_t *fpin; /* in file pointer for program maps */
+ apr_file_t *fpout; /* out file pointer for program maps */
+ apr_file_t *fperr; /* err file pointer for program maps */
+ char *(*func)(request_rec *, /* function pointer for internal maps */
+ char *);
+ char **argv;
+ char *cachename; /* name for the cache */
+} rewritemap_entry;
+
+typedef struct {
+ char *input; /* Input string of RewriteCond */
+ char *pattern; /* the RegExp pattern string */
+ regex_t *regexp;
+ int flags; /* Flags which control the match */
+} rewritecond_entry;
+
+typedef struct {
+ apr_array_header_t *rewriteconds; /* the corresponding RewriteCond entries */
+ char *pattern; /* the RegExp pattern string */
+ regex_t *regexp; /* the RegExp pattern compilation */
+ char *output; /* the Substitution string */
+ int flags; /* Flags which control the substitution */
+ char *forced_mimetype; /* forced MIME type of substitution */
+ int forced_responsecode; /* forced HTTP redirect response status */
+ char *env[MAX_ENV_FLAGS+1]; /* added environment variables */
+ char *cookie[MAX_COOKIE_FLAGS+1]; /* added cookies */
+ int skip; /* number of next rules to skip */
+} rewriterule_entry;
+
+
+ /* the per-server or per-virtual-server configuration
+ * statically generated once on startup for every server
+ */
+typedef struct {
+ int state; /* the RewriteEngine state */
+ int options; /* the RewriteOption state */
+ const char *rewritelogfile; /* the RewriteLog filename */
+ apr_file_t *rewritelogfp; /* the RewriteLog open filepointer */
+ int rewriteloglevel; /* the RewriteLog level of verbosity */
+ apr_array_header_t *rewritemaps; /* the RewriteMap entries */
+ apr_array_header_t *rewriteconds; /* the RewriteCond entries (temporary) */
+ apr_array_header_t *rewriterules; /* the RewriteRule entries */
+ server_rec *server; /* the corresponding server indicator */
+ int redirect_limit; /* maximum number of internal redirects */
+} rewrite_server_conf;
+
+
+ /* the per-directory configuration
+  * generated on-the-fly by the Apache server for the current request
+ */
+typedef struct {
+ int state; /* the RewriteEngine state */
+ int options; /* the RewriteOption state */
+ apr_array_header_t *rewriteconds; /* the RewriteCond entries (temporary) */
+ apr_array_header_t *rewriterules; /* the RewriteRule entries */
+ char *directory; /* the directory where it applies */
+ const char *baseurl; /* the base-URL where it applies */
+ int redirect_limit; /* maximum number of internal redirects */
+} rewrite_perdir_conf;
+
+
+ /* the per-request configuration
+ */
+typedef struct {
+ int redirects; /* current number of redirects */
+ int redirect_limit; /* maximum number of redirects */
+} rewrite_request_conf;
+
+
+ /* the cache structures:
+  * a 4-way set-associative hash table with LRU-style replacement
+  */
+typedef struct cacheentry {
+ apr_time_t time;
+ char *key;
+ char *value;
+} cacheentry;
+
+typedef struct tlbentry {
+ int t[CACHE_TLB_COLS];
+} cachetlbentry;
+
+typedef struct cachelist {
+ char *resource;
+ apr_array_header_t *entries;
+ apr_array_header_t *tlb;
+} cachelist;
+
+typedef struct cache {
+ apr_pool_t *pool;
+ apr_array_header_t *lists;
+#if APR_HAS_THREADS
+ apr_thread_mutex_t *lock;
+#endif
+} cache;
+
+
+ /* the regex structure for the
+ * substitution of backreferences
+ */
+typedef struct backrefinfo {
+ char *source;
+ int nsub;
+ regmatch_t regmatch[AP_MAX_REG_MATCH];
+} backrefinfo;
+
+
+/*
+**
+** forward declarations
+**
+*/
+
+ /* config structure handling */
+static void *config_server_create(apr_pool_t *p, server_rec *s);
+static void *config_server_merge (apr_pool_t *p, void *basev, void *overridesv);
+static void *config_perdir_create(apr_pool_t *p, char *path);
+static void *config_perdir_merge (apr_pool_t *p, void *basev, void *overridesv);
+
+ /* config directive handling */
+static const char *cmd_rewriteengine(cmd_parms *cmd,
+ void *dconf, int flag);
+static const char *cmd_rewriteoptions(cmd_parms *cmd,
+ void *dconf,
+ const char *option);
+static const char *cmd_rewritelog (cmd_parms *cmd, void *dconf, const char *a1);
+static const char *cmd_rewriteloglevel(cmd_parms *cmd, void *dconf, const char *a1);
+static const char *cmd_rewritemap (cmd_parms *cmd, void *dconf,
+ const char *a1, const char *a2);
+static const char *cmd_rewritelock(cmd_parms *cmd, void *dconf, const char *a1);
+static const char *cmd_rewritebase(cmd_parms *cmd, void *dconf,
+ const char *a1);
+static const char *cmd_rewritecond(cmd_parms *cmd, void *dconf,
+ const char *str);
+static const char *cmd_rewritecond_parseflagfield(apr_pool_t *p,
+ rewritecond_entry *new,
+ char *str);
+static const char *cmd_rewritecond_setflag(apr_pool_t *p, rewritecond_entry *cfg,
+ char *key, char *val);
+static const char *cmd_rewriterule(cmd_parms *cmd, void *dconf,
+ const char *str);
+static const char *cmd_rewriterule_parseflagfield(apr_pool_t *p,
+ rewriterule_entry *new,
+ char *str);
+static const char *cmd_rewriterule_setflag(apr_pool_t *p, rewriterule_entry *cfg,
+ char *key, char *val);
+
+ /* initialisation */
+static int pre_config(apr_pool_t *pconf,
+ apr_pool_t *plog,
+ apr_pool_t *ptemp);
+static int post_config(apr_pool_t *pconf,
+ apr_pool_t *plog,
+ apr_pool_t *ptemp,
+ server_rec *s);
+static void init_child(apr_pool_t *p, server_rec *s);
+
+ /* runtime hooks */
+static int hook_uri2file (request_rec *r);
+static int hook_mimetype (request_rec *r);
+static int hook_fixup (request_rec *r);
+static int handler_redirect(request_rec *r);
+
+ /* rewriting engine */
+static int apply_rewrite_list(request_rec *r, apr_array_header_t *rewriterules,
+ char *perdir);
+static int apply_rewrite_rule(request_rec *r, rewriterule_entry *p,
+ char *perdir);
+static int apply_rewrite_cond(request_rec *r, rewritecond_entry *p,
+ char *perdir, backrefinfo *briRR,
+ backrefinfo *briRC);
+
+static void do_expand(request_rec *r, char *input, char *buffer, int nbuf,
+ backrefinfo *briRR, backrefinfo *briRC);
+static void do_expand_env(request_rec *r, char *env[],
+ backrefinfo *briRR, backrefinfo *briRC);
+static void do_expand_cookie(request_rec *r, char *cookie[],
+ backrefinfo *briRR, backrefinfo *briRC);
+
+ /* URI transformation function */
+static void splitout_queryargs(request_rec *r, int qsappend);
+static void fully_qualify_uri(request_rec *r);
+static void reduce_uri(request_rec *r);
+static unsigned is_absolute_uri(char *uri);
+static char *escape_absolute_uri(apr_pool_t *p, char *uri, unsigned scheme);
+static char *expand_tildepaths(request_rec *r, char *uri);
+
+ /* rewrite map support functions */
+static char *lookup_map(request_rec *r, char *name, char *key);
+static char *lookup_map_txtfile(request_rec *r, const char *file, char *key);
+static char *lookup_map_dbmfile(request_rec *r, const char *file,
+ const char *dbmtype, char *key);
+static char *lookup_map_program(request_rec *r, apr_file_t *fpin,
+ apr_file_t *fpout, char *key);
+
+typedef char *(rewrite_mapfunc_t)(request_rec *r, char *key);
+static void ap_register_rewrite_mapfunc(char *name, rewrite_mapfunc_t *func);
+APR_DECLARE_OPTIONAL_FN(void, ap_register_rewrite_mapfunc,
+ (char *name, rewrite_mapfunc_t *func));
+
+static char *rewrite_mapfunc_toupper(request_rec *r, char *key);
+static char *rewrite_mapfunc_tolower(request_rec *r, char *key);
+static char *rewrite_mapfunc_escape(request_rec *r, char *key);
+static char *rewrite_mapfunc_unescape(request_rec *r, char *key);
+
+static char *select_random_value_part(request_rec *r, char *value);
+static void rewrite_rand_init(void);
+static int rewrite_rand(int l, int h);
+
+ /* rewriting logfile support */
+static int open_rewritelog(server_rec *s, apr_pool_t *p);
+static void rewritelog(request_rec *r, int level, const char *text, ...)
+ __attribute__((format(printf,3,4)));
+static char *current_logtime(request_rec *r);
+
+ /* rewriting lockfile support */
+static apr_status_t rewritelock_create(server_rec *s, apr_pool_t *p);
+static apr_status_t rewritelock_remove(void *data);
+
+ /* program map support */
+static apr_status_t run_rewritemap_programs(server_rec *s, apr_pool_t *p);
+static apr_status_t rewritemap_program_child(apr_pool_t *p,
+ const char *progname, char **argv,
+ apr_file_t **fpout,
+ apr_file_t **fpin);
+
+ /* env variable support */
+static char *lookup_variable(request_rec *r, char *var);
+static char *lookup_header(request_rec *r, const char *name);
+
+ /* caching functions */
+static cache *init_cache(apr_pool_t *p);
+static char *get_cache_string(cache *c, const char *res, int mode, apr_time_t mtime,
+ char *key);
+static void set_cache_string(cache *c, const char *res, int mode, apr_time_t mtime,
+ char *key, char *value);
+static cacheentry *retrieve_cache_string(cache *c, const char *res, char *key);
+static void store_cache_string(cache *c, const char *res, cacheentry *ce);
+
+ /* misc functions */
+static char *subst_prefix_path(request_rec *r, char *input, char *match,
+ const char *subst);
+static int parseargline(char *str, char **a1, char **a2, char **a3);
+static int prefix_stat(const char *path, apr_pool_t *pool);
+static void add_env_variable(request_rec *r, char *s);
+static void add_cookie(request_rec *r, char *s);
+static int subreq_ok(request_rec *r);
+static int is_redirect_limit_exceeded(request_rec *r);
+
+ /* Lexicographic Comparison */
+static int compare_lexicography(char *cpNum1, char *cpNum2);
+
+ /* Bracketed expression handling */
+static char *find_closing_bracket(char *s, int left, int right);
+static char *find_char_in_brackets(char *s, int c, int left, int right);
+
+#endif /* MOD_REWRITE_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_so.c b/rubbos/app/httpd-2.0.64/modules/mappers/mod_so.c
new file mode 100644
index 00000000..984cb549
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_so.c
@@ -0,0 +1,368 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This module is used to load Apache modules at runtime. This means that the
+ * server functionality can be extended without recompiling and even without
+ * taking the server down at all. Only a HUP or AP_SIG_GRACEFUL signal
+ * needs to be sent to the server to reload the dynamically loaded modules.
+ *
+ * To use, you'll first need to build your module as a shared library, then
+ * update your configuration (httpd.conf) to get the Apache core to load the
+ * module at start-up.
+ *
+ * The easiest way to build a module as a shared library is to use the
+ * `SharedModule' command in the Configuration file, instead of `AddModule'.
+ * You should also change the file extension from `.o' to `.so'. So, for
+ * example, to build the status module as a shared library edit Configuration
+ * and change
+ * AddModule modules/standard/mod_status.o
+ * to
+ * SharedModule modules/standard/mod_status.so
+ *
+ * Run Configure and make. Now Apache's httpd binary will _not_ include
+ * mod_status. Instead, a shared object called mod_status.so will be built in
+ * the modules/standard directory. You can build most of the modules as shared
+ * libraries like this.
+ *
+ * To use the shared module, move the .so file(s) into an appropriate
+ * directory. You might like to create a directory called "modules" under your
+ * server root for this (e.g. /usr/local/httpd/modules).
+ *
+ * Then edit your conf/httpd.conf file, and add LoadModule lines. For
+ * example
+ * LoadModule status_module modules/mod_status.so
+ *
+ * The first argument is the module's structure name (look at the end of the
+ * module source to find this). The second argument is the path to the module
+ * file, relative to the server root. Put these directives right at the top
+ * of your httpd.conf file.
+ *
+ * Now you can start Apache. A message will be logged at "debug" level to your
+ * error_log to confirm that the module(s) are loaded (use the "LogLevel debug"
+ * directive to get these log messages).
+ *
+ * If you edit the LoadModule directives while the server is live you can get
+ * Apache to re-load the modules by sending it a HUP or AP_SIG_GRACEFUL
+ * signal as normal. You can use this to dynamically change the capability
+ * of your server without bringing it down.
+ *
+ * Because currently there is only limited builtin support in the Configure
+ * script for creating the shared library files (`.so'), please consult your
+ * vendor's cc(1), ld(1) and dlopen(3) manpages to find out the appropriate
+ * compiler and linker flags and insert them manually into the Configuration
+ * file under CFLAGS_SHLIB, LDFLAGS_SHLIB and LDFLAGS_SHLIB_EXPORT.
+ *
+ * If you still have problems figuring out the flags, either try the paper
+ * http://developer.netscape.com/library/documentation/enterprise
+ * /unix/svrplug.htm#1013807
+ * or install a Perl 5 interpreter on your platform and then run the command
+ *
+ * $ perl -V:usedl -V:ccdlflags -V:cccdlflags -V:lddlflags
+ *
+ * This tells you what type of dynamic loading Perl 5 uses on your platform
+ * and which compiler and linker flags Perl 5 uses to create the shared object
+ * files.
+ *
+ * Another location where you can find useful hints is the `ltconfig' script
+ * of the GNU libtool 1.2 package. Search for your platform name inside the
+ * various "case" constructs.
+ *
+ */
+
+#include "apr.h"
+#include "apr_dso.h"
+#include "apr_strings.h"
+#include "apr_errno.h"
+
+#define CORE_PRIVATE
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "ap_config.h"
+
+module AP_MODULE_DECLARE_DATA so_module;
+
+
+/*
+ * Server configuration to keep track of actually
+ * loaded modules and the corresponding module name.
+ */
+
+typedef struct moduleinfo {
+ const char *name;
+ module *modp;
+} moduleinfo;
+
+typedef struct so_server_conf {
+ apr_array_header_t *loaded_modules;
+} so_server_conf;
+
+static void *so_sconf_create(apr_pool_t *p, server_rec *s)
+{
+ so_server_conf *soc;
+
+ soc = (so_server_conf *)apr_pcalloc(p, sizeof(so_server_conf));
+ soc->loaded_modules = apr_array_make(p, DYNAMIC_MODULE_LIMIT,
+ sizeof(moduleinfo));
+
+ return (void *)soc;
+}
+
+#ifndef NO_DLOPEN
+
+/*
+ * This is the cleanup for a loaded shared object. It unloads the module.
+ * This is called as a cleanup function from the core.
+ */
+
+static apr_status_t unload_module(void *data)
+{
+ moduleinfo *modi = (moduleinfo*)data;
+
+    /* only unload if the module information still exists */
+ if (modi->modp == NULL)
+ return APR_SUCCESS;
+
+ /* remove the module pointer from the core structure */
+ ap_remove_loaded_module(modi->modp);
+
+ /* destroy the module information */
+ modi->modp = NULL;
+ modi->name = NULL;
+ return APR_SUCCESS;
+}
+
+/*
+ * This is called for the directive LoadModule and actually loads
+ * a shared object file into the address space of the server process.
+ */
+
+static const char *load_module(cmd_parms *cmd, void *dummy,
+ const char *modname, const char *filename)
+{
+ apr_dso_handle_t *modhandle;
+ apr_dso_handle_sym_t modsym;
+ module *modp;
+ const char *szModuleFile = ap_server_root_relative(cmd->pool, filename);
+ so_server_conf *sconf;
+ moduleinfo *modi;
+ moduleinfo *modie;
+ int i;
+
+    /* we need to set up this value for dummy to make sure that we don't try
+     * to add a non-existent tree into the build when we return to
+ * execute_now.
+ */
+ *(ap_directive_t **)dummy = NULL;
+
+ if (!szModuleFile) {
+ return apr_pstrcat(cmd->pool, "Invalid LoadModule path ",
+ filename, NULL);
+ }
+
+ /*
+ * check for already existing module
+ * If it already exists, we have nothing to do
+ * Check both dynamically-loaded modules and statically-linked modules.
+ */
+ sconf = (so_server_conf *)ap_get_module_config(cmd->server->module_config,
+ &so_module);
+ modie = (moduleinfo *)sconf->loaded_modules->elts;
+ for (i = 0; i < sconf->loaded_modules->nelts; i++) {
+ modi = &modie[i];
+ if (modi->name != NULL && strcmp(modi->name, modname) == 0) {
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0,
+ cmd->pool, "module %s is already loaded, skipping",
+ modname);
+ return NULL;
+ }
+ }
+
+ for (i = 0; ap_preloaded_modules[i]; i++) {
+ const char *preload_name;
+ apr_size_t preload_len;
+ apr_size_t thismod_len;
+
+ modp = ap_preloaded_modules[i];
+
+ /* make sure we're comparing apples with apples
+ * make sure name of preloaded module is mod_FOO.c
+ * make sure name of structure being loaded is FOO_module
+ */
+
+ if (memcmp(modp->name, "mod_", 4)) {
+ continue;
+ }
+
+ preload_name = modp->name + strlen("mod_");
+ preload_len = strlen(preload_name) - 2;
+
+ if (strlen(modname) <= strlen("_module")) {
+ continue;
+ }
+ thismod_len = strlen(modname) - strlen("_module");
+ if (strcmp(modname + thismod_len, "_module")) {
+ continue;
+ }
+
+ if (thismod_len != preload_len) {
+ continue;
+ }
+
+ if (!memcmp(modname, preload_name, preload_len)) {
+ return apr_pstrcat(cmd->pool, "module ", modname,
+ " is built-in and can't be loaded",
+ NULL);
+ }
+ }
+
+ modi = apr_array_push(sconf->loaded_modules);
+ modi->name = modname;
+
+ /*
+ * Load the file into the Apache address space
+ */
+ if (apr_dso_load(&modhandle, szModuleFile, cmd->pool) != APR_SUCCESS) {
+ char my_error[256];
+
+ return apr_pstrcat(cmd->pool, "Cannot load ", szModuleFile,
+ " into server: ",
+ apr_dso_error(modhandle, my_error, sizeof(my_error)),
+ NULL);
+ }
+ ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, cmd->pool,
+ "loaded module %s", modname);
+
+ /*
+     * Retrieve the pointer to the module structure by looking up the
+     * module's structure name as a plain symbol in the shared object.
+ */
+ if (apr_dso_sym(&modsym, modhandle, modname) != APR_SUCCESS) {
+ char my_error[256];
+
+ return apr_pstrcat(cmd->pool, "Can't locate API module structure `",
+ modname, "' in file ", szModuleFile, ": ",
+ apr_dso_error(modhandle, my_error, sizeof(my_error)),
+ NULL);
+ }
+ modp = (module*) modsym;
+ modp->dynamic_load_handle = (apr_dso_handle_t *)modhandle;
+ modi->modp = modp;
+
+ /*
+ * Make sure the found module structure is really a module structure
+ *
+ */
+ if (modp->magic != MODULE_MAGIC_COOKIE) {
+ return apr_psprintf(cmd->pool, "API module structure '%s' in file %s "
+ "is garbled - expected signature %08lx but saw "
+ "%08lx - perhaps this is not an Apache module DSO, "
+ "or was compiled for a different Apache version?",
+ modname, szModuleFile,
+ MODULE_MAGIC_COOKIE, modp->magic);
+ }
+
+ /*
+ * Add this module to the Apache core structures
+ */
+ ap_add_loaded_module(modp, cmd->pool);
+
+ /*
+ * Register a cleanup in the config apr_pool_t (normally pconf). When
+ * we do a restart (or shutdown) this cleanup will cause the
+ * shared object to be unloaded.
+ */
+ apr_pool_cleanup_register(cmd->pool, modi, unload_module, apr_pool_cleanup_null);
+
+ /*
+ * Finally we need to run the configuration process for the module
+ */
+ ap_single_module_configure(cmd->pool, cmd->server, modp);
+
+ return NULL;
+}
+
+/*
+ * This implements the LoadFile directive and loads an arbitrary
+ * shared object file into the address space of the server process.
+ */
+
+static const char *load_file(cmd_parms *cmd, void *dummy, const char *filename)
+{
+ apr_dso_handle_t *handle;
+ const char *file;
+
+ file = ap_server_root_relative(cmd->pool, filename);
+
+ if (!file) {
+ return apr_pstrcat(cmd->pool, "Invalid LoadFile path ",
+ filename, NULL);
+ }
+
+ if (apr_dso_load(&handle, file, cmd->pool) != APR_SUCCESS) {
+ char my_error[256];
+
+ return apr_pstrcat(cmd->pool, "Cannot load ", filename,
+ " into server: ",
+ apr_dso_error(handle, my_error, sizeof(my_error)),
+ NULL);
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL,
+ "loaded file %s", filename);
+
+ return NULL;
+}
+
+#else /* not NO_DLOPEN */
+
+static const char *load_file(cmd_parms *cmd, void *dummy, const char *filename)
+{
+ ap_log_perror(APLOG_MARK, APLOG_STARTUP, 0, cmd->pool,
+ "WARNING: LoadFile not supported on this platform");
+ return NULL;
+}
+
+static const char *load_module(cmd_parms *cmd, void *dummy,
+ const char *modname, const char *filename)
+{
+ ap_log_perror(APLOG_MARK, APLOG_STARTUP, 0, cmd->pool,
+ "WARNING: LoadModule not supported on this platform");
+ return NULL;
+}
+
+#endif /* NO_DLOPEN */
+
+static const command_rec so_cmds[] = {
+ AP_INIT_TAKE2("LoadModule", load_module, NULL, RSRC_CONF | EXEC_ON_READ,
+ "a module name and the name of a shared object file to load it from"),
+ AP_INIT_ITERATE("LoadFile", load_file, NULL, RSRC_CONF | EXEC_ON_READ,
+ "shared object file or library to load into the server at runtime"),
+ { NULL }
+};
+
+module AP_MODULE_DECLARE_DATA so_module = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-dir config */
+ NULL, /* merge per-dir config */
+ so_sconf_create, /* server config */
+ NULL, /* merge server config */
+ so_cmds, /* command apr_table_t */
+ NULL /* register hooks */
+};
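load_module() above resolves the module structure by name with apr_dso_sym() and, for its built-in collision check, assumes the convention that a module compiled from mod_FOO.c exports a structure named FOO_module. The following minimal sketch is a hypothetical DSO-loadable module (mod_example.c, loaded with "LoadModule example_module modules/mod_example.so") that satisfies those conventions; it is not part of the patch.

#define APR_WANT_STRFUNC
#include "apr_want.h"
#include "httpd.h"
#include "http_config.h"
#include "http_protocol.h"

static int example_handler(request_rec *r)
{
    if (strcmp(r->handler, "example") != 0) {
        return DECLINED;
    }
    r->content_type = "text/plain";
    if (!r->header_only) {
        ap_rputs("mod_example was loaded at runtime via LoadModule\n", r);
    }
    return OK;
}

static void example_register_hooks(apr_pool_t *p)
{
    ap_hook_handler(example_handler, NULL, NULL, APR_HOOK_MIDDLE);
}

/* "example_module" is the symbol load_module() looks up with apr_dso_sym();
 * MODULE_MAGIC_COOKIE (set by STANDARD20_MODULE_STUFF) is what it verifies
 * afterwards to confirm this really is an Apache module structure. */
module AP_MODULE_DECLARE_DATA example_module = {
    STANDARD20_MODULE_STUFF,
    NULL,                   /* create per-dir config */
    NULL,                   /* merge per-dir config */
    NULL,                   /* create server config */
    NULL,                   /* merge server config */
    NULL,                   /* command table */
    example_register_hooks  /* register hooks */
};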
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_so.la b/rubbos/app/httpd-2.0.64/modules/mappers/mod_so.la
new file mode 100644
index 00000000..1c9e8489
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_so.la
@@ -0,0 +1,35 @@
+# mod_so.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_so.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_so.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_so.lo b/rubbos/app/httpd-2.0.64/modules/mappers/mod_so.lo
new file mode 100644
index 00000000..917d7775
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_so.lo
@@ -0,0 +1,12 @@
+# mod_so.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_so.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_so.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_so.o b/rubbos/app/httpd-2.0.64/modules/mappers/mod_so.o
new file mode 100644
index 00000000..71d442fa
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_so.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_speling.c b/rubbos/app/httpd-2.0.64/modules/mappers/mod_speling.c
new file mode 100644
index 00000000..9520cbee
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_speling.c
@@ -0,0 +1,532 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr.h"
+#include "apr_file_io.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#define WANT_BASENAME_MATCH
+
+#include "httpd.h"
+#include "http_core.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_log.h"
+
+/* mod_speling.c - by Alexei Kosut <akosut@organic.com> June, 1996
+ *
+ * This module is transparent, and simple. It attempts to correct
+ * misspellings of URLs that users might have entered, namely by checking
+ * capitalizations. If it finds a match, it sends a redirect.
+ *
+ * 08-Aug-1997 <Martin.Kraemer@Mch.SNI.De>
+ * o Upgraded module interface to apache_1.3a2-dev API (more NULL's in
+ * speling_module).
+ * o Integrated tcsh's "spelling correction" routine which allows one
+ * misspelling (character insertion/omission/typo/transposition).
+ * Rewrote it to ignore case as well. This ought to catch the majority
+ * of misspelled requests.
+ * o Commented out the second pass where files' suffixes are stripped.
+ * Given the better hit rate of the first pass, this rather ugly
+ * (request index.html, receive index.db ?!?!) solution can be
+ * omitted.
+ * o wrote a "kind of" html page for mod_speling
+ *
+ * Activate it with "CheckSpelling On"
+ */
+
+module AP_MODULE_DECLARE_DATA speling_module;
+
+typedef struct {
+ int enabled;
+} spconfig;
+
+/*
+ * Create a configuration specific to this module for a server or directory
+ * location, and fill it with the default settings.
+ *
+ * The API says that in the absence of a merge function, the record for the
+ * closest ancestor is used exclusively. That's what we want, so we don't
+ * bother to have such a function.
+ */
+
+static void *mkconfig(apr_pool_t *p)
+{
+ spconfig *cfg = apr_pcalloc(p, sizeof(spconfig));
+
+ cfg->enabled = 0;
+ return cfg;
+}
+
+/*
+ * Respond to a callback to create configuration record for a server or
+ * vhost environment.
+ */
+static void *create_mconfig_for_server(apr_pool_t *p, server_rec *s)
+{
+ return mkconfig(p);
+}
+
+/*
+ * Respond to a callback to create a config record for a specific directory.
+ */
+static void *create_mconfig_for_directory(apr_pool_t *p, char *dir)
+{
+ return mkconfig(p);
+}
+
+/*
+ * Handler for the CheckSpelling directive, which is FLAG.
+ */
+static const char *set_speling(cmd_parms *cmd, void *mconfig, int arg)
+{
+ spconfig *cfg = (spconfig *) mconfig;
+
+ cfg->enabled = arg;
+ return NULL;
+}
+
+/*
+ * Define the directives specific to this module. This structure is referenced
+ * later by the 'module' structure.
+ */
+static const command_rec speling_cmds[] =
+{
+ AP_INIT_FLAG("CheckSpelling", set_speling, NULL, OR_OPTIONS,
+ "whether or not to fix miscapitalized/misspelled requests"),
+ { NULL }
+};
+
+typedef enum {
+ SP_IDENTICAL = 0,
+ SP_MISCAPITALIZED = 1,
+ SP_TRANSPOSITION = 2,
+ SP_MISSINGCHAR = 3,
+ SP_EXTRACHAR = 4,
+ SP_SIMPLETYPO = 5,
+ SP_VERYDIFFERENT = 6
+} sp_reason;
+
+static const char *sp_reason_str[] =
+{
+ "identical",
+ "miscapitalized",
+ "transposed characters",
+ "character missing",
+ "extra character",
+ "mistyped character",
+ "common basename",
+};
+
+typedef struct {
+ const char *name;
+ sp_reason quality;
+} misspelled_file;
+
+/*
+ * spdist() is taken from Kernighan & Pike,
+ * _The_UNIX_Programming_Environment_
+ * and adapted somewhat to correspond better to psychological reality.
+ * (Note the changes to the return values)
+ *
+ * According to Pollock and Zamora, CACM April 1984 (V. 27, No. 4),
+ * page 363, the correct order for this is:
+ * OMISSION = TRANSPOSITION > INSERTION > SUBSTITUTION
+ * thus, it was exactly backwards in the old version. -- PWP
+ *
+ * This routine was taken out of tcsh's spelling correction code
+ * (tcsh-6.07.04) and re-converted to apache data types ("char" type
+ * instead of tcsh's NLS'ed "Char"). Plus it now ignores the case
+ * during comparisons, so it is an "approximate strcasecmp()".
+ * NOTE that it still allows only _one_ real "typo";
+ * it does NOT try to correct multiple errors.
+ */
+
+static sp_reason spdist(const char *s, const char *t)
+{
+ for (; apr_tolower(*s) == apr_tolower(*t); t++, s++) {
+ if (*t == '\0') {
+ return SP_MISCAPITALIZED; /* exact match (sans case) */
+ }
+ }
+ if (*s) {
+ if (*t) {
+ if (s[1] && t[1] && apr_tolower(*s) == apr_tolower(t[1])
+ && apr_tolower(*t) == apr_tolower(s[1])
+ && strcasecmp(s + 2, t + 2) == 0) {
+ return SP_TRANSPOSITION; /* transposition */
+ }
+ if (strcasecmp(s + 1, t + 1) == 0) {
+ return SP_SIMPLETYPO; /* 1 char mismatch */
+ }
+ }
+ if (strcasecmp(s + 1, t) == 0) {
+ return SP_EXTRACHAR; /* extra character */
+ }
+ }
+ if (*t && strcasecmp(s, t + 1) == 0) {
+ return SP_MISSINGCHAR; /* missing character */
+ }
+ return SP_VERYDIFFERENT; /* distance too large to fix. */
+}
+
+static int sort_by_quality(const void *left, const void *rite)
+{
+ return (int) (((misspelled_file *) left)->quality)
+ - (int) (((misspelled_file *) rite)->quality);
+}
+
+static int check_speling(request_rec *r)
+{
+ spconfig *cfg;
+ char *good, *bad, *postgood, *url;
+ apr_finfo_t dirent;
+ int filoc, dotloc, urlen, pglen;
+ apr_array_header_t *candidates = NULL;
+ apr_dir_t *dir;
+
+ cfg = ap_get_module_config(r->per_dir_config, &speling_module);
+ if (!cfg->enabled) {
+ return DECLINED;
+ }
+
+ /* We only want to worry about GETs */
+ if (r->method_number != M_GET) {
+ return DECLINED;
+ }
+
+ /* We've already got a file of some kind or another */
+ if (r->finfo.filetype != 0) {
+ return DECLINED;
+ }
+
+ /* Not a file request */
+ if (r->proxyreq || !r->filename) {
+ return DECLINED;
+ }
+
+ /* This is a sub request - don't mess with it */
+ if (r->main) {
+ return DECLINED;
+ }
+
+ /*
+ * The request should end up looking like this:
+ * r->uri: /correct-url/mispelling/more
+ * r->filename: /correct-file/mispelling r->path_info: /more
+ *
+ * So we do this in steps. First break r->filename into two pieces
+ */
+
+ filoc = ap_rind(r->filename, '/');
+ /*
+ * Don't do anything if the request doesn't contain a slash, or
+ * requests "/"
+ */
+ if (filoc == -1 || strcmp(r->uri, "/") == 0) {
+ return DECLINED;
+ }
+
+ /* good = /correct-file */
+ good = apr_pstrndup(r->pool, r->filename, filoc);
+ /* bad = mispelling */
+ bad = apr_pstrdup(r->pool, r->filename + filoc + 1);
+ /* postgood = mispelling/more */
+ postgood = apr_pstrcat(r->pool, bad, r->path_info, NULL);
+
+ urlen = strlen(r->uri);
+ pglen = strlen(postgood);
+
+ /* Check to see if the URL pieces add up */
+ if (strcmp(postgood, r->uri + (urlen - pglen))) {
+ return DECLINED;
+ }
+
+ /* url = /correct-url */
+ url = apr_pstrndup(r->pool, r->uri, (urlen - pglen));
+
+ /* Now open the directory and do ourselves a check... */
+ if (apr_dir_open(&dir, good, r->pool) != APR_SUCCESS) {
+ /* Oops, not a directory... */
+ return DECLINED;
+ }
+
+ candidates = apr_array_make(r->pool, 2, sizeof(misspelled_file));
+
+ dotloc = ap_ind(bad, '.');
+ if (dotloc == -1) {
+ dotloc = strlen(bad);
+ }
+
+ while (apr_dir_read(&dirent, APR_FINFO_DIRENT, dir) == APR_SUCCESS) {
+ sp_reason q;
+
+ /*
+ * If we end up with a "fixed" URL which is identical to the
+ * requested one, we must have found a broken symlink or some such.
+ * Do _not_ try to redirect this, it causes a loop!
+ */
+ if (strcmp(bad, dirent.name) == 0) {
+ apr_dir_close(dir);
+ return OK;
+ }
+
+ /*
+ * miscapitalization errors are checked first (like, e.g., lower case
+ * file, upper case request)
+ */
+ else if (strcasecmp(bad, dirent.name) == 0) {
+ misspelled_file *sp_new;
+
+ sp_new = (misspelled_file *) apr_array_push(candidates);
+ sp_new->name = apr_pstrdup(r->pool, dirent.name);
+ sp_new->quality = SP_MISCAPITALIZED;
+ }
+
+ /*
+ * simple typing errors are checked next (like, e.g.,
+ * missing/extra/transposed char)
+ */
+ else if ((q = spdist(bad, dirent.name)) != SP_VERYDIFFERENT) {
+ misspelled_file *sp_new;
+
+ sp_new = (misspelled_file *) apr_array_push(candidates);
+ sp_new->name = apr_pstrdup(r->pool, dirent.name);
+ sp_new->quality = q;
+ }
+
+ /*
+ * The spdist() should have found the majority of the misspelled
+ * requests. It is of questionable use to continue looking for
+ * files with the same base name, but potentially of totally wrong
+ * type (index.html <-> index.db).
+ * I would propose to not set the WANT_BASENAME_MATCH define.
+ * 08-Aug-1997 <Martin.Kraemer@Mch.SNI.De>
+ *
+ * However, Alexei replied giving some reasons to add it anyway:
+ * > Oh, by the way, I remembered why having the
+ * > extension-stripping-and-matching stuff is a good idea:
+ * >
+ * > If you're using MultiViews, and have a file named foobar.html,
+ * > which you refer to as "foobar", and someone tried to access
+ * > "Foobar", mod_speling won't find it, because it won't find
+ * > anything matching that spelling. With the extension-munging,
+ * > it would locate "foobar.html". Not perfect, but I ran into
+ * > that problem when I first wrote the module.
+ */
+ else {
+#ifdef WANT_BASENAME_MATCH
+ /*
+ * Okay... we didn't find anything. Now we take out the hard-core
+ * power tools. There are several cases here. Someone might have
+ * entered a wrong extension (.htm instead of .html or vice
+ * versa) or the document could be negotiated. At any rate, now
+ * we just compare stuff before the first dot. If it matches, we
+ * figure we got us a match. This can result in wrong things if
+ * there are files of different content types but the same prefix
+ * (e.g. foo.gif and foo.html) This code will pick the first one
+ * it finds. Better than a Not Found, though.
+ */
+ int entloc = ap_ind(dirent.name, '.');
+ if (entloc == -1) {
+ entloc = strlen(dirent.name);
+ }
+
+ if ((dotloc == entloc)
+ && !strncasecmp(bad, dirent.name, dotloc)) {
+ misspelled_file *sp_new;
+
+ sp_new = (misspelled_file *) apr_array_push(candidates);
+ sp_new->name = apr_pstrdup(r->pool, dirent.name);
+ sp_new->quality = SP_VERYDIFFERENT;
+ }
+#endif
+ }
+ }
+ apr_dir_close(dir);
+
+ if (candidates->nelts != 0) {
+ /* Wow... we found us a mispelling. Construct a fixed url */
+ char *nuri;
+ const char *ref;
+ misspelled_file *variant = (misspelled_file *) candidates->elts;
+ int i;
+
+ ref = apr_table_get(r->headers_in, "Referer");
+
+ qsort((void *) candidates->elts, candidates->nelts,
+ sizeof(misspelled_file), sort_by_quality);
+
+ /*
+ * Conditions for immediate redirection:
+ * a) the first candidate was not found by stripping the suffix
+ * AND b) there exists only one candidate OR the best match is not
+ * ambiguous
+ * then return a redirection right away.
+ */
+ if (variant[0].quality != SP_VERYDIFFERENT
+ && (candidates->nelts == 1
+ || variant[0].quality != variant[1].quality)) {
+
+ nuri = ap_escape_uri(r->pool, apr_pstrcat(r->pool, url,
+ variant[0].name,
+ r->path_info, NULL));
+ if (r->parsed_uri.query)
+ nuri = apr_pstrcat(r->pool, nuri, "?", r->parsed_uri.query, NULL);
+
+ apr_table_setn(r->headers_out, "Location",
+ ap_construct_url(r->pool, nuri, r));
+
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, APR_SUCCESS,
+ r,
+ ref ? "Fixed spelling: %s to %s from %s"
+ : "Fixed spelling: %s to %s",
+ r->uri, nuri, ref);
+
+ return HTTP_MOVED_PERMANENTLY;
+ }
+ /*
+ * Otherwise, a "[300] Multiple Choices" list with the variants is
+ * returned.
+ */
+ else {
+ apr_pool_t *p;
+ apr_table_t *notes;
+ apr_pool_t *sub_pool;
+ apr_array_header_t *t;
+ apr_array_header_t *v;
+
+
+ if (r->main == NULL) {
+ p = r->pool;
+ notes = r->notes;
+ }
+ else {
+ p = r->main->pool;
+ notes = r->main->notes;
+ }
+
+ if (apr_pool_create(&sub_pool, p) != APR_SUCCESS)
+ return DECLINED;
+
+ t = apr_array_make(sub_pool, candidates->nelts * 8 + 8,
+ sizeof(char *));
+ v = apr_array_make(sub_pool, candidates->nelts * 5,
+ sizeof(char *));
+
+ /* Generate the response text. */
+
+ *(const char **)apr_array_push(t) =
+ "The document name you requested (<code>";
+ *(const char **)apr_array_push(t) = ap_escape_html(sub_pool, r->uri);
+ *(const char **)apr_array_push(t) =
+ "</code>) could not be found on this server.\n"
+ "However, we found documents with names similar "
+ "to the one you requested.<p>"
+ "Available documents:\n<ul>\n";
+
+ for (i = 0; i < candidates->nelts; ++i) {
+ char *vuri;
+ const char *reason;
+
+ reason = sp_reason_str[(int) (variant[i].quality)];
+ /* The format isn't very neat... */
+ vuri = apr_pstrcat(sub_pool, url, variant[i].name, r->path_info,
+ (r->parsed_uri.query != NULL) ? "?" : "",
+ (r->parsed_uri.query != NULL)
+ ? r->parsed_uri.query : "",
+ NULL);
+ *(const char **)apr_array_push(v) = "\"";
+ *(const char **)apr_array_push(v) = ap_escape_uri(sub_pool, vuri);
+ *(const char **)apr_array_push(v) = "\";\"";
+ *(const char **)apr_array_push(v) = reason;
+ *(const char **)apr_array_push(v) = "\"";
+
+ *(const char **)apr_array_push(t) = "<li><a href=\"";
+ *(const char **)apr_array_push(t) = ap_escape_uri(sub_pool, vuri);
+ *(const char **)apr_array_push(t) = "\">";
+ *(const char **)apr_array_push(t) = ap_escape_html(sub_pool, vuri);
+ *(const char **)apr_array_push(t) = "</a> (";
+ *(const char **)apr_array_push(t) = reason;
+ *(const char **)apr_array_push(t) = ")\n";
+
+ /*
+ * when we have printed the "close matches" and there are
+ * more "distant matches" (matched by stripping the suffix),
+ * then we insert an additional separator text to suggest
+ * that the user LOOK CLOSELY whether these are really the
+ * files she wanted.
+ */
+ if (i > 0 && i < candidates->nelts - 1
+ && variant[i].quality != SP_VERYDIFFERENT
+ && variant[i + 1].quality == SP_VERYDIFFERENT) {
+ *(const char **)apr_array_push(t) =
+ "</ul>\nFurthermore, the following related "
+ "documents were found:\n<ul>\n";
+ }
+ }
+ *(const char **)apr_array_push(t) = "</ul>\n";
+
+ /* If we know there was a referring page, add a note: */
+ if (ref != NULL) {
+ *(const char **)apr_array_push(t) =
+ "Please consider informing the owner of the "
+ "<a href=\"";
+ *(const char **)apr_array_push(t) = ap_escape_uri(sub_pool, ref);
+ *(const char **)apr_array_push(t) = "\">referring page</a> "
+ "about the broken link.\n";
+ }
+
+
+ /* Pass our apr_table_t to http_protocol.c (see mod_negotiation): */
+ apr_table_setn(notes, "variant-list", apr_array_pstrcat(p, t, 0));
+
+ apr_table_mergen(r->subprocess_env, "VARIANTS",
+ apr_array_pstrcat(p, v, ','));
+
+ apr_pool_destroy(sub_pool);
+
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
+ ref ? "Spelling fix: %s: %d candidates from %s"
+ : "Spelling fix: %s: %d candidates",
+ r->uri, candidates->nelts, ref);
+
+ return HTTP_MULTIPLE_CHOICES;
+ }
+ }
+
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_fixups(check_speling,NULL,NULL,APR_HOOK_LAST);
+}
+
+module AP_MODULE_DECLARE_DATA speling_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_mconfig_for_directory, /* create per-dir config */
+ NULL, /* merge per-dir config */
+ create_mconfig_for_server, /* server config */
+ NULL, /* merge server config */
+ speling_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
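check_speling() relies on spdist() above to classify each directory entry against the requested name, allowing at most one typo (case difference, transposition, missing, extra or mistyped character). The standalone harness below restates that decision order with plain libc calls instead of APR, purely for illustration; the enum names and sample inputs are hypothetical and nothing here is part of the patch.

#include <ctype.h>
#include <stdio.h>
#include <strings.h>            /* strcasecmp() */

enum sp_class { MISCAPITALIZED, TRANSPOSITION, MISSINGCHAR, EXTRACHAR,
                SIMPLETYPO, VERYDIFFERENT };

/* Same decision order as spdist(): exact-but-for-case, transposition,
 * one mistyped char, one extra char in the request, one missing char. */
static enum sp_class classify(const char *s, const char *t)
{
    for (; tolower((unsigned char)*s) == tolower((unsigned char)*t); t++, s++) {
        if (*t == '\0') {
            return MISCAPITALIZED;
        }
    }
    if (*s) {
        if (*t) {
            if (s[1] && t[1]
                && tolower((unsigned char)*s) == tolower((unsigned char)t[1])
                && tolower((unsigned char)*t) == tolower((unsigned char)s[1])
                && strcasecmp(s + 2, t + 2) == 0) {
                return TRANSPOSITION;
            }
            if (strcasecmp(s + 1, t + 1) == 0) {
                return SIMPLETYPO;
            }
        }
        if (strcasecmp(s + 1, t) == 0) {
            return EXTRACHAR;
        }
    }
    if (*t && strcasecmp(s, t + 1) == 0) {
        return MISSINGCHAR;
    }
    return VERYDIFFERENT;
}

int main(void)
{
    printf("%d\n", classify("INDEX.html", "index.html"));  /* MISCAPITALIZED */
    printf("%d\n", classify("idnex.html", "index.html"));  /* TRANSPOSITION  */
    printf("%d\n", classify("indx.html",  "index.html"));  /* MISSINGCHAR    */
    printf("%d\n", classify("readme.txt", "index.html"));  /* VERYDIFFERENT  */
    return 0;
}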
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_speling.dsp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_speling.dsp
new file mode 100644
index 00000000..46626b0c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_speling.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_speling" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_speling - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_speling.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_speling.mak" CFG="mod_speling - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_speling - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_speling - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_speling - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_speling_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_speling.so" /base:@..\..\os\win32\BaseAddr.ref,mod_speling.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_speling.so" /base:@..\..\os\win32\BaseAddr.ref,mod_speling.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_speling - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_speling_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_speling.so" /base:@..\..\os\win32\BaseAddr.ref,mod_speling.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_speling.so" /base:@..\..\os\win32\BaseAddr.ref,mod_speling.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_speling - Win32 Release"
+# Name "mod_speling - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_speling.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_speling.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_speling - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_speling.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_speling.so "speling_module for Apache" ../../include/ap_release.h > .\mod_speling.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_speling - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_speling.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_speling.so "speling_module for Apache" ../../include/ap_release.h > .\mod_speling.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_speling.exp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_speling.exp
new file mode 100644
index 00000000..a6ee8b50
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_speling.exp
@@ -0,0 +1 @@
+speling_module
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.c b/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.c
new file mode 100644
index 00000000..b071b1a7
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.c
@@ -0,0 +1,366 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_userdir... implement the UserDir command. Broken away from the
+ * Alias stuff for a couple of good and not-so-good reasons:
+ *
+ * 1) It shows a real minimal working example of how to do something like
+ * this.
+ * 2) I know people who are actually interested in changing this *particular*
+ * aspect of server functionality without changing the rest of it. That's
+ * what this whole modular arrangement is supposed to be good at...
+ *
+ * Modified by Alexei Kosut to support the following constructs
+ * (server running at www.foo.com, request for /~bar/one/two.html)
+ *
+ * UserDir public_html -> ~bar/public_html/one/two.html
+ * UserDir /usr/web -> /usr/web/bar/one/two.html
+ * UserDir /home/ * /www -> /home/bar/www/one/two.html
+ * NOTE: these ^ ^ spaces were only added so this works inside a comment; ignore them
+ * UserDir http://x/users -> (302) http://x/users/bar/one/two.html
+ * UserDir http://x/ * /y -> (302) http://x/bar/y/one/two.html
+ * NOTE: here also ^ ^
+ *
+ * In addition, you can use multiple entries, to specify alternate
+ * user directories (a la Directory Index). For example:
+ *
+ * UserDir public_html /usr/web http://www.xyz.com/users
+ *
+ * Modified by Ken Coar to provide for the following:
+ *
+ * UserDir disable[d] username ...
+ * UserDir enable[d] username ...
+ *
+ * If "disabled" has no other arguments, *all* ~<username> references are
+ * disabled, except those explicitly turned on with the "enabled" keyword.
+ */
+
+#include "apr_strings.h"
+#include "apr_user.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+
+#if !defined(WIN32) && !defined(OS2) && !defined(BEOS) && !defined(NETWARE)
+#define HAVE_UNIX_SUEXEC
+#endif
+
+#ifdef HAVE_UNIX_SUEXEC
+#include "unixd.h" /* Contains the suexec_identity hook used on Unix */
+#endif
+
+
+/* The default directory in user's home dir */
+#ifndef DEFAULT_USER_DIR
+#define DEFAULT_USER_DIR "public_html"
+#endif
+
+module AP_MODULE_DECLARE_DATA userdir_module;
+
+typedef struct {
+ int globally_disabled;
+ char *userdir;
+ apr_table_t *enabled_users;
+ apr_table_t *disabled_users;
+} userdir_config;
+
+/*
+ * Server config for this module: global disablement flag, a list of usernames
+ * ineligible for UserDir access, a list of those immune to global (but not
+ * explicit) disablement, and the replacement string for all others.
+ */
+
+static void *create_userdir_config(apr_pool_t *p, server_rec *s)
+{
+ userdir_config *newcfg = apr_pcalloc(p, sizeof(*newcfg));
+
+ newcfg->globally_disabled = 0;
+ newcfg->userdir = DEFAULT_USER_DIR;
+ newcfg->enabled_users = apr_table_make(p, 4);
+ newcfg->disabled_users = apr_table_make(p, 4);
+
+ return newcfg;
+}
+
+#define O_DEFAULT 0
+#define O_ENABLE 1
+#define O_DISABLE 2
+
+static const char *set_user_dir(cmd_parms *cmd, void *dummy, const char *arg)
+{
+ userdir_config *s_cfg = ap_get_module_config(cmd->server->module_config,
+ &userdir_module);
+ char *username;
+ const char *usernames = arg;
+ char *kw = ap_getword_conf(cmd->pool, &usernames);
+ apr_table_t *usertable;
+
+    /* Since this directive takes raw arguments, it is possible for it to be called with
+ * zero arguments. So that we aren't ambiguous, flat out reject this.
+ */
+ if (*kw == '\0') {
+ return "UserDir requires an argument.";
+ }
+
+ /*
+ * Let's do the comparisons once.
+ */
+ if ((!strcasecmp(kw, "disable")) || (!strcasecmp(kw, "disabled"))) {
+ /*
+ * If there are no usernames specified, this is a global disable - we
+ * need do no more at this point than record the fact.
+ */
+ if (strlen(usernames) == 0) {
+ s_cfg->globally_disabled = 1;
+ return NULL;
+ }
+ usertable = s_cfg->disabled_users;
+ }
+ else if ((!strcasecmp(kw, "enable")) || (!strcasecmp(kw, "enabled"))) {
+ /*
+ * The "disable" keyword can stand alone or take a list of names, but
+ * the "enable" keyword requires the list. Whinge if it doesn't have
+ * it.
+ */
+ if (strlen(usernames) == 0) {
+ return "UserDir \"enable\" keyword requires a list of usernames";
+ }
+ usertable = s_cfg->enabled_users;
+ }
+ else {
+ /*
+ * If the first (only?) value isn't one of our keywords, just copy
+ * the string to the userdir string.
+ */
+ s_cfg->userdir = apr_pstrdup(cmd->pool, arg);
+ return NULL;
+ }
+ /*
+ * Now we just take each word in turn from the command line and add it to
+ * the appropriate table.
+ */
+ while (*usernames) {
+ username = ap_getword_conf(cmd->pool, &usernames);
+ apr_table_setn(usertable, username, kw);
+ }
+ return NULL;
+}
+
+static const command_rec userdir_cmds[] = {
+ AP_INIT_RAW_ARGS("UserDir", set_user_dir, NULL, RSRC_CONF,
+ "the public subdirectory in users' home directories, or "
+ "'disabled', or 'disabled username username...', or "
+ "'enabled username username...'"),
+ {NULL}
+};
+
+static int translate_userdir(request_rec *r)
+{
+ ap_conf_vector_t *server_conf;
+ const userdir_config *s_cfg;
+ char *name = r->uri;
+ const char *userdirs;
+ const char *w, *dname;
+ char *redirect;
+ apr_finfo_t statbuf;
+
+ /*
+ * If the URI doesn't match our basic pattern, we've nothing to do with
+ * it.
+ */
+ if (name[0] != '/' || name[1] != '~') {
+ return DECLINED;
+ }
+ server_conf = r->server->module_config;
+ s_cfg = ap_get_module_config(server_conf, &userdir_module);
+ userdirs = s_cfg->userdir;
+ if (userdirs == NULL) {
+ return DECLINED;
+ }
+
+ dname = name + 2;
+ w = ap_getword(r->pool, &dname, '/');
+
+ /*
+ * The 'dname' funny business involves backing it up to capture the '/'
+ * delimiting the "/~user" part from the rest of the URL, in case there
+ * was one (the case where there wasn't being just "GET /~user HTTP/1.0",
+     * for which we don't want to tack a '/' onto the filename).
+ */
+
+ if (dname[-1] == '/') {
+ --dname;
+ }
+
+ /*
+ * If there's no username, it's not for us. Ignore . and .. as well.
+ */
+ if (w[0] == '\0' || (w[1] == '.' && (w[2] == '\0' || (w[2] == '.' && w[3] == '\0')))) {
+ return DECLINED;
+ }
+ /*
+     * Nor if there's a username but it's in the disabled list.
+ */
+ if (apr_table_get(s_cfg->disabled_users, w) != NULL) {
+ return DECLINED;
+ }
+ /*
+ * If there's a global interdiction on UserDirs, check to see if this
+ * name is one of the Blessed.
+ */
+ if (s_cfg->globally_disabled
+ && apr_table_get(s_cfg->enabled_users, w) == NULL) {
+ return DECLINED;
+ }
+
+ /*
+ * Special cases all checked, onward to normal substitution processing.
+ */
+
+ while (*userdirs) {
+ const char *userdir = ap_getword_conf(r->pool, &userdirs);
+ char *filename = NULL, *x = NULL;
+ apr_status_t rv;
+ int is_absolute = ap_os_is_path_absolute(r->pool, userdir);
+
+ if (ap_strchr_c(userdir, '*'))
+ x = ap_getword(r->pool, &userdir, '*');
+
+ if (userdir[0] == '\0' || is_absolute) {
+ if (x) {
+#ifdef HAVE_DRIVE_LETTERS
+ /*
+ * Crummy hack. Need to figure out whether we have been
+ * redirected to a URL or to a file on some drive. Since I
+ * know of no protocols that are a single letter, ignore
+ * a : as the first or second character, and assume a file
+ * was specified
+ */
+ if (strchr(x + 2, ':'))
+#else
+ if (strchr(x, ':') && !is_absolute)
+#endif /* HAVE_DRIVE_LETTERS */
+ {
+ redirect = apr_pstrcat(r->pool, x, w, userdir, dname, NULL);
+ apr_table_setn(r->headers_out, "Location", redirect);
+ return HTTP_MOVED_TEMPORARILY;
+ }
+ else
+ filename = apr_pstrcat(r->pool, x, w, userdir, NULL);
+ }
+ else
+ filename = apr_pstrcat(r->pool, userdir, "/", w, NULL);
+ }
+ else if (x && ap_strchr_c(x, ':')) {
+ redirect = apr_pstrcat(r->pool, x, w, dname, NULL);
+ apr_table_setn(r->headers_out, "Location", redirect);
+ return HTTP_MOVED_TEMPORARILY;
+ }
+ else {
+#if APR_HAS_USER
+ char *homedir;
+
+ if (apr_get_home_directory(&homedir, w, r->pool) == APR_SUCCESS) {
+ filename = apr_pstrcat(r->pool, homedir, "/", userdir, NULL);
+ }
+#else
+ return DECLINED;
+#endif
+ }
+
+ /*
+ * Now see if it exists, or we're at the last entry. If we are at the
+ * last entry, then use the filename generated (if there is one)
+ * anyway, in the hope that some handler might handle it. This can be
+ * used, for example, to run a CGI script for the user.
+ */
+ if (filename && (!*userdirs
+ || ((rv = apr_stat(&statbuf, filename, APR_FINFO_MIN,
+ r->pool)) == APR_SUCCESS
+ || rv == APR_INCOMPLETE))) {
+ r->filename = apr_pstrcat(r->pool, filename, dname, NULL);
+ /* XXX: Does this walk us around FollowSymLink rules?
+ * When statbuf contains info on r->filename we can save a syscall
+ * by copying it to r->finfo
+ */
+ if (*userdirs && dname[0] == 0)
+ r->finfo = statbuf;
+
+ /* For use in the get_suexec_identity phase */
+ apr_table_setn(r->notes, "mod_userdir_user", w);
+
+ return OK;
+ }
+ }
+
+ return DECLINED;
+}
+
+#ifdef HAVE_UNIX_SUEXEC
+static ap_unix_identity_t *get_suexec_id_doer(const request_rec *r)
+{
+ ap_unix_identity_t *ugid = NULL;
+#if APR_HAS_USER
+ const char *username = apr_table_get(r->notes, "mod_userdir_user");
+
+ if (username == NULL) {
+ return NULL;
+ }
+
+ if ((ugid = apr_palloc(r->pool, sizeof(*ugid))) == NULL) {
+ return NULL;
+ }
+
+ if (apr_get_userid(&ugid->uid, &ugid->gid, username, r->pool) != APR_SUCCESS) {
+ return NULL;
+ }
+
+ ugid->userdir = 1;
+#endif
+ return ugid;
+}
+#endif /* HAVE_UNIX_SUEXEC */
+
+static void register_hooks(apr_pool_t *p)
+{
+ static const char * const aszPre[]={ "mod_alias.c",NULL };
+ static const char * const aszSucc[]={ "mod_vhost_alias.c",NULL };
+
+ ap_hook_translate_name(translate_userdir,aszPre,aszSucc,APR_HOOK_MIDDLE);
+#ifdef HAVE_UNIX_SUEXEC
+ ap_hook_get_suexec_identity(get_suexec_id_doer,NULL,NULL,APR_HOOK_FIRST);
+#endif
+}
+
+module AP_MODULE_DECLARE_DATA userdir_module = {
+ STANDARD20_MODULE_STUFF,
+    NULL,                        /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ create_userdir_config, /* server config */
+ NULL, /* merge server config */
+ userdir_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
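The APR_HAS_USER branch of translate_userdir() above resolves "~user" through apr_get_home_directory() and then appends the configured UserDir value. The sketch below isolates just that step in a standalone program using the same APR 0.9 call as the module; the hard-coded "public_html" default and the fallback user name "bar" are assumptions for illustration only.

#include <stdio.h>
#include "apr_general.h"
#include "apr_pools.h"
#include "apr_strings.h"
#include "apr_user.h"

int main(int argc, const char * const *argv)
{
    apr_pool_t *pool;
    char *homedir;
    const char *user = (argc > 1) ? argv[1] : "bar";

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* Mirrors the APR_HAS_USER branch: home directory + "/" + UserDir value. */
    if (apr_get_home_directory(&homedir, user, pool) == APR_SUCCESS) {
        printf("%s\n", apr_pstrcat(pool, homedir, "/", "public_html", NULL));
    }
    else {
        printf("no home directory found for user %s\n", user);
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}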
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.dsp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.dsp
new file mode 100644
index 00000000..04c078b4
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_userdir" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_userdir - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_userdir.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_userdir.mak" CFG="mod_userdir - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_userdir - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_userdir - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_userdir - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_userdir_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_userdir.so" /base:@..\..\os\win32\BaseAddr.ref,mod_userdir.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_userdir.so" /base:@..\..\os\win32\BaseAddr.ref,mod_userdir.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_userdir - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_userdir_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_userdir.so" /base:@..\..\os\win32\BaseAddr.ref,mod_userdir.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_userdir.so" /base:@..\..\os\win32\BaseAddr.ref,mod_userdir.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_userdir - Win32 Release"
+# Name "mod_userdir - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_userdir.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_userdir.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_userdir - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_userdir.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_userdir.so "userdir_module for Apache" ../../include/ap_release.h > .\mod_userdir.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_userdir - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_userdir.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_userdir.so "userdir_module for Apache" ../../include/ap_release.h > .\mod_userdir.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.exp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.exp
new file mode 100644
index 00000000..6b8b81d5
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.exp
@@ -0,0 +1 @@
+userdir_module
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.la b/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.la
new file mode 100644
index 00000000..2f7eeb0b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.la
@@ -0,0 +1,35 @@
+# mod_userdir.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_userdir.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_userdir.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.lo b/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.lo
new file mode 100644
index 00000000..445d354f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.lo
@@ -0,0 +1,12 @@
+# mod_userdir.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_userdir.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_userdir.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.o b/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.o
new file mode 100644
index 00000000..677c39d1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_userdir.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_vhost_alias.c b/rubbos/app/httpd-2.0.64/modules/mappers/mod_vhost_alias.c
new file mode 100644
index 00000000..cb798863
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_vhost_alias.c
@@ -0,0 +1,457 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_vhost_alias.c: support for dynamically configured mass virtual hosting
+ *
+ * Copyright (c) 1998-1999 Demon Internet Ltd.
+ *
+ * This software was submitted by Demon Internet to the Apache Software Foundation
+ * in May 1999. Future revisions and derivatives of this source code
+ * must acknowledge Demon Internet as the original contributor of
+ * this module. All other licensing and usage conditions are those
+ * of the Apache Software Foundation.
+ *
+ * Originally written by Tony Finch <fanf@demon.net> <dot@dotat.at>.
+ *
+ * Implementation ideas were taken from mod_alias.c. The overall
+ * concept is derived from the OVERRIDE_DOC_ROOT/OVERRIDE_CGIDIR
+ * patch to Apache 1.3b3 and a similar feature in Demon's thttpd,
+ * both written by James Grinter <jrg@blodwen.demon.co.uk>.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_hooks.h"
+#include "apr_lib.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_request.h" /* for ap_hook_translate_name */
+
+
+module AP_MODULE_DECLARE_DATA vhost_alias_module;
+
+
+/*
+ * basic configuration things
+ * we abbreviate "mod_vhost_alias" to "mva" for shorter names
+ */
+
+typedef enum {
+ VHOST_ALIAS_UNSET, VHOST_ALIAS_NONE, VHOST_ALIAS_NAME, VHOST_ALIAS_IP
+} mva_mode_e;
+
+/*
+ * Per-server module config record.
+ */
+typedef struct mva_sconf_t {
+ const char *doc_root;
+ const char *cgi_root;
+ mva_mode_e doc_root_mode;
+ mva_mode_e cgi_root_mode;
+} mva_sconf_t;
+
+static void *mva_create_server_config(apr_pool_t *p, server_rec *s)
+{
+ mva_sconf_t *conf;
+
+ conf = (mva_sconf_t *) apr_pcalloc(p, sizeof(mva_sconf_t));
+ conf->doc_root = NULL;
+ conf->cgi_root = NULL;
+ conf->doc_root_mode = VHOST_ALIAS_UNSET;
+ conf->cgi_root_mode = VHOST_ALIAS_UNSET;
+ return conf;
+}
+
+static void *mva_merge_server_config(apr_pool_t *p, void *parentv, void *childv)
+{
+ mva_sconf_t *parent = (mva_sconf_t *) parentv;
+ mva_sconf_t *child = (mva_sconf_t *) childv;
+ mva_sconf_t *conf;
+
+ conf = (mva_sconf_t *) apr_pcalloc(p, sizeof(*conf));
+ if (child->doc_root_mode == VHOST_ALIAS_UNSET) {
+ conf->doc_root_mode = parent->doc_root_mode;
+ conf->doc_root = parent->doc_root;
+ }
+ else {
+ conf->doc_root_mode = child->doc_root_mode;
+ conf->doc_root = child->doc_root;
+ }
+ if (child->cgi_root_mode == VHOST_ALIAS_UNSET) {
+ conf->cgi_root_mode = parent->cgi_root_mode;
+ conf->cgi_root = parent->cgi_root;
+ }
+ else {
+ conf->cgi_root_mode = child->cgi_root_mode;
+ conf->cgi_root = child->cgi_root;
+ }
+ return conf;
+}
+
+
+/*
+ * These are just here to tell us what vhost_alias_set should do.
+ * We don't put anything into them; we just use the cell addresses.
+ */
+static int vhost_alias_set_doc_root_ip,
+ vhost_alias_set_cgi_root_ip,
+ vhost_alias_set_doc_root_name,
+ vhost_alias_set_cgi_root_name;
+
+static const char *vhost_alias_set(cmd_parms *cmd, void *dummy, const char *map)
+{
+ mva_sconf_t *conf;
+ mva_mode_e mode, *pmode;
+ const char **pmap;
+ const char *p;
+
+ conf = (mva_sconf_t *) ap_get_module_config(cmd->server->module_config,
+ &vhost_alias_module);
+ /* there ought to be a better way of doing this */
+ if (&vhost_alias_set_doc_root_ip == cmd->info) {
+ mode = VHOST_ALIAS_IP;
+ pmap = &conf->doc_root;
+ pmode = &conf->doc_root_mode;
+ }
+ else if (&vhost_alias_set_cgi_root_ip == cmd->info) {
+ mode = VHOST_ALIAS_IP;
+ pmap = &conf->cgi_root;
+ pmode = &conf->cgi_root_mode;
+ }
+ else if (&vhost_alias_set_doc_root_name == cmd->info) {
+ mode = VHOST_ALIAS_NAME;
+ pmap = &conf->doc_root;
+ pmode = &conf->doc_root_mode;
+ }
+ else if (&vhost_alias_set_cgi_root_name == cmd->info) {
+ mode = VHOST_ALIAS_NAME;
+ pmap = &conf->cgi_root;
+ pmode = &conf->cgi_root_mode;
+ }
+ else {
+ return "INTERNAL ERROR: unknown command info";
+ }
+
+ if (!ap_os_is_path_absolute(cmd->pool, map)) {
+ if (strcasecmp(map, "none")) {
+ return "format string must be an absolute path, or 'none'";
+ }
+ *pmap = NULL;
+ *pmode = VHOST_ALIAS_NONE;
+ return NULL;
+ }
+
+ /* sanity check */
+ p = map;
+ while (*p != '\0') {
+ if (*p++ != '%') {
+ continue;
+ }
+ /* we just found a '%' */
+ if (*p == 'p' || *p == '%') {
+ ++p;
+ continue;
+ }
+ /* optional dash */
+ if (*p == '-') {
+ ++p;
+ }
+ /* digit N */
+ if (apr_isdigit(*p)) {
+ ++p;
+ }
+ else {
+ return "syntax error in format string";
+ }
+ /* optional plus */
+ if (*p == '+') {
+ ++p;
+ }
+ /* do we end here? */
+ if (*p != '.') {
+ continue;
+ }
+ ++p;
+ /* optional dash */
+ if (*p == '-') {
+ ++p;
+ }
+ /* digit M */
+ if (apr_isdigit(*p)) {
+ ++p;
+ }
+ else {
+ return "syntax error in format string";
+ }
+ /* optional plus */
+ if (*p == '+') {
+ ++p;
+ }
+ }
+ *pmap = map;
+ *pmode = mode;
+ return NULL;
+}
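/*
 * Sample arguments and how the checks above classify them; the paths are
 * made up for illustration, but each outcome follows directly from the
 * scan loop:
 *
 *   "/var/www/%0"      accepted - single digit N
 *   "/var/www/%-1"     accepted - optional '-' before the digit
 *   "/var/www/%2+"     accepted - optional '+' after the digit
 *   "/var/www/%2.3"    accepted - the N '.' M form
 *   "/var/www/%p"      accepted - %p (port) and %% need no digit
 *   "/var/www/%x"      rejected - "syntax error in format string"
 *   "www/%1"           rejected - neither an absolute path nor "none"
 *   "none"             accepted - map cleared, mode set to VHOST_ALIAS_NONE
 */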
+
+static const command_rec mva_commands[] =
+{
+ AP_INIT_TAKE1("VirtualScriptAlias", vhost_alias_set,
+ &vhost_alias_set_cgi_root_name, RSRC_CONF,
+ "how to create a ScriptAlias based on the host"),
+ AP_INIT_TAKE1("VirtualDocumentRoot", vhost_alias_set,
+ &vhost_alias_set_doc_root_name, RSRC_CONF,
+ "how to create the DocumentRoot based on the host"),
+ AP_INIT_TAKE1("VirtualScriptAliasIP", vhost_alias_set,
+ &vhost_alias_set_cgi_root_ip, RSRC_CONF,
+ "how to create a ScriptAlias based on the host"),
+ AP_INIT_TAKE1("VirtualDocumentRootIP", vhost_alias_set,
+ &vhost_alias_set_doc_root_ip, RSRC_CONF,
+ "how to create the DocumentRoot based on the host"),
+ { NULL }
+};
+
+
+/*
+ * This really wants to be a nested function
+ * but C is too feeble to support them.
+ */
+static APR_INLINE void vhost_alias_checkspace(request_rec *r, char *buf,
+ char **pdest, int size)
+{
+ /* XXX: what if size > HUGE_STRING_LEN? */
+ if (*pdest + size > buf + HUGE_STRING_LEN) {
+ **pdest = '\0';
+ if (r->filename) {
+ r->filename = apr_pstrcat(r->pool, r->filename, buf, NULL);
+ }
+ else {
+ r->filename = apr_pstrdup(r->pool, buf);
+ }
+ *pdest = buf;
+ }
+}
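/*
 * In other words: buf is a fixed HUGE_STRING_LEN scratch buffer; whenever
 * the next append would overflow it, the text accumulated so far is flushed
 * into r->filename (pool memory) and buf is reused from the start, so
 * interpolated names longer than HUGE_STRING_LEN still work.
 */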
+
+static void vhost_alias_interpolate(request_rec *r, const char *name,
+ const char *map, const char *uri)
+{
+ /* 0..9 9..0 */
+ enum { MAXDOTS = 19 };
+ const char *dots[MAXDOTS+1];
+ int ndots;
+
+ char buf[HUGE_STRING_LEN];
+ char *dest, last;
+
+ int N, M, Np, Mp, Nd, Md;
+ const char *start, *end;
+
+ const char *p;
+
+ ndots = 0;
+ dots[ndots++] = name-1; /* slightly naughty */
+ for (p = name; *p; ++p){
+ if (*p == '.' && ndots < MAXDOTS) {
+ dots[ndots++] = p;
+ }
+ }
+ dots[ndots] = p;
+
+ r->filename = NULL;
+
+ dest = buf;
+ last = '\0';
+ while (*map) {
+ if (*map != '%') {
+ /* normal characters */
+ vhost_alias_checkspace(r, buf, &dest, 1);
+ last = *dest++ = *map++;
+ continue;
+ }
+ /* we are in a format specifier */
+ ++map;
+ /* can't be a slash */
+ last = '\0';
+ /* %% -> % */
+ if (*map == '%') {
+ ++map;
+ vhost_alias_checkspace(r, buf, &dest, 1);
+ *dest++ = '%';
+ continue;
+ }
+ /* port number */
+ if (*map == 'p') {
+ ++map;
+ /* no. of decimal digits in a short plus one */
+ vhost_alias_checkspace(r, buf, &dest, 7);
+ dest += apr_snprintf(dest, 7, "%d", ap_get_server_port(r));
+ continue;
+ }
+ /* deal with %-N+.-M+ -- syntax is already checked */
+ N = M = 0; /* value */
+ Np = Mp = 0; /* is there a plus? */
+ Nd = Md = 0; /* is there a dash? */
+ if (*map == '-') ++map, Nd = 1;
+ N = *map++ - '0';
+ if (*map == '+') ++map, Np = 1;
+ if (*map == '.') {
+ ++map;
+ if (*map == '-') {
+ ++map, Md = 1;
+ }
+ M = *map++ - '0';
+ if (*map == '+') {
+ ++map, Mp = 1;
+ }
+ }
+ /* note that N and M are one-based indices, not zero-based */
+ start = dots[0]+1; /* ptr to the first character */
+ end = dots[ndots]; /* ptr to the character after the last one */
+ if (N != 0) {
+ if (N > ndots) {
+ start = "_";
+ end = start+1;
+ }
+ else if (!Nd) {
+ start = dots[N-1]+1;
+ if (!Np) {
+ end = dots[N];
+ }
+ }
+ else {
+ if (!Np) {
+ start = dots[ndots-N]+1;
+ }
+ end = dots[ndots-N+1];
+ }
+ }
+ if (M != 0) {
+ if (M > end - start) {
+ start = "_";
+ end = start+1;
+ }
+ else if (!Md) {
+ start = start+M-1;
+ if (!Mp) {
+ end = start+1;
+ }
+ }
+ else {
+ if (!Mp) {
+ start = end-M;
+ }
+ end = end-M+1;
+ }
+ }
+ vhost_alias_checkspace(r, buf, &dest, end - start);
+ for (p = start; p < end; ++p) {
+ *dest++ = apr_tolower(*p);
+ }
+ }
+ *dest = '\0';
+ /* no double slashes */
+ if (last == '/') {
+ ++uri;
+ }
+
+ if (r->filename) {
+ r->filename = apr_pstrcat(r->pool, r->filename, buf, uri, NULL);
+ }
+ else {
+ r->filename = apr_pstrcat(r->pool, buf, uri, NULL);
+ }
+}
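/*
 * A minimal standalone sketch (not part of the module) of how the dots[]
 * bookkeeping above selects host-name segments for a plain %N specifier.
 * The host names and the simplified handling (no '-', '+' or '.M'
 * modifiers) are illustrative assumptions; the real work is done by the
 * function above.
 */
#include <stdio.h>
#include <string.h>

#define MAXDOTS 19

static void pick_segment(const char *name, int N)
{
    const char *dots[MAXDOTS + 1];
    const char *p, *start, *end;
    int ndots = 0;

    dots[ndots++] = name - 1;            /* same "slightly naughty" sentinel */
    for (p = name; *p; ++p) {
        if (*p == '.' && ndots < MAXDOTS) {
            dots[ndots++] = p;
        }
    }
    dots[ndots] = p;

    start = dots[0] + 1;                 /* %0: the whole name */
    end = dots[ndots];
    if (N != 0) {
        if (N > ndots) {                 /* out of range: "_", as above */
            start = "_";
            end = start + 1;
        }
        else {
            start = dots[N - 1] + 1;     /* N-th dot-separated part, one-based */
            end = dots[N];
        }
    }
    printf("%%%d of %s -> %.*s\n", N, name, (int)(end - start), start);
}

int main(void)
{
    pick_segment("www.example.com", 0);  /* www.example.com   */
    pick_segment("www.example.com", 1);  /* www               */
    pick_segment("www.example.com", 2);  /* example           */
    pick_segment("www.example.com", 9);  /* _  (out of range) */
    return 0;
}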
+
+static int mva_translate(request_rec *r)
+{
+ mva_sconf_t *conf;
+ const char *name, *map, *uri;
+ mva_mode_e mode;
+ const char *cgi;
+
+ conf = (mva_sconf_t *) ap_get_module_config(r->server->module_config,
+ &vhost_alias_module);
+ cgi = NULL;
+ if (conf->cgi_root) {
+ cgi = strstr(r->uri, "cgi-bin/");
+ if (cgi && (cgi != r->uri + strspn(r->uri, "/"))) {
+ cgi = NULL;
+ }
+ }
+ if (cgi) {
+ mode = conf->cgi_root_mode;
+ map = conf->cgi_root;
+ uri = cgi + strlen("cgi-bin");
+ }
+ else if (r->uri[0] == '/') {
+ mode = conf->doc_root_mode;
+ map = conf->doc_root;
+ uri = r->uri;
+ }
+ else {
+ return DECLINED;
+ }
+
+ if (mode == VHOST_ALIAS_NAME) {
+ name = ap_get_server_name(r);
+ }
+ else if (mode == VHOST_ALIAS_IP) {
+ name = r->connection->local_ip;
+ }
+ else {
+ return DECLINED;
+ }
+
+ /* ### There is an optimization available here to determine the
+ * absolute portion of the path from the server config phase,
+ * through the first % segment, and note that portion of the path
+ * in the canonical_path buffer.
+ */
+ r->canonical_filename = "";
+ vhost_alias_interpolate(r, name, map, uri);
+
+ if (cgi) {
+ /* see is_scriptaliased() in mod_cgi */
+ r->handler = "cgi-script";
+ apr_table_setn(r->notes, "alias-forced-type", r->handler);
+ }
+
+ return OK;
+}
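/*
 * Worked examples for the translation above (request URIs are hypothetical;
 * both VirtualDocumentRoot and VirtualScriptAlias are assumed configured):
 *
 *   "/cgi-bin/printenv"   "cgi-bin/" follows the leading '/', so the
 *                         cgi_root map is used, uri becomes "/printenv"
 *                         and the handler is forced to "cgi-script".
 *   "/foo/cgi-bin/bar"    "cgi-bin/" is not at the start, so the request
 *                         falls through to the doc_root map with the full
 *                         URI.
 *   "*"                   (e.g. "OPTIONS *") does not begin with '/', so
 *                         the hook returns DECLINED and leaves the request
 *                         alone.
 */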
+
+static void register_hooks(apr_pool_t *p)
+{
+ static const char * const aszPre[]={ "mod_alias.c","mod_userdir.c",NULL };
+
+ ap_hook_translate_name(mva_translate, aszPre, NULL, APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA vhost_alias_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ mva_create_server_config, /* server config */
+ mva_merge_server_config, /* merge server configs */
+ mva_commands, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
+
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_vhost_alias.dsp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_vhost_alias.dsp
new file mode 100644
index 00000000..3b322e23
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_vhost_alias.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_vhost_alias" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_vhost_alias - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_vhost_alias.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_vhost_alias.mak" CFG="mod_vhost_alias - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_vhost_alias - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_vhost_alias - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_vhost_alias - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_vhost_alias_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_vhost_alias.so" /base:@..\..\os\win32\BaseAddr.ref,mod_vhost_alias.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_vhost_alias.so" /base:@..\..\os\win32\BaseAddr.ref,mod_vhost_alias.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_vhost_alias - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_vhost_alias_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_vhost_alias.so" /base:@..\..\os\win32\BaseAddr.ref,mod_vhost_alias.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_vhost_alias.so" /base:@..\..\os\win32\BaseAddr.ref,mod_vhost_alias.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_vhost_alias - Win32 Release"
+# Name "mod_vhost_alias - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_vhost_alias.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_vhost_alias.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_vhost_alias - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_vhost_alias.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_vhost_alias.so "vhost_alias_module for Apache" ../../include/ap_release.h > .\mod_vhost_alias.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_vhost_alias - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_vhost_alias.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_vhost_alias.so "vhost_alias_module for Apache" ../../include/ap_release.h > .\mod_vhost_alias.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/mod_vhost_alias.exp b/rubbos/app/httpd-2.0.64/modules/mappers/mod_vhost_alias.exp
new file mode 100644
index 00000000..b17666fc
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/mod_vhost_alias.exp
@@ -0,0 +1 @@
+vhost_alias_module
diff --git a/rubbos/app/httpd-2.0.64/modules/mappers/modules.mk b/rubbos/app/httpd-2.0.64/modules/mappers/modules.mk
new file mode 100644
index 00000000..8fb09eec
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/mappers/modules.mk
@@ -0,0 +1,17 @@
+mod_negotiation.la: mod_negotiation.lo
+ $(MOD_LINK) mod_negotiation.lo $(MOD_NEGOTIATION_LDADD)
+mod_dir.la: mod_dir.lo
+ $(MOD_LINK) mod_dir.lo $(MOD_DIR_LDADD)
+mod_imap.la: mod_imap.lo
+ $(MOD_LINK) mod_imap.lo $(MOD_IMAP_LDADD)
+mod_actions.la: mod_actions.lo
+ $(MOD_LINK) mod_actions.lo $(MOD_ACTIONS_LDADD)
+mod_userdir.la: mod_userdir.lo
+ $(MOD_LINK) mod_userdir.lo $(MOD_USERDIR_LDADD)
+mod_alias.la: mod_alias.lo
+ $(MOD_LINK) mod_alias.lo $(MOD_ALIAS_LDADD)
+mod_so.la: mod_so.lo
+ $(MOD_LINK) mod_so.lo $(MOD_SO_LDADD)
+DISTCLEAN_TARGETS = modules.mk
+static = mod_negotiation.la mod_dir.la mod_imap.la mod_actions.la mod_userdir.la mod_alias.la mod_so.la
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/.deps b/rubbos/app/httpd-2.0.64/modules/metadata/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/.indent.pro b/rubbos/app/httpd-2.0.64/modules/metadata/.indent.pro
new file mode 100644
index 00000000..a9fbe9f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_env.a b/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_env.a
new file mode 100644
index 00000000..7b9b9591
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_env.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_env.la b/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_env.la
new file mode 100644
index 00000000..80d73dfa
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_env.la
@@ -0,0 +1,35 @@
+# mod_env.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_env.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_env.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_env.o b/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_env.o
new file mode 100644
index 00000000..b98d9d62
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_env.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_setenvif.a b/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_setenvif.a
new file mode 100644
index 00000000..6e6c84b6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_setenvif.a
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_setenvif.la b/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_setenvif.la
new file mode 100644
index 00000000..6562d1dc
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_setenvif.la
@@ -0,0 +1,35 @@
+# mod_setenvif.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_setenvif.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_setenvif.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_setenvif.o b/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_setenvif.o
new file mode 100644
index 00000000..0f0fee97
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/.libs/mod_setenvif.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/Makefile b/rubbos/app/httpd-2.0.64/modules/metadata/Makefile
new file mode 100644
index 00000000..d90900c2
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/metadata
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/metadata
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/metadata
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/Makefile.in b/rubbos/app/httpd-2.0.64/modules/metadata/Makefile.in
new file mode 100644
index 00000000..167b343d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/Makefile.in
@@ -0,0 +1,3 @@
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUcernmeta b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUcernmeta
new file mode 100644
index 00000000..9a642677
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUcernmeta
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = cernmeta
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) CERN Meta Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = CERN Meta Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/cernmeta.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_cern_meta.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ cern_meta_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUexpires b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUexpires
new file mode 100644
index 00000000..eacc10eb
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUexpires
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = expires
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Expires Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Expires Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/expires.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_expires.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ expires_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUheaders b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUheaders
new file mode 100644
index 00000000..def6d703
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUheaders
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = headers
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Headers Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Headers Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/headers.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_headers.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ headers_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUmakefile b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUmakefile
new file mode 100644
index 00000000..fe1ecf21
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUmakefile
@@ -0,0 +1,252 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME =
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION =
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME =
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE =
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/cernmeta.nlm \
+ $(OBJDIR)/expires.nlm \
+ $(OBJDIR)/headers.nlm \
+ $(OBJDIR)/mimemagi.nlm \
+ $(OBJDIR)/uniqueid.nlm \
+ $(OBJDIR)/usertrk.nlm \
+ $(OBJDIR)/modversion.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUmimemagi b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUmimemagi
new file mode 100644
index 00000000..c2aa84da
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUmimemagi
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = mimemagi
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Mime Magic Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Mime Magic Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/mimemagi.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_mime_magic.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ mime_magic_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUmodversion b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUmodversion
new file mode 100644
index 00000000..fdece687
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUmodversion
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = modversion
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Version Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Version Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/$(NLM_NAME).nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_version.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ version_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUuniqueid b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUuniqueid
new file mode 100644
index 00000000..02917f5b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUuniqueid
@@ -0,0 +1,254 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = uniqueid
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Unique ID Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Unique ID Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/uniqueid.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_unique_id.o \
+ $(OBJDIR)/libprews.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ @ws2nlm.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ unique_id_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+$(OBJDIR)/%.o: ../arch/netware/%.c $(OBJDIR)\$(NLM_NAME)_cc.opt
+ @echo compiling $<
+ $(CC) $< -o=$(OBJDIR)\$(@F) @$(OBJDIR)\$(NLM_NAME)_cc.opt
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUusertrk b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUusertrk
new file mode 100644
index 00000000..49ee8f84
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/NWGNUusertrk
@@ -0,0 +1,248 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = usertrk
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) User Track Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = User Track Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/usertrk.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_usertrack.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ usertrack_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/config.m4 b/rubbos/app/httpd-2.0.64/modules/metadata/config.m4
new file mode 100644
index 00000000..3fc1b2ce
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/config.m4
@@ -0,0 +1,24 @@
+dnl modules enabled in this directory by default
+
+dnl APACHE_MODULE(name, helptext[, objects[, structname[, default[, config]]]])
+
+APACHE_MODPATH_INIT(metadata)
+
+APACHE_MODULE(env, clearing/setting of ENV vars, , , yes)
+APACHE_MODULE(mime_magic, automagically determining MIME type)
+APACHE_MODULE(cern_meta, CERN-type meta files)
+APACHE_MODULE(expires, Expires header control, , , most)
+APACHE_MODULE(headers, HTTP header control, , , most)
+
+APACHE_MODULE(usertrack, user-session tracking, , , , [
+ AC_CHECK_HEADERS(sys/times.h)
+ AC_CHECK_FUNCS(times)
+])
+
+APACHE_MODULE(unique_id, per-request unique ids)
+APACHE_MODULE(setenvif, basing ENV vars on headers, , , yes)
+APACHE_MODULE(version, determining httpd version in config files)
+
+APR_ADDTO(LT_LDFLAGS,-export-dynamic)
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_cern_meta.c b/rubbos/app/httpd-2.0.64/modules/metadata/mod_cern_meta.c
new file mode 100644
index 00000000..18bf1c0b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_cern_meta.c
@@ -0,0 +1,372 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_cern_meta.c
+ * version 0.1.0
+ * status beta
+ *
+ * Andrew Wilson <Andrew.Wilson@cm.cf.ac.uk> 25.Jan.96
+ *
+ * *** IMPORTANT ***
+ * This version of mod_cern_meta.c controls Meta File behaviour on a
+ * per-directory basis. Previous versions of the module defined behaviour
+ * on a per-server basis. The upshot is that you'll need to revisit your
+ * configuration files in order to make use of the new module.
+ * ***
+ *
+ * Emulate the CERN HTTPD Meta file semantics. Meta files are HTTP
+ * headers that can be output in addition to the normal range of
+ * headers for each file accessed. They appear rather like the Apache
+ * .asis files, and are able to provide a crude way of influencing
+ * the Expires: header, as well as providing other curiosities.
+ * There are many ways to manage meta information; this one was
+ * chosen because there is already a large number of CERN users
+ * who can exploit this module. It should be noted that there are probably
+ * more sensible ways of managing the Expires: header specifically.
+ *
+ * The module obeys the following directives, which can appear
+ * in the server's .conf files and in .htaccess files.
+ *
+ * MetaFiles <on|off>
+ *
+ * turns on|off meta file processing for any directory.
+ * Default value is off
+ *
+ * # turn on MetaFiles in this directory
+ * MetaFiles on
+ *
+ * MetaDir <directory name>
+ *
+ * specifies the name of the directory in which Apache can find
+ * meta information files. The directory is usually a 'hidden'
+ * subdirectory of the directory that contains the file being
+ * accessed. eg:
+ *
+ * # .meta files are in the *same* directory as the
+ * # file being accessed
+ * MetaDir .
+ *
+ * the default is to look in a '.web' subdirectory. This is the
+ * same as for CERN 3.+ webservers, and the behaviour is the same as
+ * for the directive:
+ *
+ * MetaDir .web
+ *
+ * MetaSuffix <meta file suffix>
+ *
+ * specifies the file name suffix for the file containing the
+ * meta information. eg:
+ *
+ * # our meta files are suffixed with '.cern_meta'
+ * MetaSuffix .cern_meta
+ *
+ * the default is to look for files with the suffix '.meta'. This
+ * behaviour is the same as for the directive:
+ *
+ * MetaSuffix .meta
+ *
+ * When accessing the file
+ *
+ * DOCUMENT_ROOT/somedir/index.html
+ *
+ * this module will look for the file
+ *
+ * DOCUMENT_ROOT/somedir/.web/index.html.meta
+ *
+ * and will use its contents to generate additional MIME header
+ * information.
+ *
+ * For more information on the CERN Meta file semantics see:
+ *
+ * http://www.w3.org/hypertext/WWW/Daemon/User/Config/General.html#MetaDir
+ *
+ * Change-log:
+ * 29.Jan.96 pfopen/pfclose instead of fopen/fclose
+ * DECLINE when real file not found, we may be checking each
+ * of the index.html/index.shtml/index.htm variants and don't
+ * need to report missing ones as spurious errors.
+ * 31.Jan.96 log_error reports about a malformed .meta file, rather
+ * than a script error.
+ * 20.Jun.96 MetaFiles <on|off> default off, added, so that module
+ * can be configured per-directory. Prior to this the module
+ * was running for each request anywhere on the server, naughty..
+ * 29.Jun.96 All directives made per-directory.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#if APR_HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "util_script.h"
+#include "http_log.h"
+#include "http_request.h"
+#include "http_protocol.h"
+#include "apr_lib.h"
+
+#define DIR_CMD_PERMS OR_INDEXES
+
+#define DEFAULT_METADIR ".web"
+#define DEFAULT_METASUFFIX ".meta"
+#define DEFAULT_METAFILES 0
+
+module AP_MODULE_DECLARE_DATA cern_meta_module;
+
+typedef struct {
+ const char *metadir;
+ const char *metasuffix;
+ int metafiles;
+} cern_meta_dir_config;
+
+static void *create_cern_meta_dir_config(apr_pool_t *p, char *dummy)
+{
+ cern_meta_dir_config *new =
+ (cern_meta_dir_config *) apr_palloc(p, sizeof(cern_meta_dir_config));
+
+ new->metadir = NULL;
+ new->metasuffix = NULL;
+ new->metafiles = DEFAULT_METAFILES;
+
+ return new;
+}
+
+static void *merge_cern_meta_dir_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ cern_meta_dir_config *base = (cern_meta_dir_config *) basev;
+ cern_meta_dir_config *add = (cern_meta_dir_config *) addv;
+ cern_meta_dir_config *new =
+ (cern_meta_dir_config *) apr_palloc(p, sizeof(cern_meta_dir_config));
+
+ new->metadir = add->metadir ? add->metadir : base->metadir;
+ new->metasuffix = add->metasuffix ? add->metasuffix : base->metasuffix;
+ new->metafiles = add->metafiles;
+
+ return new;
+}
+
+static const char *set_metadir(cmd_parms *parms, void *in_dconf, const char *arg)
+{
+ cern_meta_dir_config *dconf = in_dconf;
+
+ dconf->metadir = arg;
+ return NULL;
+}
+
+static const char *set_metasuffix(cmd_parms *parms, void *in_dconf, const char *arg)
+{
+ cern_meta_dir_config *dconf = in_dconf;
+
+ dconf->metasuffix = arg;
+ return NULL;
+}
+
+static const char *set_metafiles(cmd_parms *parms, void *in_dconf, int arg)
+{
+ cern_meta_dir_config *dconf = in_dconf;
+
+ dconf->metafiles = arg;
+ return NULL;
+}
+
+
+static const command_rec cern_meta_cmds[] =
+{
+ AP_INIT_FLAG("MetaFiles", set_metafiles, NULL, DIR_CMD_PERMS,
+ "Limited to 'on' or 'off'"),
+ AP_INIT_TAKE1("MetaDir", set_metadir, NULL, DIR_CMD_PERMS,
+ "the name of the directory containing meta files"),
+ AP_INIT_TAKE1("MetaSuffix", set_metasuffix, NULL, DIR_CMD_PERMS,
+ "the filename suffix for meta files"),
+ {NULL}
+};
+
+/* XXX: this is very similar to ap_scan_script_header_err_core...
+ * are the differences deliberate, or just a result of bit rot?
+ */
+static int scan_meta_file(request_rec *r, apr_file_t *f)
+{
+ char w[MAX_STRING_LEN];
+ char *l;
+ int p;
+ apr_table_t *tmp_headers;
+
+ tmp_headers = apr_table_make(r->pool, 5);
+ while (apr_file_gets(w, MAX_STRING_LEN - 1, f) == APR_SUCCESS) {
+
+ /* Delete terminal (CR?)LF */
+
+ p = strlen(w);
+ if (p > 0 && w[p - 1] == '\n') {
+ if (p > 1 && w[p - 2] == '\015')
+ w[p - 2] = '\0';
+ else
+ w[p - 1] = '\0';
+ }
+
+ if (w[0] == '\0') {
+ return OK;
+ }
+
+ /* if we see a bogus header don't ignore it. Shout and scream */
+
+ if (!(l = strchr(w, ':'))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "malformed header in meta file: %s", r->filename);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ *l++ = '\0';
+ while (*l && apr_isspace(*l))
+ ++l;
+
+ if (!strcasecmp(w, "Content-type")) {
+ char *tmp;
+ /* Nuke trailing whitespace */
+
+ char *endp = l + strlen(l) - 1;
+ while (endp > l && apr_isspace(*endp))
+ *endp-- = '\0';
+
+ tmp = apr_pstrdup(r->pool, l);
+ ap_content_type_tolower(tmp);
+ ap_set_content_type(r, tmp);
+ }
+ else if (!strcasecmp(w, "Status")) {
+ sscanf(l, "%d", &r->status);
+ r->status_line = apr_pstrdup(r->pool, l);
+ }
+ else {
+ apr_table_set(tmp_headers, w, l);
+ }
+ }
+ apr_table_overlap(r->headers_out, tmp_headers, APR_OVERLAP_TABLES_SET);
+ return OK;
+}
+
+static int add_cern_meta_data(request_rec *r)
+{
+ char *metafilename;
+ char *leading_slash;
+ char *last_slash;
+ char *real_file;
+ char *scrap_book;
+ apr_file_t *f = NULL;
+ apr_status_t retcode;
+ cern_meta_dir_config *dconf;
+ int rv;
+ request_rec *rr;
+
+ dconf = ap_get_module_config(r->per_dir_config, &cern_meta_module);
+
+ if (!dconf->metafiles) {
+ return DECLINED;
+ };
+
+ /* if ./.web/$1.meta exists then output 'asis' */
+
+ if (r->finfo.filetype == 0) {
+ return DECLINED;
+ };
+
+ /* is this a directory? */
+ if (r->finfo.filetype == APR_DIR || r->uri[strlen(r->uri) - 1] == '/') {
+ return DECLINED;
+ };
+
+ /* what directory is this file in? */
+ scrap_book = apr_pstrdup(r->pool, r->filename);
+
+ leading_slash = strchr(scrap_book, '/');
+ last_slash = strrchr(scrap_book, '/');
+ if ((last_slash != NULL) && (last_slash != leading_slash)) {
+ /* skip over last slash */
+ real_file = last_slash;
+ real_file++;
+ *last_slash = '\0';
+ }
+ else {
+ /* no last slash, buh?! */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "internal error in mod_cern_meta: %s", r->filename);
+ /* should really barf, but hey, let's be friends... */
+ return DECLINED;
+ };
+
+ metafilename = apr_pstrcat(r->pool, scrap_book, "/",
+ dconf->metadir ? dconf->metadir : DEFAULT_METADIR,
+ "/", real_file,
+ dconf->metasuffix ? dconf->metasuffix : DEFAULT_METASUFFIX,
+ NULL);
+
+ /* It sucks to require this subrequest to complete, because this
+ * means people must leave their meta files accessible to the world.
+ * A better solution might be a "safe open" feature of pfopen to avoid
+ * pipes, symlinks, and crap like that.
+ *
+ * In fact, this doesn't suck. Because <Location > blocks are never run
+ * against sub_req_lookup_file, the meta can be somewhat protected by
+ * either masking it with a <Location > directive or alias, or stowing
+ * the file outside of the web document tree, while providing the
+ * appropriate directory blocks to allow access to it as a file.
+ */
+ rr = ap_sub_req_lookup_file(metafilename, r, NULL);
+ if (rr->status != HTTP_OK) {
+ ap_destroy_sub_req(rr);
+ return DECLINED;
+ }
+ ap_destroy_sub_req(rr);
+
+ retcode = apr_file_open(&f, metafilename, APR_READ, APR_OS_DEFAULT, r->pool);
+ if (retcode != APR_SUCCESS) {
+ if (APR_STATUS_IS_ENOENT(retcode)) {
+ return DECLINED;
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "meta file permissions deny server access: %s", metafilename);
+ return HTTP_FORBIDDEN;
+ };
+
+ /* read the headers in */
+ rv = scan_meta_file(r, f);
+ apr_file_close(f);
+
+ return rv;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_fixups(add_cern_meta_data,NULL,NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA cern_meta_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_cern_meta_dir_config, /* dir config creator */
+ merge_cern_meta_dir_configs,/* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server configs */
+ cern_meta_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
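
Taken together, the directives implemented above give per-directory control over meta-file processing. Below is a minimal sketch of a configuration that exercises them; the directory path, file names and header value are illustrative only and appear nowhere in this tree (in a .htaccess file these directives need the Indexes override, per DIR_CMD_PERMS above):

    <Directory "/var/www/reports">
        MetaFiles on
        MetaDir .web
        MetaSuffix .meta
    </Directory>

    # With this in place, a request for /reports/summary.html makes the module
    # read /var/www/reports/.web/summary.html.meta, which holds extra response
    # headers, one per line (a blank line ends processing), for example:
    #     Expires: Mon, 04 Dec 2006 21:29:00 GMT
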
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_cern_meta.dsp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_cern_meta.dsp
new file mode 100644
index 00000000..c50e9085
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_cern_meta.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_cern_meta" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_cern_meta - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_cern_meta.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_cern_meta.mak" CFG="mod_cern_meta - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_cern_meta - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_cern_meta - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_cern_meta - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_cern_meta_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_cern_meta.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cern_meta.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_cern_meta.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cern_meta.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_cern_meta - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_cern_meta_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_cern_meta.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cern_meta.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_cern_meta.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cern_meta.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_cern_meta - Win32 Release"
+# Name "mod_cern_meta - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_cern_meta.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_cern_meta.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_cern_meta - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_cern_meta.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_cern_meta.so "cern_meta_module for Apache" ../../include/ap_release.h > .\mod_cern_meta.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_cern_meta - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_cern_meta.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_cern_meta.so "cern_meta_module for Apache" ../../include/ap_release.h > .\mod_cern_meta.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_cern_meta.exp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_cern_meta.exp
new file mode 100644
index 00000000..d36e2be6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_cern_meta.exp
@@ -0,0 +1 @@
+cern_meta_module
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.c b/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.c
new file mode 100644
index 00000000..9ce1ccbc
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.c
@@ -0,0 +1,179 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+
+#if APR_HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_log.h"
+
+typedef struct {
+ apr_table_t *vars;
+ apr_table_t *unsetenv;
+} env_dir_config_rec;
+
+module AP_MODULE_DECLARE_DATA env_module;
+
+static void *create_env_dir_config(apr_pool_t *p, char *dummy)
+{
+ env_dir_config_rec *conf = apr_palloc(p, sizeof(*conf));
+
+ conf->vars = apr_table_make(p, 10);
+ conf->unsetenv = apr_table_make(p, 10);
+
+ return conf;
+}
+
+static void *merge_env_dir_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ env_dir_config_rec *base = basev;
+ env_dir_config_rec *add = addv;
+ env_dir_config_rec *res = apr_palloc(p, sizeof(*res));
+
+ const apr_table_entry_t *elts;
+ const apr_array_header_t *arr;
+
+ int i;
+
+ /*
+ * res->vars = copy_table( p, base->vars );
+ * foreach $unsetenv ( @add->unsetenv )
+ * table_unset( res->vars, $unsetenv );
+ * foreach $element ( @add->vars )
+ * table_set( res->vars, $element.key, $element.val );
+ *
+ * add->unsetenv already removed the vars from add->vars,
+ * if they preceded the UnsetEnv directive.
+ */
+ res->vars = apr_table_copy(p, base->vars);
+ res->unsetenv = NULL;
+
+ arr = apr_table_elts(add->unsetenv);
+ if (arr) {
+ elts = (const apr_table_entry_t *)arr->elts;
+
+ for (i = 0; i < arr->nelts; ++i) {
+ apr_table_unset(res->vars, elts[i].key);
+ }
+ }
+
+ arr = apr_table_elts(add->vars);
+ if (arr) {
+ elts = (const apr_table_entry_t *)arr->elts;
+
+ for (i = 0; i < arr->nelts; ++i) {
+ apr_table_setn(res->vars, elts[i].key, elts[i].val);
+ }
+ }
+
+ return res;
+}
+
+static const char *add_env_module_vars_passed(cmd_parms *cmd, void *sconf_,
+ const char *arg)
+{
+ env_dir_config_rec *sconf = sconf_;
+ apr_table_t *vars = sconf->vars;
+ const char *env_var;
+
+ env_var = getenv(arg);
+ if (env_var != NULL) {
+ apr_table_setn(vars, arg, apr_pstrdup(cmd->pool, env_var));
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server,
+ "PassEnv variable %s was undefined", arg);
+ }
+
+ return NULL;
+}
+
+static const char *add_env_module_vars_set(cmd_parms *cmd, void *sconf_,
+ const char *name, const char *value)
+{
+ env_dir_config_rec *sconf = sconf_;
+
+ /* name is mandatory, value is optional. no value means
+ * set the variable to an empty string
+ */
+ apr_table_setn(sconf->vars, name, value ? value : "");
+
+ return NULL;
+}
+
+static const char *add_env_module_vars_unset(cmd_parms *cmd, void *sconf_,
+ const char *arg)
+{
+ env_dir_config_rec *sconf = sconf_;
+
+ /* Always UnsetEnv FOO in the same context as {Set,Pass}Env FOO
+ * only if this UnsetEnv follows the {Set,Pass}Env. The merge
+ * will only apply unsetenv to the parent env (main server).
+ */
+ apr_table_set(sconf->unsetenv, arg, NULL);
+ apr_table_unset(sconf->vars, arg);
+
+ return NULL;
+}
+
+static const command_rec env_module_cmds[] =
+{
+AP_INIT_ITERATE("PassEnv", add_env_module_vars_passed, NULL,
+ OR_FILEINFO, "a list of environment variables to pass to CGI."),
+AP_INIT_TAKE12("SetEnv", add_env_module_vars_set, NULL,
+ OR_FILEINFO, "an environment variable name and optional value to pass to CGI."),
+AP_INIT_ITERATE("UnsetEnv", add_env_module_vars_unset, NULL,
+ OR_FILEINFO, "a list of variables to remove from the CGI environment."),
+ {NULL},
+};
+
+static int fixup_env_module(request_rec *r)
+{
+ apr_table_t *e = r->subprocess_env;
+ env_dir_config_rec *sconf = ap_get_module_config(r->per_dir_config,
+ &env_module);
+ apr_table_t *vars = sconf->vars;
+
+ if (!apr_table_elts(sconf->vars)->nelts)
+ return DECLINED;
+
+ r->subprocess_env = apr_table_overlay(r->pool, e, vars);
+
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_fixups(fixup_env_module, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA env_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_env_dir_config, /* dir config creator */
+ merge_env_dir_configs, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server configs */
+ env_module_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
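
For reference, here is a short sketch of how the three directives defined in the command table above are typically combined; the variable names and value are placeholders rather than anything taken from this tree:

    # Allowed in the server config, <Directory> sections and .htaccess
    # (FileInfo override). PassEnv copies a variable from httpd's own
    # environment at configuration time; SetEnv with no value sets the
    # empty string; UnsetEnv removes a variable, including one inherited
    # from an enclosing scope.
    PassEnv TZ
    SetEnv SPECIAL_PATH /foo/bin
    SetEnv DEBUG_FLAG
    UnsetEnv SPECIAL_PATH
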
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.dsp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.dsp
new file mode 100644
index 00000000..9a841cb6
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_env" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_env - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_env.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_env.mak" CFG="mod_env - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_env - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_env - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_env - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_env_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_env.so" /base:@..\..\os\win32\BaseAddr.ref,mod_env.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_env.so" /base:@..\..\os\win32\BaseAddr.ref,mod_env.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_env - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_env_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_env.so" /base:@..\..\os\win32\BaseAddr.ref,mod_env.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_env.so" /base:@..\..\os\win32\BaseAddr.ref,mod_env.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_env - Win32 Release"
+# Name "mod_env - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_env.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_env.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_env - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_env.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_env.so "env_module for Apache" ../../include/ap_release.h > .\mod_env.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_env - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_env.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_env.so "env_module for Apache" ../../include/ap_release.h > .\mod_env.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.exp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.exp
new file mode 100644
index 00000000..b487bf09
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.exp
@@ -0,0 +1 @@
+env_module
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.la b/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.la
new file mode 100644
index 00000000..80d73dfa
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.la
@@ -0,0 +1,35 @@
+# mod_env.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_env.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_env.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.lo b/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.lo
new file mode 100644
index 00000000..134c2b05
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.lo
@@ -0,0 +1,12 @@
+# mod_env.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_env.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_env.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.o b/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.o
new file mode 100644
index 00000000..b98d9d62
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_env.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_expires.c b/rubbos/app/httpd-2.0.64/modules/metadata/mod_expires.c
new file mode 100644
index 00000000..7c2b78a5
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_expires.c
@@ -0,0 +1,566 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_expires.c
+ * version 0.0.11
+ * status beta
+ *
+ * Andrew Wilson <Andrew.Wilson@cm.cf.ac.uk> 26.Jan.96
+ *
+ * This module allows you to control the form of the Expires: header
+ * that Apache issues for each access. Directives can appear in
+ * configuration files or in .htaccess files so expiry semantics can
+ * be defined on a per-directory basis.
+ *
+ * DIRECTIVE SYNTAX
+ *
+ * Valid directives are:
+ *
+ * ExpiresActive on | off
+ * ExpiresDefault <code><seconds>
+ * ExpiresByType type/encoding <code><seconds>
+ *
+ * Valid values for <code> are:
+ *
+ * 'M' expires header shows file modification date + <seconds>
+ * 'A' expires header shows access time + <seconds>
+ *
+ * [I'm not sure which of these is best under different
+ * circumstances, I guess it's for other people to explore.
+ * The effects may be indistinguishable for a number of cases]
+ *
+ * <seconds> should be an integer value [acceptable to atoi()]
+ *
+ * There is NO space between the <code> and <seconds>.
+ *
+ * For example, a directory which contains information which changes
+ * frequently might contain:
+ *
+ * # reports generated by cron every hour. don't let caches
+ * # hold onto stale information
+ * ExpiresDefault M3600
+ *
+ * Another example: our html pages can change all the time, but the gifs
+ * tend not to change often:
+ *
+ * # pages are hot (1 week), images are cold (1 month)
+ * ExpiresByType text/html A604800
+ * ExpiresByType image/gif A2592000
+ *
+ * Expires can be turned on for all URLs on the server by placing the
+ * following directive in a conf file:
+ *
+ * ExpiresActive on
+ *
+ * ExpiresActive can also appear in .htaccess files, enabling the
+ * behaviour to be turned on or off for each chosen directory.
+ *
+ * # turn off Expires behaviour in this directory
+ * # and subdirectories
+ * ExpiresActive off
+ *
+ * Directives defined for a directory are valid in subdirectories
+ * unless explicitly overridden by new directives in the subdirectory
+ * .htaccess files.
+ *
+ * ALTERNATIVE DIRECTIVE SYNTAX
+ *
+ * Directives can also be defined in a more readable syntax of the form:
+ *
+ * ExpiresDefault "<base> [plus] {<num> <type>}*"
+ * ExpiresByType type/encoding "<base> [plus] {<num> <type>}*"
+ *
+ * where <base> is one of:
+ * access
+ * now equivalent to 'access'
+ * modification
+ *
+ * where the 'plus' keyword is optional
+ *
+ * where <num> should be an integer value [acceptable to atoi()]
+ *
+ * where <type> is one of:
+ * years
+ * months
+ * weeks
+ * days
+ * hours
+ * minutes
+ * seconds
+ *
+ * For example, any of the following directives can be used to make
+ * documents expire 1 month after being accessed, by default:
+ *
+ * ExpiresDefault "access plus 1 month"
+ * ExpiresDefault "access plus 4 weeks"
+ * ExpiresDefault "access plus 30 days"
+ *
+ * The expiry time can be fine-tuned by adding several '<num> <type>'
+ * clauses:
+ *
+ * ExpiresByType text/html "access plus 1 month 15 days 2 hours"
+ * ExpiresByType image/gif "modification plus 5 hours 3 minutes"
+ *
+ * ---
+ *
+ * Change-log:
+ * 29.Jan.96 Hardened the add_* functions. Server will now bail out
+ * if bad directives are given in the conf files.
+ * 02.Feb.96 Returns DECLINED if not 'ExpiresActive on', giving other
+ * expires-aware modules a chance to play with the same
+ * directives. [Michael Rutman]
+ * 03.Feb.96 Call tzset() before localtime(). Trying to get the module
+ * to work properly in non GMT timezones.
+ * 12.Feb.96 Modified directive syntax to allow more readable commands:
+ * ExpiresDefault "now plus 10 days 20 seconds"
+ * ExpiresDefault "access plus 30 days"
+ * ExpiresDefault "modification plus 1 year 10 months 30 days"
+ * 13.Feb.96 Fix call to table_get() with NULL 2nd parameter [Rob Hartill]
+ * 19.Feb.96 Call gm_timestr_822() to get time formatted correctly, can't
+ * rely on presence of HTTP_TIME_FORMAT in Apache 1.1+.
+ * 21.Feb.96 This version (0.0.9) reverses assumptions made in 0.0.8
+ * about star/star handlers. Reverting to 0.0.7 behaviour.
+ * 08.Jun.96 allows ExpiresDefault to be used with responses that use
+ * the DefaultType by not DECLINING, but instead skipping
+ * the table_get check and then looking for an ExpiresDefault.
+ * [Rob Hartill]
+ * 04.Nov.96 'const' definitions added.
+ *
+ * TODO
+ * add support for Cache-Control: max-age=20 from the HTTP/1.1
+ * proposal (in this case, a ttl of 20 seconds) [ask roy]
+ * add per-file expiry and explicit expiry times - duplicates some
+ * of the mod_cern_meta.c functionality. eg:
+ * ExpiresExplicit index.html "modification plus 30 days"
+ *
+ * BUGS
+ * Hi, welcome to the internet.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "http_request.h"
+#include "http_protocol.h"
+
+typedef struct {
+ int active;
+ int wildcards;
+ char *expiresdefault;
+ apr_table_t *expiresbytype;
+} expires_dir_config;
+
+/* from mod_dir, why is this alias used?
+ */
+#define DIR_CMD_PERMS OR_INDEXES
+
+#define ACTIVE_ON 1
+#define ACTIVE_OFF 0
+#define ACTIVE_DONTCARE 2
+
+module AP_MODULE_DECLARE_DATA expires_module;
+
+static void *create_dir_expires_config(apr_pool_t *p, char *dummy)
+{
+ expires_dir_config *new =
+ (expires_dir_config *) apr_pcalloc(p, sizeof(expires_dir_config));
+ new->active = ACTIVE_DONTCARE;
+ new->wildcards = 0;
+ new->expiresdefault = NULL;
+ new->expiresbytype = apr_table_make(p, 4);
+ return (void *) new;
+}
+
+static const char *set_expiresactive(cmd_parms *cmd, void *in_dir_config, int arg)
+{
+ expires_dir_config *dir_config = in_dir_config;
+
+ /* if we're here at all it's because someone explicitly
+ * set the active flag
+ */
+ dir_config->active = ACTIVE_ON;
+ if (arg == 0) {
+ dir_config->active = ACTIVE_OFF;
+ }
+ return NULL;
+}
+
+/* check_code() parses 'code' and returns NULL or an error response
+ * string. If we return NULL then real_code contains code converted
+ * to the cnnnn format.
+ */
+static char *check_code(apr_pool_t *p, const char *code, char **real_code)
+{
+ char *word;
+ char base = 'X';
+ int modifier = 0;
+ int num = 0;
+ int factor = 0;
+
+ /* 0.0.4 compatibility?
+ */
+ if ((code[0] == 'A') || (code[0] == 'M')) {
+ *real_code = (char *)code;
+ return NULL;
+ }
+
+ /* <base> [plus] {<num> <type>}*
+ */
+
+ /* <base>
+ */
+ word = ap_getword_conf(p, &code);
+ if (!strncasecmp(word, "now", 1) ||
+ !strncasecmp(word, "access", 1)) {
+ base = 'A';
+ }
+ else if (!strncasecmp(word, "modification", 1)) {
+ base = 'M';
+ }
+ else {
+ return apr_pstrcat(p, "bad expires code, unrecognised <base> '",
+ word, "'", NULL);
+ }
+
+ /* [plus]
+ */
+ word = ap_getword_conf(p, &code);
+ if (!strncasecmp(word, "plus", 1)) {
+ word = ap_getword_conf(p, &code);
+ }
+
+ /* {<num> <type>}*
+ */
+ while (word[0]) {
+ /* <num>
+ */
+ if (apr_isdigit(word[0])) {
+ num = atoi(word);
+ }
+ else {
+ return apr_pstrcat(p, "bad expires code, numeric value expected <num> '",
+ word, "'", NULL);
+ }
+
+ /* <type>
+ */
+ word = ap_getword_conf(p, &code);
+ if (word[0]) {
+ /* do nothing */
+ }
+ else {
+ return apr_pstrcat(p, "bad expires code, missing <type>", NULL);
+ }
+
+ factor = 0;
+ if (!strncasecmp(word, "years", 1)) {
+ factor = 60 * 60 * 24 * 365;
+ }
+ else if (!strncasecmp(word, "months", 2)) {
+ factor = 60 * 60 * 24 * 30;
+ }
+ else if (!strncasecmp(word, "weeks", 1)) {
+ factor = 60 * 60 * 24 * 7;
+ }
+ else if (!strncasecmp(word, "days", 1)) {
+ factor = 60 * 60 * 24;
+ }
+ else if (!strncasecmp(word, "hours", 1)) {
+ factor = 60 * 60;
+ }
+ else if (!strncasecmp(word, "minutes", 2)) {
+ factor = 60;
+ }
+ else if (!strncasecmp(word, "seconds", 1)) {
+ factor = 1;
+ }
+ else {
+ return apr_pstrcat(p, "bad expires code, unrecognised <type>",
+ "'", word, "'", NULL);
+ }
+
+ modifier = modifier + factor * num;
+
+ /* next <num>
+ */
+ word = ap_getword_conf(p, &code);
+ }
+
+ *real_code = apr_psprintf(p, "%c%d", base, modifier);
+
+ return NULL;
+}
+
+static const char *set_expiresbytype(cmd_parms *cmd, void *in_dir_config,
+ const char *mime, const char *code)
+{
+ expires_dir_config *dir_config = in_dir_config;
+ char *response, *real_code;
+ const char *check;
+
+ check = ap_strrchr_c(mime, '/');
+ if ((strlen(++check) == 1) && (*check == '*')) {
+ dir_config->wildcards = 1;
+ }
+
+ if ((response = check_code(cmd->pool, code, &real_code)) == NULL) {
+ apr_table_setn(dir_config->expiresbytype, mime, real_code);
+ return NULL;
+ }
+ return apr_pstrcat(cmd->pool,
+ "'ExpiresByType ", mime, " ", code, "': ", response, NULL);
+}
+
+static const char *set_expiresdefault(cmd_parms *cmd, void *in_dir_config,
+ const char *code)
+{
+ expires_dir_config * dir_config = in_dir_config;
+ char *response, *real_code;
+
+ if ((response = check_code(cmd->pool, code, &real_code)) == NULL) {
+ dir_config->expiresdefault = real_code;
+ return NULL;
+ }
+ return apr_pstrcat(cmd->pool,
+ "'ExpiresDefault ", code, "': ", response, NULL);
+}
+
+static const command_rec expires_cmds[] =
+{
+ AP_INIT_FLAG("ExpiresActive", set_expiresactive, NULL, DIR_CMD_PERMS,
+ "Limited to 'on' or 'off'"),
+ AP_INIT_TAKE2("ExpiresByType", set_expiresbytype, NULL, DIR_CMD_PERMS,
+ "a MIME type followed by an expiry date code"),
+ AP_INIT_TAKE1("ExpiresDefault", set_expiresdefault, NULL, DIR_CMD_PERMS,
+ "an expiry date code"),
+ {NULL}
+};
+
+static void *merge_expires_dir_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ expires_dir_config *new = (expires_dir_config *) apr_pcalloc(p, sizeof(expires_dir_config));
+ expires_dir_config *base = (expires_dir_config *) basev;
+ expires_dir_config *add = (expires_dir_config *) addv;
+
+ if (add->active == ACTIVE_DONTCARE) {
+ new->active = base->active;
+ }
+ else {
+ new->active = add->active;
+ }
+
+ if (add->expiresdefault != NULL) {
+ new->expiresdefault = add->expiresdefault;
+ }
+ else {
+ new->expiresdefault = base->expiresdefault;
+ }
+ new->wildcards = add->wildcards;
+ new->expiresbytype = apr_table_overlay(p, add->expiresbytype,
+ base->expiresbytype);
+ return new;
+}
+
+/*
+ * Handle the setting of the expiration response header fields according
+ * to our criteria.
+ */
+
+static int set_expiration_fields(request_rec *r, const char *code,
+ apr_table_t *t)
+{
+ apr_time_t base;
+ apr_time_t additional;
+ apr_time_t expires;
+ int additional_sec;
+ char *timestr;
+
+ switch (code[0]) {
+ case 'M':
+ if (r->finfo.filetype == 0) {
+ /* file doesn't exist on disk, so we can't do anything based on
+ * modification time. Note that this does _not_ log an error.
+ */
+ return DECLINED;
+ }
+ base = r->finfo.mtime;
+ additional_sec = atoi(&code[1]);
+ additional = apr_time_from_sec(additional_sec);
+ break;
+ case 'A':
+ /* there's been some discussion and it's possible that
+ * 'access time' will be stored in request structure
+ */
+ base = r->request_time;
+ additional_sec = atoi(&code[1]);
+ additional = apr_time_from_sec(additional_sec);
+ break;
+ default:
+ * expecting the add_* routines to be case-hardened, this
+ * is just a reminder that the module is beta
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "internal error: bad expires code: %s", r->filename);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ expires = base + additional;
+ if (expires < r->request_time) {
+ expires = r->request_time;
+ }
+ apr_table_mergen(t, "Cache-Control",
+ apr_psprintf(r->pool, "max-age=%" APR_TIME_T_FMT,
+ apr_time_sec(expires - r->request_time)));
+ timestr = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ apr_rfc822_date(timestr, expires);
+ apr_table_setn(t, "Expires", timestr);
+ return OK;
+}
+
+/*
+ * Output filter to set the Expires response header field
+ * according to the content-type of the response -- if it hasn't
+ * already been set.
+ */
+static apr_status_t expires_filter(ap_filter_t *f,
+ apr_bucket_brigade *b)
+{
+ request_rec *r;
+ expires_dir_config *conf;
+ const char *expiry;
+ apr_table_t *t;
+
+ r = f->r;
+ conf = (expires_dir_config *) ap_get_module_config(r->per_dir_config,
+ &expires_module);
+
+ /*
+ * Check to see which output header table we should use;
+ * mod_cgi loads script fields into r->err_headers_out,
+ * for instance.
+ */
+ expiry = apr_table_get(r->err_headers_out, "Expires");
+ if (expiry != NULL) {
+ t = r->err_headers_out;
+ }
+ else {
+ expiry = apr_table_get(r->headers_out, "Expires");
+ t = r->headers_out;
+ }
+ if (expiry == NULL) {
+ /*
+ * No expiration has been set, so we can apply any managed by
+ * this module. First, check to see if there is an applicable
+ * ExpiresByType directive.
+ */
+ expiry = apr_table_get(conf->expiresbytype,
+ ap_field_noparam(r->pool, r->content_type));
+ if (expiry == NULL) {
+ int usedefault = 1;
+ /*
+ * See if we have a wildcard entry for the major type.
+ */
+ if (conf->wildcards) {
+ char *checkmime;
+ char *spos;
+ checkmime = apr_pstrdup(r->pool, r->content_type);
+ spos = checkmime ? ap_strchr(checkmime, '/') : NULL;
+ if (spos != NULL) {
+ /*
+ * Without a '/' character, nothing we have will match.
+ * However, we have one.
+ */
+ if (strlen(++spos) > 0) {
+ *spos++ = '*';
+ *spos = '\0';
+ }
+ else {
+ checkmime = apr_pstrcat(r->pool, checkmime, "*", NULL);
+ }
+ expiry = apr_table_get(conf->expiresbytype, checkmime);
+ usedefault = (expiry == NULL);
+ }
+ }
+ if (usedefault) {
+ /*
+ * Use the ExpiresDefault directive
+ */
+ expiry = conf->expiresdefault;
+ }
+ }
+ if (expiry != NULL) {
+ set_expiration_fields(r, expiry, t);
+ }
+ }
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, b);
+}
+
+static void expires_insert_filter(request_rec *r)
+{
+ expires_dir_config *conf;
+
+ /* Don't add Expires headers to errors */
+ if (ap_is_HTTP_ERROR(r->status)) {
+ return;
+ }
+ /* Say no to subrequests */
+ if (r->main != NULL) {
+ return;
+ }
+ conf = (expires_dir_config *) ap_get_module_config(r->per_dir_config,
+ &expires_module);
+
+ /* Check to see if the filter is enabled and if there are any applicable
+ * config directives for this directory scope
+ */
+ if (conf->active != ACTIVE_ON ||
+ (apr_is_empty_table(conf->expiresbytype) && !conf->expiresdefault)) {
+ return;
+ }
+ ap_add_output_filter("MOD_EXPIRES", NULL, r, r->connection);
+ return;
+}
+static void register_hooks(apr_pool_t *p)
+{
+ /* mod_expires needs to run *before* the cache save filter which is
+ * AP_FTYPE_CONTENT_SET-1. Otherwise, our expires won't be honored.
+ */
+ ap_register_output_filter("MOD_EXPIRES", expires_filter, NULL,
+ AP_FTYPE_CONTENT_SET-2);
+ ap_hook_insert_error_filter(expires_insert_filter, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_insert_filter(expires_insert_filter, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA expires_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_dir_expires_config, /* dir config creator */
+ merge_expires_dir_configs, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server configs */
+ expires_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
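
As a quick illustration of the two directive syntaxes documented in the header comment above, the following configuration mixes the terse <code><seconds> form with the readable form (the types and intervals are arbitrary examples, mostly lifted from that comment):

    ExpiresActive on
    # terse form: 'A' = access time, 'M' = modification time, plus seconds
    ExpiresByType image/gif A2592000
    # readable form, parsed by check_code() into the same internal format
    ExpiresByType text/html "access plus 1 month 15 days 2 hours"
    ExpiresDefault "modification plus 4 weeks"
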
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_expires.dsp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_expires.dsp
new file mode 100644
index 00000000..6cb9f91f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_expires.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_expires" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_expires - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_expires.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_expires.mak" CFG="mod_expires - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_expires - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_expires - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_expires - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_expires_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_expires.so" /base:@..\..\os\win32\BaseAddr.ref,mod_expires.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_expires.so" /base:@..\..\os\win32\BaseAddr.ref,mod_expires.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_expires - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_expires_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_expires.so" /base:@..\..\os\win32\BaseAddr.ref,mod_expires.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_expires.so" /base:@..\..\os\win32\BaseAddr.ref,mod_expires.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_expires - Win32 Release"
+# Name "mod_expires - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_expires.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_expires.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_expires - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_expires.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_expires.so "expires_module for Apache" ../../include/ap_release.h > .\mod_expires.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_expires - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_expires.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_expires.so "expires_module for Apache" ../../include/ap_release.h > .\mod_expires.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_expires.exp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_expires.exp
new file mode 100644
index 00000000..863a9687
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_expires.exp
@@ -0,0 +1 @@
+expires_module
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_headers.c b/rubbos/app/httpd-2.0.64/modules/metadata/mod_headers.c
new file mode 100644
index 00000000..ce4460d4
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_headers.c
@@ -0,0 +1,620 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_headers.c: Add/append/remove HTTP response headers
+ * Written by Paul Sutton, paul@ukweb.com, 1 Oct 1996
+ *
+ * The Header directive can be used to add/replace/remove HTTP headers
+ * within the response message. The RequestHeader directive can be used
+ * to add/replace/remove HTTP headers before a request message is processed.
+ * Valid in both per-server and per-dir configurations.
+ *
+ * Syntax is:
+ *
+ * Header action header value
+ * RequestHeader action header value
+ *
+ * Where action is one of:
+ * set - set this header, replacing any old value
+ * add - add this header, possible resulting in two or more
+ * headers with the same name
+ * append - append this text onto any existing header of this same
+ * unset - remove this header
+ *
+ * Where action is unset, the third argument (value) should not be given.
+ * The header name can include the colon, or not.
+ *
+ * The Header and RequestHeader directives can only be used where allowed
+ * by the FileInfo override.
+ *
+ * When the request is processed, the header directives are processed in
+ * this order: firstly, the main server, then the virtual server handling
+ * this request (if any), then any <Directory> sections (working downwards
+ * from the root dir), then any <Location> sections (working down from
+ * shortest URL component), then any <File> sections. This order is
+ * important if any 'set' or 'unset' actions are used. For example,
+ * the following two directives have different effect if applied in
+ * the reverse order:
+ *
+ * Header append Author "John P. Doe"
+ * Header unset Author
+ *
+ * Examples:
+ *
+ * To set the "Author" header, use
+ * Header add Author "John P. Doe"
+ *
+ * To remove a header:
+ * Header unset Author
+ *
+ */
+
+#include "apr.h"
+#include "apr_lib.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+
+#include "apr_hash.h"
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_log.h"
+#include "util_filter.h"
+#include "http_protocol.h" /* ap_hook_insert_error_filter */
+
+/* format_tag_hash is initialized during pre-config */
+static apr_hash_t *format_tag_hash;
+
+typedef enum {
+ hdr_add = 'a', /* add header (could mean multiple hdrs) */
+ hdr_set = 's', /* set (replace old value) */
+ hdr_append = 'm', /* append (merge into any old value) */
+ hdr_unset = 'u', /* unset header */
+ hdr_echo = 'e' /* echo headers from request to response */
+} hdr_actions;
+
+/*
+ * magic cmd->info values
+ */
+static char hdr_in = '0'; /* RequestHeader */
+static char hdr_out = '1'; /* Header onsuccess */
+static char hdr_err = '2'; /* Header always */
+
+/*
+ * There is an array of struct format_tag per Header/RequestHeader
+ * config directive
+ */
+typedef struct {
+ const char* (*func)(request_rec *r,char *arg);
+ char *arg;
+} format_tag;
+
+/*
+ * There is one "header_entry" per Header/RequestHeader config directive
+ */
+typedef struct {
+ hdr_actions action;
+ char *header;
+ apr_array_header_t *ta; /* Array of format_tag structs */
+ regex_t *regex;
+ const char *condition_var;
+} header_entry;
+
+/* echo_do is used for Header echo to iterate through the request headers */
+typedef struct {
+ request_rec *r;
+ header_entry *hdr;
+} echo_do;
+
+/*
+ * headers_conf is our per-module configuration. This is used as both
+ * a per-dir and per-server config
+ */
+typedef struct {
+ apr_array_header_t *fixup_in;
+ apr_array_header_t *fixup_out;
+ apr_array_header_t *fixup_err;
+} headers_conf;
+
+module AP_MODULE_DECLARE_DATA headers_module;
+
+/*
+ * Tag formatting functions
+ */
+static const char *constant_item(request_rec *r, char *stuff)
+{
+ return stuff;
+}
+static const char *header_request_duration(request_rec *r, char *a)
+{
+ return apr_psprintf(r->pool, "D=%" APR_TIME_T_FMT,
+ (apr_time_now() - r->request_time));
+}
+static const char *header_request_time(request_rec *r, char *a)
+{
+ return apr_psprintf(r->pool, "t=%" APR_TIME_T_FMT, r->request_time);
+}
+static const char *header_request_env_var(request_rec *r, char *a)
+{
+ const char *s = apr_table_get(r->subprocess_env,a);
+
+ if (s)
+ return s;
+ else
+ return "(null)";
+}
+/*
+ * Config routines
+ */
+static void *create_headers_config(apr_pool_t *p, char *dummy)
+{
+ headers_conf *conf = apr_palloc(p, sizeof(*conf));
+
+ conf->fixup_in = apr_array_make(p, 2, sizeof(header_entry));
+ conf->fixup_out = apr_array_make(p, 2, sizeof(header_entry));
+ conf->fixup_err = apr_array_make(p, 2, sizeof(header_entry));
+
+ return conf;
+}
+
+static void *merge_headers_config(apr_pool_t *p, void *basev, void *overridesv)
+{
+ headers_conf *newconf = apr_palloc(p, sizeof(*newconf));
+ headers_conf *base = basev;
+ headers_conf *overrides = overridesv;
+
+ newconf->fixup_in = apr_array_append(p, base->fixup_in, overrides->fixup_in);
+ newconf->fixup_out = apr_array_append(p, base->fixup_out, overrides->fixup_out);
+ newconf->fixup_err = apr_array_append(p, base->fixup_err, overrides->fixup_err);
+
+ return newconf;
+}
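+
+/*
+ * Editorial note (illustration only): because the merge appends the override
+ * entries after the inherited ones, and do_headers_fixup() runs entries in
+ * array order, a later directive wins for "set"/"unset".  E.g. with a
+ * hypothetical outer "Header set X-Tag outer" and an inner
+ * "Header set X-Tag inner", requests mapped to the inner section end up
+ * with X-Tag: inner.
+ */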
+
+static char *parse_misc_string(apr_pool_t *p, format_tag *tag, const char **sa)
+{
+ const char *s;
+ char *d;
+
+ tag->func = constant_item;
+
+ s = *sa;
+ while (*s && *s != '%') {
+ s++;
+ }
+ /*
+ * This might allocate a few chars extra if there's a backslash
+ * escape in the format string.
+ */
+ tag->arg = apr_palloc(p, s - *sa + 1);
+
+ d = tag->arg;
+ s = *sa;
+ while (*s && *s != '%') {
+ if (*s != '\\') {
+ *d++ = *s++;
+ }
+ else {
+ s++;
+ switch (*s) {
+ case '\\':
+ *d++ = '\\';
+ s++;
+ break;
+ case 'r':
+ *d++ = '\r';
+ s++;
+ break;
+ case 'n':
+ *d++ = '\n';
+ s++;
+ break;
+ case 't':
+ *d++ = '\t';
+ s++;
+ break;
+ default:
+ /* copy verbatim */
+ *d++ = '\\';
+ /*
+ * Allow the loop to deal with this *s in the normal
+ * fashion so that it handles end of string etc.
+ * properly.
+ */
+ break;
+ }
+ }
+ }
+ *d = '\0';
+
+ *sa = s;
+ return NULL;
+}
+
+static char *parse_format_tag(apr_pool_t *p, format_tag *tag, const char **sa)
+{
+ const char *s = *sa;
+ const char * (*tag_handler)(request_rec *,char *);
+
+ /* Handle string literal/conditionals */
+ if (*s != '%') {
+ return parse_misc_string(p, tag, sa);
+ }
+ s++; /* skip the % */
+    tag->arg = NULL;
+ /* grab the argument if there is one */
+ if (*s == '{') {
+ ++s;
+ tag->arg = ap_getword(p,&s,'}');
+ }
+
+ tag_handler = (const char * (*)(request_rec *,char *))apr_hash_get(format_tag_hash, s++, 1);
+
+ if (!tag_handler) {
+ char dummy[2];
+ dummy[0] = s[-1];
+ dummy[1] = '\0';
+ return apr_pstrcat(p, "Unrecognized Header or RequestHeader directive %",
+ dummy, NULL);
+ }
+ tag->func = tag_handler;
+
+ *sa = s;
+ return NULL;
+}
+
+/*
+ * A format string consists of white space, text and optional format
+ * tags in any order. E.g.,
+ *
+ * Header add MyHeader "Free form text %D %t more text"
+ *
+ * Decompose the format string into its tags. Each tag (struct format_tag)
+ * contains a pointer to the function used to format the tag. Then save each
+ * tag in the tag array anchored in the header_entry.
+ */
+static char *parse_format_string(apr_pool_t *p, header_entry *hdr, const char *s)
+{
+ char *res;
+
+    /* No string to parse with unset and echo commands */
+ if (hdr->action == hdr_unset ||
+ hdr->action == hdr_echo) {
+ return NULL;
+ }
+
+ hdr->ta = apr_array_make(p, 10, sizeof(format_tag));
+
+ while (*s) {
+ if ((res = parse_format_tag(p, (format_tag *) apr_array_push(hdr->ta), &s))) {
+ return res;
+ }
+ }
+ return NULL;
+}
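+
+/*
+ * Editorial note (illustration only): for the format string
+ *
+ *   "Free form text %D %t more text"
+ *
+ * the loop above pushes five format_tag entries onto hdr->ta:
+ *
+ *   { constant_item,           "Free form text " }
+ *   { header_request_duration, NULL }
+ *   { constant_item,           " " }
+ *   { header_request_time,     NULL }
+ *   { constant_item,           " more text" }
+ */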
+
+/* handle RequestHeader and Header directives */
+static const char *header_inout_cmd(cmd_parms *cmd, void *indirconf,
+ const char *action, const char *inhdr,
+ const char *value, const char* envclause)
+{
+ headers_conf *dirconf = indirconf;
+ const char *condition_var = NULL;
+ char *colon;
+ char *hdr = apr_pstrdup(cmd->pool, inhdr);
+ header_entry *new;
+ apr_array_header_t *fixup = (cmd->info == &hdr_in)
+ ? dirconf->fixup_in : (cmd->info == &hdr_err)
+ ? dirconf->fixup_err
+ : dirconf->fixup_out;
+
+ new = (header_entry *) apr_array_push(fixup);
+
+ if (!strcasecmp(action, "set"))
+ new->action = hdr_set;
+ else if (!strcasecmp(action, "add"))
+ new->action = hdr_add;
+ else if (!strcasecmp(action, "append"))
+ new->action = hdr_append;
+ else if (!strcasecmp(action, "unset"))
+ new->action = hdr_unset;
+ else if (!strcasecmp(action, "echo"))
+ new->action = hdr_echo;
+ else
+ return "first argument must be add, set, append, unset or echo.";
+
+ if (new->action == hdr_unset) {
+ if (value)
+ return "header unset takes two arguments";
+ }
+ else if (new->action == hdr_echo) {
+ regex_t *regex;
+ if (value)
+ return "Header echo takes two arguments";
+ else if (cmd->info == &hdr_in)
+ return "Header echo only valid on Header directive";
+ else {
+ regex = ap_pregcomp(cmd->pool, hdr, REG_EXTENDED | REG_NOSUB);
+ if (regex == NULL) {
+ return "Header echo regex could not be compiled";
+ }
+ }
+ new->regex = regex;
+ }
+ else if (!value)
+ return "header requires three arguments";
+
+ /* Handle the envclause on Header */
+ if (envclause != NULL) {
+ if (strncasecmp(envclause, "env=", 4) != 0) {
+ return "error: envclause should be in the form env=envar";
+ }
+ if ((envclause[4] == '\0')
+ || ((envclause[4] == '!') && (envclause[5] == '\0'))) {
+ return "error: missing environment variable name. envclause should be in the form env=envar ";
+ }
+ condition_var = apr_pstrdup(cmd->pool, &envclause[4]);
+ }
+
+ if ((colon = strchr(hdr, ':')))
+ *colon = '\0';
+
+ new->header = hdr;
+ new->condition_var = condition_var;
+
+ return parse_format_string(cmd->pool, new, value);
+}
+
+/* Handle all (xxx)Header directives */
+static const char *header_cmd(cmd_parms *cmd, void *indirconf,
+ const char *args)
+{
+ const char *s;
+ const char *action;
+ const char *hdr;
+ const char *val;
+ const char *envclause;
+
+ s = apr_pstrdup(cmd->pool, args);
+ action = ap_getword_conf(cmd->pool, &s);
+ if (cmd->info == &hdr_out) {
+ if (!strcasecmp(action, "always")) {
+ cmd->info = &hdr_err;
+ action = ap_getword_conf(cmd->pool, &s);
+ }
+ else if (!strcasecmp(action, "onsuccess")) {
+ action = ap_getword_conf(cmd->pool, &s);
+ }
+ }
+ hdr = ap_getword_conf(cmd->pool, &s);
+ val = *s ? ap_getword_conf(cmd->pool, &s) : NULL;
+ envclause = *s ? ap_getword_conf(cmd->pool, &s) : NULL;
+
+ if (*s) {
+ return apr_pstrcat(cmd->pool, cmd->cmd->name,
+ " has too many arguments", NULL);
+ }
+
+ return header_inout_cmd(cmd, indirconf, action, hdr, val, envclause);
+}
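+
+/*
+ * Editorial note (illustration only): the optional leading word is consumed
+ * before the action, so with a hypothetical header name the following are
+ * handled as shown:
+ *
+ *   Header set X-Example deny            -> fixup_out (default)
+ *   Header onsuccess set X-Example deny  -> fixup_out (explicit default)
+ *   Header always set X-Example deny     -> fixup_err, so the header is
+ *                                           also added to error responses
+ */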
+
+/*
+ * Process the tags in the format string. Tags may be format specifiers
+ * (%D, %t, etc.), whitespace or text strings. For each tag, run the handler
+ * (formatter) specific to the tag. Handlers return text strings.
+ * Concatenate the return from each handler into one string that is
+ * returned from this call.
+ */
+static char* process_tags(header_entry *hdr, request_rec *r)
+{
+ int i;
+ const char *s;
+ char *str = NULL;
+
+ format_tag *tag = (format_tag*) hdr->ta->elts;
+
+ for (i = 0; i < hdr->ta->nelts; i++) {
+ s = tag[i].func(r, tag[i].arg);
+ if (str == NULL)
+ str = apr_pstrdup(r->pool, s);
+ else
+ str = apr_pstrcat(r->pool, str, s, NULL);
+ }
+ return str ? str : "";
+}
+
+static int echo_header(echo_do *v, const char *key, const char *val)
+{
+ /* If the input header (key) matches the regex, echo it intact to
+ * r->headers_out.
+ */
+ if (!ap_regexec(v->hdr->regex, key, 0, NULL, 0)) {
+ apr_table_add(v->r->headers_out, key, val);
+ }
+
+ return 1;
+}
+
+static void do_headers_fixup(request_rec *r, apr_table_t *headers,
+ apr_array_header_t *fixup)
+{
+ int i;
+
+ for (i = 0; i < fixup->nelts; ++i) {
+ header_entry *hdr = &((header_entry *) (fixup->elts))[i];
+
+ /* Have any conditional envar-controlled Header processing to do? */
+ if (hdr->condition_var) {
+ const char *envar = hdr->condition_var;
+ if (*envar != '!') {
+ if (apr_table_get(r->subprocess_env, envar) == NULL)
+ continue;
+ }
+ else {
+ if (apr_table_get(r->subprocess_env, &envar[1]) != NULL)
+ continue;
+ }
+ }
+
+ switch (hdr->action) {
+ case hdr_add:
+ apr_table_addn(headers, hdr->header, process_tags(hdr, r));
+ break;
+ case hdr_append:
+ apr_table_mergen(headers, hdr->header, process_tags(hdr, r));
+ break;
+ case hdr_set:
+ apr_table_setn(headers, hdr->header, process_tags(hdr, r));
+ break;
+ case hdr_unset:
+ apr_table_unset(headers, hdr->header);
+ break;
+ case hdr_echo:
+ {
+ echo_do v;
+ v.r = r;
+ v.hdr = hdr;
+ apr_table_do((int (*) (void *, const char *, const char *))
+ echo_header, (void *) &v, r->headers_in, NULL);
+ break;
+ }
+ }
+ }
+}
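+
+/*
+ * Editorial note (illustration only): for a directive such as
+ *
+ *   Header echo ^X-Trace
+ *
+ * header_inout_cmd() compiles "^X-Trace" into hdr->regex, and the hdr_echo
+ * case above then copies every request header whose name matches that
+ * pattern into r->headers_out unchanged.  The X-Trace name is hypothetical.
+ */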
+
+static void ap_headers_insert_output_filter(request_rec *r)
+{
+ headers_conf *dirconf = ap_get_module_config(r->per_dir_config,
+ &headers_module);
+
+ if (dirconf->fixup_out->nelts || dirconf->fixup_err->nelts) {
+ ap_add_output_filter("FIXUP_HEADERS_OUT", NULL, r, r->connection);
+ }
+}
+
+static void ap_headers_insert_error_filter(request_rec *r)
+{
+ headers_conf *dirconf = ap_get_module_config(r->per_dir_config,
+ &headers_module);
+
+ if (dirconf->fixup_err->nelts) {
+ ap_add_output_filter("FIXUP_HEADERS_ERR", NULL, r, r->connection);
+ }
+}
+
+static apr_status_t ap_headers_output_filter(ap_filter_t *f,
+ apr_bucket_brigade *in)
+{
+ headers_conf *dirconf = ap_get_module_config(f->r->per_dir_config,
+ &headers_module);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, f->r->server,
+ "headers: ap_headers_output_filter()");
+
+ /* do the fixup */
+ do_headers_fixup(f->r, f->r->err_headers_out, dirconf->fixup_err);
+ do_headers_fixup(f->r, f->r->headers_out, dirconf->fixup_out);
+
+ /* remove ourselves from the filter chain */
+ ap_remove_output_filter(f);
+
+ /* send the data up the stack */
+ return ap_pass_brigade(f->next,in);
+}
+
+static apr_status_t ap_headers_error_filter(ap_filter_t *f,
+ apr_bucket_brigade *in)
+{
+ headers_conf *dirconf = ap_get_module_config(f->r->per_dir_config,
+ &headers_module);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, f->r->server,
+ "headers: ap_headers_error_filter()");
+
+ /* do the fixup */
+ do_headers_fixup(f->r, f->r->err_headers_out, dirconf->fixup_err);
+
+ /* remove ourselves from the filter chain */
+ ap_remove_output_filter(f);
+
+ /* send the data up the stack */
+ return ap_pass_brigade(f->next,in);
+}
+
+static apr_status_t ap_headers_fixup(request_rec *r)
+{
+ headers_conf *dirconf = ap_get_module_config(r->per_dir_config,
+ &headers_module);
+
+ /* do the fixup */
+ if (dirconf->fixup_in->nelts) {
+ do_headers_fixup(r, r->headers_in, dirconf->fixup_in);
+ }
+
+ return DECLINED;
+}
+
+static const command_rec headers_cmds[] =
+{
+ AP_INIT_RAW_ARGS("Header", header_cmd, &hdr_out, OR_FILEINFO,
+ "an optional condition, an action, header and value "
+ "followed by optional env clause"),
+ AP_INIT_RAW_ARGS("RequestHeader", header_cmd, &hdr_in, OR_FILEINFO,
+ "an action, header and value"),
+ {NULL}
+};
+
+static void register_format_tag_handler(apr_pool_t *p, char *tag, void *tag_handler, int def)
+{
+    apr_hash_set(format_tag_hash, tag, 1, tag_handler);
+}
+static int header_pre_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp)
+{
+ format_tag_hash = apr_hash_make(p);
+ register_format_tag_handler(p, "D", (void*) header_request_duration, 0);
+ register_format_tag_handler(p, "t", (void*) header_request_time, 0);
+ register_format_tag_handler(p, "e", (void*) header_request_env_var, 0);
+
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_pre_config(header_pre_config,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_insert_filter(ap_headers_insert_output_filter, NULL, NULL, APR_HOOK_LAST);
+ ap_hook_insert_error_filter(ap_headers_insert_error_filter, NULL, NULL, APR_HOOK_LAST);
+ ap_hook_fixups(ap_headers_fixup, NULL, NULL, APR_HOOK_LAST);
+ ap_register_output_filter("FIXUP_HEADERS_OUT", ap_headers_output_filter,
+ NULL, AP_FTYPE_CONTENT_SET);
+ ap_register_output_filter("FIXUP_HEADERS_ERR", ap_headers_error_filter,
+ NULL, AP_FTYPE_CONTENT_SET);
+}
+
+module AP_MODULE_DECLARE_DATA headers_module =
+{
+ STANDARD20_MODULE_STUFF,
+    create_headers_config,      /* dir config creator */
+ merge_headers_config, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server configs */
+ headers_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_headers.dsp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_headers.dsp
new file mode 100644
index 00000000..3e5a016f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_headers.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_headers" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_headers - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_headers.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_headers.mak" CFG="mod_headers - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_headers - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_headers - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_headers - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_headers_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_headers.so" /base:@..\..\os\win32\BaseAddr.ref,mod_headers.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_headers.so" /base:@..\..\os\win32\BaseAddr.ref,mod_headers.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_headers - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_headers_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_headers.so" /base:@..\..\os\win32\BaseAddr.ref,mod_headers.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_headers.so" /base:@..\..\os\win32\BaseAddr.ref,mod_headers.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_headers - Win32 Release"
+# Name "mod_headers - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_headers.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_headers.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_headers - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_headers.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_headers.so "headers_module for Apache" ../../include/ap_release.h > .\mod_headers.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_headers - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_headers.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_headers.so "headers_module for Apache" ../../include/ap_release.h > .\mod_headers.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_headers.exp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_headers.exp
new file mode 100644
index 00000000..3f306380
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_headers.exp
@@ -0,0 +1 @@
+headers_module
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_mime_magic.c b/rubbos/app/httpd-2.0.64/modules/metadata/mod_mime_magic.c
new file mode 100644
index 00000000..329d3898
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_mime_magic.c
@@ -0,0 +1,2477 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_mime_magic: MIME type lookup via file magic numbers
+ * Copyright (c) 1996-1997 Cisco Systems, Inc.
+ *
+ * This software was submitted by Cisco Systems to the Apache Software Foundation in July
+ * 1997. Future revisions and derivatives of this source code must
+ * acknowledge Cisco Systems as the original contributor of this module.
+ * All other licensing and usage conditions are those of the Apache Software Foundation.
+ *
+ * Some of this code is derived from the free version of the file command
+ * originally posted to comp.sources.unix. Copyright info for that program
+ * is included below as required.
+ * ---------------------------------------------------------------------------
+ * - Copyright (c) Ian F. Darwin, 1987. Written by Ian F. Darwin.
+ *
+ * This software is not subject to any license of the American Telephone and
+ * Telegraph Company or of the Regents of the University of California.
+ *
+ * Permission is granted to anyone to use this software for any purpose on any
+ * computer system, and to alter it and redistribute it freely, subject to
+ * the following restrictions:
+ *
+ * 1. The author is not responsible for the consequences of use of this
+ * software, no matter how awful, even if they arise from flaws in it.
+ *
+ * 2. The origin of this software must not be misrepresented, either by
+ * explicit claim or by omission. Since few users ever read sources, credits
+ * must appear in the documentation.
+ *
+ * 3. Altered versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software. Since few users ever read
+ * sources, credits must appear in the documentation.
+ *
+ * 4. This notice may not be removed or altered.
+ * -------------------------------------------------------------------------
+ *
+ * For compliance with Mr Darwin's terms: this has been very significantly
+ * modified from the free "file" command.
+ * - all-in-one file for compilation convenience when moving from one
+ * version of Apache to the next.
+ * - Memory allocation is done through the Apache API's apr_pool_t structure.
+ * - All functions have had necessary Apache API request or server
+ * structures passed to them where necessary to call other Apache API
+ * routines. (i.e. usually for logging, files, or memory allocation in
+ * itself or a called function.)
+ * - struct magic has been converted from an array to a single-ended linked
+ * list because it only grows one record at a time, it's only accessed
+ * sequentially, and the Apache API has no equivalent of realloc().
+ * - Functions have been changed to get their parameters from the server
+ * configuration instead of globals. (It should be reentrant now but has
+ * not been tested in a threaded environment.)
+ * - Places where it used to print results to stdout now saves them in a
+ * list where they're used to set the MIME type in the Apache request
+ * record.
+ * - Command-line flags have been removed since they will never be used here.
+ *
+ * Ian Kluft <ikluft@cisco.com>
+ * Engineering Information Framework
+ * Central Engineering
+ * Cisco Systems, Inc.
+ * San Jose, CA, USA
+ *
+ * Initial installation July/August 1996
+ * Misc bug fixes May 1997
+ * Submission to Apache Software Foundation July 1997
+ *
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_protocol.h"
+#include "util_script.h"
+
+/* ### this isn't set by configure? does anybody set this? */
+#ifdef HAVE_UTIME_H
+#include <utime.h>
+#endif
+
+/*
+ * data structures and related constants
+ */
+
+#define MODNAME "mod_mime_magic"
+#define MIME_MAGIC_DEBUG 0
+
+#define MIME_BINARY_UNKNOWN "application/octet-stream"
+#define MIME_TEXT_UNKNOWN "text/plain"
+
+#define MAXMIMESTRING 256
+
+/* HOWMANY must be at least 4096 to make gzip -dcq work */
+#define HOWMANY 4096
+/* SMALL_HOWMANY limits how much work we do to figure out text files */
+#define SMALL_HOWMANY 1024
+#define MAXDESC    50           /* max length of text description */
+#define MAXstring 64            /* max length of "string" types */
+
+struct magic {
+ struct magic *next; /* link to next entry */
+ int lineno; /* line number from magic file */
+
+ short flag;
+#define INDIR 1 /* if '>(...)' appears, */
+#define UNSIGNED 2 /* comparison is unsigned */
+ short cont_level; /* level of ">" */
+ struct {
+ char type; /* byte short long */
+ long offset; /* offset from indirection */
+ } in;
+ long offset; /* offset to magic number */
+ unsigned char reln; /* relation (0=eq, '>'=gt, etc) */
+ char type; /* int, short, long or string. */
+ char vallen; /* length of string value, if any */
+#define BYTE 1
+#define SHORT 2
+#define LONG 4
+#define STRING 5
+#define DATE 6
+#define BESHORT 7
+#define BELONG 8
+#define BEDATE 9
+#define LESHORT 10
+#define LELONG 11
+#define LEDATE 12
+ union VALUETYPE {
+ unsigned char b;
+ unsigned short h;
+ unsigned long l;
+ char s[MAXstring];
+ unsigned char hs[2]; /* 2 bytes of a fixed-endian "short" */
+        unsigned char hl[4]; /* 4 bytes of a fixed-endian "long" */
+ } value; /* either number or string */
+ unsigned long mask; /* mask before comparison with value */
+    char nospflag;              /* suppress space character */
+
+ /* NOTE: this string is suspected of overrunning - find it! */
+ char desc[MAXDESC]; /* description */
+};
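+
+/*
+ * Editorial note (illustration only): a magic file line such as
+ *
+ *   0       string          GIF8            image/gif
+ *
+ * is turned by parse() below into one struct magic with offset = 0,
+ * type = STRING, reln = '=', value.s = "GIF8" and desc = "image/gif".
+ * A line starting with '>' would instead raise cont_level by one for each
+ * leading '>'.
+ */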
+
+/*
+ * data structures for tar file recognition
+ * --------------------------------------------------------------------------
+ * Header file for public domain tar (tape archive) program.
+ *
+ * @(#)tar.h 1.20 86/10/29 Public Domain. Created 25 August 1985 by John
+ * Gilmore, ihnp4!hoptoad!gnu.
+ *
+ * Header block on tape.
+ *
+ * I'm going to use traditional DP naming conventions here. A "block" is a big
+ * chunk of stuff that we do I/O on. A "record" is a piece of info that we
+ * care about. Typically many "record"s fit into a "block".
+ */
+#define RECORDSIZE 512
+#define NAMSIZ 100
+#define TUNMLEN 32
+#define TGNMLEN 32
+
+union record {
+ char charptr[RECORDSIZE];
+ struct header {
+ char name[NAMSIZ];
+ char mode[8];
+ char uid[8];
+ char gid[8];
+ char size[12];
+ char mtime[12];
+ char chksum[8];
+ char linkflag;
+ char linkname[NAMSIZ];
+ char magic[8];
+ char uname[TUNMLEN];
+ char gname[TGNMLEN];
+ char devmajor[8];
+ char devminor[8];
+ } header;
+};
+
+/* The magic field is filled with this if uname and gname are valid. */
+#define TMAGIC "ustar " /* 7 chars and a null */
+
+/*
+ * file-function prototypes
+ */
+static int ascmagic(request_rec *, unsigned char *, apr_size_t);
+static int is_tar(unsigned char *, apr_size_t);
+static int softmagic(request_rec *, unsigned char *, apr_size_t);
+static int tryit(request_rec *, unsigned char *, apr_size_t, int);
+static int zmagic(request_rec *, unsigned char *, apr_size_t);
+
+static int getvalue(server_rec *, struct magic *, char **);
+static int hextoint(int);
+static char *getstr(server_rec *, char *, char *, int, int *);
+static int parse(server_rec *, apr_pool_t *p, char *, int);
+
+static int match(request_rec *, unsigned char *, apr_size_t);
+static int mget(request_rec *, union VALUETYPE *, unsigned char *,
+ struct magic *, apr_size_t);
+static int mcheck(request_rec *, union VALUETYPE *, struct magic *);
+static void mprint(request_rec *, union VALUETYPE *, struct magic *);
+
+static int uncompress(request_rec *, int,
+ unsigned char **, apr_size_t);
+static long from_oct(int, char *);
+static int fsmagic(request_rec *r, const char *fn);
+
+/*
+ * includes for ASCII substring recognition formerly "names.h" in file
+ * command
+ *
+ * Original notes: names and types used by ascmagic in file(1). These tokens are
+ * here because they can appear anywhere in the first HOWMANY bytes, while
+ * tokens in /etc/magic must appear at fixed offsets into the file. Don't
+ * make HOWMANY too high unless you have a very fast CPU.
+ */
+
+/* these types are used to index the apr_table_t 'types': keep em in sync! */
+/* HTML inserted first because this is a web server module now */
+#define L_HTML 0 /* HTML */
+#define L_C 1 /* first and foremost on UNIX */
+#define L_FORT 2 /* the oldest one */
+#define L_MAKE 3 /* Makefiles */
+#define L_PLI 4 /* PL/1 */
+#define L_MACH 5 /* some kinda assembler */
+#define L_ENG 6 /* English */
+#define L_PAS 7 /* Pascal */
+#define L_MAIL 8 /* Electronic mail */
+#define L_NEWS 9 /* Usenet Netnews */
+
+static char *types[] =
+{
+ "text/html", /* HTML */
+ "text/plain", /* "c program text", */
+ "text/plain", /* "fortran program text", */
+ "text/plain", /* "make commands text", */
+ "text/plain", /* "pl/1 program text", */
+ "text/plain", /* "assembler program text", */
+ "text/plain", /* "English text", */
+ "text/plain", /* "pascal program text", */
+ "message/rfc822", /* "mail text", */
+ "message/news", /* "news text", */
+ "application/binary", /* "can't happen error on names.h/types", */
+ 0
+};
+
+static struct names {
+ char *name;
+ short type;
+} names[] = {
+
+ /* These must be sorted by eye for optimal hit rate */
+ /* Add to this list only after substantial meditation */
+ {
+ "<html>", L_HTML
+ },
+ {
+ "<HTML>", L_HTML
+ },
+ {
+ "<head>", L_HTML
+ },
+ {
+ "<HEAD>", L_HTML
+ },
+ {
+ "<title>", L_HTML
+ },
+ {
+ "<TITLE>", L_HTML
+ },
+ {
+ "<h1>", L_HTML
+ },
+ {
+ "<H1>", L_HTML
+ },
+ {
+ "<!--", L_HTML
+ },
+ {
+ "<!DOCTYPE HTML", L_HTML
+ },
+ {
+ "/*", L_C
+ }, /* must precede "The", "the", etc. */
+ {
+ "#include", L_C
+ },
+ {
+ "char", L_C
+ },
+ {
+ "The", L_ENG
+ },
+ {
+ "the", L_ENG
+ },
+ {
+ "double", L_C
+ },
+ {
+ "extern", L_C
+ },
+ {
+ "float", L_C
+ },
+ {
+ "real", L_C
+ },
+ {
+ "struct", L_C
+ },
+ {
+ "union", L_C
+ },
+ {
+ "CFLAGS", L_MAKE
+ },
+ {
+ "LDFLAGS", L_MAKE
+ },
+ {
+ "all:", L_MAKE
+ },
+ {
+ ".PRECIOUS", L_MAKE
+ },
+ /*
+ * Too many files of text have these words in them. Find another way to
+ * recognize Fortrash.
+ */
+#ifdef NOTDEF
+ {
+ "subroutine", L_FORT
+ },
+ {
+ "function", L_FORT
+ },
+ {
+ "block", L_FORT
+ },
+ {
+ "common", L_FORT
+ },
+ {
+ "dimension", L_FORT
+ },
+ {
+ "integer", L_FORT
+ },
+ {
+ "data", L_FORT
+ },
+#endif /* NOTDEF */
+ {
+ ".ascii", L_MACH
+ },
+ {
+ ".asciiz", L_MACH
+ },
+ {
+ ".byte", L_MACH
+ },
+ {
+ ".even", L_MACH
+ },
+ {
+ ".globl", L_MACH
+ },
+ {
+ "clr", L_MACH
+ },
+ {
+ "(input,", L_PAS
+ },
+ {
+ "dcl", L_PLI
+ },
+ {
+ "Received:", L_MAIL
+ },
+ {
+ ">From", L_MAIL
+ },
+ {
+ "Return-Path:", L_MAIL
+ },
+ {
+ "Cc:", L_MAIL
+ },
+ {
+ "Newsgroups:", L_NEWS
+ },
+ {
+ "Path:", L_NEWS
+ },
+ {
+ "Organization:", L_NEWS
+ },
+ {
+ NULL, 0
+ }
+};
+
+#define NNAMES ((sizeof(names)/sizeof(struct names)) - 1)
+
+/*
+ * Result String List (RSL)
+ *
+ * The file(1) command prints its output. Instead, we store the various
+ * "printed" strings in a list (allocating memory as we go) and concatenate
+ * them at the end when we finally know how much space they'll need.
+ */
+
+typedef struct magic_rsl_s {
+ char *str; /* string, possibly a fragment */
+ struct magic_rsl_s *next; /* pointer to next fragment */
+} magic_rsl;
+
+/*
+ * Apache module configuration structures
+ */
+
+/* per-server info */
+typedef struct {
+    const char *magicfile;      /* where the magic file is found */
+ struct magic *magic; /* head of magic config list */
+ struct magic *last;
+} magic_server_config_rec;
+
+/* per-request info */
+typedef struct {
+ magic_rsl *head; /* result string list */
+ magic_rsl *tail;
+ unsigned suf_recursion; /* recursion depth in suffix check */
+} magic_req_rec;
+
+/*
+ * configuration functions - called by Apache API routines
+ */
+
+module AP_MODULE_DECLARE_DATA mime_magic_module;
+
+static void *create_magic_server_config(apr_pool_t *p, server_rec *d)
+{
+ /* allocate the config - use pcalloc because it needs to be zeroed */
+ return apr_pcalloc(p, sizeof(magic_server_config_rec));
+}
+
+static void *merge_magic_server_config(apr_pool_t *p, void *basev, void *addv)
+{
+ magic_server_config_rec *base = (magic_server_config_rec *) basev;
+ magic_server_config_rec *add = (magic_server_config_rec *) addv;
+ magic_server_config_rec *new = (magic_server_config_rec *)
+ apr_palloc(p, sizeof(magic_server_config_rec));
+
+ new->magicfile = add->magicfile ? add->magicfile : base->magicfile;
+ new->magic = NULL;
+ new->last = NULL;
+ return new;
+}
+
+static const char *set_magicfile(cmd_parms *cmd, void *dummy, const char *arg)
+{
+ magic_server_config_rec *conf = (magic_server_config_rec *)
+ ap_get_module_config(cmd->server->module_config,
+ &mime_magic_module);
+
+ if (!conf) {
+ return MODNAME ": server structure not allocated";
+ }
+ conf->magicfile = arg;
+ return NULL;
+}
+
+/*
+ * configuration file commands - exported to Apache API
+ */
+
+static const command_rec mime_magic_cmds[] =
+{
+ AP_INIT_TAKE1("MimeMagicFile", set_magicfile, NULL, RSRC_CONF,
+ "Path to MIME Magic file (in file(1) format)"),
+ {NULL}
+};
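+
+/*
+ * Editorial note (illustration only): the module is normally driven by a
+ * single server-level directive naming a magic database in file(1) format,
+ * e.g.
+ *
+ *   MimeMagicFile conf/magic
+ *
+ * The path is resolved against the server root by apprentice() through
+ * ap_server_root_relative().
+ */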
+
+/*
+ * RSL (result string list) processing routines
+ *
+ * These collect strings that would have been printed in fragments by file(1)
+ * into a list of magic_rsl structures with the strings. When complete,
+ * they're concatenated together to become the MIME content and encoding
+ * types.
+ *
+ * return value conventions for these functions: functions which return int:
+ * failure = -1, other = result functions which return pointers: failure = 0,
+ * other = result
+ */
+
+/* allocate a per-request structure and put it in the request record */
+static magic_req_rec *magic_set_config(request_rec *r)
+{
+ magic_req_rec *req_dat = (magic_req_rec *) apr_palloc(r->pool,
+ sizeof(magic_req_rec));
+
+ req_dat->head = req_dat->tail = (magic_rsl *) NULL;
+ ap_set_module_config(r->request_config, &mime_magic_module, req_dat);
+ return req_dat;
+}
+
+/* add a string to the result string list for this request */
+/* the caller must allocate "str" (e.g. from r->pool) so it outlives this call */
+static int magic_rsl_add(request_rec *r, char *str)
+{
+ magic_req_rec *req_dat = (magic_req_rec *)
+ ap_get_module_config(r->request_config, &mime_magic_module);
+ magic_rsl *rsl;
+
+ /* make sure we have a list to put it in */
+ if (!req_dat) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_EINVAL, r,
+ MODNAME ": request config should not be NULL");
+ if (!(req_dat = magic_set_config(r))) {
+ /* failure */
+ return -1;
+ }
+ }
+
+ /* allocate the list entry */
+ rsl = (magic_rsl *) apr_palloc(r->pool, sizeof(magic_rsl));
+
+ /* fill it */
+ rsl->str = str;
+ rsl->next = (magic_rsl *) NULL;
+
+ /* append to the list */
+ if (req_dat->head && req_dat->tail) {
+ req_dat->tail->next = rsl;
+ req_dat->tail = rsl;
+ }
+ else {
+ req_dat->head = req_dat->tail = rsl;
+ }
+
+ /* success */
+ return 0;
+}
+
+/* RSL hook for puts-type functions */
+static int magic_rsl_puts(request_rec *r, char *str)
+{
+ return magic_rsl_add(r, str);
+}
+
+/* RSL hook for printf-type functions */
+static int magic_rsl_printf(request_rec *r, char *str,...)
+{
+ va_list ap;
+
+ char buf[MAXMIMESTRING];
+
+ /* assemble the string into the buffer */
+ va_start(ap, str);
+ apr_vsnprintf(buf, sizeof(buf), str, ap);
+ va_end(ap);
+
+ /* add the buffer to the list */
+ return magic_rsl_add(r, apr_pstrdup(r->pool, buf));
+}
+
+/* RSL hook for putchar-type functions */
+static int magic_rsl_putchar(request_rec *r, char c)
+{
+ char str[2];
+
+ /* high overhead for 1 char - just hope they don't do this much */
+ str[0] = c;
+ str[1] = '\0';
+    return magic_rsl_add(r, apr_pstrdup(r->pool, str));
+}
+
+/* allocate and copy a contiguous string from a result string list */
+static char *rsl_strdup(request_rec *r, int start_frag, int start_pos, int len)
+{
+ char *result; /* return value */
+ int cur_frag, /* current fragment number/counter */
+ cur_pos, /* current position within fragment */
+ res_pos; /* position in result string */
+ magic_rsl *frag; /* list-traversal pointer */
+ magic_req_rec *req_dat = (magic_req_rec *)
+ ap_get_module_config(r->request_config, &mime_magic_module);
+
+ /* allocate the result string */
+ result = (char *) apr_palloc(r->pool, len + 1);
+
+ /* loop through and collect the string */
+ res_pos = 0;
+ for (frag = req_dat->head, cur_frag = 0;
+ frag->next;
+ frag = frag->next, cur_frag++) {
+ /* loop to the first fragment */
+ if (cur_frag < start_frag)
+ continue;
+
+ /* loop through and collect chars */
+ for (cur_pos = (cur_frag == start_frag) ? start_pos : 0;
+ frag->str[cur_pos];
+ cur_pos++) {
+ if (cur_frag >= start_frag
+ && cur_pos >= start_pos
+ && res_pos <= len) {
+ result[res_pos++] = frag->str[cur_pos];
+ if (res_pos > len) {
+ break;
+ }
+ }
+ }
+ }
+
+ /* clean up and return */
+ result[res_pos] = 0;
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ MODNAME ": rsl_strdup() %d chars: %s", res_pos - 1, result);
+#endif
+ return result;
+}
+
+/* states for the state-machine algorithm in magic_rsl_to_request() */
+typedef enum {
+ rsl_leading_space, rsl_type, rsl_subtype, rsl_separator, rsl_encoding
+} rsl_states;
+
+/* process the RSL and set the MIME info in the request record */
+static int magic_rsl_to_request(request_rec *r)
+{
+ int cur_frag, /* current fragment number/counter */
+ cur_pos, /* current position within fragment */
+ type_frag, /* content type starting point: fragment */
+ type_pos, /* content type starting point: position */
+ type_len, /* content type length */
+ encoding_frag, /* content encoding starting point: fragment */
+ encoding_pos, /* content encoding starting point: position */
+ encoding_len; /* content encoding length */
+
+ magic_rsl *frag; /* list-traversal pointer */
+ rsl_states state;
+
+ magic_req_rec *req_dat = (magic_req_rec *)
+ ap_get_module_config(r->request_config, &mime_magic_module);
+
+ /* check if we have a result */
+ if (!req_dat || !req_dat->head) {
+ /* empty - no match, we defer to other Apache modules */
+ return DECLINED;
+ }
+
+ /* start searching for the type and encoding */
+ state = rsl_leading_space;
+ type_frag = type_pos = type_len = 0;
+ encoding_frag = encoding_pos = encoding_len = 0;
+ for (frag = req_dat->head, cur_frag = 0;
+ frag && frag->next;
+ frag = frag->next, cur_frag++) {
+ /* loop through the characters in the fragment */
+ for (cur_pos = 0; frag->str[cur_pos]; cur_pos++) {
+ if (apr_isspace(frag->str[cur_pos])) {
+ /* process whitespace actions for each state */
+ if (state == rsl_leading_space) {
+ /* eat whitespace in this state */
+ continue;
+ }
+ else if (state == rsl_type) {
+ /* whitespace: type has no slash! */
+ return DECLINED;
+ }
+ else if (state == rsl_subtype) {
+ /* whitespace: end of MIME type */
+ state++;
+ continue;
+ }
+ else if (state == rsl_separator) {
+ /* eat whitespace in this state */
+ continue;
+ }
+ else if (state == rsl_encoding) {
+ /* whitespace: end of MIME encoding */
+ /* we're done */
+ frag = req_dat->tail;
+ break;
+ }
+ else {
+ /* should not be possible */
+ /* abandon malfunctioning module */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ MODNAME ": bad state %d (ws)", state);
+ return DECLINED;
+ }
+ /* NOTREACHED */
+ }
+ else if (state == rsl_type &&
+ frag->str[cur_pos] == '/') {
+ /* copy the char and go to rsl_subtype state */
+ type_len++;
+ state++;
+ }
+ else {
+ /* process non-space actions for each state */
+ if (state == rsl_leading_space) {
+ /* non-space: begin MIME type */
+ state++;
+ type_frag = cur_frag;
+ type_pos = cur_pos;
+ type_len = 1;
+ continue;
+ }
+ else if (state == rsl_type ||
+ state == rsl_subtype) {
+ /* non-space: adds to type */
+ type_len++;
+ continue;
+ }
+ else if (state == rsl_separator) {
+ /* non-space: begin MIME encoding */
+ state++;
+ encoding_frag = cur_frag;
+ encoding_pos = cur_pos;
+ encoding_len = 1;
+ continue;
+ }
+ else if (state == rsl_encoding) {
+ /* non-space: adds to encoding */
+ encoding_len++;
+ continue;
+ }
+ else {
+ /* should not be possible */
+ /* abandon malfunctioning module */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ MODNAME ": bad state %d (ns)", state);
+ return DECLINED;
+ }
+ /* NOTREACHED */
+ }
+ /* NOTREACHED */
+ }
+ }
+
+ /* if we ended prior to state rsl_subtype, we had incomplete info */
+ if (state != rsl_subtype && state != rsl_separator &&
+ state != rsl_encoding) {
+ /* defer to other modules */
+ return DECLINED;
+ }
+
+ /* save the info in the request record */
+    if (state == rsl_subtype || state == rsl_separator ||
+        state == rsl_encoding) {
+ char *tmp;
+ tmp = rsl_strdup(r, type_frag, type_pos, type_len);
+ /* XXX: this could be done at config time I'm sure... but I'm
+ * confused by all this magic_rsl stuff. -djg */
+ ap_content_type_tolower(tmp);
+ ap_set_content_type(r, tmp);
+ }
+ if (state == rsl_encoding) {
+ char *tmp;
+ tmp = rsl_strdup(r, encoding_frag,
+ encoding_pos, encoding_len);
+ /* XXX: this could be done at config time I'm sure... but I'm
+ * confused by all this magic_rsl stuff. -djg */
+ ap_str_tolower(tmp);
+ r->content_encoding = tmp;
+ }
+
+ /* detect memory allocation or other errors */
+ if (!r->content_type ||
+ (state == rsl_encoding && !r->content_encoding)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ MODNAME ": unexpected state %d; could be caused by bad "
+ "data in magic file",
+ state);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /* success! */
+ return OK;
+}
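+
+/*
+ * Editorial note (illustration only): a result string list that concatenates
+ * to "application/octet-stream x-gzip" leaves the state machine above with
+ * r->content_type set to "application/octet-stream" and r->content_encoding
+ * set to "x-gzip"; a single token such as "text/html" sets only the content
+ * type.
+ */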
+
+/*
+ * magic_process - process the input file for request r (Apache API request
+ * record).  Formerly called "process" in the file command; the prefix was
+ * added for clarity.  Opens the file and reads a fixed-size buffer to begin
+ * processing the contents.
+ */
+static int magic_process(request_rec *r)
+{
+ apr_file_t *fd = NULL;
+ unsigned char buf[HOWMANY + 1]; /* one extra for terminating '\0' */
+ apr_size_t nbytes = 0; /* number of bytes read from a datafile */
+ int result;
+
+ /*
+ * first try judging the file based on its filesystem status
+ */
+ switch ((result = fsmagic(r, r->filename))) {
+ case DONE:
+ magic_rsl_putchar(r, '\n');
+ return OK;
+ case OK:
+ break;
+ default:
+ /* fatal error, bail out */
+ return result;
+ }
+
+ if (apr_file_open(&fd, r->filename, APR_READ, APR_OS_DEFAULT, r->pool) != APR_SUCCESS) {
+ /* We can't open it, but we were able to stat it. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ MODNAME ": can't read `%s'", r->filename);
+ /* let some other handler decide what the problem is */
+ return DECLINED;
+ }
+
+ /*
+ * try looking at the first HOWMANY bytes
+ */
+ nbytes = sizeof(buf) - 1;
+ if ((result = apr_file_read(fd, (char *) buf, &nbytes)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, result, r,
+ MODNAME ": read failed: %s", r->filename);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ if (nbytes == 0) {
+ return DECLINED;
+ }
+ else {
+ buf[nbytes++] = '\0'; /* null-terminate it */
+ result = tryit(r, buf, nbytes, 1);
+ if (result != OK) {
+ return result;
+ }
+ }
+
+ (void) apr_file_close(fd);
+ (void) magic_rsl_putchar(r, '\n');
+
+ return OK;
+}
+
+
+static int tryit(request_rec *r, unsigned char *buf, apr_size_t nb,
+ int checkzmagic)
+{
+ /*
+ * Try compression stuff
+ */
+ if (checkzmagic == 1) {
+ if (zmagic(r, buf, nb) == 1)
+ return OK;
+ }
+
+ /*
+ * try tests in /etc/magic (or surrogate magic file)
+ */
+ if (softmagic(r, buf, nb) == 1)
+ return OK;
+
+ /*
+ * try known keywords, check for ascii-ness too.
+ */
+ if (ascmagic(r, buf, nb) == 1)
+ return OK;
+
+ /*
+ * abandon hope, all ye who remain here
+ */
+ return DECLINED;
+}
+
+#define EATAB {while (apr_isspace(*l)) ++l;}
+
+/*
+ * apprentice - load the configuration from the magic file into the
+ * per-server config
+ */
+static int apprentice(server_rec *s, apr_pool_t *p)
+{
+ apr_file_t *f = NULL;
+ apr_status_t result;
+ char line[BUFSIZ + 1];
+ int errs = 0;
+ int lineno;
+#if MIME_MAGIC_DEBUG
+ int rule = 0;
+ struct magic *m, *prevm;
+#endif
+ magic_server_config_rec *conf = (magic_server_config_rec *)
+ ap_get_module_config(s->module_config, &mime_magic_module);
+ const char *fname = ap_server_root_relative(p, conf->magicfile);
+
+ if (!fname) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, APR_EBADPATH, s,
+ MODNAME ": Invalid magic file path %s", conf->magicfile);
+ return -1;
+ }
+ if ((result = apr_file_open(&f, fname, APR_READ | APR_BUFFERED,
+ APR_OS_DEFAULT, p) != APR_SUCCESS)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, result, s,
+ MODNAME ": can't read magic file %s", fname);
+ return -1;
+ }
+
+ /* set up the magic list (empty) */
+ conf->magic = conf->last = NULL;
+
+ /* parse it */
+ for (lineno = 1; apr_file_gets(line, BUFSIZ, f) == APR_SUCCESS; lineno++) {
+ int ws_offset;
+ char *last = line + strlen(line) - 1; /* guaranteed that len >= 1 since an
+ * "empty" line contains a '\n'
+ */
+
+ /* delete newline and any other trailing whitespace */
+ while (last >= line
+ && apr_isspace(*last)) {
+ *last = '\0';
+ --last;
+ }
+
+ /* skip leading whitespace */
+ ws_offset = 0;
+ while (line[ws_offset] && apr_isspace(line[ws_offset])) {
+ ws_offset++;
+ }
+
+ /* skip blank lines */
+ if (line[ws_offset] == 0) {
+ continue;
+ }
+
+ /* comment, do not parse */
+ if (line[ws_offset] == '#')
+ continue;
+
+#if MIME_MAGIC_DEBUG
+ /* if we get here, we're going to use it so count it */
+ rule++;
+#endif
+
+ /* parse it */
+ if (parse(s, p, line + ws_offset, lineno) != 0)
+ ++errs;
+ }
+
+ (void) apr_file_close(f);
+
+#if MIME_MAGIC_DEBUG
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ MODNAME ": apprentice conf=%x file=%s m=%s m->next=%s last=%s",
+ conf,
+ conf->magicfile ? conf->magicfile : "NULL",
+ conf->magic ? "set" : "NULL",
+ (conf->magic && conf->magic->next) ? "set" : "NULL",
+ conf->last ? "set" : "NULL");
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ MODNAME ": apprentice read %d lines, %d rules, %d errors",
+ lineno, rule, errs);
+#endif
+
+#if MIME_MAGIC_DEBUG
+ prevm = 0;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ MODNAME ": apprentice test");
+ for (m = conf->magic; m; m = m->next) {
+ if (apr_isprint((((unsigned long) m) >> 24) & 255) &&
+ apr_isprint((((unsigned long) m) >> 16) & 255) &&
+ apr_isprint((((unsigned long) m) >> 8) & 255) &&
+ apr_isprint(((unsigned long) m) & 255)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ MODNAME ": apprentice: POINTER CLOBBERED! "
+ "m=\"%c%c%c%c\" line=%d",
+ (((unsigned long) m) >> 24) & 255,
+ (((unsigned long) m) >> 16) & 255,
+ (((unsigned long) m) >> 8) & 255,
+ ((unsigned long) m) & 255,
+ prevm ? prevm->lineno : -1);
+ break;
+ }
+ prevm = m;
+ }
+#endif
+
+ return (errs ? -1 : 0);
+}
+
+/*
+ * extend the sign bit if the comparison is to be signed
+ */
+static unsigned long signextend(server_rec *s, struct magic *m, unsigned long v)
+{
+ if (!(m->flag & UNSIGNED))
+ switch (m->type) {
+ /*
+ * Do not remove the casts below. They are vital. When later
+ * compared with the data, the sign extension must have happened.
+ */
+ case BYTE:
+ v = (char) v;
+ break;
+ case SHORT:
+ case BESHORT:
+ case LESHORT:
+ v = (short) v;
+ break;
+ case DATE:
+ case BEDATE:
+ case LEDATE:
+ case LONG:
+ case BELONG:
+ case LELONG:
+ v = (long) v;
+ break;
+ case STRING:
+ break;
+ default:
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ MODNAME ": can't happen: m->type=%d", m->type);
+ return -1;
+ }
+ return v;
+}
+
+/*
+ * parse one line from magic file, put into magic[index++] if valid
+ */
+static int parse(server_rec *serv, apr_pool_t *p, char *l, int lineno)
+{
+ struct magic *m;
+ char *t, *s;
+ magic_server_config_rec *conf = (magic_server_config_rec *)
+ ap_get_module_config(serv->module_config, &mime_magic_module);
+
+ /* allocate magic structure entry */
+ m = (struct magic *) apr_pcalloc(p, sizeof(struct magic));
+
+ /* append to linked list */
+ m->next = NULL;
+ if (!conf->magic || !conf->last) {
+ conf->magic = conf->last = m;
+ }
+ else {
+ conf->last->next = m;
+ conf->last = m;
+ }
+
+ /* set values in magic structure */
+ m->flag = 0;
+ m->cont_level = 0;
+ m->lineno = lineno;
+
+ while (*l == '>') {
+ ++l; /* step over */
+ m->cont_level++;
+ }
+
+ if (m->cont_level != 0 && *l == '(') {
+ ++l; /* step over */
+ m->flag |= INDIR;
+ }
+
+ /* get offset, then skip over it */
+ m->offset = (int) strtol(l, &t, 0);
+ if (l == t) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, serv,
+ MODNAME ": offset %s invalid", l);
+ }
+ l = t;
+
+ if (m->flag & INDIR) {
+ m->in.type = LONG;
+ m->in.offset = 0;
+ /*
+ * read [.lbs][+-]nnnnn)
+ */
+ if (*l == '.') {
+ switch (*++l) {
+ case 'l':
+ m->in.type = LONG;
+ break;
+ case 's':
+ m->in.type = SHORT;
+ break;
+ case 'b':
+ m->in.type = BYTE;
+ break;
+ default:
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, serv,
+ MODNAME ": indirect offset type %c invalid", *l);
+ break;
+ }
+ l++;
+ }
+ s = l;
+ if (*l == '+' || *l == '-')
+ l++;
+ if (apr_isdigit((unsigned char) *l)) {
+ m->in.offset = strtol(l, &t, 0);
+ if (*s == '-')
+ m->in.offset = -m->in.offset;
+ }
+ else
+ t = l;
+ if (*t++ != ')') {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, serv,
+ MODNAME ": missing ')' in indirect offset");
+ }
+ l = t;
+ }
+
+
+ while (apr_isdigit((unsigned char) *l))
+ ++l;
+ EATAB;
+
+#define NBYTE 4
+#define NSHORT 5
+#define NLONG 4
+#define NSTRING 6
+#define NDATE 4
+#define NBESHORT 7
+#define NBELONG 6
+#define NBEDATE 6
+#define NLESHORT 7
+#define NLELONG 6
+#define NLEDATE 6
+
+ if (*l == 'u') {
+ ++l;
+ m->flag |= UNSIGNED;
+ }
+
+ /* get type, skip it */
+ if (strncmp(l, "byte", NBYTE) == 0) {
+ m->type = BYTE;
+ l += NBYTE;
+ }
+ else if (strncmp(l, "short", NSHORT) == 0) {
+ m->type = SHORT;
+ l += NSHORT;
+ }
+ else if (strncmp(l, "long", NLONG) == 0) {
+ m->type = LONG;
+ l += NLONG;
+ }
+ else if (strncmp(l, "string", NSTRING) == 0) {
+ m->type = STRING;
+ l += NSTRING;
+ }
+ else if (strncmp(l, "date", NDATE) == 0) {
+ m->type = DATE;
+ l += NDATE;
+ }
+ else if (strncmp(l, "beshort", NBESHORT) == 0) {
+ m->type = BESHORT;
+ l += NBESHORT;
+ }
+ else if (strncmp(l, "belong", NBELONG) == 0) {
+ m->type = BELONG;
+ l += NBELONG;
+ }
+ else if (strncmp(l, "bedate", NBEDATE) == 0) {
+ m->type = BEDATE;
+ l += NBEDATE;
+ }
+ else if (strncmp(l, "leshort", NLESHORT) == 0) {
+ m->type = LESHORT;
+ l += NLESHORT;
+ }
+ else if (strncmp(l, "lelong", NLELONG) == 0) {
+ m->type = LELONG;
+ l += NLELONG;
+ }
+ else if (strncmp(l, "ledate", NLEDATE) == 0) {
+ m->type = LEDATE;
+ l += NLEDATE;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, serv,
+ MODNAME ": type %s invalid", l);
+ return -1;
+ }
+ /* New-style anding: "0 byte&0x80 =0x80 dynamically linked" */
+ if (*l == '&') {
+ ++l;
+ m->mask = signextend(serv, m, strtol(l, &l, 0));
+ }
+ else
+ m->mask = ~0L;
+ EATAB;
+
+ switch (*l) {
+ case '>':
+ case '<':
+ /* Old-style anding: "0 byte &0x80 dynamically linked" */
+ case '&':
+ case '^':
+ case '=':
+ m->reln = *l;
+ ++l;
+ break;
+ case '!':
+ if (m->type != STRING) {
+ m->reln = *l;
+ ++l;
+ break;
+ }
+ /* FALL THROUGH */
+ default:
+ if (*l == 'x' && apr_isspace(l[1])) {
+ m->reln = *l;
+ ++l;
+ goto GetDesc; /* Bill The Cat */
+ }
+ m->reln = '=';
+ break;
+ }
+ EATAB;
+
+ if (getvalue(serv, m, &l))
+ return -1;
+ /*
+ * now get last part - the description
+ */
+ GetDesc:
+ EATAB;
+ if (l[0] == '\b') {
+ ++l;
+ m->nospflag = 1;
+ }
+ else if ((l[0] == '\\') && (l[1] == 'b')) {
+ ++l;
+ ++l;
+ m->nospflag = 1;
+ }
+ else
+ m->nospflag = 0;
+ strncpy(m->desc, l, sizeof(m->desc) - 1);
+ m->desc[sizeof(m->desc) - 1] = '\0';
+
+#if MIME_MAGIC_DEBUG
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, serv,
+ MODNAME ": parse line=%d m=%x next=%x cont=%d desc=%s",
+ lineno, m, m->next, m->cont_level, m->desc);
+#endif /* MIME_MAGIC_DEBUG */
+
+ return 0;
+}
+
+/*
+ * Read a numeric value from a pointer, into the value union of a magic
+ * pointer, according to the magic type. Update the string pointer to point
+ * just after the number read. Return 0 for success, non-zero for failure.
+ */
+static int getvalue(server_rec *s, struct magic *m, char **p)
+{
+ int slen;
+
+ if (m->type == STRING) {
+ *p = getstr(s, *p, m->value.s, sizeof(m->value.s), &slen);
+ m->vallen = slen;
+ }
+ else if (m->reln != 'x')
+ m->value.l = signextend(s, m, strtol(*p, p, 0));
+ return 0;
+}
+
+/*
+ * Convert a string containing C character escapes. Stop at an unescaped
+ * space or tab. Copy the converted version to "p", returning its length in
+ * *slen. Return updated scan pointer as function result.
+ */
+static char *getstr(server_rec *serv, register char *s, register char *p,
+ int plen, int *slen)
+{
+ char *origs = s, *origp = p;
+ char *pmax = p + plen - 1;
+ register int c;
+ register int val;
+
+ while ((c = *s++) != '\0') {
+ if (apr_isspace(c))
+ break;
+ if (p >= pmax) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, serv,
+ MODNAME ": string too long: %s", origs);
+ break;
+ }
+ if (c == '\\') {
+ switch (c = *s++) {
+
+ case '\0':
+ goto out;
+
+ default:
+ *p++ = (char) c;
+ break;
+
+ case 'n':
+ *p++ = '\n';
+ break;
+
+ case 'r':
+ *p++ = '\r';
+ break;
+
+ case 'b':
+ *p++ = '\b';
+ break;
+
+ case 't':
+ *p++ = '\t';
+ break;
+
+ case 'f':
+ *p++ = '\f';
+ break;
+
+ case 'v':
+ *p++ = '\v';
+ break;
+
+ /* \ and up to 3 octal digits */
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ val = c - '0';
+ c = *s++; /* try for 2 */
+ if (c >= '0' && c <= '7') {
+ val = (val << 3) | (c - '0');
+ c = *s++; /* try for 3 */
+ if (c >= '0' && c <= '7')
+ val = (val << 3) | (c - '0');
+ else
+ --s;
+ }
+ else
+ --s;
+ *p++ = (char) val;
+ break;
+
+ /* \x and up to 3 hex digits */
+ case 'x':
+ val = 'x'; /* Default if no digits */
+ c = hextoint(*s++); /* Get next char */
+ if (c >= 0) {
+ val = c;
+ c = hextoint(*s++);
+ if (c >= 0) {
+ val = (val << 4) + c;
+ c = hextoint(*s++);
+ if (c >= 0) {
+ val = (val << 4) + c;
+ }
+ else
+ --s;
+ }
+ else
+ --s;
+ }
+ else
+ --s;
+ *p++ = (char) val;
+ break;
+ }
+ }
+ else
+ *p++ = (char) c;
+ }
+ out:
+ *p = '\0';
+ *slen = p - origp;
+ return s;
+}
+
+
+/* Single hex char to int; -1 if not a hex char. */
+static int hextoint(int c)
+{
+ if (apr_isdigit(c))
+ return c - '0';
+ if ((c >= 'a') && (c <= 'f'))
+ return c + 10 - 'a';
+ if ((c >= 'A') && (c <= 'F'))
+ return c + 10 - 'A';
+ return -1;
+}
+
+
+/*
+ * return DONE to indicate it's been handled
+ * return OK to indicate it's a regular file still needing handling
+ * other returns indicate a failure of some sort
+ */
+static int fsmagic(request_rec *r, const char *fn)
+{
+ switch (r->finfo.filetype) {
+ case APR_DIR:
+ magic_rsl_puts(r, DIR_MAGIC_TYPE);
+ return DONE;
+ case APR_CHR:
+ /*
+ * (void) magic_rsl_printf(r,"character special (%d/%d)",
+ * major(sb->st_rdev), minor(sb->st_rdev));
+ */
+ (void) magic_rsl_puts(r, MIME_BINARY_UNKNOWN);
+ return DONE;
+ case APR_BLK:
+ /*
+ * (void) magic_rsl_printf(r,"block special (%d/%d)",
+ * major(sb->st_rdev), minor(sb->st_rdev));
+ */
+ (void) magic_rsl_puts(r, MIME_BINARY_UNKNOWN);
+ return DONE;
+ /* TODO add code to handle V7 MUX and Blit MUX files */
+ case APR_PIPE:
+ /*
+ * magic_rsl_puts(r,"fifo (named pipe)");
+ */
+ (void) magic_rsl_puts(r, MIME_BINARY_UNKNOWN);
+ return DONE;
+ case APR_LNK:
+ /* We used stat(), the only possible reason for this is that the
+ * symlink is broken.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ MODNAME ": broken symlink (%s)", fn);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ case APR_SOCK:
+ magic_rsl_puts(r, MIME_BINARY_UNKNOWN);
+ return DONE;
+ case APR_REG:
+ break;
+ default:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ MODNAME ": invalid file type %d.", r->finfo.filetype);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /*
+ * regular file, check next possibility
+ */
+ if (r->finfo.size == 0) {
+ magic_rsl_puts(r, MIME_TEXT_UNKNOWN);
+ return DONE;
+ }
+ return OK;
+}
+
+/*
+ * softmagic - lookup one file in the database (already read from the magic
+ * file by apprentice()).  Passed a buffer holding the first bytes of the
+ * file to be typed.
+ */
+ /* ARGSUSED1 */ /* nbytes passed for regularity; may be needed later */
+static int softmagic(request_rec *r, unsigned char *buf, apr_size_t nbytes)
+{
+ if (match(r, buf, nbytes))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Go through the whole list, stopping if you find a match. Process all the
+ * continuations of that match before returning.
+ *
+ * We support multi-level continuations:
+ *
+ * At any time when processing a successful top-level match, there is a current
+ * continuation level; it represents the level of the last successfully
+ * matched continuation.
+ *
+ * Continuations above that level are skipped as, if we see one, it means that
+ * the continuation that controls them - i.e., the lower-level continuation
+ * preceding them - failed to match.
+ *
+ * Continuations below that level are processed as, if we see one, it means
+ * we've finished processing or skipping higher-level continuations under the
+ * control of a successful or unsuccessful lower-level continuation, and are
+ * now seeing the next lower-level continuation and should process it. The
+ * current continuation level reverts to the level of the one we're seeing.
+ *
+ * Continuations at the current level are processed as, if we see one, there's
+ * no lower-level continuation that may have failed.
+ *
+ * If a continuation matches, we bump the current continuation level so that
+ * higher-level continuations are processed.
+ */
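+
+/*
+ * Editorial note (illustration only): with hypothetical magic entries
+ *
+ *   0     string    \037\213                application/octet-stream
+ *   >2    byte      8                       x-gzip
+ *
+ * the top-level line must match before the level-1 continuation is even
+ * considered; if the continuation also matches, its desc is appended to the
+ * result string list (separated by a space), which the RSL parser later
+ * splits into the content type and the content encoding.
+ */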
+static int match(request_rec *r, unsigned char *s, apr_size_t nbytes)
+{
+#if MIME_MAGIC_DEBUG
+ int rule_counter = 0;
+#endif
+ int cont_level = 0;
+ int need_separator = 0;
+ union VALUETYPE p;
+ magic_server_config_rec *conf = (magic_server_config_rec *)
+ ap_get_module_config(r->server->module_config, &mime_magic_module);
+ struct magic *m;
+
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ MODNAME ": match conf=%x file=%s m=%s m->next=%s last=%s",
+ conf,
+ conf->magicfile ? conf->magicfile : "NULL",
+ conf->magic ? "set" : "NULL",
+ (conf->magic && conf->magic->next) ? "set" : "NULL",
+ conf->last ? "set" : "NULL");
+#endif
+
+#if MIME_MAGIC_DEBUG
+ for (m = conf->magic; m; m = m->next) {
+ if (apr_isprint((((unsigned long) m) >> 24) & 255) &&
+ apr_isprint((((unsigned long) m) >> 16) & 255) &&
+ apr_isprint((((unsigned long) m) >> 8) & 255) &&
+ apr_isprint(((unsigned long) m) & 255)) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ MODNAME ": match: POINTER CLOBBERED! "
+ "m=\"%c%c%c%c\"",
+ (((unsigned long) m) >> 24) & 255,
+ (((unsigned long) m) >> 16) & 255,
+ (((unsigned long) m) >> 8) & 255,
+ ((unsigned long) m) & 255);
+ break;
+ }
+ }
+#endif
+
+ for (m = conf->magic; m; m = m->next) {
+#if MIME_MAGIC_DEBUG
+ rule_counter++;
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ MODNAME ": line=%d desc=%s", m->lineno, m->desc);
+#endif
+
+ /* check if main entry matches */
+ if (!mget(r, &p, s, m, nbytes) ||
+ !mcheck(r, &p, m)) {
+ struct magic *m_cont;
+
+ /*
+ * main entry didn't match, flush its continuations
+ */
+ if (!m->next || (m->next->cont_level == 0)) {
+ continue;
+ }
+
+ m_cont = m->next;
+ while (m_cont && (m_cont->cont_level != 0)) {
+#if MIME_MAGIC_DEBUG
+ rule_counter++;
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ MODNAME ": line=%d mc=%x mc->next=%x cont=%d desc=%s",
+ m_cont->lineno, m_cont,
+ m_cont->next, m_cont->cont_level,
+ m_cont->desc);
+#endif
+ /*
+ * this trick allows us to keep *m in sync when the continue
+ * advances the pointer
+ */
+ m = m_cont;
+ m_cont = m_cont->next;
+ }
+ continue;
+ }
+
+ /* if we get here, the main entry rule was a match */
+ /* this will be the last run through the loop */
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ MODNAME ": rule matched, line=%d type=%d %s",
+ m->lineno, m->type,
+ (m->type == STRING) ? m->value.s : "");
+#endif
+
+ /* print the match */
+ mprint(r, &p, m);
+
+ /*
+ * If we printed something, we'll need to print a blank before we
+ * print something else.
+ */
+ if (m->desc[0])
+ need_separator = 1;
+ /* and any continuations that match */
+ cont_level++;
+ /*
+ * while (m && m->next && m->next->cont_level != 0 && ( m = m->next
+ * ))
+ */
+ m = m->next;
+ while (m && (m->cont_level != 0)) {
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ MODNAME ": match line=%d cont=%d type=%d %s",
+ m->lineno, m->cont_level, m->type,
+ (m->type == STRING) ? m->value.s : "");
+#endif
+ if (cont_level >= m->cont_level) {
+ if (cont_level > m->cont_level) {
+ /*
+ * We're at the end of the level "cont_level"
+ * continuations.
+ */
+ cont_level = m->cont_level;
+ }
+ if (mget(r, &p, s, m, nbytes) &&
+ mcheck(r, &p, m)) {
+ /*
+ * This continuation matched. Print its message, with a
+ * blank before it if the previous item printed and this
+ * item isn't empty.
+ */
+ /* space if previous printed */
+ if (need_separator
+ && (m->nospflag == 0)
+ && (m->desc[0] != '\0')
+ ) {
+ (void) magic_rsl_putchar(r, ' ');
+ need_separator = 0;
+ }
+ mprint(r, &p, m);
+ if (m->desc[0])
+ need_separator = 1;
+
+ /*
+ * If we see any continuations at a higher level, process
+ * them.
+ */
+ cont_level++;
+ }
+ }
+
+ /* move to next continuation record */
+ m = m->next;
+ }
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ MODNAME ": matched after %d rules", rule_counter);
+#endif
+ return 1; /* all through */
+ }
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ MODNAME ": failed after %d rules", rule_counter);
+#endif
+ return 0; /* no match at all */
+}
+
+static void mprint(request_rec *r, union VALUETYPE *p, struct magic *m)
+{
+ char *pp;
+ unsigned long v;
+ char time_str[APR_CTIME_LEN];
+
+ switch (m->type) {
+ case BYTE:
+ v = p->b;
+ break;
+
+ case SHORT:
+ case BESHORT:
+ case LESHORT:
+ v = p->h;
+ break;
+
+ case LONG:
+ case BELONG:
+ case LELONG:
+ v = p->l;
+ break;
+
+ case STRING:
+ if (m->reln == '=') {
+ (void) magic_rsl_printf(r, m->desc, m->value.s);
+ }
+ else {
+ (void) magic_rsl_printf(r, m->desc, p->s);
+ }
+ return;
+
+ case DATE:
+ case BEDATE:
+ case LEDATE:
+ apr_ctime(time_str, apr_time_from_sec(*(time_t *)&p->l));
+ pp = time_str;
+ (void) magic_rsl_printf(r, m->desc, pp);
+ return;
+ default:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ MODNAME ": invalid m->type (%d) in mprint().",
+ m->type);
+ return;
+ }
+
+ v = signextend(r->server, m, v) & m->mask;
+ (void) magic_rsl_printf(r, m->desc, (unsigned long) v);
+}
+
+/*
+ * Convert the byte order of the data we are looking at
+ */
+static int mconvert(request_rec *r, union VALUETYPE *p, struct magic *m)
+{
+ char *rt;
+
+ switch (m->type) {
+ case BYTE:
+ case SHORT:
+ case LONG:
+ case DATE:
+ return 1;
+ case STRING:
+ /* Null terminate and eat the return */
+ p->s[sizeof(p->s) - 1] = '\0';
+ if ((rt = strchr(p->s, '\n')) != NULL)
+ *rt = '\0';
+ return 1;
+ case BESHORT:
+ p->h = (short) ((p->hs[0] << 8) | (p->hs[1]));
+ return 1;
+ case BELONG:
+ case BEDATE:
+ p->l = (long)
+ ((p->hl[0] << 24) | (p->hl[1] << 16) | (p->hl[2] << 8) | (p->hl[3]));
+ return 1;
+ case LESHORT:
+ p->h = (short) ((p->hs[1] << 8) | (p->hs[0]));
+ return 1;
+ case LELONG:
+ case LEDATE:
+ p->l = (long)
+ ((p->hl[3] << 24) | (p->hl[2] << 16) | (p->hl[1] << 8) | (p->hl[0]));
+ return 1;
+ default:
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ MODNAME ": invalid type %d in mconvert().", m->type);
+ return 0;
+ }
+}
+
+
+static int mget(request_rec *r, union VALUETYPE *p, unsigned char *s,
+ struct magic *m, apr_size_t nbytes)
+{
+ long offset = m->offset;
+
+ if (offset + sizeof(union VALUETYPE) > nbytes)
+ return 0;
+
+ memcpy(p, s + offset, sizeof(union VALUETYPE));
+
+ if (!mconvert(r, p, m))
+ return 0;
+
+ if (m->flag & INDIR) {
+
+ switch (m->in.type) {
+ case BYTE:
+ offset = p->b + m->in.offset;
+ break;
+ case SHORT:
+ offset = p->h + m->in.offset;
+ break;
+ case LONG:
+ offset = p->l + m->in.offset;
+ break;
+ }
+
+ if (offset + sizeof(union VALUETYPE) > nbytes)
+ return 0;
+
+ memcpy(p, s + offset, sizeof(union VALUETYPE));
+
+ if (!mconvert(r, p, m))
+ return 0;
+ }
+ return 1;
+}
+
+static int mcheck(request_rec *r, union VALUETYPE *p, struct magic *m)
+{
+ register unsigned long l = m->value.l;
+ register unsigned long v;
+ int matched;
+
+ if ((m->value.s[0] == 'x') && (m->value.s[1] == '\0')) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ MODNAME ": BOINK");
+ return 1;
+ }
+
+ switch (m->type) {
+ case BYTE:
+ v = p->b;
+ break;
+
+ case SHORT:
+ case BESHORT:
+ case LESHORT:
+ v = p->h;
+ break;
+
+ case LONG:
+ case BELONG:
+ case LELONG:
+ case DATE:
+ case BEDATE:
+ case LEDATE:
+ v = p->l;
+ break;
+
+ case STRING:
+ l = 0;
+ /*
+ * What we want here is: v = strncmp(m->value.s, p->s, m->vallen);
+ * but ignoring any nulls. bcmp doesn't give -/+/0 and isn't
+ * universally available anyway.
+ */
+ v = 0;
+ {
+ register unsigned char *a = (unsigned char *) m->value.s;
+ register unsigned char *b = (unsigned char *) p->s;
+ register int len = m->vallen;
+
+ while (--len >= 0)
+ if ((v = *b++ - *a++) != 0)
+ break;
+ }
+ break;
+ default:
+ /* bogosity, pretend that it just wasn't a match */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ MODNAME ": invalid type %d in mcheck().", m->type);
+ return 0;
+ }
+
+ v = signextend(r->server, m, v) & m->mask;
+
+ switch (m->reln) {
+ case 'x':
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "%lu == *any* = 1", v);
+#endif
+ matched = 1;
+ break;
+
+ case '!':
+ matched = v != l;
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "%lu != %lu = %d", v, l, matched);
+#endif
+ break;
+
+ case '=':
+ matched = v == l;
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "%lu == %lu = %d", v, l, matched);
+#endif
+ break;
+
+ case '>':
+ if (m->flag & UNSIGNED) {
+ matched = v > l;
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "%lu > %lu = %d", v, l, matched);
+#endif
+ }
+ else {
+ matched = (long) v > (long) l;
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "%ld > %ld = %d", v, l, matched);
+#endif
+ }
+ break;
+
+ case '<':
+ if (m->flag & UNSIGNED) {
+ matched = v < l;
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "%lu < %lu = %d", v, l, matched);
+#endif
+ }
+ else {
+ matched = (long) v < (long) l;
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "%ld < %ld = %d", v, l, matched);
+#endif
+ }
+ break;
+
+ case '&':
+ matched = (v & l) == l;
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "((%lx & %lx) == %lx) = %d", v, l, l, matched);
+#endif
+ break;
+
+ case '^':
+ matched = (v & l) != l;
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "((%lx & %lx) != %lx) = %d", v, l, l, matched);
+#endif
+ break;
+
+ default:
+ /* bogosity, pretend it didn't match */
+ matched = 0;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ MODNAME ": mcheck: can't happen: invalid relation %d.",
+ m->reln);
+ break;
+ }
+
+ return matched;
+}
+
+/* an optimization over plain strcmp() */
+#define STREQ(a, b) (*(a) == *(b) && strcmp((a), (b)) == 0)
+
+static int ascmagic(request_rec *r, unsigned char *buf, apr_size_t nbytes)
+{
+ int has_escapes = 0;
+ unsigned char *s;
+ char nbuf[HOWMANY + 1]; /* one extra for terminating '\0' */
+ char *token;
+ register struct names *p;
+ int small_nbytes;
+ char *strtok_state;
+
+ /* these are easy, do them first */
+
+ /*
+ * for troff, look for . + letter + letter or .\"; this must be done to
+ * disambiguate tar archives' ./file and other trash from real troff
+ * input.
+ */
+ if (*buf == '.') {
+ unsigned char *tp = buf + 1;
+
+ while (apr_isspace(*tp))
+ ++tp; /* skip leading whitespace */
+ if ((apr_isalnum(*tp) || *tp == '\\') &&
+ (apr_isalnum(*(tp + 1)) || *tp == '"')) {
+ magic_rsl_puts(r, "application/x-troff");
+ return 1;
+ }
+ }
+ if ((*buf == 'c' || *buf == 'C') && apr_isspace(*(buf + 1))) {
+ /* Fortran */
+ magic_rsl_puts(r, "text/plain");
+ return 1;
+ }
+
+ /* look for tokens from names.h - this is expensive, so we'll limit
+ * ourselves to only SMALL_HOWMANY bytes */
+ small_nbytes = (nbytes > SMALL_HOWMANY) ? SMALL_HOWMANY : nbytes;
+ /* make a copy of the buffer here because apr_strtok() will destroy it */
+ s = (unsigned char *) memcpy(nbuf, buf, small_nbytes);
+ s[small_nbytes] = '\0';
+ has_escapes = (memchr(s, '\033', small_nbytes) != NULL);
+ while ((token = apr_strtok((char *) s, " \t\n\r\f", &strtok_state)) != NULL) {
+ s = NULL; /* make apr_strtok() keep on tokin' */
+ for (p = names; p < names + NNAMES; p++) {
+ if (STREQ(p->name, token)) {
+ magic_rsl_puts(r, types[p->type]);
+ if (has_escapes)
+ magic_rsl_puts(r, " (with escape sequences)");
+ return 1;
+ }
+ }
+ }
+
+ switch (is_tar(buf, nbytes)) {
+ case 1:
+ /* V7 tar archive */
+ magic_rsl_puts(r, "application/x-tar");
+ return 1;
+ case 2:
+ /* POSIX tar archive */
+ magic_rsl_puts(r, "application/x-tar");
+ return 1;
+ }
+
+ /* all else fails, but it is ascii... */
+ return 0;
+}
+
+
+/*
+ * compress routines:
+ *   zmagic()     - returns 0 if not recognized; uncompresses and prints
+ *                  information if recognized
+ *   uncompress() - uncompresses the file into a new buffer using the given
+ *                  method and returns the size of the uncompressed data
+ */
+
+static struct {
+ char *magic;
+ apr_size_t maglen;
+ char *argv[3];
+ int silent;
+ char *encoding; /* MUST be lowercase */
+} compr[] = {
+
+ /* we use gzip here rather than uncompress because we have to pass
+ * it a full filename -- and uncompress only considers filenames
+ * ending with .Z
+ */
+ {
+ "\037\235", 2, {
+ "gzip", "-dcq", NULL
+ }, 0, "x-compress"
+ },
+ {
+ "\037\213", 2, {
+ "gzip", "-dcq", NULL
+ }, 1, "x-gzip"
+ },
+ /*
+ * XXX pcat does not work, because I don't know how to make it read stdin,
+ * so we use gzip
+ */
+ {
+ "\037\036", 2, {
+ "gzip", "-dcq", NULL
+ }, 0, "x-gzip"
+ },
+};
+
+static int ncompr = sizeof(compr) / sizeof(compr[0]);
+
+static int zmagic(request_rec *r, unsigned char *buf, apr_size_t nbytes)
+{
+ unsigned char *newbuf;
+ int newsize;
+ int i;
+
+ for (i = 0; i < ncompr; i++) {
+ if (nbytes < compr[i].maglen)
+ continue;
+ if (memcmp(buf, compr[i].magic, compr[i].maglen) == 0)
+ break;
+ }
+
+ if (i == ncompr)
+ return 0;
+
+ if ((newsize = uncompress(r, i, &newbuf, nbytes)) > 0) {
+ if (tryit(r, newbuf, newsize, 0) != OK) {
+ return 0;
+ }
+
+ /* set encoding type in the request record */
+ r->content_encoding = compr[i].encoding;
+ }
+ return 1;
+}
+
+
+struct uncompress_parms {
+ request_rec *r;
+ int method;
+};
+
+static int create_uncompress_child(struct uncompress_parms *parm, apr_pool_t *cntxt,
+ apr_file_t **pipe_in)
+{
+ int rc = 1;
+ const char *new_argv[4];
+ const char *const *env;
+ request_rec *r = parm->r;
+ apr_pool_t *child_context = cntxt;
+ apr_procattr_t *procattr;
+ apr_proc_t *procnew;
+
+ /* XXX missing 1.3 logic:
+ *
+ * what happens when !compr[parm->method].silent?
+ * Should we create the err pipe, read it, and copy to the log?
+ */
+
+ env = (const char *const *)ap_create_environment(child_context, r->subprocess_env);
+
+ if ((apr_procattr_create(&procattr, child_context) != APR_SUCCESS) ||
+ (apr_procattr_io_set(procattr, APR_FULL_BLOCK,
+ APR_FULL_BLOCK, APR_NO_PIPE) != APR_SUCCESS) ||
+ (apr_procattr_dir_set(procattr, r->filename) != APR_SUCCESS) ||
+ (apr_procattr_cmdtype_set(procattr, APR_PROGRAM) != APR_SUCCESS)) {
+ /* Something bad happened, tell the world. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_ENOPROC, r,
+ "couldn't setup child process: %s", r->filename);
+ }
+ else {
+ new_argv[0] = compr[parm->method].argv[0];
+ new_argv[1] = compr[parm->method].argv[1];
+ new_argv[2] = r->filename;
+ new_argv[3] = NULL;
+
+ procnew = apr_pcalloc(child_context, sizeof(*procnew));
+ rc = apr_proc_create(procnew, compr[parm->method].argv[0],
+ new_argv, env, procattr, child_context);
+
+ if (rc != APR_SUCCESS) {
+ /* Bad things happened. Everyone should have cleaned up. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_ENOPROC, r,
+ MODNAME ": could not execute `%s'.",
+ compr[parm->method].argv[0]);
+ }
+ else {
+ apr_pool_note_subprocess(child_context, procnew, APR_KILL_AFTER_TIMEOUT);
+ *pipe_in = procnew->out;
+ }
+ }
+
+ return (rc);
+}
+
+static int uncompress(request_rec *r, int method,
+ unsigned char **newch, apr_size_t n)
+{
+ struct uncompress_parms parm;
+ apr_file_t *pipe_out = NULL;
+ apr_pool_t *sub_context;
+ apr_status_t rv;
+
+ parm.r = r;
+ parm.method = method;
+
+ /* We make a sub_pool so that we can collect our child early, otherwise
+ * there are cases (i.e. generating directory indices with mod_autoindex)
+ * where we would end up with LOTS of zombies.
+ */
+ if (apr_pool_create(&sub_context, r->pool) != APR_SUCCESS)
+ return -1;
+
+ if ((rv = create_uncompress_child(&parm, sub_context, &pipe_out)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ MODNAME ": couldn't spawn uncompress process: %s", r->uri);
+ return -1;
+ }
+
+ *newch = (unsigned char *) apr_palloc(r->pool, n);
+ rv = apr_file_read(pipe_out, *newch, &n);
+ if (n == 0) {
+ apr_pool_destroy(sub_context);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ MODNAME ": read failed %s", r->filename);
+ return -1;
+ }
+ apr_pool_destroy(sub_context);
+ return n;
+}
+
+/*
+ * is_tar() -- figure out whether file is a tar archive.
+ *
+ * Stolen (by author of file utility) from the public domain tar program: Public
+ * Domain version written 26 Aug 1985 John Gilmore (ihnp4!hoptoad!gnu).
+ *
+ * @(#)list.c 1.18 9/23/86 Public Domain - gnu $Id: mod_mime_magic.c,v 1.7
+ * 1997/06/24 00:41:02 ikluft Exp ikluft $
+ *
+ * Comments changed and some code/comments reformatted for file command by Ian
+ * Darwin.
+ */
+
+#define isodigit(c) (((unsigned char)(c) >= '0') && ((unsigned char)(c) <= '7'))
+
+/*
+ * Return 0 if the checksum is bad (i.e., probably not a tar archive), 1 for
+ * old UNIX tar file, 2 for Unix Std (POSIX) tar file.
+ */
+
+static int is_tar(unsigned char *buf, apr_size_t nbytes)
+{
+ register union record *header = (union record *) buf;
+ register int i;
+ register long sum, recsum;
+ register char *p;
+
+ if (nbytes < sizeof(union record))
+ return 0;
+
+ recsum = from_oct(8, header->header.chksum);
+
+ sum = 0;
+ p = header->charptr;
+ for (i = sizeof(union record); --i >= 0;) {
+ /*
+ * We can't use unsigned char here because of old compilers, e.g. V7.
+ */
+ sum += 0xFF & *p++;
+ }
+
+ /* Adjust checksum to count the "chksum" field as blanks. */
+ for (i = sizeof(header->header.chksum); --i >= 0;)
+ sum -= 0xFF & header->header.chksum[i];
+ sum += ' ' * sizeof header->header.chksum;
+
+ if (sum != recsum)
+ return 0; /* Not a tar archive */
+
+ if (0 == strcmp(header->header.magic, TMAGIC))
+ return 2; /* Unix Standard tar archive */
+
+ return 1; /* Old fashioned tar archive */
+}
+
+
+/*
+ * Quick and dirty octal conversion.
+ *
+ * Result is -1 if the field is invalid (all blank, or nonoctal).
+ */
+static long from_oct(int digs, char *where)
+{
+ register long value;
+
+ while (apr_isspace(*where)) { /* Skip spaces */
+ where++;
+ if (--digs <= 0)
+ return -1; /* All blank field */
+ }
+ value = 0;
+ while (digs > 0 && isodigit(*where)) { /* Scan til nonoctal */
+ value = (value << 3) | (*where++ - '0');
+ --digs;
+ }
+
+ if (digs > 0 && *where && !apr_isspace(*where))
+ return -1; /* Ended on non-space/nul */
+
+ return value;
+}
+
+/*
+ * Check for file-revision suffix
+ *
+ * This is for an obscure document control system used on an intranet.
+ * The web representation of each file's revision has an @1, @2, etc
+ * appended with the revision number. This needs to be stripped off to
+ * find the file suffix, which can be recognized by sending the name back
+ * through a sub-request. The base file name (without the @num suffix)
+ * must exist because its type will be used as the result.
+ */
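+/*
+ * Hypothetical example (the path is made up): a request for
+ * "/docs/spec.html@4" strips the "@4", runs a sub-request for
+ * "/docs/spec.html", and copies that sub-request's content type,
+ * encoding and languages back onto the original request.
+ */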
+static int revision_suffix(request_rec *r)
+{
+ int suffix_pos, result;
+ char *sub_filename;
+ request_rec *sub;
+
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ MODNAME ": revision_suffix checking %s", r->filename);
+#endif /* MIME_MAGIC_DEBUG */
+
+ /* check for recognized revision suffix */
+ suffix_pos = strlen(r->filename) - 1;
+ if (!apr_isdigit(r->filename[suffix_pos])) {
+ return 0;
+ }
+ while (suffix_pos >= 0 && apr_isdigit(r->filename[suffix_pos]))
+ suffix_pos--;
+ if (suffix_pos < 0 || r->filename[suffix_pos] != '@') {
+ return 0;
+ }
+
+ /* perform sub-request for the file name without the suffix */
+ result = 0;
+ sub_filename = apr_pstrndup(r->pool, r->filename, suffix_pos);
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ MODNAME ": subrequest lookup for %s", sub_filename);
+#endif /* MIME_MAGIC_DEBUG */
+ sub = ap_sub_req_lookup_file(sub_filename, r, NULL);
+
+ /* extract content type/encoding/language from sub-request */
+ if (sub->content_type) {
+ ap_set_content_type(r, apr_pstrdup(r->pool, sub->content_type));
+#if MIME_MAGIC_DEBUG
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ MODNAME ": subrequest %s got %s",
+ sub_filename, r->content_type);
+#endif /* MIME_MAGIC_DEBUG */
+ if (sub->content_encoding)
+ r->content_encoding =
+ apr_pstrdup(r->pool, sub->content_encoding);
+ if (sub->content_languages) {
+ int n;
+ r->content_languages = apr_array_copy(r->pool,
+ sub->content_languages);
+ for (n = 0; n < r->content_languages->nelts; ++n) {
+ char **lang = ((char **)r->content_languages->elts) + n;
+ *lang = apr_pstrdup(r->pool, *lang);
+ }
+ }
+ result = 1;
+ }
+
+ /* clean up */
+ ap_destroy_sub_req(sub);
+
+ return result;
+}
+
+/*
+ * initialize the module
+ */
+static int magic_init(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *main_server)
+{
+ int result;
+ magic_server_config_rec *conf;
+ magic_server_config_rec *main_conf;
+ server_rec *s;
+#if MIME_MAGIC_DEBUG
+ struct magic *m, *prevm;
+#endif /* MIME_MAGIC_DEBUG */
+
+ main_conf = ap_get_module_config(main_server->module_config, &mime_magic_module);
+ for (s = main_server; s; s = s->next) {
+ conf = ap_get_module_config(s->module_config, &mime_magic_module);
+ if (conf->magicfile == NULL && s != main_server) {
+ /* inherits from the parent */
+ *conf = *main_conf;
+ }
+ else if (conf->magicfile) {
+ result = apprentice(s, p);
+ if (result == -1)
+ return OK;
+#if MIME_MAGIC_DEBUG
+ prevm = 0;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ MODNAME ": magic_init 1 test");
+ for (m = conf->magic; m; m = m->next) {
+ if (apr_isprint((((unsigned long) m) >> 24) & 255) &&
+ apr_isprint((((unsigned long) m) >> 16) & 255) &&
+ apr_isprint((((unsigned long) m) >> 8) & 255) &&
+ apr_isprint(((unsigned long) m) & 255)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ MODNAME ": magic_init 1: POINTER CLOBBERED! "
+ "m=\"%c%c%c%c\" line=%d",
+ (((unsigned long) m) >> 24) & 255,
+ (((unsigned long) m) >> 16) & 255,
+ (((unsigned long) m) >> 8) & 255,
+ ((unsigned long) m) & 255,
+ prevm ? prevm->lineno : -1);
+ break;
+ }
+ prevm = m;
+ }
+#endif
+ }
+ }
+ return OK;
+}
+
+/*
+ * Find the Content-Type from any resource this module has available
+ */
+
+static int magic_find_ct(request_rec *r)
+{
+ int result;
+ magic_server_config_rec *conf;
+
+ /* the file has to exist */
+ if (r->finfo.filetype == 0 || !r->filename) {
+ return DECLINED;
+ }
+
+ /* was someone else already here? */
+ if (r->content_type) {
+ return DECLINED;
+ }
+
+ conf = ap_get_module_config(r->server->module_config, &mime_magic_module);
+ if (!conf || !conf->magic) {
+ return DECLINED;
+ }
+
+ /* initialize per-request info */
+ if (!magic_set_config(r)) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /* try excluding file-revision suffixes */
+ if (revision_suffix(r) != 1) {
+ /* process it based on the file contents */
+ if ((result = magic_process(r)) != OK) {
+ return result;
+ }
+ }
+
+ /* if we have any results, put them in the request structure */
+ return magic_rsl_to_request(r);
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ static const char * const aszPre[]={ "mod_mime.c", NULL };
+
+ /* mod_mime_magic should be run after mod_mime, if at all. */
+
+ ap_hook_type_checker(magic_find_ct, aszPre, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_config(magic_init, NULL, NULL, APR_HOOK_FIRST);
+}
+
+/*
+ * Apache API module interface
+ */
+
+module AP_MODULE_DECLARE_DATA mime_magic_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ create_magic_server_config, /* server config */
+ merge_magic_server_config, /* merge server config */
+ mime_magic_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_mime_magic.dsp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_mime_magic.dsp
new file mode 100644
index 00000000..bbffdfcd
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_mime_magic.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_mime_magic" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_mime_magic - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mime_magic.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mime_magic.mak" CFG="mod_mime_magic - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_mime_magic - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_mime_magic - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_mime_magic - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_mime_magic_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib /nologo /subsystem:windows /dll /out:"Release/mod_mime_magic.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime_magic.so
+# ADD LINK32 kernel32.lib ws2_32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_mime_magic.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime_magic.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_mime_magic - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_mime_magic_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_mime_magic.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime_magic.so
+# ADD LINK32 kernel32.lib ws2_32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_mime_magic.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime_magic.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_mime_magic - Win32 Release"
+# Name "mod_mime_magic - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_mime_magic.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_mime_magic.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_mime_magic - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_mime_magic.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_mime_magic.so "mime_magic_module for Apache" ../../include/ap_release.h > .\mod_mime_magic.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_mime_magic - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_mime_magic.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_mime_magic.so "mime_magic_module for Apache" ../../include/ap_release.h > .\mod_mime_magic.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_mime_magic.exp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_mime_magic.exp
new file mode 100644
index 00000000..42068a43
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_mime_magic.exp
@@ -0,0 +1 @@
+mime_magic_module
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.c b/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.c
new file mode 100644
index 00000000..a85806d3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.c
@@ -0,0 +1,586 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_setenvif.c
+ * Set environment variables based on matching request headers or
+ * attributes against regex strings
+ *
+ * Paul Sutton <paul@ukweb.com> 27 Oct 1996
+ * Based on mod_browser by Alexei Kosut <akosut@organic.com>
+ */
+
+/*
+ * Used to set environment variables based on the incoming request headers,
+ * or some selected other attributes of the request (e.g., the remote host
+ * name).
+ *
+ * Usage:
+ *
+ * SetEnvIf name regex var ...
+ *
+ * where name is either an HTTP request header name, or one of the
+ * special values (see below). 'name' may be a regex when it is used
+ * to specify an HTTP request header name. The 'value' of the header
+ * (or the value of the special value from below) is compared against
+ * the regex argument. If this is a simple string, a simple sub-string
+ * match is performed. Otherwise, a regular expression match is
+ * done. If the value matches the string or regular expression, the
+ * environment variables listed as var ... are set. Each var can
+ * be in one of three formats: var, which sets the named variable
+ * (to the value "1"); var=value, which sets the variable to
+ * the given value; or !var, which unsets the variable if it has
+ * been previously set.
+ *
+ * Normally the strings are compared with regard to case. To ignore
+ * case, use the directive SetEnvIfNoCase instead.
+ *
+ * Special values for 'name' are:
+ *
+ * server_addr IP address of interface on which request arrived
+ * (analogous to SERVER_ADDR set in ap_add_common_vars())
+ * remote_host Remote host name (if available)
+ * remote_addr Remote IP address
+ * request_method Request method (GET, POST, etc)
+ * request_uri Requested URI
+ *
+ * Examples:
+ *
+ * To set the environment variable LOCALHOST if the client is the local
+ * machine:
+ *
+ * SetEnvIf remote_addr 127.0.0.1 LOCALHOST
+ *
+ * To set LOCAL if the client is the local host, or within our company's
+ * domain (192.168.10):
+ *
+ * SetEnvIf remote_addr 192.168.10. LOCAL
+ * SetEnvIf remote_addr 127.0.0.1 LOCALHOST
+ *
+ * This could be written as:
+ *
+ * SetEnvIf remote_addr (127.0.0.1|192.168.10.) LOCAL
+ *
+ * To set HAVE_TS if the client request contains any header beginning
+ * with "TS" with a value beginning with a lower case alphabet:
+ *
+ * SetEnvIf ^TS* ^[a-z].* HAVE_TS
+ */
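+/*
+ * A further illustrative example combining the three variable formats
+ * (variable names chosen for illustration only):
+ *
+ * SetEnvIf Request_URI "\.gif$" object_is_image=gif !prefer_text
+ *
+ * sets object_is_image to "gif" and unsets prefer_text whenever the
+ * requested URI ends in ".gif".
+ */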
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_strmatch.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_protocol.h"
+
+
+enum special {
+ SPECIAL_NOT,
+ SPECIAL_REMOTE_ADDR,
+ SPECIAL_REMOTE_HOST,
+ SPECIAL_REQUEST_URI,
+ SPECIAL_REQUEST_METHOD,
+ SPECIAL_REQUEST_PROTOCOL,
+ SPECIAL_SERVER_ADDR
+};
+typedef struct {
+ char *name; /* header name */
+ regex_t *pnamereg; /* compiled header name regex */
+ char *regex; /* regex to match against */
+ regex_t *preg; /* compiled regex */
+ const apr_strmatch_pattern *pattern; /* non-regex pattern to match */
+ apr_table_t *features; /* env vars to set (or unset) */
+ enum special special_type; /* is it a "special" header ? */
+ int icase; /* ignoring case? */
+} sei_entry;
+
+typedef struct {
+ apr_array_header_t *conditionals;
+} sei_cfg_rec;
+
+module AP_MODULE_DECLARE_DATA setenvif_module;
+
+/*
+ * These routines, the create- and merge-config functions, are called
+ * for both the server-wide and the per-directory contexts. This is
+ * because the different definitions are used at different times; the
+ * server-wide ones are used in the post-read-request phase, and the
+ * per-directory ones are used during the header-parse phase (after
+ * the URI has been mapped to a file and we have anything from the
+ * .htaccess file and <Directory> and <Files> containers).
+ */
+static void *create_setenvif_config(apr_pool_t *p)
+{
+ sei_cfg_rec *new = (sei_cfg_rec *) apr_palloc(p, sizeof(sei_cfg_rec));
+
+ new->conditionals = apr_array_make(p, 20, sizeof(sei_entry));
+ return (void *) new;
+}
+
+static void *create_setenvif_config_svr(apr_pool_t *p, server_rec *dummy)
+{
+ return create_setenvif_config(p);
+}
+
+static void *create_setenvif_config_dir(apr_pool_t *p, char *dummy)
+{
+ return create_setenvif_config(p);
+}
+
+static void *merge_setenvif_config(apr_pool_t *p, void *basev, void *overridesv)
+{
+ sei_cfg_rec *a = apr_pcalloc(p, sizeof(sei_cfg_rec));
+ sei_cfg_rec *base = basev, *overrides = overridesv;
+
+ a->conditionals = apr_array_append(p, base->conditionals,
+ overrides->conditionals);
+ return a;
+}
+
+/*
+ * any non-NULL magic constant will do... used to indicate if REG_ICASE should
+ * be used
+ */
+#define ICASE_MAGIC ((void *)(&setenvif_module))
+#define SEI_MAGIC_HEIRLOOM "setenvif-phase-flag"
+
+static int is_header_regex(apr_pool_t *p, const char* name)
+{
+ /* If a header name contains characters other than
+ * '-', '_', [A-Z], [a-z] and [0-9],
+ * assume the header name is a regular expression.
+ */
+ regex_t *preg = ap_pregcomp(p, "^[-A-Za-z0-9_]*$",
+ (REG_EXTENDED | REG_NOSUB ));
+ ap_assert(preg != NULL);
+
+ if (ap_regexec(preg, name, 0, NULL, 0)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/* If the input string does not take advantage of regular
+ * expression metacharacters, return a pointer to an equivalent
+ * string that can be searched using apr_strmatch(). (The
+ * returned string will often be the input string. But if
+ * the input string contains escaped characters, the returned
+ * string will be a copy with the escapes removed.)
+ */
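+/*
+ * For example (illustrative patterns): "Mozilla\(tm\)" comes back as the
+ * literal string "Mozilla(tm)" and is matched with apr_strmatch(), while
+ * "^Mozilla.*" contains unescaped metacharacters, so this function returns
+ * NULL and the caller compiles it with ap_pregcomp() instead.
+ */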
+static const char *non_regex_pattern(apr_pool_t *p, const char *s)
+{
+ const char *src = s;
+ int escapes_found = 0;
+ int in_escape = 0;
+
+ while (*src) {
+ switch (*src) {
+ case '^':
+ case '.':
+ case '$':
+ case '|':
+ case '(':
+ case ')':
+ case '[':
+ case ']':
+ case '*':
+ case '+':
+ case '?':
+ case '{':
+ case '}':
+ if (!in_escape) {
+ return NULL;
+ }
+ in_escape = 0;
+ break;
+ case '\\':
+ if (!in_escape) {
+ in_escape = 1;
+ escapes_found = 1;
+ }
+ else {
+ in_escape = 0;
+ }
+ break;
+ default:
+ if (in_escape) {
+ return NULL;
+ }
+ break;
+ }
+ src++;
+ }
+ if (!escapes_found) {
+ return s;
+ }
+ else {
+ char *unescaped = (char *)apr_palloc(p, src - s + 1);
+ char *dst = unescaped;
+ src = s;
+ do {
+ if (*src == '\\') {
+ src++;
+ }
+ } while ((*dst++ = *src++));
+ return unescaped;
+ }
+}
+
+static const char *add_setenvif_core(cmd_parms *cmd, void *mconfig,
+ char *fname, const char *args)
+{
+ char *regex;
+ const char *simple_pattern;
+ const char *feature;
+ sei_cfg_rec *sconf;
+ sei_entry *new;
+ sei_entry *entries;
+ char *var;
+ int i;
+ int beenhere = 0;
+ int icase;
+
+ /*
+ * Determine from our context into which record to put the entry.
+ * cmd->path == NULL means we're in server-wide context; otherwise,
+ * we're dealing with a per-directory setting.
+ */
+ sconf = (cmd->path != NULL)
+ ? (sei_cfg_rec *) mconfig
+ : (sei_cfg_rec *) ap_get_module_config(cmd->server->module_config,
+ &setenvif_module);
+ entries = (sei_entry *) sconf->conditionals->elts;
+ /* get regex */
+ regex = ap_getword_conf(cmd->pool, &args);
+ if (!*regex) {
+ return apr_pstrcat(cmd->pool, "Missing regular expression for ",
+ cmd->cmd->name, NULL);
+ }
+
+ /*
+ * If we've already got a sei_entry with the same name we want to
+ * just copy the name pointer... so that later on we can compare
+ * two header names just by comparing the pointers.
+ */
+ for (i = 0; i < sconf->conditionals->nelts; ++i) {
+ new = &entries[i];
+ if (!strcasecmp(new->name, fname)) {
+ fname = new->name;
+ break;
+ }
+ }
+
+ /* if the last entry has an identical headername and regex then
+ * merge with it
+ */
+ i = sconf->conditionals->nelts - 1;
+ icase = cmd->info == ICASE_MAGIC;
+ if (i < 0
+ || entries[i].name != fname
+ || entries[i].icase != icase
+ || strcmp(entries[i].regex, regex)) {
+
+ /* no match, create a new entry */
+ new = apr_array_push(sconf->conditionals);
+ new->name = fname;
+ new->regex = regex;
+ new->icase = icase;
+ if ((simple_pattern = non_regex_pattern(cmd->pool, regex))) {
+ new->pattern = apr_strmatch_precompile(cmd->pool,
+ simple_pattern, !icase);
+ if (new->pattern == NULL) {
+ return apr_pstrcat(cmd->pool, cmd->cmd->name,
+ " pattern could not be compiled.", NULL);
+ }
+ new->preg = NULL;
+ }
+ else {
+ new->preg = ap_pregcomp(cmd->pool, regex,
+ (REG_EXTENDED | (icase ? REG_ICASE : 0)));
+ if (new->preg == NULL) {
+ return apr_pstrcat(cmd->pool, cmd->cmd->name,
+ " regex could not be compiled.", NULL);
+ }
+ new->pattern = NULL;
+ }
+ new->features = apr_table_make(cmd->pool, 2);
+
+ if (!strcasecmp(fname, "remote_addr")) {
+ new->special_type = SPECIAL_REMOTE_ADDR;
+ }
+ else if (!strcasecmp(fname, "remote_host")) {
+ new->special_type = SPECIAL_REMOTE_HOST;
+ }
+ else if (!strcasecmp(fname, "request_uri")) {
+ new->special_type = SPECIAL_REQUEST_URI;
+ }
+ else if (!strcasecmp(fname, "request_method")) {
+ new->special_type = SPECIAL_REQUEST_METHOD;
+ }
+ else if (!strcasecmp(fname, "request_protocol")) {
+ new->special_type = SPECIAL_REQUEST_PROTOCOL;
+ }
+ else if (!strcasecmp(fname, "server_addr")) {
+ new->special_type = SPECIAL_SERVER_ADDR;
+ }
+ else {
+ new->special_type = SPECIAL_NOT;
+ /* Handle fname as a regular expression.
+ * If fname a simple header string, identify as such
+ * (new->pnamereg = NULL) to avoid the overhead of searching
+ * through headers_in for a regex match.
+ */
+ if (is_header_regex(cmd->pool, fname)) {
+ new->pnamereg = ap_pregcomp(cmd->pool, fname,
+ (REG_EXTENDED | REG_NOSUB
+ | (icase ? REG_ICASE : 0)));
+ if (new->pnamereg == NULL)
+ return apr_pstrcat(cmd->pool, cmd->cmd->name,
+ "Header name regex could not be "
+ "compiled.", NULL);
+ }
+ else {
+ new->pnamereg = NULL;
+ }
+ }
+ }
+ else {
+ new = &entries[i];
+ }
+
+ for ( ; ; ) {
+ feature = ap_getword_conf(cmd->pool, &args);
+ if (!*feature) {
+ break;
+ }
+ beenhere++;
+
+ var = ap_getword(cmd->pool, &feature, '=');
+ if (*feature) {
+ apr_table_setn(new->features, var, feature);
+ }
+ else if (*var == '!') {
+ apr_table_setn(new->features, var + 1, "!");
+ }
+ else {
+ apr_table_setn(new->features, var, "1");
+ }
+ }
+
+ if (!beenhere) {
+ return apr_pstrcat(cmd->pool, "Missing envariable expression for ",
+ cmd->cmd->name, NULL);
+ }
+
+ return NULL;
+}
+
+static const char *add_setenvif(cmd_parms *cmd, void *mconfig,
+ const char *args)
+{
+ char *fname;
+
+ /* get header name */
+ fname = ap_getword_conf(cmd->pool, &args);
+ if (!*fname) {
+ return apr_pstrcat(cmd->pool, "Missing header-field name for ",
+ cmd->cmd->name, NULL);
+ }
+ return add_setenvif_core(cmd, mconfig, fname, args);
+}
+
+/*
+ * This routine handles the BrowserMatch* directives. It simply turns around
+ * and feeds them, with the appropriate embellishments, to the general-purpose
+ * command handler.
+ */
+static const char *add_browser(cmd_parms *cmd, void *mconfig, const char *args)
+{
+ return add_setenvif_core(cmd, mconfig, "User-Agent", args);
+}
+
+static const command_rec setenvif_module_cmds[] =
+{
+ AP_INIT_RAW_ARGS("SetEnvIf", add_setenvif, NULL, OR_FILEINFO,
+ "A header-name, regex and a list of variables."),
+ AP_INIT_RAW_ARGS("SetEnvIfNoCase", add_setenvif, ICASE_MAGIC, OR_FILEINFO,
+ "a header-name, regex and a list of variables."),
+ AP_INIT_RAW_ARGS("BrowserMatch", add_browser, NULL, OR_FILEINFO,
+ "A browser regex and a list of variables."),
+ AP_INIT_RAW_ARGS("BrowserMatchNoCase", add_browser, ICASE_MAGIC,
+ OR_FILEINFO,
+ "A browser regex and a list of variables."),
+ { NULL },
+};
+
+/*
+ * This routine gets called at two different points in request processing:
+ * once before the URI has been translated (during the post-read-request
+ * phase) and once after (during the header-parse phase). We use different
+ * config records for the two different calls to reduce overhead (by not
+ * re-doing the server-wide settings during directory processing), and
+ * signal which call it is by having the earlier one pass a flag to the
+ * later one.
+ */
+static int match_headers(request_rec *r)
+{
+ sei_cfg_rec *sconf;
+ sei_entry *entries;
+ const apr_table_entry_t *elts;
+ const char *val;
+ apr_size_t val_len = 0;
+ int i, j;
+ char *last_name;
+ regmatch_t regm[AP_MAX_REG_MATCH];
+
+ if (!ap_get_module_config(r->request_config, &setenvif_module)) {
+ ap_set_module_config(r->request_config, &setenvif_module,
+ SEI_MAGIC_HEIRLOOM);
+ sconf = (sei_cfg_rec *) ap_get_module_config(r->server->module_config,
+ &setenvif_module);
+ }
+ else {
+ sconf = (sei_cfg_rec *) ap_get_module_config(r->per_dir_config,
+ &setenvif_module);
+ }
+ entries = (sei_entry *) sconf->conditionals->elts;
+ last_name = NULL;
+ val = NULL;
+ for (i = 0; i < sconf->conditionals->nelts; ++i) {
+ sei_entry *b = &entries[i];
+
+ /* Optimize the case where a bunch of directives in a row use the
+ * same header. Remember we don't need to strcmp the two header
+ * names because we made sure the pointers were equal during
+ * configuration.
+ */
+ if (b->name != last_name) {
+ last_name = b->name;
+ switch (b->special_type) {
+ case SPECIAL_REMOTE_ADDR:
+ val = r->connection->remote_ip;
+ break;
+ case SPECIAL_SERVER_ADDR:
+ val = r->connection->local_ip;
+ break;
+ case SPECIAL_REMOTE_HOST:
+ val = ap_get_remote_host(r->connection, r->per_dir_config,
+ REMOTE_NAME, NULL);
+ break;
+ case SPECIAL_REQUEST_URI:
+ val = r->uri;
+ break;
+ case SPECIAL_REQUEST_METHOD:
+ val = r->method;
+ break;
+ case SPECIAL_REQUEST_PROTOCOL:
+ val = r->protocol;
+ break;
+ case SPECIAL_NOT:
+ if (b->pnamereg) {
+ /* Matching headers_in against a regex. Iterate through
+ * the headers_in until we find a match or run out of
+ * headers.
+ */
+ const apr_array_header_t
+ *arr = apr_table_elts(r->headers_in);
+
+ elts = (const apr_table_entry_t *) arr->elts;
+ val = NULL;
+ for (j = 0; j < arr->nelts; ++j) {
+ if (!ap_regexec(b->pnamereg, elts[j].key, 0, NULL, 0)) {
+ val = elts[j].val;
+ }
+ }
+ }
+ else {
+ /* Not matching against a regex */
+ val = apr_table_get(r->headers_in, b->name);
+ if (val == NULL) {
+ val = apr_table_get(r->subprocess_env, b->name);
+ }
+ }
+ }
+ val_len = val ? strlen(val) : 0;
+ }
+
+ /*
+ * A NULL value indicates that the header field or special entity
+ * wasn't present or is undefined. Represent that as an empty string
+ * so that REs like "^$" will work and allow envariable setting
+ * based on missing or empty field.
+ */
+ if (val == NULL) {
+ val = "";
+ val_len = 0;
+ }
+
+ if ((b->pattern && apr_strmatch(b->pattern, val, val_len)) ||
+ (!b->pattern && !ap_regexec(b->preg, val, AP_MAX_REG_MATCH, regm,
+ 0))) {
+ const apr_array_header_t *arr = apr_table_elts(b->features);
+ elts = (const apr_table_entry_t *) arr->elts;
+
+ for (j = 0; j < arr->nelts; ++j) {
+ if (*(elts[j].val) == '!') {
+ apr_table_unset(r->subprocess_env, elts[j].key);
+ }
+ else {
+ if (!b->pattern) {
+ char *replaced = ap_pregsub(r->pool, elts[j].val, val,
+ AP_MAX_REG_MATCH, regm);
+ if (replaced) {
+ apr_table_setn(r->subprocess_env, elts[j].key,
+ replaced);
+ }
+ }
+ else {
+ apr_table_setn(r->subprocess_env, elts[j].key,
+ elts[j].val);
+ }
+ }
+ }
+ }
+ }
+
+ return DECLINED;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_header_parser(match_headers, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_read_request(match_headers, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA setenvif_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_setenvif_config_dir, /* dir config creator */
+ merge_setenvif_config, /* dir merger --- default is to override */
+ create_setenvif_config_svr, /* server config */
+ merge_setenvif_config, /* merge server configs */
+ setenvif_module_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.dsp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.dsp
new file mode 100644
index 00000000..cffd4649
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_setenvif" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_setenvif - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_setenvif.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_setenvif.mak" CFG="mod_setenvif - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_setenvif - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_setenvif - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_setenvif - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_setenvif_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_setenvif.so" /base:@..\..\os\win32\BaseAddr.ref,mod_setenvif.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_setenvif.so" /base:@..\..\os\win32\BaseAddr.ref,mod_setenvif.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_setenvif - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_setenvif_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_setenvif.so" /base:@..\..\os\win32\BaseAddr.ref,mod_setenvif.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_setenvif.so" /base:@..\..\os\win32\BaseAddr.ref,mod_setenvif.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_setenvif - Win32 Release"
+# Name "mod_setenvif - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_setenvif.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_setenvif.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_setenvif - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_setenvif.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_setenvif.so "setenvif_module for Apache" ../../include/ap_release.h > .\mod_setenvif.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_setenvif - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_setenvif.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_setenvif.so "setenvif_module for Apache" ../../include/ap_release.h > .\mod_setenvif.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.exp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.exp
new file mode 100644
index 00000000..4f3800e3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.exp
@@ -0,0 +1 @@
+setenvif_module
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.la b/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.la
new file mode 100644
index 00000000..6562d1dc
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.la
@@ -0,0 +1,35 @@
+# mod_setenvif.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='mod_setenvif.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/bottlenecks/rubbos/app/httpd-2.0.64/srclib/apr-util/xml/expat/lib'
+
+# Version information for mod_setenvif.
+current=
+age=
+revision=
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir=''
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.lo b/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.lo
new file mode 100644
index 00000000..1a7420ba
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.lo
@@ -0,0 +1,12 @@
+# mod_setenvif.lo - a libtool object file
+# Generated by ltmain.sh - GNU libtool 1.5.26 (1.1220.2.493 2008/02/01 16:58:18)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object='.libs/mod_setenvif.o'
+
+# Name of the non-PIC object.
+non_pic_object='mod_setenvif.o'
+
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.o b/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.o
new file mode 100644
index 00000000..0f0fee97
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_setenvif.o
Binary files differ
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_unique_id.c b/rubbos/app/httpd-2.0.64/modules/metadata/mod_unique_id.c
new file mode 100644
index 00000000..faf1dbd0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_unique_id.c
@@ -0,0 +1,367 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_unique_id.c: generate a unique identifier for each request
+ *
+ * Original author: Dean Gaudet <dgaudet@arctic.org>
+ * UUencoding modified by: Alvaro Martinez Echevarria <alvaro@lander.es>
+ */
+
+#define APR_WANT_BYTEFUNC /* for htons() et al */
+#include "apr_want.h"
+#include "apr_general.h" /* for APR_OFFSETOF */
+#include "apr_network_io.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "http_protocol.h" /* for ap_hook_post_read_request */
+
+#if APR_HAVE_UNISTD_H
+#include <unistd.h> /* for getpid() */
+#endif
+
+typedef struct {
+ unsigned int stamp;
+ unsigned int in_addr;
+ unsigned int pid;
+ unsigned short counter;
+ unsigned int thread_index;
+} unique_id_rec;
+
+/* We are using thread_index (the index into the scoreboard), because we
+ * cannot guarantee the thread_id will be an integer.
+ *
+ * This code looks like it won't give a unique ID with the new thread logic.
+ * It will. The reason is, we don't increment the counter in a thread_safe
+ * manner. Because the thread_index is also in the unique ID now, this does
+ * not matter. In order for the id to not be unique, the same thread would
+ * have to get the same counter twice in the same second.
+ */
+
+/* Comments:
+ *
+ * We want an identifier which is unique across all hits, everywhere.
+ * "everywhere" includes multiple httpd instances on the same machine, or on
+ * multiple machines. Essentially "everywhere" should include all possible
+ * httpds across all servers at a particular "site". We make some assumptions
+ * that if the site has a cluster of machines then their time is relatively
+ * synchronized. We also assume that the first address returned by a
+ * gethostbyname (gethostname()) is unique across all the machines at the
+ * "site".
+ *
+ * We also further assume that pids fit in 32-bits. If something uses more
+ * than 32-bits, the fix is trivial, but it requires the unrolled uuencoding
+ * loop to be extended. A similar fix is needed to support multithreaded
+ * servers, using a pid/tid combo.
+ *
+ * Together, the in_addr and pid are assumed to absolutely uniquely identify
+ * this one child from all other currently running children on all servers
+ * (including this physical server if it is running multiple httpds) from each
+ * other.
+ *
+ * The stamp and counter are used to distinguish all hits for a particular
+ * (in_addr,pid) pair. The stamp is updated using r->request_time,
+ * saving cpu cycles. The counter is never reset, and is used to permit up to
+ * 64k requests in a single second by a single child.
+ *
+ * The 112-bits of unique_id_rec are encoded using the alphabet
+ * [A-Za-z0-9@-], resulting in 19 bytes of printable characters. That is then
+ * stuffed into the environment variable UNIQUE_ID so that it is available to
+ * other modules. The alphabet choice differs from normal base64 encoding
+ * [A-Za-z0-9+/] because + and / are special characters in URLs and we want to
+ * make it easy to use UNIQUE_ID in URLs.
+ *
+ * Note that UNIQUE_ID should be considered an opaque token by other
+ * applications. No attempt should be made to dissect its internal components.
+ * It is an abstraction that may change in the future as the needs of this
+ * module change.
+ *
+ * It is highly desirable that identifiers exist for "eternity". But future
+ * needs (such as much faster webservers, moving to 64-bit pids, or moving to a
+ * multithreaded server) may dictate a need to change the contents of
+ * unique_id_rec. Such a future implementation should ensure that the first
+ * field is still a time_t stamp. By doing that, it is possible for a site to
+ * have a "flag second" in which they stop all of their old-format servers,
+ * wait one entire second, and then start all of their new servers. This
+ * procedure will ensure that the new space of identifiers is completely unique
+ * from the old space. (Since the first four unencoded bytes always differ.)
+ */
+/*
+ * Sun Jun 7 05:43:49 CEST 1998 -- Alvaro
+ * More comments:
+ * 1) The UUencoding procedure is now done in a general way, avoiding the problems
+ * with sizes and paddings that can arise depending on the architecture. Now the
+ * offsets and sizes of the elements of the unique_id_rec structure are calculated
+ * in unique_id_global_init; and then used to duplicate the structure without the
+ * paddings that might exist. The multithreaded server fix should be now very easy:
+ * just add a new "tid" field to the unique_id_rec structure, and increase by one
+ * UNIQUE_ID_REC_MAX.
+ * 2) unique_id_rec.stamp has been changed from "time_t" to "unsigned int", because
+ * its size is 64bits on some platforms (linux/alpha), and this caused problems with
+ * htonl/ntohl. Well, this shouldn't be a problem till year 2106.
+ */
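+/*
+ * Usage sketch (assumes mod_log_config is also loaded): once this module
+ * has run, the identifier is available in the UNIQUE_ID environment
+ * variable, e.g. for logging:
+ *
+ * LogFormat "%{UNIQUE_ID}e %h %l %u %t \"%r\" %>s %b" unique_common
+ *
+ * or to CGI scripts and other modules via r->subprocess_env.
+ */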
+
+static unsigned global_in_addr;
+
+static unique_id_rec cur_unique_id;
+
+/*
+ * Number of elements in the structure unique_id_rec.
+ */
+#define UNIQUE_ID_REC_MAX 5
+
+static unsigned short unique_id_rec_offset[UNIQUE_ID_REC_MAX],
+ unique_id_rec_size[UNIQUE_ID_REC_MAX],
+ unique_id_rec_total_size,
+ unique_id_rec_size_uu;
+
+static int unique_id_global_init(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *main_server)
+{
+ char str[APRMAXHOSTLEN + 1];
+ apr_status_t rv;
+ char *ipaddrstr;
+ apr_sockaddr_t *sockaddr;
+
+ /*
+ * Calculate the sizes and offsets in cur_unique_id.
+ */
+ unique_id_rec_offset[0] = APR_OFFSETOF(unique_id_rec, stamp);
+ unique_id_rec_size[0] = sizeof(cur_unique_id.stamp);
+ unique_id_rec_offset[1] = APR_OFFSETOF(unique_id_rec, in_addr);
+ unique_id_rec_size[1] = sizeof(cur_unique_id.in_addr);
+ unique_id_rec_offset[2] = APR_OFFSETOF(unique_id_rec, pid);
+ unique_id_rec_size[2] = sizeof(cur_unique_id.pid);
+ unique_id_rec_offset[3] = APR_OFFSETOF(unique_id_rec, counter);
+ unique_id_rec_size[3] = sizeof(cur_unique_id.counter);
+ unique_id_rec_offset[4] = APR_OFFSETOF(unique_id_rec, thread_index);
+ unique_id_rec_size[4] = sizeof(cur_unique_id.thread_index);
+ unique_id_rec_total_size = unique_id_rec_size[0] + unique_id_rec_size[1] +
+ unique_id_rec_size[2] + unique_id_rec_size[3] +
+ unique_id_rec_size[4];
+
+ /*
+ * Calculate the size of the structure when encoded.
+ */
+ unique_id_rec_size_uu = (unique_id_rec_total_size*8+5)/6;
+
+ /*
+ * Now get the global in_addr. Note that it is not sufficient to use one
+ * of the addresses from the main_server, since those aren't as likely to
+     * be unique as the physical address of the machine.
+ */
+ if ((rv = apr_gethostname(str, sizeof(str) - 1, p)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ALERT, rv, main_server,
+ "mod_unique_id: unable to find hostname of the server");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ if ((rv = apr_sockaddr_info_get(&sockaddr, str, AF_INET, 0, 0, p)) == APR_SUCCESS) {
+ global_in_addr = sockaddr->sa.sin.sin_addr.s_addr;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_ALERT, rv, main_server,
+ "mod_unique_id: unable to find IPv4 address of \"%s\"", str);
+#if APR_HAVE_IPV6
+ if ((rv = apr_sockaddr_info_get(&sockaddr, str, AF_INET6, 0, 0, p)) == APR_SUCCESS) {
+ memcpy(&global_in_addr,
+ (char *)sockaddr->ipaddr_ptr + sockaddr->ipaddr_len - sizeof(global_in_addr),
+ sizeof(global_in_addr));
+ ap_log_error(APLOG_MARK, APLOG_ALERT, rv, main_server,
+ "mod_unique_id: using low-order bits of IPv6 address "
+ "as if they were unique");
+ }
+ else
+#endif
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ apr_sockaddr_ip_get(&ipaddrstr, sockaddr);
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, main_server,
+ "mod_unique_id: using ip addr %s",
+ ipaddrstr);
+
+ /*
+ * If the server is pummelled with restart requests we could possibly end
+ * up in a situation where we're starting again during the same second
+ * that has been used in previous identifiers. Avoid that situation.
+ *
+ * In truth, for this to actually happen not only would it have to restart
+ * in the same second, but it would have to somehow get the same pids as
+ * one of the other servers that was running in that second. Which would
+ * mean a 64k wraparound on pids ... not very likely at all.
+ *
+ * But protecting against it is relatively cheap. We just sleep into the
+ * next second.
+ */
+ apr_sleep(apr_time_from_sec(1) - apr_time_usec(apr_time_now()));
+ return OK;
+}
+
+static void unique_id_child_init(apr_pool_t *p, server_rec *s)
+{
+ pid_t pid;
+ apr_time_t tv;
+
+ /*
+ * Note that we use the pid because it's possible that on the same
+ * physical machine there are multiple servers (i.e. using Listen). But
+ * it's guaranteed that none of them will share the same pids between
+ * children.
+ *
+ * XXX: for multithread this needs to use a pid/tid combo and probably
+ * needs to be expanded to 32 bits
+ */
+ pid = getpid();
+ cur_unique_id.pid = pid;
+
+ /*
+ * Test our assumption that the pid is 32-bits. It's possible that
+ * 64-bit machines will declare pid_t to be 64 bits but only use 32
+ * of them. It would have been really nice to test this during
+ * global_init ... but oh well.
+ */
+ if ((pid_t)cur_unique_id.pid != pid) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
+ "oh no! pids are greater than 32-bits! I'm broken!");
+ }
+
+ cur_unique_id.in_addr = global_in_addr;
+
+ /*
+ * If we use 0 as the initial counter we have a little less protection
+ * against restart problems, and a little less protection against a clock
+ * going backwards in time.
+ */
+ tv = apr_time_now();
+ /* Some systems have very low variance on the low end of their system
+ * counter, defend against that.
+ */
+ cur_unique_id.counter = (unsigned short)(apr_time_usec(tv) / 10);
+
+ /*
+ * We must always use network ordering for these bytes, so that
+ * identifiers are comparable between machines of different byte
+ * orderings. Note in_addr is already in network order.
+ */
+ cur_unique_id.pid = htonl(cur_unique_id.pid);
+ cur_unique_id.counter = htons(cur_unique_id.counter);
+}
+
+/* NOTE: This is *NOT* the same encoding used by base64encode ... the last two
+ * characters should be + and /. But those two characters have very special
+ * meanings in URLs, and we want to make it easy to use identifiers in
+ * URLs. So we replace them with @ and -.
+ */
+static const char uuencoder[64] = {
+ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
+ 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
+ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
+ 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
+ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '@', '-',
+};
+
+static int gen_unique_id(request_rec *r)
+{
+ char *str;
+ /*
+     * Buffer padded with two final bytes, used to copy the unique_id_rec
+     * structure without the internal padding that it could have.
+ */
+ unique_id_rec new_unique_id;
+ struct {
+ unique_id_rec foo;
+ unsigned char pad[2];
+ } paddedbuf;
+ unsigned char *x,*y;
+ unsigned short counter;
+ const char *e;
+ int i,j,k;
+
+ /* copy the unique_id if this is an internal redirect (we're never
+ * actually called for sub requests, so we don't need to test for
+ * them) */
+ if (r->prev && (e = apr_table_get(r->subprocess_env, "REDIRECT_UNIQUE_ID"))) {
+ apr_table_setn(r->subprocess_env, "UNIQUE_ID", e);
+ return DECLINED;
+ }
+
+ new_unique_id.in_addr = cur_unique_id.in_addr;
+ new_unique_id.pid = cur_unique_id.pid;
+ new_unique_id.counter = cur_unique_id.counter;
+
+ new_unique_id.stamp = htonl((unsigned int)r->request_time);
+ new_unique_id.thread_index = htonl((unsigned int)r->connection->id);
+
+    /* we'll use a temporary buffer to avoid uuencoding the possible internal
+     * padding of the original structure */
+ x = (unsigned char *) &paddedbuf;
+ y = (unsigned char *) &new_unique_id;
+ k = 0;
+ for (i = 0; i < UNIQUE_ID_REC_MAX; i++) {
+ y = ((unsigned char *) &new_unique_id) + unique_id_rec_offset[i];
+ for (j = 0; j < unique_id_rec_size[i]; j++, k++) {
+ x[k] = y[j];
+ }
+ }
+ /*
+ * We reset two more bytes just in case padding is needed for the uuencoding.
+ */
+ x[k++] = '\0';
+ x[k++] = '\0';
+
+ /* alloc str and do the uuencoding */
+ str = (char *)apr_palloc(r->pool, unique_id_rec_size_uu + 1);
+ k = 0;
+ for (i = 0; i < unique_id_rec_total_size; i += 3) {
+ y = x + i;
+ str[k++] = uuencoder[y[0] >> 2];
+ str[k++] = uuencoder[((y[0] & 0x03) << 4) | ((y[1] & 0xf0) >> 4)];
+ if (k == unique_id_rec_size_uu) break;
+ str[k++] = uuencoder[((y[1] & 0x0f) << 2) | ((y[2] & 0xc0) >> 6)];
+ if (k == unique_id_rec_size_uu) break;
+ str[k++] = uuencoder[y[2] & 0x3f];
+ }
+ str[k++] = '\0';
+
+ /* set the environment variable */
+ apr_table_setn(r->subprocess_env, "UNIQUE_ID", str);
+
+ /* and increment the identifier for the next call */
+
+ counter = ntohs(new_unique_id.counter) + 1;
+ cur_unique_id.counter = htons(counter);
+
+ return DECLINED;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(unique_id_global_init, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_child_init(unique_id_child_init, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_read_request(gen_unique_id, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA unique_id_module = {
+ STANDARD20_MODULE_STUFF,
+    NULL,                        /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server configs */
+ NULL, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
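A minimal standalone sketch of the encoding step used in gen_unique_id() above, shown in isolation: it reuses the [A-Za-z0-9@-] alphabet and the same 3-bytes-to-4-characters grouping, but over an arbitrary byte buffer. The uuencode() helper, main() and the sample record bytes are illustrative only and are not part of the module.

    #include <stdio.h>
    #include <string.h>

    static const char uuencoder[] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789@-";

    /* Encode 'len' bytes of 'in' into 'out' using the modified base64
     * alphabet. 'out' must hold at least (len * 8 + 5) / 6 + 1 bytes, and
     * 'in' must be padded with two spare zero bytes, just as gen_unique_id()
     * pads its paddedbuf, because the last iteration may read past 'len'. */
    static void uuencode(const unsigned char *in, int len, char *out)
    {
        int n_out = (len * 8 + 5) / 6;   /* number of 6-bit output characters */
        int i, k = 0;

        for (i = 0; i < len; i += 3) {
            const unsigned char *y = in + i;
            out[k++] = uuencoder[y[0] >> 2];
            out[k++] = uuencoder[((y[0] & 0x03) << 4) | ((y[1] & 0xf0) >> 4)];
            if (k == n_out) break;
            out[k++] = uuencoder[((y[1] & 0x0f) << 2) | ((y[2] & 0xc0) >> 6)];
            if (k == n_out) break;
            out[k++] = uuencoder[y[2] & 0x3f];
        }
        out[k] = '\0';
    }

    int main(void)
    {
        /* 14 significant bytes (stamp, in_addr, pid, counter) plus two zero
         * padding bytes; the values are arbitrary sample data. */
        unsigned char rec[16] = {
            0x3a, 0x1f, 0x00, 0x07,  /* stamp    */
            0xc0, 0xa8, 0x00, 0x01,  /* in_addr  */
            0x00, 0x00, 0x12, 0x34,  /* pid      */
            0x00, 0x2a,              /* counter  */
            0x00, 0x00               /* padding  */
        };
        char id[32];

        uuencode(rec, 14, id);
        printf("UNIQUE_ID-style token: %s\n", id);   /* 19 characters */
        return 0;
    }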
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_unique_id.dsp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_unique_id.dsp
new file mode 100644
index 00000000..9c80da49
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_unique_id.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_unique_id" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_unique_id - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_unique_id.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_unique_id.mak" CFG="mod_unique_id - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_unique_id - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_unique_id - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_unique_id - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_unique_id_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib /nologo /subsystem:windows /dll /out:"Release/mod_unique_id.so" /base:@..\..\os\win32\BaseAddr.ref,mod_unique_id.so
+# ADD LINK32 kernel32.lib ws2_32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_unique_id.so" /base:@..\..\os\win32\BaseAddr.ref,mod_unique_id.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_unique_id - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_unique_id_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_unique_id.so" /base:@..\..\os\win32\BaseAddr.ref,mod_unique_id.so
+# ADD LINK32 kernel32.lib ws2_32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_unique_id.so" /base:@..\..\os\win32\BaseAddr.ref,mod_unique_id.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_unique_id - Win32 Release"
+# Name "mod_unique_id - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_unique_id.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_unique_id.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_unique_id - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_unique_id.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_unique_id.so "unique_id_module for Apache" ../../include/ap_release.h > .\mod_unique_id.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_unique_id - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_unique_id.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_unique_id.so "unique_id_module for Apache" ../../include/ap_release.h > .\mod_unique_id.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_unique_id.exp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_unique_id.exp
new file mode 100644
index 00000000..93000f1e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_unique_id.exp
@@ -0,0 +1 @@
+unique_id_module
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_usertrack.c b/rubbos/app/httpd-2.0.64/modules/metadata/mod_usertrack.c
new file mode 100644
index 00000000..94c46a37
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_usertrack.c
@@ -0,0 +1,454 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* User Tracking Module (Was mod_cookies.c)
+ *
+ * *** IMPORTANT NOTE: This module is not designed to generate
+ * *** cryptographically secure cookies. This means you should not
+ * *** use cookies generated by this module for authentication purposes
+ *
+ * This Apache module is designed to track users' paths through a site.
+ * It uses the client-side state ("Cookie") protocol developed by Netscape.
+ * It is known to work on most browsers.
+ *
+ * Each time a page is requested we look to see if the browser is sending
+ * us a Cookie: header that we previously generated.
+ *
+ * If we don't find one, then the user hasn't been to this site since
+ * starting their browser or their browser doesn't support cookies. So
+ * we generate a unique Cookie for the transaction and send it back to
+ * the browser (via a "Set-Cookie" header).
+ * Future requests from the same browser should keep the same Cookie line.
+ *
+ * By matching up all the requests with the same cookie you can
+ * work out exactly what path a user took through your site. To log
+ * the cookie, use the " %{Cookie}n " directive in a custom access log:
+ *
+ * Example 1 : If you currently use the standard Log file format (CLF)
+ * and use the command "TransferLog somefilename", add the line
+ * LogFormat "%h %l %u %t \"%r\" %s %b %{Cookie}n"
+ * to your config file.
+ *
+ * Example 2 : If you used to use the old "CookieLog" directive, you
+ * can emulate it by adding the following command to your config file
+ * CustomLog filename "%{Cookie}n \"%r\" %t"
+ *
+ * Mark Cox, mjc@apache.org, 6 July 95
+ *
+ * This file replaces mod_cookies.c
+ */
+
+#include "apr.h"
+#include "apr_lib.h"
+#include "apr_strings.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_request.h"
+
+
+module AP_MODULE_DECLARE_DATA usertrack_module;
+
+typedef struct {
+ int always;
+ int expires;
+} cookie_log_state;
+
+typedef enum {
+ CT_UNSET,
+ CT_NETSCAPE,
+ CT_COOKIE,
+ CT_COOKIE2
+} cookie_type_e;
+
+typedef struct {
+ int enabled;
+ cookie_type_e style;
+ char *cookie_name;
+ char *cookie_domain;
+ char *regexp_string; /* used to compile regexp; save for debugging */
+ regex_t *regexp; /* used to find usertrack cookie in cookie header */
+} cookie_dir_rec;
+
+/* Make Cookie: Now we have to generate something that is going to be
+ * pretty unique. We can base it on the pid, time, and host IP. */
+
+#define COOKIE_NAME "Apache"
+
+static void make_cookie(request_rec *r)
+{
+ cookie_log_state *cls = ap_get_module_config(r->server->module_config,
+ &usertrack_module);
+ /* 1024 == hardcoded constant */
+ char cookiebuf[1024];
+ char *new_cookie;
+ const char *rname = ap_get_remote_host(r->connection, r->per_dir_config,
+ REMOTE_NAME, NULL);
+ cookie_dir_rec *dcfg;
+
+ dcfg = ap_get_module_config(r->per_dir_config, &usertrack_module);
+
+ /* XXX: hmm, this should really tie in with mod_unique_id */
+ apr_snprintf(cookiebuf, sizeof(cookiebuf), "%s.%" APR_TIME_T_FMT, rname,
+ apr_time_now());
+
+ if (cls->expires) {
+
+ /* Cookie with date; as strftime '%a, %d-%h-%y %H:%M:%S GMT' */
+ new_cookie = apr_psprintf(r->pool, "%s=%s; path=/",
+ dcfg->cookie_name, cookiebuf);
+
+ if ((dcfg->style == CT_UNSET) || (dcfg->style == CT_NETSCAPE)) {
+ apr_time_exp_t tms;
+ apr_time_exp_gmt(&tms, r->request_time
+ + apr_time_from_sec(cls->expires));
+ new_cookie = apr_psprintf(r->pool,
+ "%s; expires=%s, "
+ "%.2d-%s-%.2d %.2d:%.2d:%.2d GMT",
+ new_cookie, apr_day_snames[tms.tm_wday],
+ tms.tm_mday,
+ apr_month_snames[tms.tm_mon],
+ tms.tm_year % 100,
+ tms.tm_hour, tms.tm_min, tms.tm_sec);
+ }
+ else {
+ new_cookie = apr_psprintf(r->pool, "%s; max-age=%d",
+ new_cookie, cls->expires);
+ }
+ }
+ else {
+ new_cookie = apr_psprintf(r->pool, "%s=%s; path=/",
+ dcfg->cookie_name, cookiebuf);
+ }
+ if (dcfg->cookie_domain != NULL) {
+ new_cookie = apr_pstrcat(r->pool, new_cookie, "; domain=",
+ dcfg->cookie_domain,
+ (dcfg->style == CT_COOKIE2
+ ? "; version=1"
+ : ""),
+ NULL);
+ }
+
+ apr_table_addn(r->headers_out,
+ (dcfg->style == CT_COOKIE2 ? "Set-Cookie2" : "Set-Cookie"),
+ new_cookie);
+ apr_table_setn(r->notes, "cookie", apr_pstrdup(r->pool, cookiebuf)); /* log first time */
+ return;
+}
+
+/* dcfg->regexp is "^cookie_name=([^;]+)|;[ \t]+cookie_name=([^;]+)",
+ * which has three subexpressions, $0..$2 */
+#define NUM_SUBS 3
+
+static void set_and_comp_regexp(cookie_dir_rec *dcfg,
+ apr_pool_t *p,
+ const char *cookie_name)
+{
+ int danger_chars = 0;
+ const char *sp = cookie_name;
+
+ /* The goal is to end up with this regexp,
+     * ^cookie_name=([^;]+)|;[ \t]+cookie_name=([^;]+)
+ * with cookie_name obviously substituted either
+ * with the real cookie name set by the user in httpd.conf, or with the
+ * default COOKIE_NAME.
+ */
+
+ /* Anyway, we need to escape the cookie_name before pasting it
+ * into the regex
+ */
+ while (*sp) {
+ if (!apr_isalnum(*sp)) {
+ ++danger_chars;
+ }
+ ++sp;
+ }
+
+ if (danger_chars) {
+ char *cp;
+ cp = apr_palloc(p, sp - cookie_name + danger_chars + 1); /* 1 == \0 */
+ sp = cookie_name;
+ cookie_name = cp;
+ while (*sp) {
+ if (!apr_isalnum(*sp)) {
+ *cp++ = '\\';
+ }
+ *cp++ = *sp++;
+ }
+ *cp = '\0';
+ }
+
+ dcfg->regexp_string = apr_pstrcat(p, "^",
+ cookie_name,
+ "=([^;]+)|;[ \t]+",
+ cookie_name,
+ "=([^;]+)", NULL);
+
+ dcfg->regexp = ap_pregcomp(p, dcfg->regexp_string, REG_EXTENDED);
+ ap_assert(dcfg->regexp != NULL);
+}
+
+static int spot_cookie(request_rec *r)
+{
+ cookie_dir_rec *dcfg = ap_get_module_config(r->per_dir_config,
+ &usertrack_module);
+ const char *cookie_header;
+ regmatch_t regm[NUM_SUBS];
+
+ /* Do not run in subrequests */
+ if (!dcfg->enabled || r->main) {
+ return DECLINED;
+ }
+
+ if ((cookie_header = apr_table_get(r->headers_in, "Cookie"))) {
+ if (!ap_regexec(dcfg->regexp, cookie_header, NUM_SUBS, regm, 0)) {
+ char *cookieval = NULL;
+ /* Our regexp,
+ * ^cookie_name=([^;]+)|;[ \t]+cookie_name=([^;]+)
+ * only allows for $1 or $2 to be available. ($0 is always
+ * filled with the entire matched expression, not just
+ * the part in parentheses.) So just check for either one
+ * and assign to cookieval if present. */
+ if (regm[1].rm_so != -1) {
+ cookieval = ap_pregsub(r->pool, "$1", cookie_header,
+ NUM_SUBS, regm);
+ }
+ if (regm[2].rm_so != -1) {
+ cookieval = ap_pregsub(r->pool, "$2", cookie_header,
+ NUM_SUBS, regm);
+ }
+ /* Set the cookie in a note, for logging */
+ apr_table_setn(r->notes, "cookie", cookieval);
+
+ return DECLINED; /* There's already a cookie, no new one */
+ }
+ }
+ make_cookie(r);
+ return OK; /* We set our cookie */
+}
+
+static void *make_cookie_log_state(apr_pool_t *p, server_rec *s)
+{
+ cookie_log_state *cls =
+ (cookie_log_state *) apr_palloc(p, sizeof(cookie_log_state));
+
+ cls->expires = 0;
+
+ return (void *) cls;
+}
+
+static void *make_cookie_dir(apr_pool_t *p, char *d)
+{
+ cookie_dir_rec *dcfg;
+
+ dcfg = (cookie_dir_rec *) apr_pcalloc(p, sizeof(cookie_dir_rec));
+ dcfg->cookie_name = COOKIE_NAME;
+ dcfg->cookie_domain = NULL;
+ dcfg->style = CT_UNSET;
+ dcfg->enabled = 0;
+
+ /* In case the user does not use the CookieName directive,
+ * we need to compile the regexp for the default cookie name. */
+ set_and_comp_regexp(dcfg, p, COOKIE_NAME);
+
+ return dcfg;
+}
+
+static const char *set_cookie_enable(cmd_parms *cmd, void *mconfig, int arg)
+{
+ cookie_dir_rec *dcfg = mconfig;
+
+ dcfg->enabled = arg;
+ return NULL;
+}
+
+static const char *set_cookie_exp(cmd_parms *parms, void *dummy,
+ const char *arg)
+{
+ cookie_log_state *cls;
+ time_t factor, modifier = 0;
+ time_t num = 0;
+ char *word;
+
+ cls = ap_get_module_config(parms->server->module_config,
+ &usertrack_module);
+ /* The simple case first - all numbers (we assume) */
+ if (apr_isdigit(arg[0]) && apr_isdigit(arg[strlen(arg) - 1])) {
+ cls->expires = atol(arg);
+ return NULL;
+ }
+
+ /*
+ * The harder case - stolen from mod_expires
+ *
+ * CookieExpires "[plus] {<num> <type>}*"
+ */
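+    /*
+     * For example (hypothetical directive): CookieExpires "plus 2 weeks 3 days"
+     * is parsed below as 2*604800 + 3*86400 = 1468800 seconds.
+     */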
+
+ word = ap_getword_conf(parms->pool, &arg);
+ if (!strncasecmp(word, "plus", 1)) {
+ word = ap_getword_conf(parms->pool, &arg);
+ };
+
+ /* {<num> <type>}* */
+ while (word[0]) {
+ /* <num> */
+ if (apr_isdigit(word[0]))
+ num = atoi(word);
+ else
+ return "bad expires code, numeric value expected.";
+
+ /* <type> */
+ word = ap_getword_conf(parms->pool, &arg);
+ if (!word[0])
+ return "bad expires code, missing <type>";
+
+ factor = 0;
+ if (!strncasecmp(word, "years", 1))
+ factor = 60 * 60 * 24 * 365;
+ else if (!strncasecmp(word, "months", 2))
+ factor = 60 * 60 * 24 * 30;
+ else if (!strncasecmp(word, "weeks", 1))
+ factor = 60 * 60 * 24 * 7;
+ else if (!strncasecmp(word, "days", 1))
+ factor = 60 * 60 * 24;
+ else if (!strncasecmp(word, "hours", 1))
+ factor = 60 * 60;
+ else if (!strncasecmp(word, "minutes", 2))
+ factor = 60;
+ else if (!strncasecmp(word, "seconds", 1))
+ factor = 1;
+ else
+ return "bad expires code, unrecognized type";
+
+ modifier = modifier + factor * num;
+
+ /* next <num> */
+ word = ap_getword_conf(parms->pool, &arg);
+ }
+
+ cls->expires = modifier;
+
+ return NULL;
+}
+
+static const char *set_cookie_name(cmd_parms *cmd, void *mconfig,
+ const char *name)
+{
+ cookie_dir_rec *dcfg = (cookie_dir_rec *) mconfig;
+
+ dcfg->cookie_name = apr_pstrdup(cmd->pool, name);
+
+ set_and_comp_regexp(dcfg, cmd->pool, name);
+
+ if (dcfg->regexp == NULL) {
+ return "Regular expression could not be compiled.";
+ }
+ if (dcfg->regexp->re_nsub + 1 != NUM_SUBS) {
+ return apr_pstrcat(cmd->pool, "Invalid cookie name \"",
+ name, "\"", NULL);
+ }
+
+ return NULL;
+}
+
+/*
+ * Set the value for the 'Domain=' attribute.
+ */
+static const char *set_cookie_domain(cmd_parms *cmd, void *mconfig,
+ const char *name)
+{
+ cookie_dir_rec *dcfg;
+
+ dcfg = (cookie_dir_rec *) mconfig;
+
+ /*
+ * Apply the restrictions on cookie domain attributes.
+ */
+ if (strlen(name) == 0) {
+ return "CookieDomain values may not be null";
+ }
+ if (name[0] != '.') {
+ return "CookieDomain values must begin with a dot";
+ }
+ if (ap_strchr_c(&name[1], '.') == NULL) {
+ return "CookieDomain values must contain at least one embedded dot";
+ }
+
+ dcfg->cookie_domain = apr_pstrdup(cmd->pool, name);
+ return NULL;
+}
+
+/*
+ * Make a note of the cookie style we should use.
+ */
+static const char *set_cookie_style(cmd_parms *cmd, void *mconfig,
+ const char *name)
+{
+ cookie_dir_rec *dcfg;
+
+ dcfg = (cookie_dir_rec *) mconfig;
+
+ if (strcasecmp(name, "Netscape") == 0) {
+ dcfg->style = CT_NETSCAPE;
+ }
+ else if ((strcasecmp(name, "Cookie") == 0)
+ || (strcasecmp(name, "RFC2109") == 0)) {
+ dcfg->style = CT_COOKIE;
+ }
+ else if ((strcasecmp(name, "Cookie2") == 0)
+ || (strcasecmp(name, "RFC2965") == 0)) {
+ dcfg->style = CT_COOKIE2;
+ }
+ else {
+ return apr_psprintf(cmd->pool, "Invalid %s keyword: '%s'",
+ cmd->cmd->name, name);
+ }
+
+ return NULL;
+}
+
+static const command_rec cookie_log_cmds[] = {
+ AP_INIT_TAKE1("CookieExpires", set_cookie_exp, NULL, OR_FILEINFO,
+ "an expiry date code"),
+ AP_INIT_TAKE1("CookieDomain", set_cookie_domain, NULL, OR_FILEINFO,
+ "domain to which this cookie applies"),
+ AP_INIT_TAKE1("CookieStyle", set_cookie_style, NULL, OR_FILEINFO,
+ "'Netscape', 'Cookie' (RFC2109), or 'Cookie2' (RFC2965)"),
+ AP_INIT_FLAG("CookieTracking", set_cookie_enable, NULL, OR_FILEINFO,
+ "whether or not to enable cookies"),
+ AP_INIT_TAKE1("CookieName", set_cookie_name, NULL, OR_FILEINFO,
+ "name of the tracking cookie"),
+ {NULL}
+};
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_fixups(spot_cookie,NULL,NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA usertrack_module = {
+ STANDARD20_MODULE_STUFF,
+    make_cookie_dir,            /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ make_cookie_log_state, /* server config */
+ NULL, /* merge server configs */
+ cookie_log_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
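A minimal httpd.conf sketch exercising the directives registered in cookie_log_cmds above, assuming mod_usertrack (and mod_log_config for CustomLog) is loaded; the cookie name, domain and log path are illustrative, not module defaults:

    CookieTracking  on
    CookieName      SiteTracker
    CookieDomain    .example.com
    CookieStyle     Cookie
    CookieExpires   "2 weeks"
    CustomLog       logs/clickstream_log "%{Cookie}n \"%r\" %t"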
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_usertrack.dsp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_usertrack.dsp
new file mode 100644
index 00000000..63497a9d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_usertrack.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_usertrack" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_usertrack - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_usertrack.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_usertrack.mak" CFG="mod_usertrack - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_usertrack - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_usertrack - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_usertrack - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_usertrack_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_usertrack.so" /base:@..\..\os\win32\BaseAddr.ref,mod_usertrack.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_usertrack.so" /base:@..\..\os\win32\BaseAddr.ref,mod_usertrack.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_usertrack - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_usertrack_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_usertrack.so" /base:@..\..\os\win32\BaseAddr.ref,mod_usertrack.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_usertrack.so" /base:@..\..\os\win32\BaseAddr.ref,mod_usertrack.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_usertrack - Win32 Release"
+# Name "mod_usertrack - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_usertrack.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_usertrack.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_usertrack - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_usertrack.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_usertrack.so "usertrack_module for Apache" ../../include/ap_release.h > .\mod_usertrack.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_usertrack - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_usertrack.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_usertrack.so "usertrack_module for Apache" ../../include/ap_release.h > .\mod_usertrack.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_usertrack.exp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_usertrack.exp
new file mode 100644
index 00000000..234a5f75
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_usertrack.exp
@@ -0,0 +1 @@
+usertrack_module
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_version.c b/rubbos/app/httpd-2.0.64/modules/metadata/mod_version.c
new file mode 100644
index 00000000..9104929b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_version.c
@@ -0,0 +1,312 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_version.c
+ * Allow conditional configuration depending on the httpd version
+ *
+ * André Malo (nd/perlig.de), January 2004
+ *
+ * Some stuff coded here is heavily based on the core <IfModule>
+ * containers.
+ *
+ * The module makes the following configurations possible:
+ *
+ * <IfVersion op major.minor.patch>
+ * # conditional config here ...
+ * </IfVersion>
+ *
+ * where "op" is one of:
+ * = / == equal
+ * > greater than
+ * >= greater or equal
+ * < less than
+ * <= less or equal
+ *
+ * If minor version and patch level are omitted they are assumed to be 0.
+ *
+ * Alternatively you can match the whole version (including some vendor-added
+ * string of the CORE version, see ap_release.h) against a regular expression:
+ *
+ * <IfVersion op regex>
+ * # conditional config here ...
+ * </IfVersion>
+ *
+ * where "op" is one of:
+ * = / == match; regex must be surrounded by slashes
+ * ~ match; regex MAY NOT be surrounded by slashes
+ *
+ * Note that all operators may be preceded by an exclamation mark
+ * (without spaces) in order to reverse their meaning.
+ *
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+
+
+/* module structure */
+module AP_MODULE_DECLARE_DATA version_module;
+
+/* queried httpd version */
+static ap_version_t httpd_version;
+
+
+/*
+ * compare the supplied version with the core one
+ */
+static int compare_version(char *version_string, const char **error)
+{
+ char *p = version_string, *ep;
+ int version[3] = {0, 0, 0};
+ int c = 0;
+
+ *error = "Version appears to be invalid. It must have the format "
+ "major[.minor[.patch]] where major, minor and patch are "
+ "numbers.";
+
+ if (!apr_isdigit(*p)) {
+ return 0;
+ }
+
+ /* parse supplied version */
+ ep = version_string + strlen(version_string);
+ while (p <= ep && c < 3) {
+ if (*p == '.') {
+ *p = '\0';
+ }
+
+ if (!*p) {
+ version[c++] = atoi(version_string);
+ version_string = ++p;
+ continue;
+ }
+
+ if (!apr_isdigit(*p)) {
+ break;
+ }
+
+ ++p;
+ }
+
+ if (p < ep) { /* syntax error */
+ return 0;
+ }
+
+ *error = NULL;
+
+ if (httpd_version.major > version[0]) {
+ return 1;
+ }
+ else if (httpd_version.major < version[0]) {
+ return -1;
+ }
+ else if (httpd_version.minor > version[1]) {
+ return 1;
+ }
+ else if (httpd_version.minor < version[1]) {
+ return -1;
+ }
+ else if (httpd_version.patch > version[2]) {
+ return 1;
+ }
+ else if (httpd_version.patch < version[2]) {
+ return -1;
+ }
+
+ /* seems to be the same */
+ return 0;
+}
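+
+/*
+ * Worked example (hypothetical version numbers): running under httpd
+ * 2.0.64, the supplied string "2.0" parses to {2, 0, 0}; major and minor
+ * compare equal and patch 64 > 0, so compare_version() returns 1. Hence
+ * "<IfVersion >= 2.0>" matches while "<IfVersion < 2.0>" does not.
+ */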
+
+/*
+ * match version against a regular expression
+ */
+static int match_version(apr_pool_t *pool, char *version_string,
+ const char **error)
+{
+ regex_t *compiled;
+ const char *to_match;
+ int rc;
+
+ compiled = ap_pregcomp(pool, version_string, REG_EXTENDED);
+ if (!compiled) {
+ *error = "Unable to compile regular expression";
+ return 0;
+ }
+
+ *error = NULL;
+
+ to_match = apr_psprintf(pool, "%d.%d.%d%s",
+ httpd_version.major,
+ httpd_version.minor,
+ httpd_version.patch,
+ httpd_version.add_string);
+
+ rc = !ap_regexec(compiled, to_match, 0, NULL, 0);
+
+ ap_pregfree(pool, compiled);
+ return rc;
+}
+
+/*
+ * Implements the <IfVersion> container
+ */
+static const char *start_ifversion(cmd_parms *cmd, void *mconfig,
+ const char *arg1, const char *arg2,
+ const char *arg3)
+{
+ const char *endp;
+ int reverse = 0, done = 0, match = 0, compare;
+ const char *p, *error;
+ char c;
+
+    /* supplying only one argument is possible; we then assume an equality check */
+ if (!arg2) {
+ arg2 = arg1;
+ arg1 = "=";
+ }
+
+    /* version in surrounding quotes, without an operator */
+ if (!arg3 && *arg2 == '>' && !arg2[1]) {
+ arg3 = ">";
+ arg2 = arg1;
+ arg1 = "=";
+ }
+
+    /* a third argument appears when the version is quoted and an operator
+     * is also given; the closing '>' then arrives as its own argument.
+     */
+ endp = arg2 + strlen(arg2);
+ if ( endp == arg2
+ || (!(arg3 && *arg3 == '>' && !arg3[1]) && *--endp != '>')) {
+ return apr_pstrcat(cmd->pool, cmd->cmd->name,
+ "> directive missing closing '>'", NULL);
+ }
+
+ p = arg1;
+ if (*p == '!') {
+ reverse = 1;
+ if (p[1]) {
+ ++p;
+ }
+ }
+
+ c = *p++;
+ if (!*p || (*p == '=' && !p[1] && c != '~')) {
+ if (!httpd_version.major) {
+ ap_get_server_revision(&httpd_version);
+ }
+
+ done = 1;
+ switch (c) {
+ case '=':
+ /* normal comparison */
+ if (*arg2 != '/') {
+ compare = compare_version(apr_pstrmemdup(cmd->pool, arg2,
+ endp-arg2),
+ &error);
+ if (error) {
+ return error;
+ }
+
+ match = !compare;
+ break;
+ }
+
+ /* regexp otherwise */
+ if (endp == ++arg2 || *--endp != '/') {
+ return "Missing delimiting / of regular expression.";
+ }
+
+ case '~':
+ /* regular expression */
+ match = match_version(cmd->pool, apr_pstrmemdup(cmd->pool, arg2,
+ endp-arg2),
+ &error);
+ if (error) {
+ return error;
+ }
+ break;
+
+ case '<':
+ compare = compare_version(apr_pstrmemdup(cmd->pool, arg2,
+ endp-arg2),
+ &error);
+ if (error) {
+ return error;
+ }
+
+ match = ((-1 == compare) || (*p && !compare));
+ break;
+
+ case '>':
+ compare = compare_version(apr_pstrmemdup(cmd->pool, arg2,
+ endp-arg2),
+ &error);
+ if (error) {
+ return error;
+ }
+
+ match = ((1 == compare) || (*p && !compare));
+ break;
+
+ default:
+ done = 0;
+ break;
+ }
+ }
+
+ if (!done) {
+ return apr_pstrcat(cmd->pool, "unrecognized operator '", arg1, "'",
+ NULL);
+ }
+
+ if ((!reverse && match) || (reverse && !match)) {
+ ap_directive_t *parent = NULL;
+ ap_directive_t *current = NULL;
+ const char *retval;
+
+ retval = ap_build_cont_config(cmd->pool, cmd->temp_pool, cmd,
+ &current, &parent, "<IfVersion");
+ *(ap_directive_t **)mconfig = current;
+ return retval;
+ }
+
+ *(ap_directive_t **)mconfig = NULL;
+ return ap_soak_end_container(cmd, "<IfVersion");
+}
+
+static const command_rec version_cmds[] = {
+ AP_INIT_TAKE123("<IfVersion", start_ifversion, NULL, EXEC_ON_READ | OR_ALL,
+ "a comparison operator, a version (and a delimiter)"),
+ { NULL }
+};
+
+module AP_MODULE_DECLARE_DATA version_module =
+{
+ STANDARD20_MODULE_STUFF,
+    NULL,                      /* dir config creator */
+ NULL, /* dir merger --- default is to override */
+ NULL, /* server config */
+ NULL, /* merge server configs */
+ version_cmds, /* command apr_table_t */
+ NULL, /* register hooks */
+};
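A minimal configuration sketch of the <IfVersion> forms accepted by start_ifversion() above; the version numbers are arbitrary examples and the enclosed lines are just comments:

    <IfVersion >= 2.0.64>
        # numeric comparison: major[.minor[.patch]], missing parts default to 0
    </IfVersion>

    <IfVersion ~ ^2\.0\.>
        # regular-expression match against the full version string
    </IfVersion>

    <IfVersion !>= 2.1>
        # any operator may be negated with a leading '!'
    </IfVersion>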
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_version.dsp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_version.dsp
new file mode 100644
index 00000000..e30ff21a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_version.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_version" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_version - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_version.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_version.mak" CFG="mod_version - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_version - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_version - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_version - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_version_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_version.so" /base:@..\..\os\win32\BaseAddr.ref,mod_version.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_version.so" /base:@..\..\os\win32\BaseAddr.ref,mod_version.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_version - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_version_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_version.so" /base:@..\..\os\win32\BaseAddr.ref,mod_version.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_version.so" /base:@..\..\os\win32\BaseAddr.ref,mod_version.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_version - Win32 Release"
+# Name "mod_version - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_version.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_version.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_version - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_version.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_version.so "version_module for Apache" ../../include/ap_release.h > .\mod_version.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_version - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_version.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_version.so "version_module for Apache" ../../include/ap_release.h > .\mod_version.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/mod_version.exp b/rubbos/app/httpd-2.0.64/modules/metadata/mod_version.exp
new file mode 100644
index 00000000..3dce8456
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/mod_version.exp
@@ -0,0 +1 @@
+version_module
diff --git a/rubbos/app/httpd-2.0.64/modules/metadata/modules.mk b/rubbos/app/httpd-2.0.64/modules/metadata/modules.mk
new file mode 100644
index 00000000..fb8eff50
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/metadata/modules.mk
@@ -0,0 +1,7 @@
+mod_env.la: mod_env.lo
+ $(MOD_LINK) mod_env.lo $(MOD_ENV_LDADD)
+mod_setenvif.la: mod_setenvif.lo
+ $(MOD_LINK) mod_setenvif.lo $(MOD_SETENVIF_LDADD)
+DISTCLEAN_TARGETS = modules.mk
+static = mod_env.la mod_setenvif.la
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/.deps b/rubbos/app/httpd-2.0.64/modules/proxy/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/.indent.pro b/rubbos/app/httpd-2.0.64/modules/proxy/.indent.pro
new file mode 100644
index 00000000..e2cd357a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/.indent.pro
@@ -0,0 +1,58 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tapr_bucket_brigade
+-Tapr_pool_t
+-Tap_filter_t
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
+-Tproxy_server_conf
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/CHANGES b/rubbos/app/httpd-2.0.64/modules/proxy/CHANGES
new file mode 100644
index 00000000..73a9228d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/CHANGES
@@ -0,0 +1,223 @@
+******************************************
+* PLEASE NOTE: Now that development for *
+* mod_proxy has been folded back into *
+* the httpd-2.0 tree, this file has *
+* been deprecated.  Proxy changes should *
+* be noted in httpd-2.0's CHANGES file. *
+* This file exists for historical *
+* purposes. *
+******************************************
+
+mod_proxy changes for httpd 2.0.29-dev
+ *) don't do keepalives for sub-requests. [Ian Holsman]
+
+ *) fix up proxypass handling [Ian Holsman]
+
+ *) don't send If-Modified-Since, Cache-Control, or If-None-Match on
+ a subrequest [Ian Holsman]
+
+mod_proxy changes for httpd 2.0.26-dev
+ *) Add new option 'HTTPProxyOverrideReturnedErrors'. By turning the
+    flag on, you will mask the error pages returned by the proxied
+    server, and the error will be handled as if your server generated
+    it. This change was put in so that a 404 on an included
+    r-proxied component will act in the same manner as a 404 on an
+    included file. [Ian Holsman <ianh@cnet.com>]
+
+mod_proxy changes for httpd 2.0.25-dev
+
+ *) Split proxy: space using <Proxy[Match] > directive blocks from
+ the <Directory[Match] > and <Files[Match] > blocks. Mod_proxy
+ now bypasses the directory and files testing phase (and skips
+    the http TRACE default handler on its own, as well). Note that
+ <Location > blocks continue to be processed for proxy: requests.
+ [William Rowe <wrowe@covalent.net>]
+
+ *) apr_uri type/function namespace changes in apr_uri functions
+ [Doug MacEachern <dougm@covalent.net>]
+
+mod_proxy changes for httpd 2.0.23-dev
+
+ *) break the proxy_http_handler into multiple smaller functions.
+ [John Barbee <barbee@veribox.net>]
+
+ *) Fix the proxy when the origin server sends back a 100
+ Continue response. [John Barbee <barbee@veribox.net>]
+
+ *) Change 'readbytes' from apr_size_t to apr_off_t due to change
+ in ap_get_brigade's parameters [John Barbee <barbee@veribox.net>]
+
+mod_proxy changes for httpd 2.0.20-dev
+ *) Timeout added for backend connections.
+ [Victor Orlikowski <v.j.orlikowski@gte.net>]
+
+ *) Fix abort code path in proxy_http.c, similar to FTP fix.
+ [Chuck Murcko <chuck@topsail.org>]
+
+ *) Fix FTP ABOR command execution path.
+ [Victor Orlikowski <v.j.orlikowski@gte.net>]
+
+ *) FTP return code variable cleanup; fixed problem in login
+ [Chuck Murcko <chuck@topsail.org>]
+
+ *) Get PORT working again in the ftp proxy.
+ [Victor Orlikowski <v.j.orlikowski@gte.net>]
+
+ *) Return result code check for FTP QUIT, after fixing
+ problems with passive connection handling.
+ [Victor Orlikowski <v.j.orlikowski@gte.net>]
+
+ *) Reorganize ap_proxy_string_read() internally to not process eos
+ buckets.
+ [Chuck Murcko <chuck@topsail.org>]
+ [Victor Orlikowski <v.j.orlikowski@gte.net>]
+
+ *) Remove result code check for FTP QUIT command. Some servers send
+ nothing at all back in response to QUIT.
+ [Chuck Murcko <chuck@topsail.org>]
+ [Victor Orlikowski <v.j.orlikowski@gte.net>]
+
+mod_proxy changes for httpd 2.0.19
+
+ *) Reverse previous patch since the core reverted.
+ [Chuck Murcko <chuck@topsail.org>]
+
+ *) Remove indirection on number of bytes to read for input filters.
+ [Chuck Murcko <chuck@topsail.org>]
+
+ *) Fixed a problem with directory listing corruption in the
+ PROXY_DIR filter.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) mod_proxy and the proxy submodules now build properly as DSOs.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Stopped the HTTP proxy from trying to read entity bodies when there
+ wasn't one (response was 1xx, 204, 205 or 304).
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Made sure dates were canonicalised correctly when passed to the client
+ browser through the HTTP proxy.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Split each individual proxy protocol into separate modules.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Added Max-Forwards support for all request types so as to prevent
+ loops.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Fix warnings about byte count type on Darwin (connect handler).
+ [Chuck Murcko <chuck@topsail.org>]
+
+mod_proxy changes for httpd 2.0.18
+
+ *) EPSV support for IPv6 in the FTP proxy.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) FTP directory filter works now.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Fixed some thread-safety issues with the HTTP proxy in mod_proxy.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) PASV FTP works now.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Reworked the line-at-a-time read from the control connection to
+ workaround a stray empty bucket returned by the HTTP_IN filter.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Stopped the CORE filter from sending off an HTTP response when a
+ CONNECT tunnel was closed.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Fixed the poll() loop in proxy_connect.c -> it works now!!!
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Converted send_dir() to ap_proxy_send_dir_filter() in proxy_ftp.c.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+mod_proxy changes for httpd 2.0.17
+
+ *) Major rework of ap_proxy_ftp_handler() to use filters (begone foul
+ BUFF!!!). It compiles, but is untested, and the build environment needs
+ to be fixed to include proxy_ftp.c.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Cleanup of dead functions within proxy_util.c.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Reworked the storage of the client socket between keepalive connections
+ to fix some nasty problems with the socket lasting longer than the
+ memory pool it was allocated from.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Fixed bug where a hostname without a "." in it (such as "localhost")
+ would not trigger an IP address check with ProxyBlock.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+mod_proxy changes for httpd 2.0.16
+
+ *) Fixed ProxyBlock bugs with ap_proxy_http_handler() and
+ ap_proxy_connect_handler().
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Updated ap_proxy_connect_handler() to support APR, while
+ moving some common code between http_handler and connect_handler
+ to proxy_util.c.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Updated mod_proxy.html docs to include v2.0 configuration.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Fixed problem where responses without entity bodies would cause
+ the directly following proxy keepalive request to fail.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+mod_proxy changes for httpd 2.0.15
+
+ *) Added support for downstream keepalives in mod_proxy.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Changed mod_proxy ap_proxy_http_handler() to support APR properly.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Fix problem where incoming response headers were not being returned
+ to the client in mod_proxy.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Added X-Forwarded-For, X-Forwarded-Host and X-Forwarded-Server to
+ reverse proxied request headers in mod_proxy.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) replace INADDR_NONE with APR_INADDR_NONE [Ian Holsman <IanH@cnet.com>]
+
+ *) Fix problem with proxy configuration where globally set
+ configuration options were overridden inside virtual hosts.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Fix ProxyReceiveBufferSize where default value was left
+ uninitialised.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+ *) Some small changes:
+ - Ensured hop-by-hop headers were stripped as per
+ RFC2616 13.5.1.
+ - Upgraded version code to HTTP/1.1.
+ - Added Connection: close until Keepalives come.
+ - Some cosmetic fixes and commenting.
+ [Graham Leggett <minfrin@sharp.fm>]
+
+mod_proxy changes for httpd 2.0.14
+
+ *) removed ProxyNoCache and ProxyCacheForceCompletion config directives,
+ since we no longer directly cache from this module
+ [Chuck Murcko <chuck@topsail.org>]
+
+ *) removed cache
+ [Chuck Murcko <chuck@topsail.org>]
+
+ *) initial rerebuild for 2.0
+ [Chuck Murcko <chuck@topsail.org>]
+
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/Makefile b/rubbos/app/httpd-2.0.64/modules/proxy/Makefile
new file mode 100644
index 00000000..d1597bdf
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/proxy
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/proxy
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/proxy
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/Makefile.in b/rubbos/app/httpd-2.0.64/modules/proxy/Makefile.in
new file mode 100644
index 00000000..7c5c149d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/Makefile.in
@@ -0,0 +1,3 @@
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUmakefile b/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUmakefile
new file mode 100644
index 00000000..61842f0a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUmakefile
@@ -0,0 +1,247 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)\build\NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME =
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION =
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME =
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE =
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/proxy.nlm \
+ $(OBJDIR)/proxycon.nlm \
+ $(OBJDIR)/proxyftp.nlm \
+ $(OBJDIR)/proxyhtp.nlm \
+ $(EOLIST)
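+# (proxy.nlm is the core proxy module; proxycon, proxyftp and proxyhtp are
+# the CONNECT, FTP and HTTP sub-modules, each built from the matching
+# NWGNUproxy* makefile in this directory.)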
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ $(EOLIST)
+
+#
+# Any exported symbols go here
+#
+FILES_nlm_exports = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ copy $(OBJDIR)\*.nlm $(INSTALL)\Apache2\modules\*.*
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxy b/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxy
new file mode 100644
index 00000000..d6abf6b3
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxy
@@ -0,0 +1,261 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(AP_WORK)/modules/http \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = proxy
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Proxy Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Proxy Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/proxy.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_proxy.o \
+ $(OBJDIR)/proxy_util.o \
+ $(OBJDIR)/libprews.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ @ws2nlm.imp \
+ $(EOLIST)
+
+#
+# Any exported symbols go here
+#
+FILES_nlm_exports = \
+ proxy_module \
+ proxy_hook_scheme_handler \
+ proxy_hook_canon_handler \
+ ap_proxy_ssl_enable \
+ ap_proxy_ssl_disable \
+ proxy_run_fixups \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+$(OBJDIR)/%.o: ../arch/netware/%.c $(OBJDIR)\$(NLM_NAME)_cc.opt
+ @echo compiling $<
+ $(CC) $< -o=$(OBJDIR)\$(@F) @$(OBJDIR)\$(NLM_NAME)_cc.opt
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxycon b/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxycon
new file mode 100644
index 00000000..07c91f70
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxycon
@@ -0,0 +1,254 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(AP_WORK)/modules/http \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = proxycon
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Proxy Connection Sub-Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Proxy Conn Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/proxycon.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/proxy_connect.o \
+ $(OBJDIR)/proxy_util.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ proxy \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ proxy_module \
+ proxy_hook_scheme_handler \
+ proxy_hook_canon_handler \
+ $(EOLIST)
+
+#
+# Any exported symbols go here
+#
+FILES_nlm_exports = \
+ proxy_connect_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxyftp b/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxyftp
new file mode 100644
index 00000000..bd5d527c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxyftp
@@ -0,0 +1,260 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(AP_WORK)/modules/http \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = proxyftp
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Proxy FTP Sub-Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Proxy FTP Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/proxyftp.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/proxy_ftp.o \
+ $(OBJDIR)/proxy_util.o \
+ $(OBJDIR)/libprews.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ proxy \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ @ws2nlm.imp \
+ proxy_module \
+ proxy_hook_scheme_handler \
+ proxy_hook_canon_handler \
+ $(EOLIST)
+
+#
+# Any exported symbols go here
+#
+FILES_nlm_exports = \
+ proxy_ftp_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+$(OBJDIR)/%.o: ../arch/netware/%.c $(OBJDIR)\$(NLM_NAME)_cc.opt
+ @echo compiling $<
+ $(CC) $< -o=$(OBJDIR)\$(@F) @$(OBJDIR)\$(NLM_NAME)_cc.opt
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxyhtp b/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxyhtp
new file mode 100644
index 00000000..5fda2693
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/NWGNUproxyhtp
@@ -0,0 +1,263 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(AP_WORK)/include \
+ $(AP_WORK)/modules/http \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = proxyhtp
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Proxy HTTP Sub-Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Proxy HTTP Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/proxyhtp.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/proxy_http.o \
+ $(OBJDIR)/proxy_util.o \
+ $(OBJDIR)/libprews.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ proxy \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @libc.imp \
+ @ws2nlm.imp \
+ proxy_module \
+ proxy_hook_scheme_handler \
+ proxy_hook_canon_handler \
+ proxy_run_fixups \
+ ap_proxy_ssl_enable \
+ ap_proxy_ssl_disable \
+ $(EOLIST)
+
+#
+# Any exported symbols go here
+#
+FILES_nlm_exports = \
+ proxy_http_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+$(OBJDIR)/%.o: ../arch/netware/%.c $(OBJDIR)\$(NLM_NAME)_cc.opt
+ @echo compiling $<
+ $(CC) $< -o=$(OBJDIR)\$(@F) @$(OBJDIR)\$(NLM_NAME)_cc.opt
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/config.m4 b/rubbos/app/httpd-2.0.64/modules/proxy/config.m4
new file mode 100644
index 00000000..d33683e9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/config.m4
@@ -0,0 +1,34 @@
+dnl modules enabled in this directory by default
+
+APACHE_MODPATH_INIT(proxy)
+
+if test "$enable_proxy" = "shared"; then
+ proxy_mods_enable=shared
+elif test "$enable_proxy" = "yes"; then
+ proxy_mods_enable=yes
+else
+ proxy_mods_enable=no
+fi
+
+proxy_objs="mod_proxy.lo proxy_util.lo"
+APACHE_MODULE(proxy, Apache proxy module, $proxy_objs, , $proxy_mods_enable)
+
+proxy_connect_objs="proxy_connect.lo"
+proxy_ftp_objs="proxy_ftp.lo"
+proxy_http_objs="proxy_http.lo"
+
+case "$host" in
+ *os2*)
+ # OS/2 DLLs must resolve all symbols at build time and
+ # these sub-modules need some from the main proxy module
+ proxy_connect_objs="$proxy_connect_objs mod_proxy.la"
+ proxy_ftp_objs="$proxy_ftp_objs mod_proxy.la"
+ proxy_http_objs="$proxy_http_objs mod_proxy.la"
+ ;;
+esac
+
+APACHE_MODULE(proxy_connect, Apache proxy CONNECT module, $proxy_connect_objs, , $proxy_mods_enable)
+APACHE_MODULE(proxy_ftp, Apache proxy FTP module, $proxy_ftp_objs, , $proxy_mods_enable)
+APACHE_MODULE(proxy_http, Apache proxy HTTP module, $proxy_http_objs, , $proxy_mods_enable)
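+dnl Passing --enable-proxy=shared (or =yes) to configure therefore selects the
+dnl same default build mode for mod_proxy and the three sub-modules above; on
+dnl OS/2 each sub-module is additionally linked against mod_proxy.la so that
+dnl the proxy symbols it references resolve at build time.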
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/libproxy.exp b/rubbos/app/httpd-2.0.64/modules/proxy/libproxy.exp
new file mode 100644
index 00000000..a20f2378
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/libproxy.exp
@@ -0,0 +1 @@
+proxy_module
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy.c b/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy.c
new file mode 100644
index 00000000..84d5fb10
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy.c
@@ -0,0 +1,1181 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define CORE_PRIVATE
+
+#include "mod_proxy.h"
+#include "mod_core.h"
+
+#include "apr_optional.h"
+
+#ifndef MAX
+#define MAX(x,y) ((x) >= (y) ? (x) : (y))
+#endif
+
+/*
+ * A Web proxy module. Stages:
+ *
+ * translate_name: set filename to proxy:<URL>
+ * map_to_storage: run proxy_walk (rather than directory_walk/file_walk)
+ * can't trust directory_walk/file_walk since these are
+ * not in our filesystem. Prevents mod_http from serving
+ * the TRACE request we will set aside to handle later.
+ * type_checker: set type to PROXY_MAGIC_TYPE if filename begins proxy:
+ * fix_ups: convert the URL stored in the filename to the
+ * canonical form.
+ * handler: handle proxy requests
+ */
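+/* (In this file: proxy_detect performs the early absolute-URI check,
+ * proxy_trans implements translate_name, proxy_map_location/proxy_walk
+ * cover map_to_storage, proxy_fixup canonicalises the URL, and
+ * proxy_handler services the request.)
+ */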
+
+/* -------------------------------------------------------------- */
+/* Translate the URL into a 'filename' */
+
+static int alias_match(const char *uri, const char *alias_fakename)
+{
+ const char *end_fakename = alias_fakename + strlen(alias_fakename);
+ const char *aliasp = alias_fakename, *urip = uri;
+
+ while (aliasp < end_fakename) {
+ if (*aliasp == '/') {
+ /* any number of '/' in the alias matches any number in
+ * the supplied URI, but there must be at least one...
+ */
+ if (*urip != '/')
+ return 0;
+
+ while (*aliasp == '/')
+ ++aliasp;
+ while (*urip == '/')
+ ++urip;
+ }
+ else {
+ /* Other characters are compared literally */
+ if (*urip++ != *aliasp++)
+ return 0;
+ }
+ }
+
+ /* Check last alias path component matched all the way */
+
+ if (aliasp[-1] != '/' && *urip != '\0' && *urip != '/')
+ return 0;
+
+ /* Return number of characters from URI which matched (may be
+ * greater than length of alias, since we may have matched
+ * doubled slashes)
+ */
+
+ return urip - uri;
+}
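+/* For illustration: alias_match("/app/x", "/app/") returns 5 (the length of
+ * the matched prefix), alias_match("//app//x", "/app/") returns 7 because
+ * runs of '/' collapse, and alias_match("/application", "/app/") returns 0.
+ */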
+
+/* Detect if an absoluteURI should be proxied or not. Note that we
+ * have to do this during this phase because later phases are
+ * "short-circuiting"... i.e. translate_names will end when the first
+ * module returns OK. So for example, if the request is something like:
+ *
+ * GET http://othervhost/cgi-bin/printenv HTTP/1.0
+ *
+ * mod_alias will notice the /cgi-bin part and ScriptAlias it and
+ * short-circuit the proxy... just because of the ordering in the
+ * configuration file.
+ */
+static int proxy_detect(request_rec *r)
+{
+ void *sconf = r->server->module_config;
+ proxy_server_conf *conf;
+
+ conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
+
+ /* Ick... msvc (perhaps others) promotes ternary short results to int */
+
+ if (conf->req && r->parsed_uri.scheme) {
+ /* but it might be something vhosted */
+ if (!(r->parsed_uri.hostname
+ && !strcasecmp(r->parsed_uri.scheme, ap_http_method(r))
+ && ap_matches_request_vhost(r, r->parsed_uri.hostname,
+ (apr_port_t)(r->parsed_uri.port_str ? r->parsed_uri.port
+ : ap_default_port(r))))) {
+ r->proxyreq = PROXYREQ_PROXY;
+ r->uri = r->unparsed_uri;
+ r->filename = apr_pstrcat(r->pool, "proxy:", r->uri, NULL);
+ r->handler = "proxy-server";
+ }
+ }
+ /* We need special treatment for CONNECT proxying: it has no scheme part */
+ else if (conf->req && r->method_number == M_CONNECT
+ && r->parsed_uri.hostname
+ && r->parsed_uri.port_str) {
+ r->proxyreq = PROXYREQ_PROXY;
+ r->uri = r->unparsed_uri;
+ r->filename = apr_pstrcat(r->pool, "proxy:", r->uri, NULL);
+ r->handler = "proxy-server";
+ }
+ return DECLINED;
+}
+
+static int proxy_trans(request_rec *r)
+{
+ void *sconf = r->server->module_config;
+ proxy_server_conf *conf =
+ (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
+ int i, len;
+ struct proxy_alias *ent = (struct proxy_alias *) conf->aliases->elts;
+
+ if (r->proxyreq) {
+ /* someone has already set up the proxy, it was possibly ourselves
+ * in proxy_detect
+ */
+ return OK;
+ }
+
+ /* XXX: since r->uri has been manipulated already we're not really
+ * compliant with RFC1945 at this point. But this probably isn't
+ * an issue because this is a hybrid proxy/origin server.
+ */
+
+ for (i = 0; i < conf->aliases->nelts; i++) {
+ len = alias_match(r->uri, ent[i].fake);
+
+ if (len > 0) {
+ if ((ent[i].real[0] == '!' ) && ( ent[i].real[1] == 0 )) {
+ return DECLINED;
+ }
+
+ r->filename = apr_pstrcat(r->pool, "proxy:", ent[i].real,
+ (r->uri + len ), NULL);
+ r->handler = "proxy-server";
+ r->proxyreq = PROXYREQ_REVERSE;
+ return OK;
+ }
+ }
+ return DECLINED;
+}
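+/* For illustration (the backend name is hypothetical): with
+ * "ProxyPass /mirror/ http://backend.example.com/", a request for
+ * /mirror/foo leaves proxy_trans with
+ * r->filename = "proxy:http://backend.example.com/foo",
+ * r->proxyreq = PROXYREQ_REVERSE and the "proxy-server" handler;
+ * an alias whose real target is "!" makes proxy_trans decline instead.
+ */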
+
+static int proxy_walk(request_rec *r)
+{
+ proxy_server_conf *sconf = ap_get_module_config(r->server->module_config,
+ &proxy_module);
+ ap_conf_vector_t *per_dir_defaults = r->server->lookup_defaults;
+ ap_conf_vector_t **sec_proxy = (ap_conf_vector_t **) sconf->sec_proxy->elts;
+ ap_conf_vector_t *entry_config;
+ proxy_dir_conf *entry_proxy;
+ int num_sec = sconf->sec_proxy->nelts;
+ /* XXX: shouldn't we use URI here? Canonicalize it first?
+ * Pass over "proxy:" prefix
+ */
+ const char *proxyname = r->filename + 6;
+ int j;
+
+ for (j = 0; j < num_sec; ++j)
+ {
+ entry_config = sec_proxy[j];
+ entry_proxy = ap_get_module_config(entry_config, &proxy_module);
+
+ /* XXX: What about case insensitive matching ???
+ * Compare regex, fnmatch or string as appropriate
+ * If the entry doesn't relate, then continue
+ */
+ if (entry_proxy->r
+ ? ap_regexec(entry_proxy->r, proxyname, 0, NULL, 0)
+ : (entry_proxy->p_is_fnmatch
+ ? apr_fnmatch(entry_proxy->p, proxyname, 0)
+ : strncmp(proxyname, entry_proxy->p,
+ strlen(entry_proxy->p)))) {
+ continue;
+ }
+ per_dir_defaults = ap_merge_per_dir_configs(r->pool, per_dir_defaults,
+ entry_config);
+ }
+
+ r->per_dir_config = per_dir_defaults;
+
+ return OK;
+}
+
+static int proxy_map_location(request_rec *r)
+{
+ int access_status;
+
+ if (!r->proxyreq || !r->filename || strncmp(r->filename, "proxy:", 6) != 0)
+ return DECLINED;
+
+ /* Don't let the core or mod_http map_to_storage hooks handle this;
+ * we don't need directory/file_walk, and we want to TRACE on our own.
+ */
+ if ((access_status = proxy_walk(r))) {
+ ap_die(access_status, r);
+ return access_status;
+ }
+
+ return OK;
+}
+
+/* -------------------------------------------------------------- */
+/* Fixup the filename */
+
+/*
+ * Canonicalise the URL
+ */
+static int proxy_fixup(request_rec *r)
+{
+ char *url, *p;
+ int access_status;
+
+ if (!r->proxyreq || !r->filename || strncmp(r->filename, "proxy:", 6) != 0)
+ return DECLINED;
+
+ /* XXX: Shouldn't we try this before we run the proxy_walk? */
+ url = &r->filename[6];
+
+ /* canonicalise each specific scheme */
+ if ((access_status = proxy_run_canon_handler(r, url))) {
+ return access_status;
+ }
+
+ p = strchr(url, ':');
+ if (p == NULL || p == url)
+ return HTTP_BAD_REQUEST;
+
+ return OK; /* otherwise; we've done the best we can */
+}
+
+/* Send a redirection if the request contains a hostname which is not */
+/* fully qualified, i.e. doesn't have a domain name appended. Some proxy */
+/* servers like Netscape's allow this and access hosts from the local */
+/* domain in this case. I think it is better to redirect to a FQDN, since */
+/* these will later be found in the bookmarks files. */
+/* The "ProxyDomain" directive determines what domain will be appended */
+static int proxy_needsdomain(request_rec *r, const char *url, const char *domain)
+{
+ char *nuri;
+ const char *ref;
+
+ /* We only want to worry about GETs */
+ if (!r->proxyreq || r->method_number != M_GET || !r->parsed_uri.hostname)
+ return DECLINED;
+
+ /* If host does contain a dot already, or it is "localhost", decline */
+ if (strchr(r->parsed_uri.hostname, '.') != NULL
+ || strcasecmp(r->parsed_uri.hostname, "localhost") == 0)
+ return DECLINED; /* host name has a dot already */
+
+ ref = apr_table_get(r->headers_in, "Referer");
+
+ /* Reassemble the request, but insert the domain after the host name */
+ /* Note that the domain name always starts with a dot */
+ r->parsed_uri.hostname = apr_pstrcat(r->pool, r->parsed_uri.hostname,
+ domain, NULL);
+ nuri = apr_uri_unparse(r->pool,
+ &r->parsed_uri,
+ APR_URI_UNP_REVEALPASSWORD);
+
+ apr_table_set(r->headers_out, "Location", nuri);
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
+ "Domain missing: %s sent to %s%s%s", r->uri,
+ apr_uri_unparse(r->pool, &r->parsed_uri,
+ APR_URI_UNP_OMITUSERINFO),
+ ref ? " from " : "", ref ? ref : "");
+
+ return HTTP_MOVED_PERMANENTLY;
+}
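+/* For illustration (names are hypothetical): with "ProxyDomain .example.com"
+ * configured, a proxied GET for http://intranet/ is answered with a 301
+ * redirect to http://intranet.example.com/, while hosts that already contain
+ * a dot (or "localhost") pass through untouched.
+ */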
+
+/* -------------------------------------------------------------- */
+/* Invoke handler */
+
+static int proxy_handler(request_rec *r)
+{
+ char *url, *scheme, *p;
+ const char *p2;
+ void *sconf = r->server->module_config;
+ proxy_server_conf *conf = (proxy_server_conf *)
+ ap_get_module_config(sconf, &proxy_module);
+ apr_array_header_t *proxies = conf->proxies;
+ struct proxy_remote *ents = (struct proxy_remote *) proxies->elts;
+ int i, rc, access_status;
+ int direct_connect = 0;
+ const char *str;
+ long maxfwd;
+
+ /* is this for us? */
+ if (!r->proxyreq || !r->filename || strncmp(r->filename, "proxy:", 6) != 0)
+ return DECLINED;
+
+ /* handle max-forwards / OPTIONS / TRACE */
+ if ((str = apr_table_get(r->headers_in, "Max-Forwards"))) {
+ maxfwd = strtol(str, NULL, 10);
+ if (maxfwd < 1) {
+ switch (r->method_number) {
+ case M_TRACE: {
+ int access_status;
+ r->proxyreq = PROXYREQ_NONE;
+ if ((access_status = ap_send_http_trace(r)))
+ ap_die(access_status, r);
+ else
+ ap_finalize_request_protocol(r);
+ return OK;
+ }
+ case M_OPTIONS: {
+ int access_status;
+ r->proxyreq = PROXYREQ_NONE;
+ if ((access_status = ap_send_http_options(r)))
+ ap_die(access_status, r);
+ else
+ ap_finalize_request_protocol(r);
+ return OK;
+ }
+ default: {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Max-Forwards has reached zero - proxy loop?");
+ }
+ }
+ }
+ maxfwd = (maxfwd > 0) ? maxfwd - 1 : 0;
+ }
+ else {
+ /* set configured max-forwards */
+ maxfwd = conf->maxfwd;
+ }
+ apr_table_set(r->headers_in, "Max-Forwards",
+ apr_psprintf(r->pool, "%ld", (maxfwd > 0) ? maxfwd : 0));
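+ /* For illustration: a request arriving with "Max-Forwards: 10" is
+ * forwarded with "Max-Forwards: 9"; once the value reaches 0, TRACE and
+ * OPTIONS are answered locally and any other method gets a 502 response.
+ */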
+
+ if (r->method_number == M_TRACE) {
+ core_server_config *coreconf = (core_server_config *)
+ ap_get_module_config(sconf, &core_module);
+
+ if (coreconf->trace_enable == AP_TRACE_DISABLE)
+ {
+ /* Allow "error-notes" string to be printed by ap_send_error_response()
+ * Note: this goes nowhere; canned error responses need an overhaul.
+ */
+ apr_table_setn(r->notes, "error-notes",
+ "TRACE forbidden by server configuration");
+ apr_table_setn(r->notes, "verbose-error-to", "*");
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "proxy: TRACE forbidden by server configuration");
+ return HTTP_FORBIDDEN;
+ }
+
+ /* Can't test ap_should_client_block, we aren't ready to send
+ * the client a 100 Continue response till the connection has
+ * been established
+ */
+ if (coreconf->trace_enable != AP_TRACE_EXTENDED
+ && (r->read_length || r->read_chunked || r->remaining))
+ {
+ /* Allow "error-notes" string to be printed by ap_send_error_response()
+ * Note: this goes nowhere; canned error responses need an overhaul.
+ */
+ apr_table_setn(r->notes, "error-notes",
+ "TRACE with request body is not allowed");
+ apr_table_setn(r->notes, "verbose-error-to", "*");
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "proxy: TRACE with request body is not allowed");
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+ }
+
+ url = r->filename + 6;
+ p = strchr(url, ':');
+ if (p == NULL)
+ return HTTP_BAD_REQUEST;
+
+ /* If the host doesn't have a domain name, add one and redirect. */
+ if (conf->domain != NULL) {
+ rc = proxy_needsdomain(r, url, conf->domain);
+ if (ap_is_HTTP_REDIRECT(rc))
+ return HTTP_MOVED_PERMANENTLY;
+ }
+
+ *p = '\0';
+ scheme = apr_pstrdup(r->pool, url);
+ *p = ':';
+
+ /* Check URI's destination host against NoProxy hosts */
+ /* Bypass ProxyRemote server lookup if configured as NoProxy */
+ /* we only know how to handle communication to a proxy via http */
+ /*if (strcasecmp(scheme, "http") == 0) */
+ {
+ int ii;
+ struct dirconn_entry *list = (struct dirconn_entry *) conf->dirconn->elts;
+
+ for (direct_connect = ii = 0; ii < conf->dirconn->nelts && !direct_connect; ii++) {
+ direct_connect = list[ii].matcher(&list[ii], r);
+ }
+#if DEBUGGING
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ (direct_connect) ? "NoProxy for %s" : "UseProxy for %s",
+ r->uri);
+#endif
+ }
+
+ /* firstly, try a proxy, unless a NoProxy directive is active */
+ if (!direct_connect) {
+ for (i = 0; i < proxies->nelts; i++) {
+ p2 = ap_strchr_c(ents[i].scheme, ':'); /* is it a partial URL? */
+ if (strcmp(ents[i].scheme, "*") == 0 ||
+ (ents[i].use_regex &&
+ ap_regexec(ents[i].regexp, url, 0,NULL, 0) == 0) ||
+ (p2 == NULL && strcasecmp(scheme, ents[i].scheme) == 0) ||
+ (p2 != NULL &&
+ strncasecmp(url, ents[i].scheme, strlen(ents[i].scheme)) == 0)) {
+
+ /* handle the scheme */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "Trying to run scheme_handler against proxy");
+ access_status = proxy_run_scheme_handler(r, conf, url, ents[i].hostname, ents[i].port);
+
+ /* an error or success */
+ if (access_status != DECLINED && access_status != HTTP_BAD_GATEWAY) {
+ return access_status;
+ }
+ /* we failed to talk to the upstream proxy */
+ }
+ }
+ }
+
+ /* otherwise, try it direct */
+ /* N.B. what if we're behind a firewall, where we must use a proxy or
+ * give up??
+ */
+
+ /* handle the scheme */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "Trying to run scheme_handler");
+ access_status = proxy_run_scheme_handler(r, conf, url, NULL, 0);
+ if (DECLINED == access_status) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, r->server,
+ "proxy: No protocol handler was valid for the URL %s. "
+ "If you are using a DSO version of mod_proxy, make sure "
+ "the proxy submodules are included in the configuration "
+ "using LoadModule.", r->uri);
+ return HTTP_FORBIDDEN;
+ }
+ return access_status;
+}
+
+/* -------------------------------------------------------------- */
+/* Setup configurable data */
+
+static void * create_proxy_config(apr_pool_t *p, server_rec *s)
+{
+ proxy_server_conf *ps = apr_pcalloc(p, sizeof(proxy_server_conf));
+
+ ps->sec_proxy = apr_array_make(p, 10, sizeof(ap_conf_vector_t *));
+ ps->proxies = apr_array_make(p, 10, sizeof(struct proxy_remote));
+ ps->aliases = apr_array_make(p, 10, sizeof(struct proxy_alias));
+ ps->raliases = apr_array_make(p, 10, sizeof(struct proxy_alias));
+ ps->noproxies = apr_array_make(p, 10, sizeof(struct noproxy_entry));
+ ps->dirconn = apr_array_make(p, 10, sizeof(struct dirconn_entry));
+ ps->allowed_connect_ports = apr_array_make(p, 10, sizeof(int));
+ ps->domain = NULL;
+ ps->viaopt = via_off; /* initially backward compatible with 1.3.1 */
+ ps->viaopt_set = 0; /* 0 means default */
+ ps->req = 0;
+ ps->req_set = 0;
+ ps->recv_buffer_size = 0; /* this default was left unset for some reason */
+ ps->recv_buffer_size_set = 0;
+ ps->io_buffer_size = AP_IOBUFSIZE;
+ ps->io_buffer_size_set = 0;
+ ps->maxfwd = DEFAULT_MAX_FORWARDS;
+ ps->maxfwd_set = 0;
+ ps->error_override = 0;
+ ps->error_override_set = 0;
+ ps->preserve_host_set = 0;
+ ps->preserve_host = 0;
+ ps->timeout = 0;
+ ps->timeout_set = 0;
+ ps->badopt = bad_error;
+ ps->badopt_set = 0;
+ return ps;
+}
+
+static void * merge_proxy_config(apr_pool_t *p, void *basev, void *overridesv)
+{
+ proxy_server_conf *ps = apr_pcalloc(p, sizeof(proxy_server_conf));
+ proxy_server_conf *base = (proxy_server_conf *) basev;
+ proxy_server_conf *overrides = (proxy_server_conf *) overridesv;
+
+ ps->proxies = apr_array_append(p, base->proxies, overrides->proxies);
+ ps->sec_proxy = apr_array_append(p, base->sec_proxy, overrides->sec_proxy);
+ ps->aliases = apr_array_append(p, base->aliases, overrides->aliases);
+ ps->raliases = apr_array_append(p, base->raliases, overrides->raliases);
+ ps->noproxies = apr_array_append(p, base->noproxies, overrides->noproxies);
+ ps->dirconn = apr_array_append(p, base->dirconn, overrides->dirconn);
+ ps->allowed_connect_ports = apr_array_append(p, base->allowed_connect_ports, overrides->allowed_connect_ports);
+
+ ps->domain = (overrides->domain == NULL) ? base->domain : overrides->domain;
+ ps->viaopt = (overrides->viaopt_set == 0) ? base->viaopt : overrides->viaopt;
+ ps->viaopt_set = overrides->viaopt_set || base->viaopt_set;
+ ps->req = (overrides->req_set == 0) ? base->req : overrides->req;
+ ps->req_set = overrides->req_set || base->req_set;
+ ps->recv_buffer_size = (overrides->recv_buffer_size_set == 0) ? base->recv_buffer_size : overrides->recv_buffer_size;
+ ps->recv_buffer_size_set = overrides->recv_buffer_size_set || base->recv_buffer_size_set;
+ ps->io_buffer_size = (overrides->io_buffer_size_set == 0) ? base->io_buffer_size : overrides->io_buffer_size;
+ ps->io_buffer_size_set = overrides->io_buffer_size_set || base->io_buffer_size_set;
+ ps->maxfwd = (overrides->maxfwd_set == 0) ? base->maxfwd : overrides->maxfwd;
+ ps->maxfwd_set = overrides->maxfwd_set || base->maxfwd_set;
+ ps->error_override = (overrides->error_override_set == 0) ? base->error_override : overrides->error_override;
+ ps->error_override_set = overrides->error_override_set || base->error_override_set;
+ ps->preserve_host = (overrides->preserve_host_set == 0) ? base->preserve_host : overrides->preserve_host;
+ ps->preserve_host_set = overrides->preserve_host_set || base->preserve_host_set;
+ ps->timeout= (overrides->timeout_set == 0) ? base->timeout : overrides->timeout;
+ ps->timeout_set = overrides->timeout_set || base->timeout_set;
+ ps->badopt = (overrides->badopt_set == 0) ? base->badopt : overrides->badopt;
+ ps->badopt_set = overrides->badopt_set || base->badopt_set;
+
+ return ps;
+}
+
+static void *create_proxy_dir_config(apr_pool_t *p, char *dummy)
+{
+ proxy_dir_conf *new =
+ (proxy_dir_conf *) apr_pcalloc(p, sizeof(proxy_dir_conf));
+
+ /* Filled in by proxysection, when applicable */
+
+ return (void *) new;
+}
+
+static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv)
+{
+ proxy_dir_conf *new = (proxy_dir_conf *) apr_pcalloc(p, sizeof(proxy_dir_conf));
+ proxy_dir_conf *add = (proxy_dir_conf *) addv;
+ proxy_dir_conf *base = (proxy_dir_conf *) basev;
+
+ new->p = add->p;
+ new->p_is_fnmatch = add->p_is_fnmatch;
+ new->r = add->r;
+ new->ftp_directory_charset = add->ftp_directory_charset ?
+ add->ftp_directory_charset :
+ base->ftp_directory_charset;
+ return new;
+}
+
+
+static const char *
+ add_proxy(cmd_parms *cmd, void *dummy, const char *f1, const char *r1, int regex)
+{
+ server_rec *s = cmd->server;
+ proxy_server_conf *conf =
+ (proxy_server_conf *) ap_get_module_config(s->module_config, &proxy_module);
+ struct proxy_remote *new;
+ char *p, *q;
+ char *r, *f, *scheme;
+ regex_t *reg = NULL;
+ int port;
+
+ r = apr_pstrdup(cmd->pool, r1);
+ scheme = apr_pstrdup(cmd->pool, r1);
+ f = apr_pstrdup(cmd->pool, f1);
+ p = strchr(r, ':');
+ if (p == NULL || p[1] != '/' || p[2] != '/' || p[3] == '\0') {
+ if (regex)
+ return "ProxyRemoteMatch: Bad syntax for a remote proxy server";
+ else
+ return "ProxyRemote: Bad syntax for a remote proxy server";
+ }
+ else {
+ scheme[p-r] = 0;
+ }
+ q = strchr(p + 3, ':');
+ if (q != NULL) {
+ if (sscanf(q + 1, "%u", &port) != 1 || port > 65535) {
+ if (regex)
+ return "ProxyRemoteMatch: Bad syntax for a remote proxy server (bad port number)";
+ else
+ return "ProxyRemote: Bad syntax for a remote proxy server (bad port number)";
+ }
+ *q = '\0';
+ }
+ else
+ port = -1;
+ *p = '\0';
+ if (regex) {
+ reg = ap_pregcomp(cmd->pool, f, REG_EXTENDED);
+ if (!reg)
+ return "Regular expression for ProxyRemoteMatch could not be compiled.";
+ }
+ else
+ if (strchr(f, ':') == NULL)
+ ap_str_tolower(f); /* lowercase scheme */
+ ap_str_tolower(p + 3); /* lowercase hostname */
+
+ if (port == -1) {
+ port = apr_uri_port_of_scheme(scheme);
+ }
+
+ new = apr_array_push(conf->proxies);
+ new->scheme = f;
+ new->protocol = r;
+ new->hostname = p + 3;
+ new->port = port;
+ new->regexp = reg;
+ new->use_regex = regex;
+ return NULL;
+}
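+/* For illustration (the proxy host is hypothetical):
+ * "ProxyRemote http http://upstream.example.com:8080" registers a remote
+ * proxy for http requests with hostname "upstream.example.com" and port
+ * 8080; when the port is omitted, apr_uri_port_of_scheme() supplies the
+ * scheme's default (80 for http).
+ */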
+
+static const char *
+ add_proxy_noregex(cmd_parms *cmd, void *dummy, const char *f1, const char *r1)
+{
+ return add_proxy(cmd, dummy, f1, r1, 0);
+}
+
+static const char *
+ add_proxy_regex(cmd_parms *cmd, void *dummy, const char *f1, const char *r1)
+{
+ return add_proxy(cmd, dummy, f1, r1, 1);
+}
+
+static const char *
+ add_pass(cmd_parms *cmd, void *dummy, const char *f, const char *r)
+{
+ server_rec *s = cmd->server;
+ proxy_server_conf *conf =
+ (proxy_server_conf *) ap_get_module_config(s->module_config, &proxy_module);
+ struct proxy_alias *new;
+ if (r!=NULL && cmd->path == NULL ) {
+ new = apr_array_push(conf->aliases);
+ new->fake = f;
+ new->real = r;
+ } else if (r==NULL && cmd->path != NULL) {
+ new = apr_array_push(conf->aliases);
+ new->fake = cmd->path;
+ new->real = f;
+ } else {
+ if ( r== NULL)
+ return "ProxyPass needs a path when not defined in a location";
+ else
+ return "ProxyPass can not have a path when defined in a location";
+ }
+
+ return NULL;
+}
+
+static const char *
+ add_pass_reverse(cmd_parms *cmd, void *dummy, const char *f, const char *r)
+{
+ server_rec *s = cmd->server;
+ proxy_server_conf *conf;
+ struct proxy_alias *new;
+
+ conf = (proxy_server_conf *)ap_get_module_config(s->module_config,
+ &proxy_module);
+ if (r!=NULL && cmd->path == NULL ) {
+ new = apr_array_push(conf->raliases);
+ new->fake = f;
+ new->real = r;
+ } else if (r==NULL && cmd->path != NULL) {
+ new = apr_array_push(conf->raliases);
+ new->fake = cmd->path;
+ new->real = f;
+ } else {
+ if ( r == NULL)
+ return "ProxyPassReverse needs a path when not defined in a location";
+ else
+ return "ProxyPassReverse can not have a path when defined in a location";
+ }
+
+ return NULL;
+}
+
+static const char *
+ set_proxy_exclude(cmd_parms *parms, void *dummy, const char *arg)
+{
+ server_rec *s = parms->server;
+ proxy_server_conf *conf =
+ ap_get_module_config(s->module_config, &proxy_module);
+ struct noproxy_entry *new;
+ struct noproxy_entry *list = (struct noproxy_entry *) conf->noproxies->elts;
+ struct apr_sockaddr_t *addr;
+ int found = 0;
+ int i;
+
+ /* Don't duplicate entries */
+ for (i = 0; i < conf->noproxies->nelts; i++) {
+ if (apr_strnatcasecmp(arg, list[i].name) == 0) { /* ignore case for host names */
+ found = 1;
+ }
+ }
+
+ if (!found) {
+ new = apr_array_push(conf->noproxies);
+ new->name = arg;
+ if (APR_SUCCESS == apr_sockaddr_info_get(&addr, new->name, APR_UNSPEC, 0, 0, parms->pool)) {
+ new->addr = addr;
+ }
+ else {
+ new->addr = NULL;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Set the ports CONNECT can use
+ */
+static const char *
+ set_allowed_ports(cmd_parms *parms, void *dummy, const char *arg)
+{
+ server_rec *s = parms->server;
+ proxy_server_conf *conf =
+ ap_get_module_config(s->module_config, &proxy_module);
+ int *New;
+
+ if (!apr_isdigit(arg[0]))
+ return "AllowCONNECT: port number must be numeric";
+
+ New = apr_array_push(conf->allowed_connect_ports);
+ *New = atoi(arg);
+ return NULL;
+}
+
+/* Similar to set_proxy_exclude(), but defining directly connected hosts,
+ * which should never be accessed via the configured ProxyRemote servers
+ */
+static const char *
+ set_proxy_dirconn(cmd_parms *parms, void *dummy, const char *arg)
+{
+ server_rec *s = parms->server;
+ proxy_server_conf *conf =
+ ap_get_module_config(s->module_config, &proxy_module);
+ struct dirconn_entry *New;
+ struct dirconn_entry *list = (struct dirconn_entry *) conf->dirconn->elts;
+ int found = 0;
+ int i;
+
+ /* Don't duplicate entries */
+ for (i = 0; i < conf->dirconn->nelts; i++) {
+ if (strcasecmp(arg, list[i].name) == 0)
+ found = 1;
+ }
+
+ if (!found) {
+ New = apr_array_push(conf->dirconn);
+ New->name = apr_pstrdup(parms->pool, arg);
+ New->hostaddr = NULL;
+
+ if (ap_proxy_is_ipaddr(New, parms->pool)) {
+#if DEBUGGING
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "Parsed addr %s", inet_ntoa(New->addr));
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "Parsed mask %s", inet_ntoa(New->mask));
+#endif
+ }
+ else if (ap_proxy_is_domainname(New, parms->pool)) {
+ ap_str_tolower(New->name);
+#if DEBUGGING
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "Parsed domain %s", New->name);
+#endif
+ }
+ else if (ap_proxy_is_hostname(New, parms->pool)) {
+ ap_str_tolower(New->name);
+#if DEBUGGING
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "Parsed host %s", New->name);
+#endif
+ }
+ else {
+ ap_proxy_is_word(New, parms->pool);
+#if DEBUGGING
+ fprintf(stderr, "Parsed word %s\n", New->name);
+#endif
+ }
+ }
+ return NULL;
+}
+
+static const char *
+ set_proxy_domain(cmd_parms *parms, void *dummy, const char *arg)
+{
+ proxy_server_conf *psf =
+ ap_get_module_config(parms->server->module_config, &proxy_module);
+
+ if (arg[0] != '.')
+ return "ProxyDomain: domain name must start with a dot.";
+
+ psf->domain = arg;
+ return NULL;
+}
+
+static const char *
+ set_proxy_req(cmd_parms *parms, void *dummy, int flag)
+{
+ proxy_server_conf *psf =
+ ap_get_module_config(parms->server->module_config, &proxy_module);
+
+ psf->req = flag;
+ psf->req_set = 1;
+ return NULL;
+}
+static const char *
+ set_proxy_error_override(cmd_parms *parms, void *dummy, int flag)
+{
+ proxy_server_conf *psf =
+ ap_get_module_config(parms->server->module_config, &proxy_module);
+
+ psf->error_override = flag;
+ psf->error_override_set = 1;
+ return NULL;
+}
+static const char *
+ set_preserve_host(cmd_parms *parms, void *dummy, int flag)
+{
+ proxy_server_conf *psf =
+ ap_get_module_config(parms->server->module_config, &proxy_module);
+
+ psf->preserve_host = flag;
+ psf->preserve_host_set = 1;
+ return NULL;
+}
+
+static const char *
+ set_recv_buffer_size(cmd_parms *parms, void *dummy, const char *arg)
+{
+ proxy_server_conf *psf =
+ ap_get_module_config(parms->server->module_config, &proxy_module);
+ int s = atoi(arg);
+ if (s < 512 && s != 0) {
+ return "ProxyReceiveBufferSize must be >= 512 bytes, or 0 for system default.";
+ }
+
+ psf->recv_buffer_size = s;
+ psf->recv_buffer_size_set = 1;
+ return NULL;
+}
+
+static const char *
+ set_io_buffer_size(cmd_parms *parms, void *dummy, const char *arg)
+{
+ proxy_server_conf *psf =
+ ap_get_module_config(parms->server->module_config, &proxy_module);
+ long s = atol(arg);
+
+ psf->io_buffer_size = ((s > AP_IOBUFSIZE) ? s : AP_IOBUFSIZE);
+ psf->io_buffer_size_set = 1;
+ return NULL;
+}
+
+static const char *
+ set_max_forwards(cmd_parms *parms, void *dummy, const char *arg)
+{
+ proxy_server_conf *psf =
+ ap_get_module_config(parms->server->module_config, &proxy_module);
+ long s = atol(arg);
+ if (s < 0) {
+ return "ProxyMaxForwards must be greater than or equal to zero.";
+ }
+
+ psf->maxfwd = s;
+ psf->maxfwd_set = 1;
+ return NULL;
+}
+static const char*
+ set_proxy_timeout(cmd_parms *parms, void *dummy, const char *arg)
+{
+ proxy_server_conf *psf =
+ ap_get_module_config(parms->server->module_config, &proxy_module);
+ int timeout;
+
+ timeout=atoi(arg);
+ if (timeout<1) {
+ return "Proxy Timeout must be at least 1 second.";
+ }
+ psf->timeout_set=1;
+ psf->timeout=apr_time_from_sec(timeout);
+
+ return NULL;
+}
+
+static const char*
+ set_via_opt(cmd_parms *parms, void *dummy, const char *arg)
+{
+ proxy_server_conf *psf =
+ ap_get_module_config(parms->server->module_config, &proxy_module);
+
+ if (strcasecmp(arg, "Off") == 0)
+ psf->viaopt = via_off;
+ else if (strcasecmp(arg, "On") == 0)
+ psf->viaopt = via_on;
+ else if (strcasecmp(arg, "Block") == 0)
+ psf->viaopt = via_block;
+ else if (strcasecmp(arg, "Full") == 0)
+ psf->viaopt = via_full;
+ else {
+ return "ProxyVia must be one of: "
+ "off | on | full | block";
+ }
+
+ psf->viaopt_set = 1;
+ return NULL;
+}
+
+static const char*
+ set_bad_opt(cmd_parms *parms, void *dummy, const char *arg)
+{
+ proxy_server_conf *psf =
+ ap_get_module_config(parms->server->module_config, &proxy_module);
+
+ if (strcasecmp(arg, "IsError") == 0)
+ psf->badopt = bad_error;
+ else if (strcasecmp(arg, "Ignore") == 0)
+ psf->badopt = bad_ignore;
+ else if (strcasecmp(arg, "StartBody") == 0)
+ psf->badopt = bad_body;
+ else {
+ return "ProxyBadHeader must be one of: "
+ "IsError | Ignore | StartBody";
+ }
+
+ psf->badopt_set = 1;
+ return NULL;
+}
+
+static const char* set_ftp_directory_charset(cmd_parms *cmd, void *dconf,
+ const char *arg)
+{
+ proxy_dir_conf *conf = dconf;
+
+ conf->ftp_directory_charset = arg;
+ return NULL;
+}
+
+static void ap_add_per_proxy_conf(server_rec *s, ap_conf_vector_t *dir_config)
+{
+ proxy_server_conf *sconf = ap_get_module_config(s->module_config,
+ &proxy_module);
+ void **new_space = (void **)apr_array_push(sconf->sec_proxy);
+
+ *new_space = dir_config;
+}
+
+static const char *proxysection(cmd_parms *cmd, void *mconfig, const char *arg)
+{
+ const char *errmsg;
+ const char *endp = ap_strrchr_c(arg, '>');
+ int old_overrides = cmd->override;
+ char *old_path = cmd->path;
+ proxy_dir_conf *conf;
+ ap_conf_vector_t *new_dir_conf = ap_create_per_dir_config(cmd->pool);
+ regex_t *r = NULL;
+ const command_rec *thiscmd = cmd->cmd;
+
+ const char *err = ap_check_cmd_context(cmd,
+ NOT_IN_DIR_LOC_FILE|NOT_IN_LIMIT);
+ if (err != NULL) {
+ return err;
+ }
+
+ if (endp == NULL) {
+ return apr_pstrcat(cmd->pool, cmd->cmd->name,
+ "> directive missing closing '>'", NULL);
+ }
+
+ arg=apr_pstrndup(cmd->pool, arg, endp-arg);
+
+ if (!arg) {
+ if (thiscmd->cmd_data)
+ return "<ProxyMatch > block must specify a path";
+ else
+ return "<Proxy > block must specify a path";
+ }
+
+ cmd->path = ap_getword_conf(cmd->pool, &arg);
+ cmd->override = OR_ALL|ACCESS_CONF;
+
+ if (!strncasecmp(cmd->path, "proxy:", 6))
+ cmd->path += 6;
+
+ /* XXX Ignore case? What if we proxy a case-insensitive server?!?
+ * While we are at it, shouldn't we also canonicalize the entire
+ * scheme? See proxy_fixup()
+ */
+ if (thiscmd->cmd_data) { /* <ProxyMatch> */
+ r = ap_pregcomp(cmd->pool, cmd->path, REG_EXTENDED);
+ if (!r) {
+ return "Regex could not be compiled";
+ }
+ }
+ else if (!strcmp(cmd->path, "~")) {
+ cmd->path = ap_getword_conf(cmd->pool, &arg);
+ if (!cmd->path)
+ return "<Proxy ~ > block must specify a path";
+ if (strncasecmp(cmd->path, "proxy:", 6))
+ cmd->path += 6;
+ r = ap_pregcomp(cmd->pool, cmd->path, REG_EXTENDED);
+ if (!r) {
+ return "Regex could not be compiled";
+ }
+ }
+
+ /* initialize our config and fetch it */
+ conf = ap_set_config_vectors(cmd->server, new_dir_conf, cmd->path,
+ &proxy_module, cmd->pool);
+
+ errmsg = ap_walk_config(cmd->directive->first_child, cmd, new_dir_conf);
+ if (errmsg != NULL)
+ return errmsg;
+
+ conf->r = r;
+ conf->p = cmd->path;
+ conf->p_is_fnmatch = apr_fnmatch_test(conf->p);
+
+ ap_add_per_proxy_conf(cmd->server, new_dir_conf);
+
+ if (*arg != '\0') {
+ return apr_pstrcat(cmd->pool, "Multiple ", thiscmd->name,
+ "> arguments not (yet) supported.", NULL);
+ }
+
+ cmd->path = old_path;
+ cmd->override = old_overrides;
+
+ return NULL;
+}
+
+static const command_rec proxy_cmds[] =
+{
+ AP_INIT_RAW_ARGS("<Proxy", proxysection, NULL, RSRC_CONF,
+ "Container for directives affecting resources located in the proxied "
+ "location"),
+ AP_INIT_RAW_ARGS("<ProxyMatch", proxysection, (void*)1, RSRC_CONF,
+ "Container for directives affecting resources located in the proxied "
+ "location, in regular expression syntax"),
+ AP_INIT_FLAG("ProxyRequests", set_proxy_req, NULL, RSRC_CONF,
+ "on if the true proxy requests should be accepted"),
+ AP_INIT_TAKE2("ProxyRemote", add_proxy_noregex, NULL, RSRC_CONF,
+ "a scheme, partial URL or '*' and a proxy server"),
+ AP_INIT_TAKE2("ProxyRemoteMatch", add_proxy_regex, NULL, RSRC_CONF,
+ "a regex pattern and a proxy server"),
+ AP_INIT_TAKE12("ProxyPass", add_pass, NULL, RSRC_CONF|ACCESS_CONF,
+ "a virtual path and a URL"),
+ AP_INIT_TAKE12("ProxyPassReverse", add_pass_reverse, NULL, RSRC_CONF|ACCESS_CONF,
+ "a virtual path and a URL for reverse proxy behaviour"),
+ AP_INIT_ITERATE("ProxyBlock", set_proxy_exclude, NULL, RSRC_CONF,
+ "A list of names, hosts or domains to which the proxy will not connect"),
+ AP_INIT_TAKE1("ProxyReceiveBufferSize", set_recv_buffer_size, NULL, RSRC_CONF,
+ "Receive buffer size for outgoing HTTP and FTP connections in bytes"),
+ AP_INIT_TAKE1("ProxyIOBufferSize", set_io_buffer_size, NULL, RSRC_CONF,
+ "IO buffer size for outgoing HTTP and FTP connections in bytes"),
+ AP_INIT_TAKE1("ProxyMaxForwards", set_max_forwards, NULL, RSRC_CONF,
+ "The maximum number of proxies a request may be forwarded through."),
+ AP_INIT_ITERATE("NoProxy", set_proxy_dirconn, NULL, RSRC_CONF,
+ "A list of domains, hosts, or subnets to which the proxy will connect directly"),
+ AP_INIT_TAKE1("ProxyDomain", set_proxy_domain, NULL, RSRC_CONF,
+ "The default intranet domain name (in absence of a domain in the URL)"),
+ AP_INIT_ITERATE("AllowCONNECT", set_allowed_ports, NULL, RSRC_CONF,
+ "A list of ports which CONNECT may connect to"),
+ AP_INIT_TAKE1("ProxyVia", set_via_opt, NULL, RSRC_CONF,
+ "Configure Via: proxy header header to one of: on | off | block | full"),
+ AP_INIT_FLAG("ProxyErrorOverride", set_proxy_error_override, NULL, RSRC_CONF,
+ "use our error handling pages instead of the servers' we are proxying"),
+ AP_INIT_FLAG("ProxyPreserveHost", set_preserve_host, NULL, RSRC_CONF,
+ "on if we should preserve host header while proxying"),
+ AP_INIT_TAKE1("ProxyTimeout", set_proxy_timeout, NULL, RSRC_CONF,
+ "Set the timeout (in seconds) for a proxied connection. "
+ "This overrides the server timeout"),
+ AP_INIT_TAKE1("ProxyBadHeader", set_bad_opt, NULL, RSRC_CONF,
+ "How to handle bad header line in response: IsError | Ignore | StartBody"),
+ AP_INIT_TAKE1("ProxyFtpDirCharset", set_ftp_directory_charset, NULL,
+ RSRC_CONF|ACCESS_CONF, "Define the character set for proxied FTP listings"),
+ {NULL}
+};
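+
+/*
+ * Illustrative httpd.conf sketch (hostnames and paths are hypothetical)
+ * showing how several of the directives registered above combine into a
+ * simple reverse-proxy setup:
+ *
+ *   ProxyRequests Off
+ *   ProxyPass        /app http://backend.example.com/app
+ *   ProxyPassReverse /app http://backend.example.com/app
+ *   ProxyTimeout     300
+ *   ProxyVia         On
+ *
+ * A forward proxy would instead set ProxyRequests On and, for CONNECT
+ * tunnelling, list the permitted ports with AllowCONNECT (e.g. 443 563).
+ */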
+
+APR_DECLARE_OPTIONAL_FN(int, ssl_proxy_enable, (conn_rec *));
+APR_DECLARE_OPTIONAL_FN(int, ssl_engine_disable, (conn_rec *));
+
+static APR_OPTIONAL_FN_TYPE(ssl_proxy_enable) *proxy_ssl_enable = NULL;
+static APR_OPTIONAL_FN_TYPE(ssl_engine_disable) *proxy_ssl_disable = NULL;
+
+PROXY_DECLARE(int) ap_proxy_ssl_enable(conn_rec *c)
+{
+ /*
+ * if c == NULL just check if the optional function was imported
+ * else run the optional function so ssl filters are inserted
+ */
+ if (proxy_ssl_enable) {
+ return c ? proxy_ssl_enable(c) : 1;
+ }
+
+ return 0;
+}
+
+PROXY_DECLARE(int) ap_proxy_ssl_disable(conn_rec *c)
+{
+ if (proxy_ssl_disable) {
+ return proxy_ssl_disable(c);
+ }
+
+ return 0;
+}
+
+static int proxy_post_config(apr_pool_t *pconf, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ proxy_ssl_enable = APR_RETRIEVE_OPTIONAL_FN(ssl_proxy_enable);
+ proxy_ssl_disable = APR_RETRIEVE_OPTIONAL_FN(ssl_engine_disable);
+
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ /* fixup before mod_rewrite, so that the proxied URL will not be
+ * escaped accidentally by our fixup.
+ */
+ static const char * const aszSucc[]={ "mod_rewrite.c", NULL };
+
+ /* handler */
+ ap_hook_handler(proxy_handler, NULL, NULL, APR_HOOK_FIRST);
+ /* filename-to-URI translation */
+ ap_hook_translate_name(proxy_trans, NULL, NULL, APR_HOOK_FIRST);
+ /* walk <Proxy > entries and suppress default TRACE behavior */
+ ap_hook_map_to_storage(proxy_map_location, NULL,NULL, APR_HOOK_FIRST);
+ /* fixups */
+ ap_hook_fixups(proxy_fixup, NULL, aszSucc, APR_HOOK_FIRST);
+ /* post read_request handling */
+ ap_hook_post_read_request(proxy_detect, NULL, NULL, APR_HOOK_FIRST);
+ /* post config handling */
+ ap_hook_post_config(proxy_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA proxy_module =
+{
+ STANDARD20_MODULE_STUFF,
+ create_proxy_dir_config, /* create per-directory config structure */
+ merge_proxy_dir_config, /* merge per-directory config structures */
+ create_proxy_config, /* create per-server config structure */
+ merge_proxy_config, /* merge per-server config structures */
+ proxy_cmds, /* command table */
+ register_hooks
+};
+
+APR_HOOK_STRUCT(
+ APR_HOOK_LINK(scheme_handler)
+ APR_HOOK_LINK(canon_handler)
+)
+
+APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(proxy, PROXY, int, scheme_handler,
+ (request_rec *r, proxy_server_conf *conf,
+ char *url, const char *proxyhost,
+ apr_port_t proxyport),(r,conf,url,
+ proxyhost,proxyport),DECLINED)
+APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(proxy, PROXY, int, canon_handler,
+ (request_rec *r, char *url),(r,
+ url),DECLINED)
+APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(proxy, PROXY, int, fixups,
+ (request_rec *r), (r),
+ OK, DECLINED)
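+
+/*
+ * Scheme modules register themselves against the hooks implemented above.
+ * A minimal sketch (handler names hypothetical):
+ *
+ *   proxy_hook_scheme_handler(my_scheme_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ *   proxy_hook_canon_handler(my_canon_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ *
+ * which is what proxy_connect.c does from its register-hooks function.
+ */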
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy.dsp b/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy.dsp
new file mode 100644
index 00000000..9fa9feb0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy.dsp
@@ -0,0 +1,140 @@
+# Microsoft Developer Studio Project File - Name="mod_proxy" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_proxy - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy.mak" CFG="mod_proxy - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_proxy - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_proxy - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_proxy - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "PROXY_DECLARE_EXPORT" /Fd"Release\mod_proxy_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x809 /d "NDEBUG"
+# ADD RSC /l 0x809 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /out:"Release/mod_proxy.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_proxy.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_proxy - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "PROXY_DECLARE_EXPORT" /Fd"Debug\mod_proxy_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x809 /d "_DEBUG"
+# ADD RSC /l 0x809 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_proxy.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_proxy.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_proxy - Win32 Release"
+# Name "mod_proxy - Win32 Debug"
+# Begin Group "Source Files"
+
+# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;hpj;bat;for;f90"
+# Begin Source File
+
+SOURCE=.\mod_proxy.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\proxy_util.c
+# End Source File
+# End Group
+# Begin Group "Header Files"
+
+# PROP Default_Filter "h;hpp;hxx;hm;inl;fi;fd"
+# Begin Source File
+
+SOURCE=.\mod_proxy.h
+# End Source File
+# End Group
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_proxy - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_proxy.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_proxy.so "proxy_module for Apache" ../../include/ap_release.h > .\mod_proxy.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_proxy - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_proxy.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_proxy.so "proxy_module for Apache" ../../include/ap_release.h > .\mod_proxy.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy.h b/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy.h
new file mode 100644
index 00000000..d1ed7d46
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy.h
@@ -0,0 +1,255 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MOD_PROXY_H
+#define MOD_PROXY_H
+
+/*
+ * Main include file for the Apache proxy
+ */
+
+/*
+
+ Also note numerous FIXMEs and CHECKMEs which should be eliminated.
+
+ This code is once again experimental!
+
+ Things to do:
+
+ 1. Make it completely work (for FTP too)
+
+ 2. HTTP/1.1
+
+ Chuck Murcko <chuck@topsail.org> 02-06-01
+
+ */
+
+#define CORE_PRIVATE
+
+#include "apr_hooks.h"
+#include "apr.h"
+#include "apr_lib.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_md5.h"
+#include "apr_network_io.h"
+#include "apr_pools.h"
+#include "apr_strings.h"
+#include "apr_uri.h"
+#include "apr_date.h"
+#include "apr_fnmatch.h"
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "ap_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_main.h"
+#include "http_log.h"
+#include "http_connection.h"
+#include "util_filter.h"
+#include "util_ebcdic.h"
+
+#if APR_HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+#if APR_HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+
+/* for proxy_canonenc() */
+enum enctype {
+ enc_path, enc_search, enc_user, enc_fpath, enc_parm
+};
+
+#if APR_CHARSET_EBCDIC
+#define CRLF "\r\n"
+#else /*APR_CHARSET_EBCDIC*/
+#define CRLF "\015\012"
+#endif /*APR_CHARSET_EBCDIC*/
+
+/* default Max-Forwards header setting */
+#define DEFAULT_MAX_FORWARDS 10
+
+/* static information about a remote proxy */
+struct proxy_remote {
+ const char *scheme; /* the schemes handled by this proxy, or '*' */
+ const char *protocol; /* the scheme used to talk to this proxy */
+ const char *hostname; /* the hostname of this proxy */
+ apr_port_t port; /* the port for this proxy */
+ regex_t *regexp; /* compiled regex (if any) for the remote */
+ int use_regex; /* simple boolean. True if we have a regex pattern */
+};
+
+struct proxy_alias {
+ const char *real;
+ const char *fake;
+};
+
+struct dirconn_entry {
+ char *name;
+ struct in_addr addr, mask;
+ struct apr_sockaddr_t *hostaddr;
+ int (*matcher) (struct dirconn_entry * This, request_rec *r);
+};
+
+struct noproxy_entry {
+ const char *name;
+ struct apr_sockaddr_t *addr;
+};
+
+typedef struct {
+ apr_array_header_t *proxies;
+ apr_array_header_t *sec_proxy;
+ apr_array_header_t *aliases;
+ apr_array_header_t *raliases;
+ apr_array_header_t *noproxies;
+ apr_array_header_t *dirconn;
+ apr_array_header_t *allowed_connect_ports;
+ const char *domain; /* domain name to use in absence of a domain name in the request */
+ int req; /* true if proxy requests are enabled */
+ char req_set;
+ enum {
+ via_off,
+ via_on,
+ via_block,
+ via_full
+ } viaopt; /* how to deal with proxy Via: headers */
+ char viaopt_set;
+ apr_size_t recv_buffer_size;
+ char recv_buffer_size_set;
+ apr_size_t io_buffer_size;
+ char io_buffer_size_set;
+ long maxfwd;
+ char maxfwd_set;
+ /**
+ * the following setting masks the error page
+ * returned from the 'proxied server' and just
+ * forwards the status code upwards.
+ * This allows the main server (us) to generate
+ * the error page (so it will look like an error
+ * returned from the rest of the system).
+ */
+ int error_override;
+ int error_override_set;
+ int preserve_host;
+ int preserve_host_set;
+ apr_interval_time_t timeout;
+ apr_interval_time_t timeout_set;
+ enum {
+ bad_error,
+ bad_ignore,
+ bad_body
+ } badopt; /* how to deal with bad headers */
+ char badopt_set;
+
+} proxy_server_conf;
+
+typedef struct {
+ const char *p; /* The path */
+ int p_is_fnmatch; /* Is this path an fnmatch candidate? */
+ regex_t *r; /* Is this a regex? */
+ const char *ftp_directory_charset;
+} proxy_dir_conf;
+
+typedef struct {
+ conn_rec *connection;
+ char *hostname;
+ apr_port_t port;
+ int is_ssl;
+} proxy_conn_rec;
+
+typedef struct {
+ float cache_completion; /* completion percentage */
+ int content_length; /* length of the content */
+} proxy_completion;
+
+
+/* hooks */
+
+/* Create a set of PROXY_DECLARE(type), PROXY_DECLARE_NONSTD(type) and
+ * PROXY_DECLARE_DATA with appropriate export and import tags for the platform
+ */
+#if !defined(WIN32)
+#define PROXY_DECLARE(type) type
+#define PROXY_DECLARE_NONSTD(type) type
+#define PROXY_DECLARE_DATA
+#elif defined(PROXY_DECLARE_STATIC)
+#define PROXY_DECLARE(type) type __stdcall
+#define PROXY_DECLARE_NONSTD(type) type
+#define PROXY_DECLARE_DATA
+#elif defined(PROXY_DECLARE_EXPORT)
+#define PROXY_DECLARE(type) __declspec(dllexport) type __stdcall
+#define PROXY_DECLARE_NONSTD(type) __declspec(dllexport) type
+#define PROXY_DECLARE_DATA __declspec(dllexport)
+#else
+#define PROXY_DECLARE(type) __declspec(dllimport) type __stdcall
+#define PROXY_DECLARE_NONSTD(type) __declspec(dllimport) type
+#define PROXY_DECLARE_DATA __declspec(dllimport)
+#endif
+
+/**
+ * Hook an optional proxy hook. Unlike static hooks, this uses a macro
+ * instead of a function.
+ */
+#define PROXY_OPTIONAL_HOOK(name,fn,pre,succ,order) \
+ APR_OPTIONAL_HOOK(proxy,name,fn,pre,succ,order)
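+/* A minimal usage sketch (callback name hypothetical):
+ * PROXY_OPTIONAL_HOOK(fixups, my_proxy_fixups, NULL, NULL, APR_HOOK_MIDDLE);
+ */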
+
+APR_DECLARE_EXTERNAL_HOOK(proxy, PROXY, int, scheme_handler, (request_rec *r,
+ proxy_server_conf *conf, char *url,
+ const char *proxyhost, apr_port_t proxyport))
+APR_DECLARE_EXTERNAL_HOOK(proxy, PROXY, int, canon_handler, (request_rec *r,
+ char *url))
+
+APR_DECLARE_EXTERNAL_HOOK(proxy, PROXY, int, create_req, (request_rec *r, request_rec *pr))
+APR_DECLARE_EXTERNAL_HOOK(proxy, PROXY, int, fixups, (request_rec *r))
+
+/* proxy_util.c */
+
+PROXY_DECLARE(request_rec *)ap_proxy_make_fake_req(conn_rec *c, request_rec *r);
+PROXY_DECLARE(int) ap_proxy_hex2c(const char *x);
+PROXY_DECLARE(void) ap_proxy_c2hex(int ch, char *x);
+PROXY_DECLARE(char *)ap_proxy_canonenc(apr_pool_t *p, const char *x, int len, enum enctype t,
+ int isenc);
+PROXY_DECLARE(char *)ap_proxy_canon_netloc(apr_pool_t *p, char **const urlp, char **userp,
+ char **passwordp, char **hostp, apr_port_t *port);
+PROXY_DECLARE(const char *)ap_proxy_date_canon(apr_pool_t *p, const char *x);
+PROXY_DECLARE(apr_table_t *)ap_proxy_read_headers(request_rec *r, request_rec *rp, char *buffer, int size, conn_rec *c);
+PROXY_DECLARE(int) ap_proxy_liststr(const char *list, const char *val);
+PROXY_DECLARE(char *)ap_proxy_removestr(apr_pool_t *pool, const char *list, const char *val);
+PROXY_DECLARE(int) ap_proxy_hex2sec(const char *x);
+PROXY_DECLARE(void) ap_proxy_sec2hex(int t, char *y);
+PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *message);
+PROXY_DECLARE(int) ap_proxy_is_ipaddr(struct dirconn_entry *This, apr_pool_t *p);
+PROXY_DECLARE(int) ap_proxy_is_domainname(struct dirconn_entry *This, apr_pool_t *p);
+PROXY_DECLARE(int) ap_proxy_is_hostname(struct dirconn_entry *This, apr_pool_t *p);
+PROXY_DECLARE(int) ap_proxy_is_word(struct dirconn_entry *This, apr_pool_t *p);
+PROXY_DECLARE(int) ap_proxy_checkproxyblock(request_rec *r, proxy_server_conf *conf, apr_sockaddr_t *uri_addr);
+PROXY_DECLARE(int) ap_proxy_pre_http_request(conn_rec *c, request_rec *r);
+PROXY_DECLARE(apr_status_t) ap_proxy_string_read(conn_rec *c, apr_bucket_brigade *bb, char *buff, size_t bufflen, int *eos);
+PROXY_DECLARE(void) ap_proxy_table_unmerge(apr_pool_t *p, apr_table_t *t, char *key);
+PROXY_DECLARE(int) ap_proxy_connect_to_backend(apr_socket_t **, const char *, apr_sockaddr_t *, const char *, proxy_server_conf *, server_rec *, apr_pool_t *);
+PROXY_DECLARE(int) ap_proxy_ssl_enable(conn_rec *c);
+PROXY_DECLARE(int) ap_proxy_ssl_disable(conn_rec *c);
+
+/* For proxy_util */
+extern module AP_MODULE_DECLARE_DATA proxy_module;
+
+#endif /*MOD_PROXY_H*/
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy_connect.dsp b/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy_connect.dsp
new file mode 100644
index 00000000..0c2a12b0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy_connect.dsp
@@ -0,0 +1,136 @@
+# Microsoft Developer Studio Project File - Name="mod_proxy_connect" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_proxy_connect - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_connect.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_connect.mak" CFG="mod_proxy_connect - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_proxy_connect - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_proxy_connect - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_proxy_connect - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_proxy_connect_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x809 /d "NDEBUG"
+# ADD RSC /l 0x809 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /out:"Release/mod_proxy_connect.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_connect.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_proxy_connect.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_connect.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_proxy_connect - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_proxy_connect_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x809 /d "_DEBUG"
+# ADD RSC /l 0x809 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_proxy_connect.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_connect.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_proxy_connect.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_connect.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_proxy_connect - Win32 Release"
+# Name "mod_proxy_connect - Win32 Debug"
+# Begin Group "Source Files"
+
+# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;hpj;bat;for;f90"
+# Begin Source File
+
+SOURCE=.\proxy_connect.c
+# End Source File
+# End Group
+# Begin Group "Header Files"
+
+# PROP Default_Filter ".h"
+# Begin Source File
+
+SOURCE=.\mod_proxy.h
+# End Source File
+# End Group
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_proxy_connect - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_proxy_connect.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_proxy_connect.so "proxy_connect_module for Apache" ../../include/ap_release.h > .\mod_proxy_connect.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_proxy_connect - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_proxy_connect.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_proxy_connect.so "proxy_connect_module for Apache" ../../include/ap_release.h > .\mod_proxy_connect.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy_ftp.dsp b/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy_ftp.dsp
new file mode 100644
index 00000000..3dfe0b7e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy_ftp.dsp
@@ -0,0 +1,136 @@
+# Microsoft Developer Studio Project File - Name="mod_proxy_ftp" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_proxy_ftp - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_ftp.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_ftp.mak" CFG="mod_proxy_ftp - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_proxy_ftp - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_proxy_ftp - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_proxy_ftp - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_proxy_ftp_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x809 /d "NDEBUG"
+# ADD RSC /l 0x809 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /out:"Release/mod_proxy_ftp.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_ftp.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_proxy_ftp.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_ftp.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_proxy_ftp - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_proxy_ftp_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x809 /d "_DEBUG"
+# ADD RSC /l 0x809 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_proxy_ftp.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_ftp.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_proxy_ftp.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_ftp.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_proxy_ftp - Win32 Release"
+# Name "mod_proxy_ftp - Win32 Debug"
+# Begin Group "Source Files"
+
+# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;hpj;bat;for;f90"
+# Begin Source File
+
+SOURCE=.\proxy_ftp.c
+# End Source File
+# End Group
+# Begin Group "Header Files"
+
+# PROP Default_Filter ".h"
+# Begin Source File
+
+SOURCE=.\mod_proxy.h
+# End Source File
+# End Group
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_proxy_ftp - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_proxy_ftp.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_proxy_ftp.so "proxy_ftp_module for Apache" ../../include/ap_release.h > .\mod_proxy_ftp.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_proxy_ftp - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_proxy_ftp.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_proxy_ftp.so "proxy_ftp_module for Apache" ../../include/ap_release.h > .\mod_proxy_ftp.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy_http.dsp b/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy_http.dsp
new file mode 100644
index 00000000..d8f29006
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/mod_proxy_http.dsp
@@ -0,0 +1,136 @@
+# Microsoft Developer Studio Project File - Name="mod_proxy_http" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_proxy_http - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http.mak" CFG="mod_proxy_http - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_proxy_http - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_proxy_http - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_proxy_http - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_proxy_http_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x809 /d "NDEBUG"
+# ADD RSC /l 0x809 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /out:"Release/mod_proxy_http.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_proxy_http.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_proxy_http - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_proxy_http_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x809 /d "_DEBUG"
+# ADD RSC /l 0x809 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_proxy_http.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_proxy_http.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_proxy_http - Win32 Release"
+# Name "mod_proxy_http - Win32 Debug"
+# Begin Group "Source Files"
+
+# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;hpj;bat;for;f90"
+# Begin Source File
+
+SOURCE=.\proxy_http.c
+# End Source File
+# End Group
+# Begin Group "Header Files"
+
+# PROP Default_Filter ".h"
+# Begin Source File
+
+SOURCE=.\mod_proxy.h
+# End Source File
+# End Group
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_proxy_http - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_proxy_http.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_proxy_http.so "proxy_http_module for Apache" ../../include/ap_release.h > .\mod_proxy_http.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_proxy_http - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_proxy_http.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_proxy_http.so "proxy_http_module for Apache" ../../include/ap_release.h > .\mod_proxy_http.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/modules.mk b/rubbos/app/httpd-2.0.64/modules/proxy/modules.mk
new file mode 100644
index 00000000..ceb52a1b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/modules.mk
@@ -0,0 +1,3 @@
+DISTCLEAN_TARGETS = modules.mk
+static =
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/proxy_connect.c b/rubbos/app/httpd-2.0.64/modules/proxy/proxy_connect.c
new file mode 100644
index 00000000..20e40ebb
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/proxy_connect.c
@@ -0,0 +1,377 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CONNECT method for Apache proxy */
+
+#define CORE_PRIVATE
+
+#include "mod_proxy.h"
+#include "apr_poll.h"
+
+module AP_MODULE_DECLARE_DATA proxy_connect_module;
+
+int ap_proxy_connect_canon(request_rec *r, char *url);
+int ap_proxy_connect_handler(request_rec *r, proxy_server_conf *conf,
+ char *url, const char *proxyname,
+ apr_port_t proxyport);
+
+/*
+ * This handles Netscape CONNECT method secure proxy requests.
+ * A connection is opened to the specified host and data is
+ * passed through between the WWW site and the browser.
+ *
+ * This code is based on the INTERNET-DRAFT document
+ * "Tunneling SSL Through a WWW Proxy" currently at
+ * http://www.mcom.com/newsref/std/tunneling_ssl.html.
+ *
+ * If proxyhost and proxyport are set, we send a CONNECT to
+ * the specified proxy.
+ *
+ * FIXME: this doesn't log the number of bytes sent, but
+ * that may be okay, since the data is supposed to
+ * be transparent. In fact, this doesn't log at all
+ * yet. 8^)
+ * FIXME: doesn't check any headers initially sent from the
+ * client.
+ * FIXME: should allow authentication, but hopefully the
+ * generic proxy authentication is good enough.
+ * FIXME: no check for r->assbackwards, whatever that is.
+ */
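+
+/*
+ * Rough sketch of the exchange implemented below (hostname illustrative):
+ *
+ *   client -> proxy : CONNECT secure.example.com:443 HTTP/1.0
+ *   proxy -> client : HTTP/1.0 200 Connection Established
+ *                     Proxy-agent: <server version>
+ *
+ * after which the handler simply shuttles bytes between the two sockets.
+ */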
+
+static int
+allowed_port(proxy_server_conf *conf, int port)
+{
+ int i;
+ int *list = (int *) conf->allowed_connect_ports->elts;
+
+ for(i = 0; i < conf->allowed_connect_ports->nelts; i++) {
+ if(port == list[i])
+ return 1;
+ }
+ return 0;
+}
+
+/* canonicalise CONNECT URLs. */
+int ap_proxy_connect_canon(request_rec *r, char *url)
+{
+
+ if (r->method_number != M_CONNECT) {
+ return DECLINED;
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CONNECT: canonicalising URL %s", url);
+
+ return OK;
+}
+
+/* CONNECT handler */
+int ap_proxy_connect_handler(request_rec *r, proxy_server_conf *conf,
+ char *url, const char *proxyname,
+ apr_port_t proxyport)
+{
+ apr_pool_t *p = r->pool;
+ apr_socket_t *sock;
+ apr_status_t err, rv;
+ apr_size_t i, o, nbytes;
+ char buffer[HUGE_STRING_LEN];
+ apr_socket_t *client_socket = ap_get_module_config(r->connection->conn_config, &core_module);
+ int failed;
+ apr_pollfd_t *pollfd;
+ apr_int32_t pollcnt;
+ apr_int16_t pollevent;
+ apr_sockaddr_t *uri_addr, *connect_addr;
+
+ apr_uri_t uri;
+ const char *connectname;
+ int connectport = 0;
+
+ /* is this for us? */
+ if (r->method_number != M_CONNECT) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CONNECT: declining URL %s", url);
+ return DECLINED;
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CONNECT: serving URL %s", url);
+
+
+ /*
+ * Step One: Determine Who To Connect To
+ *
+ * Break up the URL to determine the host to connect to
+ */
+
+ /* we break the URL into host, port, uri */
+ if (APR_SUCCESS != apr_uri_parse_hostinfo(p, url, &uri)) {
+ return ap_proxyerror(r, HTTP_BAD_REQUEST,
+ apr_pstrcat(p, "URI cannot be parsed: ", url, NULL));
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CONNECT: connecting %s to %s:%d", url, uri.hostname, uri.port);
+
+ /* do a DNS lookup for the destination host */
+ err = apr_sockaddr_info_get(&uri_addr, uri.hostname, APR_UNSPEC, uri.port, 0, p);
+
+ /* are we connecting directly, or via a proxy? */
+ if (proxyname) {
+ connectname = proxyname;
+ connectport = proxyport;
+ err = apr_sockaddr_info_get(&connect_addr, proxyname, APR_UNSPEC, proxyport, 0, p);
+ }
+ else {
+ connectname = uri.hostname;
+ connectport = uri.port;
+ connect_addr = uri_addr;
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CONNECT: connecting to remote proxy %s on port %d", connectname, connectport);
+
+ /* check if ProxyBlock directive on this host */
+ if (OK != ap_proxy_checkproxyblock(r, conf, uri_addr)) {
+ return ap_proxyerror(r, HTTP_FORBIDDEN,
+ "Connect to remote machine blocked");
+ }
+
+ /* Check if it is an allowed port */
+ if (conf->allowed_connect_ports->nelts == 0) {
+ /* Default setting if not overridden by AllowCONNECT */
+ switch (uri.port) {
+ case APR_URI_HTTPS_DEFAULT_PORT:
+ case APR_URI_SNEWS_DEFAULT_PORT:
+ break;
+ default:
+ /* XXX can we call ap_proxyerror() here to get a nice log message? */
+ return HTTP_FORBIDDEN;
+ }
+ } else if(!allowed_port(conf, uri.port)) {
+ /* XXX can we call ap_proxyerror() here to get a nice log message? */
+ return HTTP_FORBIDDEN;
+ }
+
+ /*
+ * Step Two: Make the Connection
+ *
+ * We have determined who to connect to. Now make the connection.
+ */
+
+ /* get all the possible IP addresses for the destname and loop through them
+ * until we get a successful connection
+ */
+ if (APR_SUCCESS != err) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, apr_pstrcat(p,
+ "DNS lookup failure for: ",
+ connectname, NULL));
+ }
+
+ /*
+ * At this point we have a list of one or more IP addresses of
+ * the machine to connect to. If configured, reorder this
+ * list so that the "best candidate" is first try. "best
+ * candidate" could mean the least loaded server, the fastest
+ * responding server, whatever.
+ *
+ * For now we do nothing, ie we get DNS round robin.
+ * XXX FIXME
+ */
+ failed = ap_proxy_connect_to_backend(&sock, "CONNECT", connect_addr,
+ connectname, conf, r->server,
+ r->pool);
+
+ /* handle a permanent error from the above loop */
+ if (failed) {
+ if (proxyname) {
+ return DECLINED;
+ }
+ else {
+ return HTTP_BAD_GATEWAY;
+ }
+ }
+
+ /*
+ * Step Three: Send the Request
+ *
+ * Send the HTTP/1.1 CONNECT request to the remote server
+ */
+
+ /* we are acting as a tunnel - the output filter stack should
+ * be completely empty, because when we are done here we are done completely.
+ * We add the NULL filter to the stack to do this...
+ */
+ r->output_filters = NULL;
+ r->connection->output_filters = NULL;
+
+
+ /* If we are connecting through a remote proxy, we need to pass
+ * the CONNECT request on to it.
+ */
+ if (proxyport) {
+ /* FIXME: Error checking ignored.
+ */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CONNECT: sending the CONNECT request to the remote proxy");
+ nbytes = apr_snprintf(buffer, sizeof(buffer),
+ "CONNECT %s HTTP/1.0" CRLF, r->uri);
+ apr_send(sock, buffer, &nbytes);
+ nbytes = apr_snprintf(buffer, sizeof(buffer),
+ "Proxy-agent: %s" CRLF CRLF, ap_get_server_version());
+ apr_send(sock, buffer, &nbytes);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CONNECT: Returning 200 OK Status");
+ nbytes = apr_snprintf(buffer, sizeof(buffer),
+ "HTTP/1.0 200 Connection Established" CRLF);
+ ap_xlate_proto_to_ascii(buffer, nbytes);
+ apr_send(client_socket, buffer, &nbytes);
+ nbytes = apr_snprintf(buffer, sizeof(buffer),
+ "Proxy-agent: %s" CRLF CRLF, ap_get_server_version());
+ ap_xlate_proto_to_ascii(buffer, nbytes);
+ apr_send(client_socket, buffer, &nbytes);
+#if 0
+ /* This is safer code, but it doesn't work yet. I'm leaving it
+ * here so that I can fix it later.
+ */
+ r->status = HTTP_OK;
+ r->header_only = 1;
+ apr_table_set(r->headers_out, "Proxy-agent: %s", ap_get_server_version());
+ ap_rflush(r);
+#endif
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CONNECT: setting up poll()");
+
+ /*
+ * Step Four: Handle Data Transfer
+ *
+ * Handle two way transfer of data over the socket (this is a tunnel).
+ */
+
+/* r->sent_bodyct = 1;*/
+
+ if((rv = apr_poll_setup(&pollfd, 2, r->pool)) != APR_SUCCESS)
+ {
+ apr_socket_close(sock);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: CONNECT: error apr_poll_setup()");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /* Add client side to the poll */
+ apr_poll_socket_add(pollfd, client_socket, APR_POLLIN);
+
+ /* Add the server side to the poll */
+ apr_poll_socket_add(pollfd, sock, APR_POLLIN);
+
+ while (1) { /* Infinite loop until error (one side closes the connection) */
+/* ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "proxy: CONNECT: going to sleep (poll)");*/
+ if ((rv = apr_poll(pollfd, 2, &pollcnt, -1)) != APR_SUCCESS)
+ {
+ apr_socket_close(sock);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "proxy: CONNECT: error apr_poll()");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+/* ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CONNECT: woke from select(), i=%d", pollcnt);*/
+
+ if (pollcnt) {
+ apr_poll_revents_get(&pollevent, sock, pollfd);
+ if (pollevent & APR_POLLIN) {
+/* ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CONNECT: sock was set");*/
+ nbytes = sizeof(buffer);
+ if (apr_recv(sock, buffer, &nbytes) == APR_SUCCESS) {
+ o = 0;
+ i = nbytes;
+ while(i > 0)
+ {
+ nbytes = i;
+ /* This is just plain wrong. No module should ever write directly
+ * to the client. For now, this works, but this is high on my list of
+ * things to fix. The correct line is:
+ * if ((nbytes = ap_rwrite(buffer + o, nbytes, r)) < 0)
+ * rbb
+ */
+ if (apr_send(client_socket, buffer + o, &nbytes) != APR_SUCCESS)
+ break;
+ o += nbytes;
+ i -= nbytes;
+ }
+ }
+ else
+ break;
+ }
+ else if ((pollevent & APR_POLLERR) || (pollevent & APR_POLLHUP))
+ break;
+
+
+ apr_poll_revents_get(&pollevent, client_socket, pollfd);
+ if (pollevent & APR_POLLIN) {
+/* ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CONNECT: client was set");*/
+ nbytes = sizeof(buffer);
+ if (apr_recv(client_socket, buffer, &nbytes) == APR_SUCCESS) {
+ o = 0;
+ i = nbytes;
+ while(i > 0)
+ {
+ nbytes = i;
+ if (apr_send(sock, buffer + o, &nbytes) != APR_SUCCESS)
+ break;
+ o += nbytes;
+ i -= nbytes;
+ }
+ }
+ else
+ break;
+ }
+ else if ((pollevent & APR_POLLERR) || (pollevent & APR_POLLHUP))
+ break;
+ }
+ else
+ break;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CONNECT: finished with poll() - cleaning up");
+
+ /*
+ * Step Five: Clean Up
+ *
+ * Close the socket and clean up
+ */
+
+ apr_socket_close(sock);
+
+ return OK;
+}
+
+static void ap_proxy_connect_register_hook(apr_pool_t *p)
+{
+ proxy_hook_scheme_handler(ap_proxy_connect_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ proxy_hook_canon_handler(ap_proxy_connect_canon, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA proxy_connect_module = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ NULL, /* command apr_table_t */
+ ap_proxy_connect_register_hook /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/proxy_ftp.c b/rubbos/app/httpd-2.0.64/modules/proxy/proxy_ftp.c
new file mode 100644
index 00000000..cbbf23c9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/proxy_ftp.c
@@ -0,0 +1,1936 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* FTP routines for Apache proxy */
+
+#include "mod_proxy.h"
+#if APR_HAVE_TIME_H
+#include <time.h>
+#endif
+
+#define AUTODETECT_PWD
+/* Automatic timestamping (Last-Modified header) based on MDTM is used if:
+ * 1) the FTP server supports the MDTM command and
+ * 2) HAVE_TIMEGM (preferred) or HAVE_GMTOFF is available at compile time
+ */
+#define USE_MDTM
+
+
+module AP_MODULE_DECLARE_DATA proxy_ftp_module;
+
+int ap_proxy_ftp_canon(request_rec *r, char *url);
+int ap_proxy_ftp_handler(request_rec *r, proxy_server_conf *conf,
+ char *url, const char *proxyhost,
+ apr_port_t proxyport);
+apr_status_t ap_proxy_send_dir_filter(ap_filter_t * f,
+ apr_bucket_brigade *bb);
+
+
+/*
+ * Decodes a '%' escaped string, and returns the number of characters
+ */
+static int decodeenc(char *x)
+{
+ int i, j, ch;
+
+ if (x[0] == '\0')
+ return 0; /* special case for no characters */
+ for (i = 0, j = 0; x[i] != '\0'; i++, j++) {
+ /* decode it if not already done */
+ ch = x[i];
+ if (ch == '%' && apr_isxdigit(x[i + 1]) && apr_isxdigit(x[i + 2])) {
+ ch = ap_proxy_hex2c(&x[i + 1]);
+ i += 2;
+ }
+ x[j] = ch;
+ }
+ x[j] = '\0';
+ return j;
+}
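+/* e.g. decodeenc() turns "a%2Fb" into "a/b" in place and returns the new
+ * length, 3.
+ */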
+
+/*
+ * Escape the globbing characters in a path used as argument to
+ * the FTP commands (SIZE, CWD, RETR, MDTM, ...).
+ * ftpd assumes '\\' as a quoting character to escape special characters.
+ * Returns: escaped string
+ */
+#define FTP_GLOBBING_CHARS "*?[{~"
+static char *ftp_escape_globbingchars(apr_pool_t *p, const char *path)
+{
+ char *ret = apr_palloc(p, 2*strlen(path)+sizeof(""));
+ char *d;
+ for (d = ret; *path; ++path) {
+ if (strchr(FTP_GLOBBING_CHARS, *path) != NULL)
+ *d++ = '\\';
+ *d++ = *path;
+ }
+ *d = '\0';
+ return ret;
+}
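+/* e.g. ftp_escape_globbingchars(p, "file[1]*") returns "file\[1]\*";
+ * characters outside FTP_GLOBBING_CHARS are copied unchanged.
+ */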
+
+/*
+ * Check for globbing characters in a path used as argument to
+ * the FTP commands (SIZE, CWD, RETR, MDTM, ...).
+ * ftpd assumes '\\' as a quoting character to escape special characters.
+ * Returns: 0 (no globbing chars, or all globbing chars escaped), 1 (globbing chars)
+ */
+static int ftp_check_globbingchars(const char *path)
+{
+ for ( ; *path; ++path) {
+ if (*path == '\\')
+ ++path;
+ if (*path != '\0' && strchr(FTP_GLOBBING_CHARS, *path) != NULL)
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/*
+ * checks an encoded ftp string for bad characters, namely, CR, LF or
+ * non-ascii character
+ */
+static int ftp_check_string(const char *x)
+{
+ int i, ch = 0;
+#if APR_CHARSET_EBCDIC
+ char buf[1];
+#endif
+
+ for (i = 0; x[i] != '\0'; i++) {
+ ch = x[i];
+ if (ch == '%' && apr_isxdigit(x[i + 1]) && apr_isxdigit(x[i + 2])) {
+ ch = ap_proxy_hex2c(&x[i + 1]);
+ i += 2;
+ }
+#if !APR_CHARSET_EBCDIC
+ if (ch == '\015' || ch == '\012' || (ch & 0x80))
+#else /* APR_CHARSET_EBCDIC */
+ if (ch == '\r' || ch == '\n')
+ return 0;
+ buf[0] = ch;
+ ap_xlate_proto_to_ascii(buf, 1);
+ if (buf[0] & 0x80)
+#endif /* APR_CHARSET_EBCDIC */
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Canonicalise ftp URLs.
+ */
+int ap_proxy_ftp_canon(request_rec *r, char *url)
+{
+ char *user, *password, *host, *path, *parms, *strp, sport[7];
+ apr_pool_t *p = r->pool;
+ const char *err;
+ apr_port_t port, def_port;
+
+ /* */
+ if (strncasecmp(url, "ftp:", 4) == 0) {
+ url += 4;
+ }
+ else {
+ return DECLINED;
+ }
+ def_port = apr_uri_port_of_scheme("ftp");
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: canonicalising URL %s", url);
+
+ port = def_port;
+ err = ap_proxy_canon_netloc(p, &url, &user, &password, &host, &port);
+ if (err)
+ return HTTP_BAD_REQUEST;
+ if (user != NULL && !ftp_check_string(user))
+ return HTTP_BAD_REQUEST;
+ if (password != NULL && !ftp_check_string(password))
+ return HTTP_BAD_REQUEST;
+
+ /* now parse path/parameters args, according to rfc1738 */
+ /*
+ * N.B. if this isn't a true proxy request, then the URL path (but not
+ * query args) has already been decoded. This gives rise to the problem
+ * of a ; being decoded into the path.
+ */
+ strp = strchr(url, ';');
+ if (strp != NULL) {
+ *(strp++) = '\0';
+ parms = ap_proxy_canonenc(p, strp, strlen(strp), enc_parm,
+ r->proxyreq);
+ if (parms == NULL)
+ return HTTP_BAD_REQUEST;
+ }
+ else
+ parms = "";
+
+ path = ap_proxy_canonenc(p, url, strlen(url), enc_path, r->proxyreq);
+ if (path == NULL)
+ return HTTP_BAD_REQUEST;
+ if (!ftp_check_string(path))
+ return HTTP_BAD_REQUEST;
+
+ if (r->proxyreq && r->args != NULL) {
+ if (strp != NULL) {
+ strp = ap_proxy_canonenc(p, r->args, strlen(r->args), enc_parm, 1);
+ if (strp == NULL)
+ return HTTP_BAD_REQUEST;
+ parms = apr_pstrcat(p, parms, "?", strp, NULL);
+ }
+ else {
+ strp = ap_proxy_canonenc(p, r->args, strlen(r->args), enc_fpath, 1);
+ if (strp == NULL)
+ return HTTP_BAD_REQUEST;
+ path = apr_pstrcat(p, path, "?", strp, NULL);
+ }
+ r->args = NULL;
+ }
+
+/* now, rebuild URL */
+
+ if (port != def_port)
+ apr_snprintf(sport, sizeof(sport), ":%d", port);
+ else
+ sport[0] = '\0';
+
+ if (ap_strchr_c(host, ':')) { /* if literal IPv6 address */
+ host = apr_pstrcat(p, "[", host, "]", NULL);
+ }
+ r->filename = apr_pstrcat(p, "proxy:ftp://", (user != NULL) ? user : "",
+ (password != NULL) ? ":" : "",
+ (password != NULL) ? password : "",
+ (user != NULL) ? "@" : "", host, sport, "/", path,
+ (parms[0] != '\0') ? ";" : "", parms, NULL);
+
+ return OK;
+}
+
+/* we chop lines longer than 80 characters */
+#define MAX_LINE_LEN 80
+
+/*
+ * Reads response lines, returns both the ftp status code and
+ * remembers the response message in the supplied buffer
+ */
+static int ftp_getrc_msg(conn_rec *ftp_ctrl, apr_bucket_brigade *bb, char *msgbuf, int msglen)
+{
+ int status;
+ char response[MAX_LINE_LEN];
+ char buff[5];
+ char *mb = msgbuf, *me = &msgbuf[msglen];
+ apr_status_t rv;
+ int eos;
+
+ if (APR_SUCCESS != (rv = ap_proxy_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) {
+ return -1;
+ }
+/*
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL,
+ "proxy: <FTP: %s", response);
+*/
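+ /* The reply begins with three ASCII digits; convert them to the numeric
+ * FTP status code: 100*d2 + 10*d1 + d0 - 111*'0' is simply
+ * (d2-'0')*100 + (d1-'0')*10 + (d0-'0') written as one expression,
+ * so e.g. "226 Transfer complete" yields 226.
+ */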
+ if (!apr_isdigit(response[0]) || !apr_isdigit(response[1]) ||
+ !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-'))
+ status = 0;
+ else
+ status = 100 * response[0] + 10 * response[1] + response[2] - 111 * '0';
+
+ mb = apr_cpystrn(mb, response + 4, me - mb);
+
+ if (response[3] == '-') {
+ memcpy(buff, response, 3);
+ buff[3] = ' ';
+ do {
+ if (APR_SUCCESS != (rv = ap_proxy_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) {
+ return -1;
+ }
+ mb = apr_cpystrn(mb, response + (' ' == response[0] ? 1 : 4), me - mb);
+ } while (memcmp(response, buff, 4) != 0);
+ }
+
+ return status;
+}
+
+/* this is a filter that turns a raw ASCII directory listing into pretty HTML */
+
+/* ideally, mod_proxy should simply send the raw directory list up the filter
+ * stack to mod_autoindex, which in theory should turn the raw ascii into
+ * pretty html along with all the bells and whistles it provides...
+ *
+ * all in good time...! :)
+ */
+
+typedef struct {
+ apr_bucket_brigade *in;
+ char buffer[MAX_STRING_LEN];
+ enum {
+ HEADER, BODY, FOOTER
+ } state;
+} proxy_dir_ctx_t;
+
+/* fallback regex for ls -s1; ($0..$2) == 3 */
+#define LS_REG_PATTERN "^ *([0-9]+) +([^ ]+)$"
+#define LS_REG_MATCH 3
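+/* e.g. an "ls -s1" line such as "  4 README" matches with $1 = "4"
+ * (the size in blocks) and $2 = "README" (the filename).
+ */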
+
+apr_status_t ap_proxy_send_dir_filter(ap_filter_t *f, apr_bucket_brigade *in)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ apr_pool_t *p = r->pool;
+ apr_bucket_brigade *out = apr_brigade_create(p, c->bucket_alloc);
+ apr_status_t rv;
+
+ register int n;
+ char *dir, *path, *reldir, *site, *str, *type;
+
+ const char *pwd = apr_table_get(r->notes, "Directory-PWD");
+ const char *readme = apr_table_get(r->notes, "Directory-README");
+
+ proxy_dir_ctx_t *ctx = f->ctx;
+
+ if (!ctx) {
+ f->ctx = ctx = apr_pcalloc(p, sizeof(*ctx));
+ ctx->in = apr_brigade_create(p, c->bucket_alloc);
+ ctx->buffer[0] = 0;
+ ctx->state = HEADER;
+ }
+
+ /* combine the stored and the new */
+ APR_BRIGADE_CONCAT(ctx->in, in);
+
+ if (HEADER == ctx->state) {
+
+ /* basedir is either "", or "/%2f" for the "squid %2f hack" */
+ const char *basedir = ""; /* By default, path is relative to the $HOME dir */
+ char *wildcard = NULL;
+
+ /* Save "scheme://site" prefix without password */
+ site = apr_uri_unparse(p, &f->r->parsed_uri, APR_URI_UNP_OMITPASSWORD | APR_URI_UNP_OMITPATHINFO);
+ /* ... and path without query args */
+ path = apr_uri_unparse(p, &f->r->parsed_uri, APR_URI_UNP_OMITSITEPART | APR_URI_UNP_OMITQUERY);
+
+ /* If path began with /%2f, change the basedir */
+ if (strncasecmp(path, "/%2f", 4) == 0) {
+ basedir = "/%2f";
+ }
+
+ /* Strip off a type qualifier. It is ignored for dir listings */
+ if ((type = strstr(path, ";type=")) != NULL)
+ *type++ = '\0';
+
+ (void)decodeenc(path);
+
+ while (path[1] == '/') /* collapse multiple leading slashes to one */
+ ++path;
+
+ reldir = strrchr(path, '/');
+ if (reldir != NULL && ftp_check_globbingchars(reldir)) {
+ wildcard = &reldir[1];
+ reldir[0] = '\0'; /* strip off the wildcard suffix */
+ }
+
+ /* Copy path, strip (all except the last) trailing slashes */
+ /* (the trailing slash is needed for the dir component loop below) */
+ path = dir = apr_pstrcat(p, path, "/", NULL);
+ for (n = strlen(path); n > 1 && path[n - 1] == '/' && path[n - 2] == '/'; --n)
+ path[n - 1] = '\0';
+
+ /* Add a link to the root directory (if %2f hack was used) */
+ str = (basedir[0] != '\0') ? "<a href=\"/%2f/\">%2f</a>/" : "";
+
+ /* print "ftp://host/" */
+ str = apr_psprintf(p, DOCTYPE_HTML_3_2
+ "<html>\n <head>\n <title>%s%s%s</title>\n"
+ " </head>\n"
+ " <body>\n <h2>Directory of "
+ "<a href=\"/\">%s</a>/%s",
+ site, basedir, ap_escape_html(p, path),
+ site, str);
+
+ APR_BRIGADE_INSERT_TAIL(out, apr_bucket_pool_create(str, strlen(str),
+ p, c->bucket_alloc));
+
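+        /*
+         * Walk the path component by component, emitting one
+         * "<a href=...>component</a>/" crumb per directory level so that
+         * every parent directory in the heading is clickable.
+         */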
+ for (dir = path+1; (dir = strchr(dir, '/')) != NULL; )
+ {
+ *dir = '\0';
+ if ((reldir = strrchr(path+1, '/'))==NULL) {
+ reldir = path+1;
+ }
+ else
+ ++reldir;
+ /* print "path/" component */
+ str = apr_psprintf(p, "<a href=\"%s%s/\">%s</a>/", basedir,
+ ap_escape_uri(p, path),
+ ap_escape_html(p, reldir));
+ *dir = '/';
+ while (*dir == '/')
+ ++dir;
+ APR_BRIGADE_INSERT_TAIL(out, apr_bucket_pool_create(str,
+ strlen(str), p,
+ c->bucket_alloc));
+ }
+ if (wildcard != NULL) {
+ wildcard = ap_escape_html(p, wildcard);
+ APR_BRIGADE_INSERT_TAIL(out, apr_bucket_pool_create(wildcard,
+ strlen(wildcard), p,
+ c->bucket_alloc));
+ }
+
+ /* If the caller has determined the current directory, and it differs */
+ /* from what the client requested, then show the real name */
+ if (pwd == NULL || strncmp(pwd, path, strlen(pwd)) == 0) {
+ str = apr_psprintf(p, "</h2>\n\n <hr />\n\n<pre>");
+ }
+ else {
+ str = apr_psprintf(p, "</h2>\n\n(%s)\n\n <hr />\n\n<pre>",
+ ap_escape_html(p, pwd));
+ }
+ APR_BRIGADE_INSERT_TAIL(out, apr_bucket_pool_create(str, strlen(str),
+ p, c->bucket_alloc));
+
+ /* print README */
+ if (readme) {
+ str = apr_psprintf(p, "%s\n</pre>\n\n<hr />\n\n<pre>\n",
+ ap_escape_html(p, readme));
+
+ APR_BRIGADE_INSERT_TAIL(out, apr_bucket_pool_create(str,
+ strlen(str), p,
+ c->bucket_alloc));
+ }
+
+ /* make sure page intro gets sent out */
+ APR_BRIGADE_INSERT_TAIL(out, apr_bucket_flush_create(c->bucket_alloc));
+ if (APR_SUCCESS != (rv = ap_pass_brigade(f->next, out))) {
+ return rv;
+ }
+ apr_brigade_cleanup(out);
+
+ ctx->state = BODY;
+ }
+
+ /* loop through each line of directory */
+ while (BODY == ctx->state) {
+ char *filename;
+ int found = 0;
+ int eos = 0;
+
+ regex_t *re = NULL;
+ regmatch_t re_result[LS_REG_MATCH];
+
+ /* Compile the output format of "ls -s1" as a fallback for non-unix ftp listings */
+ re = ap_pregcomp(p, LS_REG_PATTERN, REG_EXTENDED);
+ ap_assert(re != NULL);
+
+ /* get a complete line */
+ /* if the buffer overruns - throw data away */
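+        /*
+         * Pull buckets off ctx->in until a LF is seen; the bucket is split
+         * at the LF so the remainder stays in ctx->in for the next line.
+         * The line is accumulated in ctx->buffer, and anything beyond the
+         * buffer size is silently dropped.
+         */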
+ while (!found && !APR_BRIGADE_EMPTY(ctx->in)) {
+ char *pos, *response;
+ apr_size_t len, max;
+ apr_bucket *e;
+
+ e = APR_BRIGADE_FIRST(ctx->in);
+ if (APR_BUCKET_IS_EOS(e)) {
+ eos = 1;
+ break;
+ }
+ if (APR_SUCCESS != (rv = apr_bucket_read(e, (const char **)&response, &len, APR_BLOCK_READ))) {
+ return rv;
+ }
+ pos = memchr(response, APR_ASCII_LF, len);
+ if (pos != NULL) {
+ if ((response + len) != (pos + 1)) {
+ len = pos - response + 1;
+ apr_bucket_split(e, pos - response + 1);
+ }
+ found = 1;
+ }
+ max = sizeof(ctx->buffer) - strlen(ctx->buffer) - 1;
+ if (len > max) {
+ len = max;
+ }
+
+ /* len+1 to leave space for the trailing nil char */
+ apr_cpystrn(ctx->buffer+strlen(ctx->buffer), response, len+1);
+
+ APR_BUCKET_REMOVE(e);
+ apr_bucket_destroy(e);
+ }
+
+ /* EOS? jump to footer */
+ if (eos) {
+ ctx->state = FOOTER;
+ break;
+ }
+
+ /* not complete? leave and try get some more */
+ if (!found) {
+ return APR_SUCCESS;
+ }
+
+ {
+ apr_size_t n = strlen(ctx->buffer);
+ if (ctx->buffer[n-1] == CRLF[1]) /* strip trailing '\n' */
+ ctx->buffer[--n] = '\0';
+ if (ctx->buffer[n-1] == CRLF[0]) /* strip trailing '\r' if present */
+ ctx->buffer[--n] = '\0';
+ }
+
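+        /*
+         * A typical unix symlink line looks like
+         *   "lrwxrwxrwx   1 ftp  ftp   7 Jan  1  2020 name -> target".
+         * Split at " -> " and at the last space before the name, then wrap
+         * the name (and the target text) in an HTML anchor.
+         */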
+ /* a symlink? */
+ if (ctx->buffer[0] == 'l' && (filename = strstr(ctx->buffer, " -> ")) != NULL) {
+ char *link_ptr = filename;
+
+ do {
+ filename--;
+ } while (filename[0] != ' ' && filename > ctx->buffer);
+ if (filename > ctx->buffer)
+ *(filename++) = '\0';
+ *(link_ptr++) = '\0';
+ str = apr_psprintf(p, "%s <a href=\"%s\">%s %s</a>\n",
+ ap_escape_html(p, ctx->buffer),
+ ap_escape_uri(p, filename),
+ ap_escape_html(p, filename),
+ ap_escape_html(p, link_ptr));
+ }
+
+ /* a directory/file? */
+ else if (ctx->buffer[0] == 'd' || ctx->buffer[0] == '-' || ctx->buffer[0] == 'l' || apr_isdigit(ctx->buffer[0])) {
+ int searchidx = 0;
+ char *searchptr = NULL;
+ int firstfile = 1;
+ if (apr_isdigit(ctx->buffer[0])) { /* handle DOS dir */
+ searchptr = strchr(ctx->buffer, '<');
+ if (searchptr != NULL)
+ *searchptr = '[';
+ searchptr = strchr(ctx->buffer, '>');
+ if (searchptr != NULL)
+ *searchptr = ']';
+ }
+
+ filename = strrchr(ctx->buffer, ' ');
+ *(filename++) = '\0';
+
+ /* handle filenames with spaces in 'em */
+ if (!strcmp(filename, ".") || !strcmp(filename, "..") || firstfile) {
+ firstfile = 0;
+ searchidx = filename - ctx->buffer;
+ }
+ else if (searchidx != 0 && ctx->buffer[searchidx] != 0) {
+ *(--filename) = ' ';
+ ctx->buffer[searchidx - 1] = '\0';
+ filename = &ctx->buffer[searchidx];
+ }
+
+ /* Append a slash to the HREF link for directories */
+ if (!strcmp(filename, ".") || !strcmp(filename, "..") || ctx->buffer[0] == 'd') {
+ str = apr_psprintf(p, "%s <a href=\"%s/\">%s</a>\n",
+ ap_escape_html(p, ctx->buffer),
+ ap_escape_uri(p, filename),
+ ap_escape_html(p, filename));
+ }
+ else {
+ str = apr_psprintf(p, "%s <a href=\"%s\">%s</a>\n",
+ ap_escape_html(p, ctx->buffer),
+ ap_escape_uri(p, filename),
+ ap_escape_html(p, filename));
+ }
+ }
+ /* Try a fallback for listings in the format of "ls -s1" */
+ else if (0 == ap_regexec(re, ctx->buffer, LS_REG_MATCH, re_result, 0)) {
+
+ filename = apr_pstrndup(p, &ctx->buffer[re_result[2].rm_so], re_result[2].rm_eo - re_result[2].rm_so);
+
+ str = apr_pstrcat(p, ap_escape_html(p, apr_pstrndup(p, ctx->buffer, re_result[2].rm_so)),
+ "<a href=\"", ap_escape_uri(p, filename), "\">",
+ ap_escape_html(p, filename), "</a>\n", NULL);
+ }
+ else {
+ strcat(ctx->buffer, "\n"); /* re-append the newline */
+ str = ap_escape_html(p, ctx->buffer);
+ }
+
+ /* erase buffer for next time around */
+ ctx->buffer[0] = 0;
+
+ APR_BRIGADE_INSERT_TAIL(out, apr_bucket_pool_create(str, strlen(str), p,
+ c->bucket_alloc));
+ APR_BRIGADE_INSERT_TAIL(out, apr_bucket_flush_create(c->bucket_alloc));
+ if (APR_SUCCESS != (rv = ap_pass_brigade(f->next, out))) {
+ return rv;
+ }
+ apr_brigade_cleanup(out);
+
+ }
+
+ if (FOOTER == ctx->state) {
+ str = apr_psprintf(p, "</pre>\n\n <hr />\n\n %s\n\n </body>\n</html>\n", ap_psignature("", r));
+ APR_BRIGADE_INSERT_TAIL(out, apr_bucket_pool_create(str, strlen(str), p,
+ c->bucket_alloc));
+ APR_BRIGADE_INSERT_TAIL(out, apr_bucket_flush_create(c->bucket_alloc));
+ APR_BRIGADE_INSERT_TAIL(out, apr_bucket_eos_create(c->bucket_alloc));
+ if (APR_SUCCESS != (rv = ap_pass_brigade(f->next, out))) {
+ return rv;
+ }
+ apr_brigade_destroy(out);
+ }
+
+ return APR_SUCCESS;
+}
+
+/* Parse EPSV reply and return port, or zero on error. */
+static apr_port_t parse_epsv_reply(const char *reply)
+{
+ const char *p;
+ char *ep;
+ long port;
+
+ /* Reply syntax per RFC 2428: "229 blah blah (|||port|)" where '|'
+ * can be any character in ASCII from 33-126, obscurely. Verify
+ * the syntax. */
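+    /* Example: "229 Entering Extended Passive Mode (|||6446|)" yields 6446. */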
+ p = ap_strchr_c(reply, '(');
+ if (p == NULL || !p[1] || p[1] != p[2] || p[1] != p[3]
+ || p[4] == p[1]) {
+ return 0;
+ }
+
+ errno = 0;
+ port = strtol(p + 4, &ep, 10);
+ if (errno || port < 1 || port > 65535 || ep[0] != p[1] || ep[1] != ')') {
+ return 0;
+ }
+
+ return (apr_port_t)port;
+}
+
+/*
+ * Generic "send FTP command to server" routine, using the control socket.
+ * Returns the FTP returncode (3 digit code)
+ * Allows for tracing the FTP protocol (in LogLevel debug)
+ */
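+/* For example, proxy_ftp_command("CWD /" CRLF, r, ftp_ctrl, bb, &msg) sends
+ * the command (masking any PASS argument in the debug log) and returns the
+ * reply code, e.g. 250, with the reply text left in *msg.  Calling it with
+ * cmd == NULL simply reads and returns the next pending reply.
+ */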
+static int
+proxy_ftp_command(const char *cmd, request_rec *r, conn_rec *ftp_ctrl,
+ apr_bucket_brigade *bb, char **pmessage)
+{
+ char *crlf;
+ int rc;
+ char message[HUGE_STRING_LEN];
+
+ /* If cmd == NULL, we retrieve the next ftp response line */
+ if (cmd != NULL) {
+ conn_rec *c = r->connection;
+ APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_pool_create(cmd, strlen(cmd), r->pool, c->bucket_alloc));
+ APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_flush_create(c->bucket_alloc));
+ ap_pass_brigade(ftp_ctrl->output_filters, bb);
+
+ /* strip off the CRLF for logging */
+ apr_cpystrn(message, cmd, sizeof(message));
+ if ((crlf = strchr(message, '\r')) != NULL ||
+ (crlf = strchr(message, '\n')) != NULL)
+ *crlf = '\0';
+ if (strncmp(message,"PASS ", 5) == 0)
+ strcpy(&message[5], "****");
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy:>FTP: %s", message);
+ }
+
+ rc = ftp_getrc_msg(ftp_ctrl, bb, message, sizeof message);
+ if (rc == -1 || rc == 421)
+ strcpy(message,"<unable to read result>");
+ if ((crlf = strchr(message, '\r')) != NULL ||
+ (crlf = strchr(message, '\n')) != NULL)
+ *crlf = '\0';
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy:<FTP: %3.3u %s", rc, message);
+
+ if (pmessage != NULL)
+ *pmessage = apr_pstrdup(r->pool, message);
+
+ return rc;
+}
+
+/* Set ftp server to TYPE {A,I,E} before transfer of a directory or file */
+static int ftp_set_TYPE(char xfer_type, request_rec *r, conn_rec *ftp_ctrl,
+ apr_bucket_brigade *bb, char **pmessage)
+{
+ char old_type[2] = { 'A', '\0' }; /* After logon, mode is ASCII */
+ int ret = HTTP_OK;
+ int rc;
+
+ /* set desired type */
+ old_type[0] = xfer_type;
+
+ rc = proxy_ftp_command(apr_pstrcat(r->pool, "TYPE ", old_type, CRLF, NULL),
+ r, ftp_ctrl, bb, pmessage);
+/* responses: 200, 421, 500, 501, 504, 530 */
+ /* 200 Command okay. */
+ /* 421 Service not available, closing control connection. */
+ /* 500 Syntax error, command unrecognized. */
+ /* 501 Syntax error in parameters or arguments. */
+ /* 504 Command not implemented for that parameter. */
+ /* 530 Not logged in. */
+ if (rc == -1 || rc == 421) {
+ ret = ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Error reading from remote server");
+ }
+ else if (rc != 200 && rc != 504) {
+ ret = ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Unable to set transfer type");
+ }
+/* Allow not implemented */
+ else if (rc == 504)
+ /* ignore it silently */;
+
+ return ret;
+}
+
+
+/* Return the current directory which we have selected on the FTP server, or NULL */
+static char *ftp_get_PWD(request_rec *r, conn_rec *ftp_ctrl, apr_bucket_brigade *bb)
+{
+ char *cwd = NULL;
+ char *ftpmessage = NULL;
+
+ /* responses: 257, 500, 501, 502, 421, 550 */
+ /* 257 "<directory-name>" <commentary> */
+ /* 421 Service not available, closing control connection. */
+ /* 500 Syntax error, command unrecognized. */
+ /* 501 Syntax error in parameters or arguments. */
+ /* 502 Command not implemented. */
+ /* 550 Requested action not taken. */
+ switch (proxy_ftp_command("PWD" CRLF, r, ftp_ctrl, bb, &ftpmessage)) {
+ case -1:
+ case 421:
+ case 550:
+ ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Failed to read PWD on ftp server");
+ break;
+
+ case 257: {
+ const char *dirp = ftpmessage;
+ cwd = ap_getword_conf(r->pool, &dirp);
+ }
+ }
+ return cwd;
+}
+
+
+/* Common routine for failed authorization (i.e., missing or wrong password)
+ * to an ftp service. This causes most browsers to retry the request
+ * with username and password (which was presumably queried from the user)
+ * supplied in the Authorization: header.
+ * Note that we "invent" a realm name which consists of the
+ * ftp://user@host part of the request (sans password, if one was supplied but invalid)
+ */
+static int ftp_unauthorized(request_rec *r, int log_it)
+{
+ r->proxyreq = PROXYREQ_NONE;
+ /*
+ * Log failed requests if they supplied a password (log username/password
+ * guessing attempts)
+ */
+ if (log_it)
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
+ "proxy: missing or failed auth to %s",
+ apr_uri_unparse(r->pool,
+ &r->parsed_uri, APR_URI_UNP_OMITPATHINFO));
+
+ apr_table_setn(r->err_headers_out, "WWW-Authenticate",
+ apr_pstrcat(r->pool, "Basic realm=\"",
+ apr_uri_unparse(r->pool, &r->parsed_uri,
+ APR_URI_UNP_OMITPASSWORD | APR_URI_UNP_OMITPATHINFO),
+ "\"", NULL));
+
+ return HTTP_UNAUTHORIZED;
+}
+
+
+/*
+ * Handles direct access of ftp:// URLs
+ * Original (Non-PASV) version from
+ * Troy Morrison <spiffnet@zoom.com>
+ * PASV added by Chuck
+ * Filters by [Graham Leggett <minfrin@sharp.fm>]
+ */
+int ap_proxy_ftp_handler(request_rec *r, proxy_server_conf *conf,
+ char *url, const char *proxyhost,
+ apr_port_t proxyport)
+{
+ apr_pool_t *p = r->pool;
+ conn_rec *c = r->connection;
+ proxy_conn_rec *backend;
+ apr_socket_t *sock, *local_sock, *data_sock = NULL;
+ apr_sockaddr_t *connect_addr;
+ apr_status_t rv;
+ conn_rec *origin, *data = NULL;
+ int err;
+ apr_bucket_brigade *bb = apr_brigade_create(p, c->bucket_alloc);
+ char *buf, *connectname;
+ apr_port_t connectport;
+ char buffer[MAX_STRING_LEN];
+ char *ftpmessage = NULL;
+ char *path, *strp, *type_suffix, *cwd = NULL;
+ apr_uri_t uri;
+ char *user = NULL;
+/* char *account = NULL; how to supply an account in a URL? */
+ const char *password = NULL;
+ int len, rc;
+ int one = 1;
+ char *size = NULL;
+ apr_socket_t *origin_sock = NULL;
+ char xfer_type = 'A'; /* after ftp login, the default is ASCII */
+ int dirlisting = 0;
+#if defined(USE_MDTM) && (defined(HAVE_TIMEGM) || defined(HAVE_GMTOFF))
+ apr_time_t mtime = 0L;
+#endif
+
+ /* stuff for PASV mode */
+ int connect = 0, use_port = 0;
+ char dates[APR_RFC822_DATE_LEN];
+
+ /* is this for us? */
+ if (proxyhost) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: declining URL %s - proxyhost %s specified:", url, proxyhost);
+ return DECLINED; /* proxy connections are via HTTP */
+ }
+ if (strncasecmp(url, "ftp:", 4)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: declining URL %s - not ftp:", url);
+ return DECLINED; /* only interested in FTP */
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: serving URL %s", url);
+
+ /* create space for state information */
+ backend = (proxy_conn_rec *) ap_get_module_config(c->conn_config, &proxy_ftp_module);
+ if (!backend) {
+ backend = apr_pcalloc(c->pool, sizeof(proxy_conn_rec));
+ backend->connection = NULL;
+ backend->hostname = NULL;
+ backend->port = 0;
+ ap_set_module_config(c->conn_config, &proxy_ftp_module, backend);
+ }
+ if (backend->connection)
+ origin_sock = ap_get_module_config(backend->connection->conn_config, &core_module);
+
+
+ /*
+ * I: Who Do I Connect To? -----------------------
+ *
+ * Break up the URL to determine the host to connect to
+ */
+
+ /* we only support GET and HEAD */
+ if (r->method_number != M_GET)
+ return HTTP_NOT_IMPLEMENTED;
+
+ /* We break the URL into host, port, path-search */
+ if (r->parsed_uri.hostname == NULL) {
+ if (APR_SUCCESS != apr_uri_parse(p, url, &uri)) {
+ return ap_proxyerror(r, HTTP_BAD_REQUEST,
+ apr_psprintf(p, "URI cannot be parsed: %s", url));
+ }
+ connectname = uri.hostname;
+ connectport = uri.port;
+ path = apr_pstrdup(p, uri.path);
+ }
+ else {
+ connectname = r->parsed_uri.hostname;
+ connectport = r->parsed_uri.port;
+ path = apr_pstrdup(p, r->parsed_uri.path);
+ }
+ if (connectport == 0) {
+ connectport = apr_uri_port_of_scheme("ftp");
+ }
+ path = (path != NULL && path[0] != '\0') ? &path[1] : "";
+
+ type_suffix = strchr(path, ';');
+ if (type_suffix != NULL)
+ *(type_suffix++) = '\0';
+
+ if (type_suffix != NULL && strncmp(type_suffix, "type=", 5) == 0
+ && apr_isalpha(type_suffix[5])) {
+ /* "type=d" forces a dir listing.
+ * The other types (i|a|e) are directly used for the ftp TYPE command
+ */
+ if ( ! (dirlisting = (apr_tolower(type_suffix[5]) == 'd')))
+ xfer_type = apr_toupper(type_suffix[5]);
+
+ /* Check valid types, rather than ignoring invalid types silently: */
+ if (strchr("AEI", xfer_type) == NULL)
+ return ap_proxyerror(r, HTTP_BAD_REQUEST, apr_pstrcat(r->pool,
+ "ftp proxy supports only types 'a', 'i', or 'e': \"",
+ type_suffix, "\" is invalid.", NULL));
+ }
+ else {
+ /* make binary transfers the default */
+ xfer_type = 'I';
+ }
+
+
+ /*
+ * The "Authorization:" header must be checked first. We allow the user
+ * to "override" the URL-coded user [ & password ] in the Browsers'
+ * User&Password Dialog. NOTE that this is only marginally more secure
+     * than having the password travel in the clear as part of the URL,
+     * because Basic Auth merely base64-encodes it. But chances are
+ * still smaller that the URL is logged regularly.
+ */
+ if ((password = apr_table_get(r->headers_in, "Authorization")) != NULL
+ && strcasecmp(ap_getword(r->pool, &password, ' '), "Basic") == 0
+ && (password = ap_pbase64decode(r->pool, password))[0] != ':') {
+ /* Check the decoded string for special characters. */
+ if (!ftp_check_string(password)) {
+ return ap_proxyerror(r, HTTP_BAD_REQUEST,
+ "user credentials contained invalid character");
+ }
+ /*
+ * Note that this allocation has to be made from r->connection->pool
+ * because it has the lifetime of the connection. The other
+ * allocations are temporary and can be tossed away any time.
+ */
+ user = ap_getword_nulls(r->connection->pool, &password, ':');
+ r->ap_auth_type = "Basic";
+ r->user = r->parsed_uri.user = user;
+ }
+ else if ((user = r->parsed_uri.user) != NULL) {
+ user = apr_pstrdup(p, user);
+ decodeenc(user);
+ if ((password = r->parsed_uri.password) != NULL) {
+ char *tmp = apr_pstrdup(p, password);
+ decodeenc(tmp);
+ password = tmp;
+ }
+ }
+ else {
+ user = "anonymous";
+ password = "apache-proxy@";
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: connecting %s to %s:%d", url, connectname, connectport);
+
+ /* do a DNS lookup for the destination host */
+ err = apr_sockaddr_info_get(&connect_addr, connectname, APR_UNSPEC, connectport, 0, p);
+
+ /* check if ProxyBlock directive on this host */
+ if (OK != ap_proxy_checkproxyblock(r, conf, connect_addr)) {
+ return ap_proxyerror(r, HTTP_FORBIDDEN,
+ "Connect to remote machine blocked");
+ }
+
+
+ /*
+ * II: Make the Connection -----------------------
+ *
+ * We have determined who to connect to. Now make the connection.
+ */
+
+ /*
+ * get all the possible IP addresses for the destname and loop through
+ * them until we get a successful connection
+ */
+ if (APR_SUCCESS != err) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, apr_pstrcat(p,
+ "DNS lookup failure for: ",
+ connectname, NULL));
+ }
+
+ /*
+ * At this point we have a list of one or more IP addresses of the
+ * machine to connect to. If configured, reorder this list so that the
+ * "best candidate" is first try. "best candidate" could mean the least
+ * loaded server, the fastest responding server, whatever.
+ *
+ * For now we do nothing, ie we get DNS round robin. XXX FIXME
+ */
+
+
+ /* try each IP address until we connect successfully */
+ {
+ int failed = 1;
+ while (connect_addr) {
+
+ if ((rv = apr_socket_create(&sock, connect_addr->family, SOCK_STREAM, r->pool)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: FTP: error creating socket");
+ connect_addr = connect_addr->next;
+ continue;
+ }
+
+#if !defined(TPF) && !defined(BEOS)
+ if (conf->recv_buffer_size > 0
+ && (rv = apr_socket_opt_set(sock, APR_SO_RCVBUF,
+ conf->recv_buffer_size))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "apr_socket_opt_set(APR_SO_RCVBUF): Failed to set ProxyReceiveBufferSize, using default");
+ }
+#endif
+
+ if (APR_SUCCESS != (rv = apr_socket_opt_set(sock, APR_SO_REUSEADDR, one))) {
+ apr_socket_close(sock);
+#ifndef _OSD_POSIX /* BS2000 has this option "always on" */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: FTP: error setting reuseaddr option: apr_socket_opt_set(APR_SO_REUSEADDR)");
+ connect_addr = connect_addr->next;
+ continue;
+#endif /* _OSD_POSIX */
+ }
+
+ /* Set a timeout on the socket */
+ if (conf->timeout_set == 1) {
+ apr_socket_timeout_set(sock, conf->timeout);
+ }
+ else {
+ apr_socket_timeout_set(sock, r->server->timeout);
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: fam %d socket created, trying to connect to %pI (%s)...",
+ connect_addr->family, connect_addr, connectname);
+
+ /* make the connection out of the socket */
+ rv = apr_connect(sock, connect_addr);
+
+ /* if an error occurred, loop round and try again */
+ if (rv != APR_SUCCESS) {
+ apr_socket_close(sock);
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+ "proxy: FTP: attempt to connect to %pI (%s) failed", connect_addr, connectname);
+ connect_addr = connect_addr->next;
+ continue;
+ }
+
+ /* if we get here, all is well */
+ failed = 0;
+ break;
+ }
+
+ /* handle a permanent error from the above loop */
+ if (failed) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, apr_psprintf(r->pool,
+ "Could not connect to remote machine: %s port %d",
+ connectname, connectport));
+ }
+ }
+
+ /* the socket is now open, create a new connection */
+ origin = ap_run_create_connection(p, r->server, sock, r->connection->id,
+ r->connection->sbh, c->bucket_alloc);
+ if (!origin) {
+ /*
+ * the peer reset the connection already; ap_run_create_connection() closed
+ * the socket
+ */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: an error occurred creating a new connection to %pI (%s)", connect_addr, connectname);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /* if a keepalive connection is floating around, close it first! */
+ /* we might support ftp keepalives later, but not now... */
+ if (backend->connection) {
+ apr_socket_close(origin_sock);
+ backend->connection = NULL;
+ origin_sock = NULL;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: control connection complete");
+
+
+ /*
+ * III: Send Control Request -------------------------
+ *
+ * Log into the ftp server, send the username & password, change to the
+ * correct directory...
+ */
+
+ /* set up the connection filters */
+ rc = ap_run_pre_connection(origin, sock);
+ if (rc != OK && rc != DONE) {
+ origin->aborted = 1;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: pre_connection setup failed (%d)",
+ rc);
+ return rc;
+ }
+
+ /* possible results: */
+ /* 120 Service ready in nnn minutes. */
+ /* 220 Service ready for new user. */
+ /* 421 Service not available, closing control connection. */
+ rc = proxy_ftp_command(NULL, r, origin, bb, &ftpmessage);
+ if (rc == -1 || rc == 421) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, "Error reading from remote server");
+ }
+ if (rc == 120) {
+ /*
+ * RFC2616 states: 14.37 Retry-After
+ *
+ * The Retry-After response-header field can be used with a 503 (Service
+ * Unavailable) response to indicate how long the service is expected
+ * to be unavailable to the requesting client. [...] The value of
+ * this field can be either an HTTP-date or an integer number of
+ * seconds (in decimal) after the time of the response. Retry-After
+ * = "Retry-After" ":" ( HTTP-date | delta-seconds )
+ */
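+        /*
+         * The FTP 120 reply gives the delay in minutes ("Service ready in
+         * nnn minutes."), so the first number found below is multiplied by
+         * 60 to produce the Retry-After delta-seconds value.
+         */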
+ char *secs_str = ftpmessage;
+ time_t secs;
+
+ /* Look for a number, preceded by whitespace */
+        while (*secs_str) {
+            if ((secs_str == ftpmessage || apr_isspace(secs_str[-1])) &&
+                apr_isdigit(secs_str[0]))
+                break;
+            ++secs_str;
+        }
+ if (*secs_str != '\0') {
+ secs = atol(secs_str);
+ apr_table_add(r->headers_out, "Retry-After",
+ apr_psprintf(p, "%lu", (unsigned long)(60 * secs)));
+ }
+ return ap_proxyerror(r, HTTP_SERVICE_UNAVAILABLE, ftpmessage);
+ }
+ if (rc != 220) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, ftpmessage);
+ }
+
+ rc = proxy_ftp_command(apr_pstrcat(p, "USER ", user, CRLF, NULL),
+ r, origin, bb, &ftpmessage);
+ /* possible results; 230, 331, 332, 421, 500, 501, 530 */
+ /* states: 1 - error, 2 - success; 3 - send password, 4,5 fail */
+ /* 230 User logged in, proceed. */
+ /* 331 User name okay, need password. */
+ /* 332 Need account for login. */
+ /* 421 Service not available, closing control connection. */
+ /* 500 Syntax error, command unrecognized. */
+ /* (This may include errors such as command line too long.) */
+ /* 501 Syntax error in parameters or arguments. */
+ /* 530 Not logged in. */
+ if (rc == -1 || rc == 421) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, "Error reading from remote server");
+ }
+ if (rc == 530) {
+ return ftp_unauthorized(r, 1); /* log it: user name guessing
+ * attempt? */
+ }
+ if (rc != 230 && rc != 331) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, ftpmessage);
+ }
+
+ if (rc == 331) { /* send password */
+ if (password == NULL) {
+ return ftp_unauthorized(r, 0);
+ }
+
+ rc = proxy_ftp_command(apr_pstrcat(p, "PASS ", password, CRLF, NULL),
+ r, origin, bb, &ftpmessage);
+ /* possible results 202, 230, 332, 421, 500, 501, 503, 530 */
+ /* 230 User logged in, proceed. */
+ /* 332 Need account for login. */
+ /* 421 Service not available, closing control connection. */
+ /* 500 Syntax error, command unrecognized. */
+ /* 501 Syntax error in parameters or arguments. */
+ /* 503 Bad sequence of commands. */
+ /* 530 Not logged in. */
+ if (rc == -1 || rc == 421) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Error reading from remote server");
+ }
+ if (rc == 332) {
+ return ap_proxyerror(r, HTTP_UNAUTHORIZED,
+ apr_pstrcat(p, "Need account for login: ", ftpmessage, NULL));
+ }
+ /* @@@ questionable -- we might as well return a 403 Forbidden here */
+ if (rc == 530) {
+ return ftp_unauthorized(r, 1); /* log it: passwd guessing
+ * attempt? */
+ }
+ if (rc != 230 && rc != 202) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, ftpmessage);
+ }
+ }
+ apr_table_set(r->notes, "Directory-README", ftpmessage);
+
+
+ /* Special handling for leading "%2f": this enforces a "cwd /"
+ * out of the $HOME directory which was the starting point after login
+ */
+ if (strncasecmp(path, "%2f", 3) == 0) {
+ path += 3;
+ while (*path == '/') /* skip leading '/' (after root %2f) */
+ ++path;
+
+ rc = proxy_ftp_command("CWD /" CRLF, r, origin, bb, &ftpmessage);
+ if (rc == -1 || rc == 421)
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Error reading from remote server");
+ }
+
+ /*
+ * set the directory (walk directory component by component): this is
+ * what we must do if we don't know the OS type of the remote machine
+ */
+ for (;;) {
+ strp = strchr(path, '/');
+ if (strp == NULL)
+ break;
+ *strp = '\0';
+
+ len = decodeenc(path); /* Note! This decodes a %2f -> "/" */
+
+ if (strchr(path, '/')) { /* are there now any '/' characters? */
+ return ap_proxyerror(r, HTTP_BAD_REQUEST,
+ "Use of /%2f is only allowed at the base directory");
+ }
+
+ /* NOTE: FTP servers do globbing on the path.
+ * So we need to escape the URI metacharacters.
+ * We use a special glob-escaping routine to escape globbing chars.
+ * We could also have extended gen_test_char.c with a special T_ESCAPE_FTP_PATH
+ */
+ rc = proxy_ftp_command(apr_pstrcat(p, "CWD ",
+ ftp_escape_globbingchars(p, path), CRLF, NULL),
+ r, origin, bb, &ftpmessage);
+ *strp = '/';
+ /* responses: 250, 421, 500, 501, 502, 530, 550 */
+ /* 250 Requested file action okay, completed. */
+ /* 421 Service not available, closing control connection. */
+ /* 500 Syntax error, command unrecognized. */
+ /* 501 Syntax error in parameters or arguments. */
+ /* 502 Command not implemented. */
+ /* 530 Not logged in. */
+ /* 550 Requested action not taken. */
+ if (rc == -1 || rc == 421) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Error reading from remote server");
+ }
+ if (rc == 550) {
+ return ap_proxyerror(r, HTTP_NOT_FOUND, ftpmessage);
+ }
+ if (rc != 250) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, ftpmessage);
+ }
+
+ path = strp + 1;
+ }
+
+ /*
+ * IV: Make Data Connection? -------------------------
+ *
+ * Try EPSV, if that fails... try PASV, if that fails... try PORT.
+ */
+/* this temporarily switches off EPSV/PASV */
+/*goto bypass;*/
+
+ /* set up data connection - EPSV */
+ {
+ apr_sockaddr_t *data_addr;
+ char *data_ip;
+ apr_port_t data_port;
+
+ /*
+ * The EPSV command replaces PASV where both IPV4 and IPV6 is
+ * supported. Only the port is returned, the IP address is always the
+ * same as that on the control connection. Example: Entering Extended
+ * Passive Mode (|||6446|)
+ */
+ rc = proxy_ftp_command("EPSV" CRLF,
+ r, origin, bb, &ftpmessage);
+        /* possible results: 229, 421, 500, 501, 502, 530 */
+        /* 229 Entering Extended Passive Mode (|||port|). */
+ /* 421 Service not available, closing control connection. */
+ /* 500 Syntax error, command unrecognized. */
+ /* 501 Syntax error in parameters or arguments. */
+ /* 502 Command not implemented. */
+ /* 530 Not logged in. */
+ if (rc == -1 || rc == 421) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Error reading from remote server");
+ }
+ if (rc != 229 && rc != 500 && rc != 501 && rc != 502) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, ftpmessage);
+ }
+ else if (rc == 229) {
+ /* Parse the port out of the EPSV reply. */
+ data_port = parse_epsv_reply(ftpmessage);
+
+ if (data_port) {
+ apr_sockaddr_t *epsv_addr;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: EPSV contacting remote host on port %d",
+ data_port);
+
+ if ((rv = apr_socket_create(&data_sock, connect_addr->family, SOCK_STREAM, r->pool)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: FTP: error creating EPSV socket");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+#if !defined (TPF) && !defined(BEOS)
+ if (conf->recv_buffer_size > 0
+ && (rv = apr_socket_opt_set(data_sock, APR_SO_RCVBUF,
+ conf->recv_buffer_size))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: FTP: apr_socket_opt_set(SO_RCVBUF): Failed to set ProxyReceiveBufferSize, using default");
+ }
+#endif
+
+ /* make the connection */
+ apr_socket_addr_get(&data_addr, APR_REMOTE, sock);
+ apr_sockaddr_ip_get(&data_ip, data_addr);
+ apr_sockaddr_info_get(&epsv_addr, data_ip, connect_addr->family, data_port, 0, p);
+ rv = apr_connect(data_sock, epsv_addr);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+ "proxy: FTP: EPSV attempt to connect to %pI failed - Firewall/NAT?", epsv_addr);
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, apr_psprintf(r->pool,
+ "EPSV attempt to connect to %pI failed - firewall/NAT?", epsv_addr));
+ }
+ else {
+ connect = 1;
+ }
+ }
+ }
+ }
+
+ /* set up data connection - PASV */
+ if (!connect) {
+ rc = proxy_ftp_command("PASV" CRLF,
+ r, origin, bb, &ftpmessage);
+ /* possible results: 227, 421, 500, 501, 502, 530 */
+ /* 227 Entering Passive Mode (h1,h2,h3,h4,p1,p2). */
+ /* 421 Service not available, closing control connection. */
+ /* 500 Syntax error, command unrecognized. */
+ /* 501 Syntax error in parameters or arguments. */
+ /* 502 Command not implemented. */
+ /* 530 Not logged in. */
+ if (rc == -1 || rc == 421) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Error reading from remote server");
+ }
+ if (rc != 227 && rc != 502) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, ftpmessage);
+ }
+ else if (rc == 227) {
+ unsigned int h0, h1, h2, h3, p0, p1;
+ char *pstr;
+ char *tok_cntx;
+
+/* FIXME: Check PASV against RFC1123 */
+
+ pstr = ftpmessage;
+ pstr = apr_strtok(pstr, " ", &tok_cntx); /* separate result code */
+ if (pstr != NULL) {
+ if (*(pstr + strlen(pstr) + 1) == '=') {
+ pstr += strlen(pstr) + 2;
+ }
+ else {
+ pstr = apr_strtok(NULL, "(", &tok_cntx); /* separate address &
+ * port params */
+ if (pstr != NULL)
+ pstr = apr_strtok(NULL, ")", &tok_cntx);
+ }
+ }
+
+/* FIXME: Only supports IPV4 - fix in RFC2428 */
+
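+            /*
+             * Example: "227 Entering Passive Mode (192,168,1,10,19,136)"
+             * parses to host 192.168.1.10 and port 19*256 + 136 = 5000.
+             */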
+ if (pstr != NULL && (sscanf(pstr,
+ "%d,%d,%d,%d,%d,%d", &h3, &h2, &h1, &h0, &p1, &p0) == 6)) {
+
+ apr_sockaddr_t *pasv_addr;
+ apr_port_t pasvport = (p1 << 8) + p0;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: PASV contacting host %d.%d.%d.%d:%d",
+ h3, h2, h1, h0, pasvport);
+
+ if ((rv = apr_socket_create(&data_sock, connect_addr->family, SOCK_STREAM, r->pool)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: error creating PASV socket");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+#if !defined (TPF) && !defined(BEOS)
+ if (conf->recv_buffer_size > 0
+ && (rv = apr_socket_opt_set(data_sock, APR_SO_RCVBUF,
+ conf->recv_buffer_size))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: FTP: apr_socket_opt_set(SO_RCVBUF): Failed to set ProxyReceiveBufferSize, using default");
+ }
+#endif
+
+ /* make the connection */
+ apr_sockaddr_info_get(&pasv_addr, apr_psprintf(p, "%d.%d.%d.%d", h3, h2, h1, h0), connect_addr->family, pasvport, 0, p);
+ rv = apr_connect(data_sock, pasv_addr);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+ "proxy: FTP: PASV attempt to connect to %pI failed - Firewall/NAT?", pasv_addr);
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, apr_psprintf(r->pool,
+ "PASV attempt to connect to %pI failed - firewall/NAT?", pasv_addr));
+ }
+ else {
+ connect = 1;
+ }
+ }
+ }
+ }
+/*bypass:*/
+
+ /* set up data connection - PORT */
+ if (!connect) {
+ apr_sockaddr_t *local_addr;
+ char *local_ip;
+ apr_port_t local_port;
+ unsigned int h0, h1, h2, h3, p0, p1;
+
+ if ((rv = apr_socket_create(&local_sock, connect_addr->family, SOCK_STREAM, r->pool)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: FTP: error creating local socket");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ apr_socket_addr_get(&local_addr, APR_LOCAL, sock);
+ apr_sockaddr_port_get(&local_port, local_addr);
+ apr_sockaddr_ip_get(&local_ip, local_addr);
+
+ if ((rv = apr_socket_opt_set(local_sock, APR_SO_REUSEADDR, one))
+ != APR_SUCCESS) {
+#ifndef _OSD_POSIX /* BS2000 has this option "always on" */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: FTP: error setting reuseaddr option");
+ return HTTP_INTERNAL_SERVER_ERROR;
+#endif /* _OSD_POSIX */
+ }
+
+ apr_sockaddr_info_get(&local_addr, local_ip, APR_UNSPEC, local_port, 0, r->pool);
+
+ if ((rv = apr_bind(local_sock, local_addr)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: FTP: error binding to ftp data socket %pI", local_addr);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /* only need a short queue */
+ if ((rv = apr_listen(local_sock, 2)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: FTP: error listening to ftp data socket %pI", local_addr);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+/* FIXME: Sent PORT here */
+
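+        /*
+         * PORT sends the local address as six decimal bytes: the four IPv4
+         * octets followed by p1 = port >> 8 and p0 = port & 0xFF, e.g.
+         * 192.168.1.5:5000 becomes "PORT 192,168,1,5,19,136".
+         */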
+ if (local_ip && (sscanf(local_ip,
+ "%d.%d.%d.%d", &h3, &h2, &h1, &h0) == 4)) {
+ p1 = (local_port >> 8);
+ p0 = (local_port & 0xFF);
+
+ rc = proxy_ftp_command(apr_psprintf(p, "PORT %d,%d,%d,%d,%d,%d" CRLF, h3, h2, h1, h0, p1, p0),
+ r, origin, bb, &ftpmessage);
+ /* possible results: 200, 421, 500, 501, 502, 530 */
+ /* 200 Command okay. */
+ /* 421 Service not available, closing control connection. */
+ /* 500 Syntax error, command unrecognized. */
+ /* 501 Syntax error in parameters or arguments. */
+ /* 502 Command not implemented. */
+ /* 530 Not logged in. */
+ if (rc == -1 || rc == 421) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Error reading from remote server");
+ }
+ if (rc != 200) {
+                return ap_proxyerror(r, HTTP_BAD_GATEWAY, ftpmessage);
+ }
+
+ /* signal that we must use the EPRT/PORT loop */
+ use_port = 1;
+ }
+ else {
+/* IPV6 FIXME:
+ * The EPRT command replaces PORT where both IPV4 and IPV6 is supported. The first
+ * number (1,2) indicates the protocol type. Examples:
+ * EPRT |1|132.235.1.2|6275|
+ * EPRT |2|1080::8:800:200C:417A|5282|
+ */
+ return ap_proxyerror(r, HTTP_NOT_IMPLEMENTED, "Connect to IPV6 ftp server using EPRT not supported. Enable EPSV.");
+ }
+ }
+
+
+ /*
+ * V: Set The Headers -------------------
+ *
+ * Get the size of the request, set up the environment for HTTP.
+ */
+
+ /* set request; "path" holds last path component */
+ len = decodeenc(path);
+
+ if (strchr(path, '/')) { /* are there now any '/' characters? */
+ return ap_proxyerror(r, HTTP_BAD_REQUEST,
+ "Use of /%2f is only allowed at the base directory");
+ }
+
+ /* If len == 0 then it must be a directory (you can't RETR nothing)
+     * Also, don't allow RETR by wildcard. Instead, create a dir listing.
+ */
+ if (len == 0 || ftp_check_globbingchars(path)) {
+ dirlisting = 1;
+ }
+ else {
+ /* (from FreeBSD ftpd):
+ * SIZE is not in RFC959, but Postel has blessed it and
+ * it will be in the updated RFC.
+ *
+ * Return size of file in a format suitable for
+ * using with RESTART (we just count bytes).
+ */
+ /* from draft-ietf-ftpext-mlst-14.txt:
+ * This value will
+ * change depending on the current STRUcture, MODE and TYPE of the data
+ * connection, or a data connection which would be created were one
+ * created now. Thus, the result of the SIZE command is dependent on
+ * the currently established STRU, MODE and TYPE parameters.
+ */
+ /* Therefore: switch to binary if the user did not specify ";type=a" */
+ ftp_set_TYPE(xfer_type, r, origin, bb, &ftpmessage);
+ rc = proxy_ftp_command(apr_pstrcat(p, "SIZE ",
+ ftp_escape_globbingchars(p, path), CRLF, NULL),
+ r, origin, bb, &ftpmessage);
+ if (rc == -1 || rc == 421) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Error reading from remote server");
+ }
+ else if (rc == 213) {/* Size command ok */
+ int j;
+ for (j = 0; apr_isdigit(ftpmessage[j]); j++)
+ ;
+ ftpmessage[j] = '\0';
+ if (ftpmessage[0] != '\0')
+ size = ftpmessage; /* already pstrdup'ed: no copy necessary */
+ }
+ else if (rc == 550) { /* Not a regular file */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: SIZE shows this is a directory");
+ dirlisting = 1;
+ rc = proxy_ftp_command(apr_pstrcat(p, "CWD ",
+ ftp_escape_globbingchars(p, path), CRLF, NULL),
+ r, origin, bb, &ftpmessage);
+ /* possible results: 250, 421, 500, 501, 502, 530, 550 */
+ /* 250 Requested file action okay, completed. */
+ /* 421 Service not available, closing control connection. */
+ /* 500 Syntax error, command unrecognized. */
+ /* 501 Syntax error in parameters or arguments. */
+ /* 502 Command not implemented. */
+ /* 530 Not logged in. */
+ /* 550 Requested action not taken. */
+ if (rc == -1 || rc == 421) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Error reading from remote server");
+ }
+ if (rc == 550) {
+ return ap_proxyerror(r, HTTP_NOT_FOUND, ftpmessage);
+ }
+ if (rc != 250) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, ftpmessage);
+ }
+ path = "";
+ len = 0;
+ }
+ }
+
+ cwd = ftp_get_PWD(r, origin, bb);
+ if (cwd != NULL) {
+ apr_table_set(r->notes, "Directory-PWD", cwd);
+ }
+
+ if (dirlisting) {
+ ftp_set_TYPE('A', r, origin, bb, NULL);
+ /* If the current directory contains no slash, we are talking to
+ * a non-unix ftp system. Try LIST instead of "LIST -lag", it
+ * should return a long listing anyway (unlike NLST).
+ * Some exotic FTP servers might choke on the "-lag" switch.
+ */
+ /* Note that we do not escape the path here, to allow for
+ * queries like: ftp://user@host/apache/src/server/http_*.c
+ */
+ if (len != 0)
+ buf = apr_pstrcat(p, "LIST ", path, CRLF, NULL);
+ else if (cwd == NULL || strchr(cwd, '/') != NULL)
+ buf = apr_pstrcat(p, "LIST -lag", CRLF, NULL);
+ else
+ buf = "LIST" CRLF;
+ }
+ else {
+ /* switch to binary if the user did not specify ";type=a" */
+ ftp_set_TYPE(xfer_type, r, origin, bb, &ftpmessage);
+#if defined(USE_MDTM) && (defined(HAVE_TIMEGM) || defined(HAVE_GMTOFF))
+ /* from draft-ietf-ftpext-mlst-14.txt:
+ * The FTP command, MODIFICATION TIME (MDTM), can be used to determine
+ * when a file in the server NVFS was last modified. <..>
+ * The syntax of a time value is:
+ * time-val = 14DIGIT [ "." 1*DIGIT ] <..>
+ * Symbolically, a time-val may be viewed as
+ * YYYYMMDDHHMMSS.sss
+ * The "." and subsequent digits ("sss") are optional. <..>
+ * Time values are always represented in UTC (GMT)
+ */
+ rc = proxy_ftp_command(apr_pstrcat(p, "MDTM ", ftp_escape_globbingchars(p, path), CRLF, NULL),
+ r, origin, bb, &ftpmessage);
+ /* then extract the Last-Modified time from it (YYYYMMDDhhmmss or YYYYMMDDhhmmss.xxx GMT). */
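+        /*
+         * Example: a reply of "213 20050213120000" leaves "20050213120000"
+         * in ftpmessage, which is parsed below into the tm fields for
+         * 2005-02-13 12:00:00 UTC.
+         */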
+ if (rc == 213) {
+ struct {
+ char YYYY[4+1];
+ char MM[2+1];
+ char DD[2+1];
+ char hh[2+1];
+ char mm[2+1];
+ char ss[2+1];
+ } time_val;
+ if (6 == sscanf(ftpmessage, "%4[0-9]%2[0-9]%2[0-9]%2[0-9]%2[0-9]%2[0-9]",
+ time_val.YYYY, time_val.MM, time_val.DD, time_val.hh, time_val.mm, time_val.ss)) {
+ struct tm tms;
+ memset (&tms, '\0', sizeof tms);
+ tms.tm_year = atoi(time_val.YYYY) - 1900;
+ tms.tm_mon = atoi(time_val.MM) - 1;
+ tms.tm_mday = atoi(time_val.DD);
+ tms.tm_hour = atoi(time_val.hh);
+ tms.tm_min = atoi(time_val.mm);
+ tms.tm_sec = atoi(time_val.ss);
+#ifdef HAVE_TIMEGM /* Does system have timegm()? */
+ mtime = timegm(&tms);
+ mtime *= APR_USEC_PER_SEC;
+#elif HAVE_GMTOFF /* does struct tm have a member tm_gmtoff? */
+ /* mktime will subtract the local timezone, which is not what we want.
+ * Add it again because the MDTM string is GMT
+ */
+ mtime = mktime(&tms);
+ mtime += tms.tm_gmtoff;
+ mtime *= APR_USEC_PER_SEC;
+#else
+ mtime = 0L;
+#endif
+ }
+ }
+#endif /* USE_MDTM */
+/* FIXME: Handle range requests - send REST */
+ buf = apr_pstrcat(p, "RETR ", ftp_escape_globbingchars(p, path), CRLF, NULL);
+ }
+ rc = proxy_ftp_command(buf, r, origin, bb, &ftpmessage);
+ /* rc is an intermediate response for the LIST or RETR commands */
+
+ /*
+ * RETR: 110, 125, 150, 226, 250, 421, 425, 426, 450, 451, 500, 501, 530,
+ * 550 NLST: 125, 150, 226, 250, 421, 425, 426, 450, 451, 500, 501, 502,
+ * 530
+ */
+ /* 110 Restart marker reply. */
+ /* 125 Data connection already open; transfer starting. */
+ /* 150 File status okay; about to open data connection. */
+ /* 226 Closing data connection. */
+ /* 250 Requested file action okay, completed. */
+ /* 421 Service not available, closing control connection. */
+ /* 425 Can't open data connection. */
+ /* 426 Connection closed; transfer aborted. */
+ /* 450 Requested file action not taken. */
+ /* 451 Requested action aborted. Local error in processing. */
+ /* 500 Syntax error, command unrecognized. */
+ /* 501 Syntax error in parameters or arguments. */
+ /* 530 Not logged in. */
+ /* 550 Requested action not taken. */
+ if (rc == -1 || rc == 421) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Error reading from remote server");
+ }
+ if (rc == 550) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: RETR failed, trying LIST instead");
+
+ /* Directory Listings should always be fetched in ASCII mode */
+ dirlisting = 1;
+ ftp_set_TYPE('A', r, origin, bb, NULL);
+
+ rc = proxy_ftp_command(apr_pstrcat(p, "CWD ",
+ ftp_escape_globbingchars(p, path), CRLF, NULL),
+ r, origin, bb, &ftpmessage);
+ /* possible results: 250, 421, 500, 501, 502, 530, 550 */
+ /* 250 Requested file action okay, completed. */
+ /* 421 Service not available, closing control connection. */
+ /* 500 Syntax error, command unrecognized. */
+ /* 501 Syntax error in parameters or arguments. */
+ /* 502 Command not implemented. */
+ /* 530 Not logged in. */
+ /* 550 Requested action not taken. */
+ if (rc == -1 || rc == 421) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Error reading from remote server");
+ }
+ if (rc == 550) {
+ return ap_proxyerror(r, HTTP_NOT_FOUND, ftpmessage);
+ }
+ if (rc != 250) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, ftpmessage);
+ }
+
+ /* Update current directory after CWD */
+ cwd = ftp_get_PWD(r, origin, bb);
+ if (cwd != NULL) {
+ apr_table_set(r->notes, "Directory-PWD", cwd);
+ }
+
+ /* See above for the "LIST" vs. "LIST -lag" discussion. */
+ rc = proxy_ftp_command((cwd == NULL || strchr(cwd, '/') != NULL)
+ ? "LIST -lag" CRLF : "LIST" CRLF,
+ r, origin, bb, &ftpmessage);
+
+ /* rc is an intermediate response for the LIST command (125 transfer starting, 150 opening data connection) */
+ if (rc == -1 || rc == 421)
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Error reading from remote server");
+ }
+ if (rc != 125 && rc != 150 && rc != 226 && rc != 250) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY, ftpmessage);
+ }
+
+ r->status = HTTP_OK;
+ r->status_line = "200 OK";
+
+ apr_rfc822_date(dates, r->request_time);
+ apr_table_setn(r->headers_out, "Date", dates);
+ apr_table_setn(r->headers_out, "Server", ap_get_server_version());
+
+ /* set content-type */
+ if (dirlisting) {
+ ap_set_content_type(r, "text/html; charset=ISO-8859-1");
+ }
+ else {
+ if (r->content_type) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: Content-Type set to %s", r->content_type);
+ }
+ else {
+ ap_set_content_type(r, ap_default_type(r));
+ }
+ if (xfer_type != 'A' && size != NULL) {
+ /* We "trust" the ftp server to really serve (size) bytes... */
+ apr_table_setn(r->headers_out, "Content-Length", size);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: Content-Length set to %s", size);
+ }
+ }
+ apr_table_setn(r->headers_out, "Content-Type", r->content_type);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: Content-Type set to %s", r->content_type);
+
+#if defined(USE_MDTM) && (defined(HAVE_TIMEGM) || defined(HAVE_GMTOFF))
+ if (mtime != 0L) {
+ char datestr[APR_RFC822_DATE_LEN];
+ apr_rfc822_date(datestr, mtime);
+ apr_table_set(r->headers_out, "Last-Modified", datestr);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: Last-Modified set to %s", datestr);
+ }
+#endif /* USE_MDTM */
+
+ /* If an encoding has been set by mistake, delete it.
+ * @@@ FIXME (e.g., for ftp://user@host/file*.tar.gz,
+ * @@@ the encoding is currently set to x-gzip)
+ */
+ if (dirlisting && r->content_encoding != NULL)
+ r->content_encoding = NULL;
+
+ /* set content-encoding (not for dir listings, they are uncompressed)*/
+ if (r->content_encoding != NULL && r->content_encoding[0] != '\0') {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: Content-Encoding set to %s", r->content_encoding);
+ apr_table_setn(r->headers_out, "Content-Encoding", r->content_encoding);
+ }
+
+ /* wait for connection */
+ if (use_port) {
+ for (;;) {
+ rv = apr_accept(&data_sock, local_sock, r->pool);
+ if (rv == APR_EINTR) {
+ continue;
+ }
+ else if (rv == APR_SUCCESS) {
+ break;
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: FTP: failed to accept data connection");
+ return HTTP_BAD_GATEWAY;
+ }
+ }
+ }
+
+ /* the transfer socket is now open, create a new connection */
+ data = ap_run_create_connection(p, r->server, data_sock, r->connection->id,
+ r->connection->sbh, c->bucket_alloc);
+ if (!data) {
+ /*
+ * the peer reset the connection already; ap_run_create_connection() closed
+ * the socket
+ */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: an error occurred creating the transfer connection");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /* set up the connection filters */
+ rc = ap_run_pre_connection(data, data_sock);
+ if (rc != OK && rc != DONE) {
+ data->aborted = 1;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: pre_connection setup failed (%d)",
+ rc);
+ return rc;
+ }
+
+ /*
+ * VI: Receive the Response ------------------------
+ *
+ * Get response from the remote ftp socket, and pass it up the filter chain.
+ */
+
+ /* send response */
+ r->sent_bodyct = 1;
+
+ if (dirlisting) {
+ /* insert directory filter */
+ ap_add_output_filter("PROXY_SEND_DIR", NULL, r, r->connection);
+ }
+
+ /* send body */
+ if (!r->header_only) {
+ apr_bucket *e;
+ int finish = FALSE;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: start body send");
+
+ /* read the body, pass it to the output filters */
+ while (ap_get_brigade(data->input_filters,
+ bb,
+ AP_MODE_READBYTES,
+ APR_BLOCK_READ,
+ conf->io_buffer_size) == APR_SUCCESS) {
+#if DEBUGGING
+ {
+ apr_off_t readbytes;
+ apr_brigade_length(bb, 0, &readbytes);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
+ r->server, "proxy (PID %d): readbytes: %#x",
+ getpid(), readbytes);
+ }
+#endif
+ /* sanity check */
+ if (APR_BRIGADE_EMPTY(bb)) {
+ apr_brigade_cleanup(bb);
+ break;
+ }
+
+ /* found the last brigade? */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
+ /* if this is the last brigade, cleanup the
+ * backend connection first to prevent the
+ * backend server from hanging around waiting
+ * for a slow client to eat these bytes
+ */
+ ap_flush_conn(data);
+ if (data_sock) {
+ apr_socket_close(data_sock);
+ }
+ data_sock = NULL;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: data connection closed");
+ /* signal that we must leave */
+ finish = TRUE;
+ }
+
+ /* if no EOS yet, then we must flush */
+ if (FALSE == finish) {
+ e = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ }
+
+ /* try send what we read */
+ if (ap_pass_brigade(r->output_filters, bb) != APR_SUCCESS
+ || c->aborted) {
+ /* Ack! Phbtt! Die! User aborted! */
+ finish = TRUE;
+ }
+
+ /* make sure we always clean up after ourselves */
+ apr_brigade_cleanup(bb);
+
+ /* if we are done, leave */
+ if (TRUE == finish) {
+ break;
+ }
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: end body send");
+
+ }
+ if (data_sock) {
+ ap_flush_conn(data);
+ apr_socket_close(data_sock);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: FTP: data connection closed");
+ }
+
+ /* Retrieve the final response for the RETR or LIST commands */
+ rc = proxy_ftp_command(NULL, r, origin, bb, &ftpmessage);
+ apr_brigade_cleanup(bb);
+
+ /*
+ * VII: Clean Up -------------
+ *
+ * If there are no KeepAlives, or if the connection has been signalled to
+ * close, close the socket and clean up
+ */
+
+ /* finish */
+ rc = proxy_ftp_command("QUIT" CRLF,
+ r, origin, bb, &ftpmessage);
+ /* responses: 221, 500 */
+ /* 221 Service closing control connection. */
+ /* 500 Syntax error, command unrecognized. */
+ ap_flush_conn(origin);
+ if (origin_sock) {
+ apr_socket_close(origin_sock);
+ origin_sock = NULL;
+ }
+ apr_brigade_destroy(bb);
+ return OK;
+}
+
+static void ap_proxy_ftp_register_hook(apr_pool_t *p)
+{
+ /* hooks */
+ proxy_hook_scheme_handler(ap_proxy_ftp_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ proxy_hook_canon_handler(ap_proxy_ftp_canon, NULL, NULL, APR_HOOK_MIDDLE);
+ /* filters */
+ ap_register_output_filter("PROXY_SEND_DIR", ap_proxy_send_dir_filter,
+ NULL, AP_FTYPE_RESOURCE);
+}
+
+module AP_MODULE_DECLARE_DATA proxy_ftp_module = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ NULL, /* command apr_table_t */
+ ap_proxy_ftp_register_hook /* register hooks */
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/proxy_http.c b/rubbos/app/httpd-2.0.64/modules/proxy/proxy_http.c
new file mode 100644
index 00000000..ca5f038b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/proxy_http.c
@@ -0,0 +1,1824 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* HTTP routines for Apache proxy */
+
+#include "mod_proxy.h"
+
+module AP_MODULE_DECLARE_DATA proxy_http_module;
+
+int ap_proxy_http_canon(request_rec *r, char *url);
+int ap_proxy_http_handler(request_rec *r, proxy_server_conf *conf,
+ char *url, const char *proxyname,
+ apr_port_t proxyport);
+
+typedef struct {
+ const char *name;
+ apr_port_t port;
+ apr_sockaddr_t *addr;
+ apr_socket_t *sock;
+ int close;
+} proxy_http_conn_t;
+
+static apr_status_t ap_proxy_http_cleanup(request_rec *r,
+ proxy_http_conn_t *p_conn,
+ proxy_conn_rec *backend);
+
+/*
+ * Canonicalise http-like URLs.
+ * scheme is the scheme for the URL
+ * url is the URL starting with the first '/'
+ * def_port is the default port for this scheme.
+ */
+int ap_proxy_http_canon(request_rec *r, char *url)
+{
+ char *host, *path, *search, sport[7];
+ const char *err;
+ const char *scheme;
+ apr_port_t port, def_port;
+
+ /* ap_port_of_scheme() */
+ if (strncasecmp(url, "http:", 5) == 0) {
+ url += 5;
+ scheme = "http";
+ }
+ else if (strncasecmp(url, "https:", 6) == 0) {
+ url += 6;
+ scheme = "https";
+ }
+ else {
+ return DECLINED;
+ }
+ def_port = apr_uri_port_of_scheme(scheme);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: HTTP: canonicalising URL %s", url);
+
+    /* do a syntactic check.
+ * We break the URL into host, port, path, search
+ */
+ port = def_port;
+ err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port);
+ if (err) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "error parsing URL %s: %s",
+ url, err);
+ return HTTP_BAD_REQUEST;
+ }
+
+ /* now parse path/search args, according to rfc1738 */
+ /* N.B. if this isn't a true proxy request, then the URL _path_
+ * has already been decoded. True proxy requests have r->uri
+ * == r->unparsed_uri, and no others have that property.
+ */
+ if (r->uri == r->unparsed_uri) {
+ search = strchr(url, '?');
+ if (search != NULL)
+ *(search++) = '\0';
+ }
+ else
+ search = r->args;
+
+ /* process path */
+ path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, r->proxyreq);
+ if (path == NULL)
+ return HTTP_BAD_REQUEST;
+
+ if (port != def_port)
+ apr_snprintf(sport, sizeof(sport), ":%d", port);
+ else
+ sport[0] = '\0';
+
+ if (ap_strchr_c(host, ':')) { /* if literal IPv6 address */
+ host = apr_pstrcat(r->pool, "[", host, "]", NULL);
+ }
+ r->filename = apr_pstrcat(r->pool, "proxy:", scheme, "://", host, sport,
+ "/", path, (search) ? "?" : "", (search) ? search : "", NULL);
+ return OK;
+}
+
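+/* Map a backend URL back into the frontend namespace (ProxyPassReverse).
+ * Example (hypothetical config): with
+ *   ProxyPassReverse /mirror/ http://backend.example.com/
+ * a backend Location of http://backend.example.com/foo is rewritten to
+ * http://<this host>/mirror/foo.
+ */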
+static const char *ap_proxy_location_reverse_map(request_rec *r, proxy_server_conf *conf, const char *url)
+{
+ struct proxy_alias *ent;
+ int i, l1, l2;
+ char *u;
+
+    /* XXX FIXME: Make sure this handles the ambiguous case of the :80
+ * after the hostname */
+
+ l1 = strlen(url);
+ ent = (struct proxy_alias *)conf->raliases->elts;
+ for (i = 0; i < conf->raliases->nelts; i++) {
+ l2 = strlen(ent[i].real);
+ if (l1 >= l2 && strncmp(ent[i].real, url, l2) == 0) {
+ u = apr_pstrcat(r->pool, ent[i].fake, &url[l2], NULL);
+ return ap_construct_url(r->pool, u, r);
+ }
+ }
+ return url;
+}
+
+/* Clear all connection-based headers from the incoming headers table */
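+/* Example: for "Connection: Keep-Alive, TE" the headers Keep-Alive and TE
+ * are unset, as are Proxy-Connection and Connection themselves.
+ */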
+static void ap_proxy_clear_connection(apr_pool_t *p, apr_table_t *headers)
+{
+ const char *name;
+ char *next = apr_pstrdup(p, apr_table_get(headers, "Connection"));
+
+ apr_table_unset(headers, "Proxy-Connection");
+ if (!next)
+ return;
+
+ while (*next) {
+ name = next;
+ while (*next && !apr_isspace(*next) && (*next != ',')) {
+ ++next;
+ }
+ while (*next && (apr_isspace(*next) || (*next == ','))) {
+ *next = '\0';
+ ++next;
+ }
+ apr_table_unset(headers, name);
+ }
+ apr_table_unset(headers, "Connection");
+}
+
+static
+apr_status_t ap_proxy_http_determine_connection(apr_pool_t *p, request_rec *r,
+ proxy_http_conn_t *p_conn,
+ conn_rec *c,
+ proxy_server_conf *conf,
+ apr_uri_t *uri,
+ char **url,
+ const char *proxyname,
+ apr_port_t proxyport,
+ char *server_portstr,
+ int server_portstr_size) {
+ int server_port;
+ apr_status_t err;
+ apr_sockaddr_t *uri_addr;
+ /*
+ * Break up the URL to determine the host to connect to
+ */
+
+ /* we break the URL into host, port, uri */
+ if (APR_SUCCESS != apr_uri_parse(p, *url, uri)) {
+ return ap_proxyerror(r, HTTP_BAD_REQUEST,
+ apr_pstrcat(p,"URI cannot be parsed: ", *url,
+ NULL));
+ }
+ if (!uri->port) {
+ uri->port = apr_uri_port_of_scheme(uri->scheme);
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: HTTP connecting %s to %s:%d", *url, uri->hostname,
+ uri->port);
+
+ /* do a DNS lookup for the destination host */
+ /* see memory note above */
+ err = apr_sockaddr_info_get(&uri_addr, apr_pstrdup(c->pool, uri->hostname),
+ APR_UNSPEC, uri->port, 0, c->pool);
+
+ /* allocate these out of the connection pool - the check on
+ * r->connection->id makes sure that this string does not get accessed
+ * past the connection lifetime */
+ /* are we connecting directly, or via a proxy? */
+ if (proxyname) {
+ p_conn->name = apr_pstrdup(c->pool, proxyname);
+ p_conn->port = proxyport;
+ /* see memory note above */
+ err = apr_sockaddr_info_get(&p_conn->addr, p_conn->name, APR_UNSPEC,
+ p_conn->port, 0, c->pool);
+ } else {
+ p_conn->name = apr_pstrdup(c->pool, uri->hostname);
+ p_conn->port = uri->port;
+ p_conn->addr = uri_addr;
+ *url = apr_pstrcat(p, uri->path, uri->query ? "?" : "",
+ uri->query ? uri->query : "",
+ uri->fragment ? "#" : "",
+ uri->fragment ? uri->fragment : "", NULL);
+ }
+
+ if (err != APR_SUCCESS) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ apr_pstrcat(p, "DNS lookup failure for: ",
+ p_conn->name, NULL));
+ }
+
+ /* Get the server port for the Via headers */
+ {
+ server_port = ap_get_server_port(r);
+ if (ap_is_default_port(server_port, r)) {
+ strcpy(server_portstr,"");
+ } else {
+ apr_snprintf(server_portstr, server_portstr_size, ":%d",
+ server_port);
+ }
+ }
+
+ /* check if ProxyBlock directive on this host */
+ if (OK != ap_proxy_checkproxyblock(r, conf, uri_addr)) {
+ return ap_proxyerror(r, HTTP_FORBIDDEN,
+ "Connect to remote machine blocked");
+ }
+ return OK;
+}
+
+static
+apr_status_t ap_proxy_http_create_connection(apr_pool_t *p, request_rec *r,
+ proxy_http_conn_t *p_conn,
+ conn_rec *c, conn_rec **origin,
+ proxy_conn_rec *backend,
+ proxy_server_conf *conf,
+ const char *proxyname) {
+ int failed=0, new=0;
+ apr_socket_t *client_socket = NULL;
+
+ /* We have determined who to connect to. Now make the connection, supporting
+ * a KeepAlive connection.
+ */
+
+ /* get all the possible IP addresses for the destname and loop through them
+ * until we get a successful connection
+ */
+
+ /* if a keepalive socket is already open, check whether it must stay
+ * open, or whether it should be closed and a new socket created.
+ */
+ /* see memory note above */
+ if (backend->connection) {
+ client_socket = ap_get_module_config(backend->connection->conn_config, &core_module);
+ if ((backend->connection->id == c->id) &&
+ (backend->port == p_conn->port) &&
+ (backend->hostname) &&
+ (!apr_strnatcasecmp(backend->hostname, p_conn->name))) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: keepalive address match (keep original socket)");
+ } else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: keepalive address mismatch / connection has"
+ " changed (close old socket (%s/%s, %d/%d))",
+ p_conn->name, backend->hostname, p_conn->port,
+ backend->port);
+ apr_socket_close(client_socket);
+ backend->connection = NULL;
+ }
+ }
+
+ /* get a socket - either a keepalive one, or a new one */
+ new = 1;
+ if ((backend->connection) && (backend->connection->id == c->id)) {
+ apr_size_t buffer_len = 1;
+ char test_buffer[1];
+ apr_status_t socket_status;
+ apr_interval_time_t current_timeout;
+
+ /* use previous keepalive socket */
+ *origin = backend->connection;
+ p_conn->sock = client_socket;
+ new = 0;
+
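+        /* Probe the saved keepalive socket with a zero-timeout read: an EOF
+         * result means the backend has already closed the connection, so a
+         * fresh socket is created below instead.
+         */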
+ /* save timeout */
+ apr_socket_timeout_get(p_conn->sock, &current_timeout);
+ /* set no timeout */
+ apr_socket_timeout_set(p_conn->sock, 0);
+ socket_status = apr_recv(p_conn->sock, test_buffer, &buffer_len);
+ /* put back old timeout */
+ apr_socket_timeout_set(p_conn->sock, current_timeout);
+ if ( APR_STATUS_IS_EOF(socket_status) ) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL,
+ "proxy: previous connection is closed, creating a new connection.");
+ new = 1;
+ }
+ }
+ if (new) {
+ int rc;
+
+ /* create a new socket */
+ backend->connection = NULL;
+
+ /*
+ * At this point we have a list of one or more IP addresses of
+ * the machine to connect to. If configured, reorder this
+         * list so that the "best candidate" is tried first. "Best
+         * candidate" could mean the least loaded server, the fastest
+         * responding server, whatever.
+         *
+         * For now we do nothing, i.e. we get DNS round robin.
+ * XXX FIXME
+ */
+ failed = ap_proxy_connect_to_backend(&p_conn->sock, "HTTP",
+ p_conn->addr, p_conn->name,
+ conf, r->server, c->pool);
+
+ /* handle a permanent error on the connect */
+ if (failed) {
+ if (proxyname) {
+ return DECLINED;
+ } else {
+ return HTTP_BAD_GATEWAY;
+ }
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: socket is connected");
+
+ /* the socket is now open, create a new backend server connection */
+ *origin = ap_run_create_connection(c->pool, r->server, p_conn->sock,
+ r->connection->id,
+ r->connection->sbh, c->bucket_alloc);
+ if (!*origin) {
+ /* the peer reset the connection already; ap_run_create_connection()
+ * closed the socket
+ */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
+ r->server, "proxy: an error occurred creating a "
+ "new connection to %pI (%s)", p_conn->addr,
+ p_conn->name);
+ apr_socket_close(p_conn->sock);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ backend->connection = *origin;
+ backend->hostname = apr_pstrdup(c->pool, p_conn->name);
+ backend->port = p_conn->port;
+
+ if (backend->is_ssl) {
+ if (!ap_proxy_ssl_enable(backend->connection)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0,
+ r->server, "proxy: failed to enable ssl support "
+ "for %pI (%s)", p_conn->addr, p_conn->name);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+ else {
+ ap_proxy_ssl_disable(backend->connection);
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: connection complete to %pI (%s)",
+ p_conn->addr, p_conn->name);
+
+ /* set up the connection filters */
+ rc = ap_run_pre_connection(*origin, p_conn->sock);
+ if (rc != OK && rc != DONE) {
+ (*origin)->aborted = 1;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: HTTP: pre_connection setup failed (%d)",
+ rc);
+ return rc;
+ }
+ }
+ return OK;
+}
+
+static void add_te_chunked(apr_pool_t *p,
+ apr_bucket_alloc_t *bucket_alloc,
+ apr_bucket_brigade *header_brigade)
+{
+ apr_bucket *e;
+ char *buf;
+ const char te_hdr[] = "Transfer-Encoding: chunked" CRLF;
+
+ buf = apr_pmemdup(p, te_hdr, sizeof(te_hdr)-1);
+ ap_xlate_proto_to_ascii(buf, sizeof(te_hdr)-1);
+
+ e = apr_bucket_pool_create(buf, sizeof(te_hdr)-1, p, bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+}
+
+static void add_cl(apr_pool_t *p,
+ apr_bucket_alloc_t *bucket_alloc,
+ apr_bucket_brigade *header_brigade,
+ const char *cl_val)
+{
+ apr_bucket *e;
+ char *buf;
+
+ buf = apr_pstrcat(p, "Content-Length: ",
+ cl_val,
+ CRLF,
+ NULL);
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+}
+
+#define ASCII_CRLF "\015\012"
+#define ASCII_ZERO "\060"
+
+static void terminate_headers(apr_bucket_alloc_t *bucket_alloc,
+ apr_bucket_brigade *header_brigade)
+{
+ apr_bucket *e;
+
+ /* add empty line at the end of the headers */
+ e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+}
+
+static apr_status_t pass_brigade(apr_bucket_alloc_t *bucket_alloc,
+ request_rec *r, proxy_http_conn_t *p_conn,
+ conn_rec *origin, apr_bucket_brigade *bb,
+ int flush)
+{
+ apr_status_t status;
+
+ if (flush) {
+ apr_bucket *e = apr_bucket_flush_create(bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ }
+ status = ap_pass_brigade(origin->output_filters, bb);
+ if (status != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
+ "proxy: pass request body failed to %pI (%s)",
+ p_conn->addr, p_conn->name);
+ return status;
+ }
+ apr_brigade_cleanup(bb);
+ return APR_SUCCESS;
+}
+
+static apr_status_t stream_reqbody_chunked(apr_pool_t *p,
+ request_rec *r,
+ proxy_http_conn_t *p_conn,
+ conn_rec *origin,
+ apr_bucket_brigade *header_brigade,
+ apr_bucket_brigade *input_brigade)
+{
+ int seen_eos = 0;
+ apr_size_t hdr_len;
+ apr_off_t bytes;
+ apr_status_t status;
+ apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc;
+ apr_bucket_brigade *bb;
+ apr_bucket *e;
+
+ add_te_chunked(p, bucket_alloc, header_brigade);
+ terminate_headers(bucket_alloc, header_brigade);
+
+ while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade)))
+ {
+ char chunk_hdr[20]; /* must be here due to transient bucket. */
+
+ /* If this brigade contains EOS, either stop or remove it. */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
+ seen_eos = 1;
+
+ /* We can't pass this EOS to the output_filters. */
+ e = APR_BRIGADE_LAST(input_brigade);
+ apr_bucket_delete(e);
+ }
+
+ apr_brigade_length(input_brigade, 1, &bytes);
+
+ hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
+ "%" APR_UINT64_T_HEX_FMT CRLF,
+ (apr_uint64_t)bytes);
+
+ ap_xlate_proto_to_ascii(chunk_hdr, hdr_len);
+ e = apr_bucket_transient_create(chunk_hdr, hdr_len,
+ bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(input_brigade, e);
+
+ /*
+ * Append the end-of-chunk CRLF
+ */
+ e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(input_brigade, e);
+
+ if (header_brigade) {
+ /* we never sent the header brigade, so go ahead and
+ * take care of that now
+ */
+ bb = header_brigade;
+
+ /*
+ * Save input_brigade in bb brigade. (At least) in the SSL case
+ * input_brigade contains transient buckets whose data would get
+ * overwritten during the next call of ap_get_brigade in the loop.
+             * ap_save_brigade ensures that these buckets are set aside.
+             * Calling ap_save_brigade with NULL as the filter is OK because
+             * the bb brigade has already been created and does not need to
+             * be created by ap_save_brigade.
+ */
+ status = ap_save_brigade(NULL, &bb, &input_brigade, p);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ header_brigade = NULL;
+ }
+ else {
+ bb = input_brigade;
+ }
+
+ /* The request is flushed below this loop with chunk EOS header */
+ status = pass_brigade(bucket_alloc, r, p_conn, origin, bb, 0);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ if (seen_eos) {
+ break;
+ }
+
+ status = ap_get_brigade(r->input_filters, input_brigade,
+ AP_MODE_READBYTES, APR_BLOCK_READ,
+ HUGE_STRING_LEN);
+
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ if (header_brigade) {
+ /* we never sent the header brigade because there was no request body;
+ * send it now
+ */
+ bb = header_brigade;
+ }
+ else {
+ if (!APR_BRIGADE_EMPTY(input_brigade)) {
+ /* input brigade still has an EOS which we can't pass to the output_filters. */
+ e = APR_BRIGADE_LAST(input_brigade);
+ AP_DEBUG_ASSERT(APR_BUCKET_IS_EOS(e));
+ apr_bucket_delete(e);
+ }
+ bb = input_brigade;
+ }
+
+ e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF
+ /* <trailers> */
+ ASCII_CRLF,
+ 5, bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+
+ /* Now we have headers-only, or the chunk EOS mark; flush it */
+ status = pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1);
+ return status;
+}
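+
+/* Descriptive note on the chunked framing produced above: each brigade read
+ * from the client is sent as "<hex length> CRLF <data> CRLF", and the final
+ * bucket appended before the flush is the terminating "0 CRLF CRLF" sequence
+ * (no trailers are generated).
+ */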
+
+static apr_status_t stream_reqbody_cl(apr_pool_t *p,
+ request_rec *r,
+ proxy_http_conn_t *p_conn,
+ conn_rec *origin,
+ apr_bucket_brigade *header_brigade,
+ apr_bucket_brigade *input_brigade,
+ const char *old_cl_val)
+{
+ int seen_eos = 0;
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc;
+ apr_bucket_brigade *bb;
+ apr_bucket *e;
+ apr_off_t cl_val = 0;
+ apr_off_t bytes;
+ apr_off_t bytes_streamed = 0;
+
+ if (old_cl_val) {
+ add_cl(p, bucket_alloc, header_brigade, old_cl_val);
+ cl_val = atol(old_cl_val);
+ }
+ terminate_headers(bucket_alloc, header_brigade);
+
+ while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade)))
+ {
+ apr_brigade_length(input_brigade, 1, &bytes);
+ bytes_streamed += bytes;
+
+ /* If this brigade contains EOS, either stop or remove it. */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
+ seen_eos = 1;
+
+ /* We can't pass this EOS to the output_filters. */
+ e = APR_BRIGADE_LAST(input_brigade);
+ apr_bucket_delete(e);
+ }
+
+ /* C-L < bytes streamed?!?
+ * We will error out after the body is completely
+ * consumed, but we can't stream more bytes at the
+ * back end since they would in part be interpreted
+ * as another request! If nothing is sent, then
+ * just send nothing.
+ *
+ * Prevents HTTP Response Splitting.
+ */
+ if (bytes_streamed > cl_val)
+ continue;
+
+ if (header_brigade) {
+ /* we never sent the header brigade, so go ahead and
+ * take care of that now
+ */
+ bb = header_brigade;
+
+ /*
+ * Save input_brigade in bb brigade. (At least) in the SSL case
+ * input_brigade contains transient buckets whose data would get
+ * overwritten during the next call of ap_get_brigade in the loop.
+             * ap_save_brigade ensures that these buckets are set aside.
+             * Calling ap_save_brigade with NULL as the filter is OK because
+             * the bb brigade has already been created and does not need to
+             * be created by ap_save_brigade.
+ */
+ status = ap_save_brigade(NULL, &bb, &input_brigade, p);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ header_brigade = NULL;
+ }
+ else {
+ bb = input_brigade;
+ }
+
+ /* Once we hit EOS, we are ready to flush. */
+ status = pass_brigade(bucket_alloc, r, p_conn, origin, bb, seen_eos);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ if (seen_eos) {
+ break;
+ }
+
+ status = ap_get_brigade(r->input_filters, input_brigade,
+ AP_MODE_READBYTES, APR_BLOCK_READ,
+ HUGE_STRING_LEN);
+
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ if (bytes_streamed != cl_val) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "proxy: client %s given Content-Length did not match"
+ " number of body bytes read", r->connection->remote_ip);
+ return APR_EOF;
+ }
+
+ if (header_brigade) {
+ /* we never sent the header brigade since there was no request
+ * body; send it now with the flush flag
+ */
+ bb = header_brigade;
+ status = pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1);
+ }
+ return status;
+}
+
+#define MAX_MEM_SPOOL 16384
+
+static apr_status_t spool_reqbody_cl(apr_pool_t *p,
+ request_rec *r,
+ proxy_http_conn_t *p_conn,
+ conn_rec *origin,
+ apr_bucket_brigade *header_brigade,
+ apr_bucket_brigade *input_brigade,
+ int force_cl)
+{
+ int seen_eos = 0;
+ apr_status_t status;
+ apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc;
+ apr_bucket_brigade *body_brigade;
+ apr_bucket *e;
+ apr_off_t bytes, bytes_spooled = 0, fsize = 0;
+ apr_file_t *tmpfile = NULL;
+
+ body_brigade = apr_brigade_create(p, bucket_alloc);
+
+ while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade)))
+ {
+ /* If this brigade contains EOS, either stop or remove it. */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
+ seen_eos = 1;
+
+ /* We can't pass this EOS to the output_filters. */
+ e = APR_BRIGADE_LAST(input_brigade);
+ apr_bucket_delete(e);
+ }
+
+ apr_brigade_length(input_brigade, 1, &bytes);
+
+ if (bytes_spooled + bytes > MAX_MEM_SPOOL) {
+ /* can't spool any more in memory; write latest brigade to disk */
+ if (tmpfile == NULL) {
+ const char *temp_dir;
+ char *template;
+
+ status = apr_temp_dir_get(&temp_dir, p);
+ if (status != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
+ "proxy: search for temporary directory failed");
+ return status;
+ }
+ apr_filepath_merge(&template, temp_dir,
+ "modproxy.tmp.XXXXXX",
+ APR_FILEPATH_NATIVE, p);
+ status = apr_file_mktemp(&tmpfile, template, 0, p);
+ if (status != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
+ "proxy: creation of temporary file in directory %s failed",
+ temp_dir);
+ return status;
+ }
+ }
+ for (e = APR_BRIGADE_FIRST(input_brigade);
+ e != APR_BRIGADE_SENTINEL(input_brigade);
+ e = APR_BUCKET_NEXT(e)) {
+ const char *data;
+ apr_size_t bytes_read, bytes_written;
+
+ apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ);
+ status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written);
+ if (status != APR_SUCCESS) {
+ const char *tmpfile_name;
+
+ if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) {
+ tmpfile_name = "(unknown)";
+ }
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
+ "proxy: write to temporary file %s failed",
+ tmpfile_name);
+ return status;
+ }
+ AP_DEBUG_ASSERT(bytes_read == bytes_written);
+ fsize += bytes_written;
+ }
+ apr_brigade_cleanup(input_brigade);
+ }
+ else {
+
+ /*
+ * Save input_brigade in body_brigade. (At least) in the SSL case
+ * input_brigade contains transient buckets whose data would get
+ * overwritten during the next call of ap_get_brigade in the loop.
+             * ap_save_brigade ensures that these buckets are set aside.
+             * Calling ap_save_brigade with NULL as the filter is OK because
+             * body_brigade has already been created and does not need to
+             * be created by ap_save_brigade.
+ */
+ status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ }
+
+ bytes_spooled += bytes;
+
+ if (seen_eos) {
+ break;
+ }
+
+ status = ap_get_brigade(r->input_filters, input_brigade,
+ AP_MODE_READBYTES, APR_BLOCK_READ,
+ HUGE_STRING_LEN);
+
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ if (bytes_spooled || force_cl) {
+ add_cl(p, bucket_alloc, header_brigade, apr_off_t_toa(p, bytes_spooled));
+ }
+ terminate_headers(bucket_alloc, header_brigade);
+ APR_BRIGADE_CONCAT(header_brigade, body_brigade);
+ if (tmpfile) {
+ /* For platforms where the size of the file may be larger than
+ * that which can be stored in a single bucket (where the
+ * length field is an apr_size_t), split it into several
+ * buckets: */
+ if (sizeof(apr_off_t) > sizeof(apr_size_t)
+ && fsize > AP_MAX_SENDFILE) {
+ e = apr_bucket_file_create(tmpfile, 0, AP_MAX_SENDFILE, p,
+ bucket_alloc);
+ while (fsize > AP_MAX_SENDFILE) {
+ apr_bucket *ce;
+ apr_bucket_copy(e, &ce);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, ce);
+ e->start += AP_MAX_SENDFILE;
+ fsize -= AP_MAX_SENDFILE;
+ }
+ e->length = (apr_size_t)fsize; /* Resize just the last bucket */
+ }
+ else {
+ e = apr_bucket_file_create(tmpfile, 0, (apr_size_t)fsize, p,
+ bucket_alloc);
+ }
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+ }
+ /* This is all a single brigade, pass with flush flagged */
+ status = pass_brigade(bucket_alloc, r, p_conn, origin, header_brigade, 1);
+ return status;
+}
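+
+/* Descriptive note: the routine above keeps at most MAX_MEM_SPOOL (16 KiB)
+ * of request body in memory; anything beyond that is spooled to a temporary
+ * "modproxy.tmp.XXXXXX" file, and the whole body is then forwarded with a
+ * Content-Length computed from bytes_spooled.
+ */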
+
+static
+apr_status_t ap_proxy_http_request(apr_pool_t *p, request_rec *r,
+ proxy_http_conn_t *p_conn, conn_rec *origin,
+ proxy_server_conf *conf,
+ apr_uri_t *uri,
+ char *url,
+ apr_bucket_brigade *header_brigade,
+ char *server_portstr)
+{
+ conn_rec *c = r->connection;
+ apr_bucket_alloc_t *bucket_alloc = c->bucket_alloc;
+ apr_bucket_brigade *input_brigade;
+ apr_bucket_brigade *temp_brigade;
+ apr_bucket *e;
+ char *buf;
+ const apr_array_header_t *headers_in_array;
+ const apr_table_entry_t *headers_in;
+ int counter;
+ apr_status_t status;
+ enum rb_methods {RB_INIT, RB_STREAM_CL, RB_STREAM_CHUNKED, RB_SPOOL_CL};
+ enum rb_methods rb_method = RB_INIT;
+ const char *old_cl_val = NULL;
+ const char *old_te_val = NULL;
+ apr_off_t bytes_read = 0;
+ apr_off_t bytes;
+ int force10;
+
+ /*
+ * Send the HTTP/1.1 request to the remote server
+ */
+
+ /* strip connection listed hop-by-hop headers from the request */
+ /* even though in theory a connection: close coming from the client
+ * should not affect the connection to the server, it's unlikely
+ * that subsequent client requests will hit this thread/process,
+ * so we cancel server keepalive if the client does.
+ */
+ if (ap_proxy_liststr(apr_table_get(r->headers_in,
+ "Connection"), "close")) {
+ p_conn->close++;
+ /* XXX: we are abusing r->headers_in rather than a copy,
+ * give the core output handler a clue the client would
+ * rather just close.
+ */
+ c->keepalive = AP_CONN_CLOSE;
+ }
+ ap_proxy_clear_connection(p, r->headers_in);
+
+ if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) {
+ buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.0" CRLF, NULL);
+ force10 = 1;
+ p_conn->close++;
+ } else {
+ buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.1" CRLF, NULL);
+ force10 = 0;
+ }
+ if (apr_table_get(r->subprocess_env, "proxy-nokeepalive")) {
+ origin->keepalive = AP_CONN_CLOSE;
+ p_conn->close++;
+ }
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+ if (conf->preserve_host == 0) {
+ if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
+ buf = apr_pstrcat(p, "Host: ", uri->hostname, ":", uri->port_str,
+ CRLF, NULL);
+ } else {
+ buf = apr_pstrcat(p, "Host: ", uri->hostname, CRLF, NULL);
+ }
+ }
+ else {
+ /* don't want to use r->hostname, as the incoming header might have a
+ * port attached
+ */
+ const char* hostname = apr_table_get(r->headers_in,"Host");
+ if (!hostname) {
+ hostname = r->server->server_hostname;
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
+ "proxy: no HTTP 0.9 request (with no host line) "
+ "on incoming request and preserve host set "
+ "forcing hostname to be %s for uri %s",
+ hostname,
+ r->uri );
+ }
+ buf = apr_pstrcat(p, "Host: ", hostname, CRLF, NULL);
+ }
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+
+ /* handle Via */
+ if (conf->viaopt == via_block) {
+ /* Block all outgoing Via: headers */
+ apr_table_unset(r->headers_in, "Via");
+ } else if (conf->viaopt != via_off) {
+ const char *server_name = ap_get_server_name(r);
+ /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host,
+ * then the server name returned by ap_get_server_name() is the
+         * origin server name (which does not make much sense with Via: headers)
+ * so we use the proxy vhost's name instead.
+ */
+ if (server_name == r->hostname)
+ server_name = r->server->server_hostname;
+ /* Create a "Via:" request header entry and merge it */
+ /* Generate outgoing Via: header with/without server comment: */
+ apr_table_mergen(r->headers_in, "Via",
+ (conf->viaopt == via_full)
+ ? apr_psprintf(p, "%d.%d %s%s (%s)",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name, server_portstr,
+ AP_SERVER_BASEVERSION)
+ : apr_psprintf(p, "%d.%d %s%s",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name, server_portstr)
+ );
+ }
+
+ /* X-Forwarded-*: handling
+ *
+ * XXX Privacy Note:
+ * -----------------
+ *
+ * These request headers are only really useful when the mod_proxy
+ * is used in a reverse proxy configuration, so that useful info
+ * about the client can be passed through the reverse proxy and on
+ * to the backend server, which may require the information to
+ * function properly.
+ *
+ * In a forward proxy situation, these options are a potential
+ * privacy violation, as information about clients behind the proxy
+ * are revealed to arbitrary servers out there on the internet.
+ *
+ * The HTTP/1.1 Via: header is designed for passing client
+ * information through proxies to a server, and should be used in
+     * a forward proxy configuration instead of X-Forwarded-*. See the
+ * ProxyVia option for details.
+ */
+
+ if (PROXYREQ_REVERSE == r->proxyreq) {
+ const char *buf;
+
+ /* Add X-Forwarded-For: so that the upstream has a chance to
+ * determine, where the original request came from.
+ */
+ apr_table_mergen(r->headers_in, "X-Forwarded-For",
+ r->connection->remote_ip);
+
+ /* Add X-Forwarded-Host: so that upstream knows what the
+ * original request hostname was.
+ */
+ if ((buf = apr_table_get(r->headers_in, "Host"))) {
+ apr_table_mergen(r->headers_in, "X-Forwarded-Host", buf);
+ }
+
+ /* Add X-Forwarded-Server: so that upstream knows what the
+ * name of this proxy server is (if there are more than one)
+ * XXX: This duplicates Via: - do we strictly need it?
+ */
+ apr_table_mergen(r->headers_in, "X-Forwarded-Server",
+ r->server->server_hostname);
+ }
+
+ /* send request headers */
+ proxy_run_fixups(r);
+ headers_in_array = apr_table_elts(r->headers_in);
+ headers_in = (const apr_table_entry_t *) headers_in_array->elts;
+ for (counter = 0; counter < headers_in_array->nelts; counter++) {
+ if (headers_in[counter].key == NULL
+ || headers_in[counter].val == NULL
+
+ /* Already sent */
+ || !strcasecmp(headers_in[counter].key, "Host")
+
+ /* Clear out hop-by-hop request headers not to send
+ * RFC2616 13.5.1 says we should strip these headers
+ */
+ || !strcasecmp(headers_in[counter].key, "Keep-Alive")
+ || !strcasecmp(headers_in[counter].key, "TE")
+ || !strcasecmp(headers_in[counter].key, "Trailer")
+ || !strcasecmp(headers_in[counter].key, "Upgrade")
+
+ /* XXX: @@@ FIXME: "Proxy-Authorization" should *only* be
+ * suppressed if THIS server requested the authentication,
+ * not when a frontend proxy requested it!
+ *
+ * The solution to this problem is probably to strip out
+ * the Proxy-Authorisation header in the authorisation
+ * code itself, not here. This saves us having to signal
+ * somehow whether this request was authenticated or not.
+ */
+ || !strcasecmp(headers_in[counter].key,"Proxy-Authorization")
+ || !strcasecmp(headers_in[counter].key,"Proxy-Authenticate")) {
+ continue;
+ }
+
+ /* Skip Transfer-Encoding and Content-Length for now.
+ */
+ if (!strcasecmp(headers_in[counter].key, "Transfer-Encoding")) {
+ old_te_val = headers_in[counter].val;
+ continue;
+ }
+ if (!strcasecmp(headers_in[counter].key, "Content-Length")) {
+ old_cl_val = headers_in[counter].val;
+ continue;
+ }
+
+ /* for sub-requests, ignore freshness/expiry headers */
+ if (r->main) {
+ if ( !strcasecmp(headers_in[counter].key, "If-Match")
+ || !strcasecmp(headers_in[counter].key, "If-Modified-Since")
+ || !strcasecmp(headers_in[counter].key, "If-Range")
+ || !strcasecmp(headers_in[counter].key, "If-Unmodified-Since")
+ || !strcasecmp(headers_in[counter].key, "If-None-Match")) {
+ continue;
+ }
+ }
+
+ buf = apr_pstrcat(p, headers_in[counter].key, ": ",
+ headers_in[counter].val, CRLF,
+ NULL);
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+ }
+
+ /* We have headers, let's figure out our request body... */
+ input_brigade = apr_brigade_create(p, bucket_alloc);
+
+ /* sub-requests never use keepalives, and mustn't pass request bodies.
+ * Because the new logic looks at input_brigade, we will self-terminate
+ * input_brigade and jump past all of the request body logic...
+ * Reading anything with ap_get_brigade is likely to consume the
+     * main request's body or read beyond EOS - which would be unpleasant.
+ */
+ if (r->main) {
+ p_conn->close++;
+ if (old_cl_val) {
+ old_cl_val = NULL;
+ apr_table_unset(r->headers_in, "Content-Length");
+ }
+ if (old_te_val) {
+ old_te_val = NULL;
+ apr_table_unset(r->headers_in, "Transfer-Encoding");
+ }
+ rb_method = RB_STREAM_CL;
+ e = apr_bucket_eos_create(input_brigade->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(input_brigade, e);
+ goto skip_body;
+ }
+
+    /* We only understand chunked. Other modules might inject
+     * (and therefore, decode) other flavors but we don't know
+     * that they can and have done so unless they remove
+     * their decoding from the headers_in T-E list.
+ * XXX: Make this extensible, but in doing so, presume the
+ * encoding has been done by the extensions' handler, and
+ * do not modify add_te_chunked's logic
+ */
+ if (old_te_val && strcmp(old_te_val, "chunked") != 0) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "proxy: %s Transfer-Encoding is not supported",
+ old_te_val);
+ return APR_EINVAL;
+ }
+
+ if (old_cl_val && old_te_val) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_ENOTIMPL, r->server,
+ "proxy: client %s (%s) requested Transfer-Encoding body"
+ " with Content-Length (C-L ignored)",
+ c->remote_ip, c->remote_host ? c->remote_host: "");
+ apr_table_unset(r->headers_in, "Content-Length");
+ old_cl_val = NULL;
+ origin->keepalive = AP_CONN_CLOSE;
+ p_conn->close++;
+ }
+
+ /* Prefetch MAX_MEM_SPOOL bytes
+ *
+     * This helps us avoid any election of C-L vs. T-E
+ * request bodies, since we are willing to keep in
+ * memory this much data, in any case. This gives
+ * us an instant C-L election if the body is of some
+ * reasonable size.
+ */
+ temp_brigade = apr_brigade_create(p, bucket_alloc);
+ do {
+ status = ap_get_brigade(r->input_filters, temp_brigade,
+ AP_MODE_READBYTES, APR_BLOCK_READ,
+ MAX_MEM_SPOOL - bytes_read);
+ if (status != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
+ "proxy: prefetch request body failed to %s"
+ " from %s (%s)",
+ p_conn->name ? p_conn->name: "",
+ c->remote_ip, c->remote_host ? c->remote_host: "");
+ return status;
+ }
+
+ apr_brigade_length(temp_brigade, 1, &bytes);
+ bytes_read += bytes;
+
+ /*
+ * Save temp_brigade in input_brigade. (At least) in the SSL case
+ * temp_brigade contains transient buckets whose data would get
+ * overwritten during the next call of ap_get_brigade in the loop.
+         * ap_save_brigade ensures that these buckets are set aside.
+         * Calling ap_save_brigade with NULL as the filter is OK because
+         * input_brigade has already been created and does not need to
+         * be created by ap_save_brigade.
+ */
+ status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p);
+ if (status != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
+ "proxy: processing prefetched request body failed"
+ " to %s from %s (%s)",
+ p_conn->name ? p_conn->name: "",
+ c->remote_ip, c->remote_host ? c->remote_host: "");
+ return status;
+ }
+
+ /* Ensure we don't hit a wall where we have a buffer too small
+ * for ap_get_brigade's filters to fetch us another bucket,
+ * surrender once we hit 80 bytes less than MAX_MEM_SPOOL
+ * (an arbitrary value.)
+ */
+ } while ((bytes_read < MAX_MEM_SPOOL - 80)
+ && !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)));
+
+ /* Use chunked request body encoding or send a content-length body?
+ *
+ * Prefer C-L when:
+ *
+ * We have no request body (handled by RB_STREAM_CL)
+ *
+ * We have a request body length <= MAX_MEM_SPOOL
+ *
+ * The administrator has setenv force-proxy-request-1.0
+ *
+ * The client sent a C-L body, and the administrator has
+ * not setenv proxy-sendchunked or has set setenv proxy-sendcl
+ *
+ * The client sent a T-E body, and the administrator has
+ * setenv proxy-sendcl, and not setenv proxy-sendchunked
+ *
+ * If both proxy-sendcl and proxy-sendchunked are set, the
+ * behavior is the same as if neither were set, large bodies
+ * that can't be read will be forwarded in their original
+ * form of C-L, or T-E.
+ *
+ * To ensure maximum compatibility, setenv proxy-sendcl
+ * To reduce server resource use, setenv proxy-sendchunked
+ *
+ * Then address specific servers with conditional setenv
+     * options to restore the default behavior where desirable.
+ *
+ * We have to compute content length by reading the entire request
+ * body; if request body is not small, we'll spool the remaining
+ * input to a temporary file. Chunked is always preferable.
+ *
+ * We can only trust the client-provided C-L if the T-E header
+ * is absent, and the filters are unchanged (the body won't
+ * be resized by another content filter).
+ */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
+ /* The whole thing fit, so our decision is trivial, use
+ * the filtered bytes read from the client for the request
+ * body Content-Length.
+ *
+ * If we expected no body, and read no body, do not set
+ * the Content-Length.
+ */
+ if (old_cl_val || old_te_val || bytes_read) {
+ old_cl_val = apr_off_t_toa(r->pool, bytes_read);
+ }
+ rb_method = RB_STREAM_CL;
+ }
+ else if (old_te_val) {
+ if (force10
+ || (apr_table_get(r->subprocess_env, "proxy-sendcl")
+ && !apr_table_get(r->subprocess_env, "proxy-sendchunks")
+ && !apr_table_get(r->subprocess_env, "proxy-sendchunked"))) {
+ rb_method = RB_SPOOL_CL;
+ }
+ else {
+ rb_method = RB_STREAM_CHUNKED;
+ }
+ }
+ else if (old_cl_val) {
+ if (r->input_filters == r->proto_input_filters) {
+ rb_method = RB_STREAM_CL;
+ }
+ else if (!force10
+ && (apr_table_get(r->subprocess_env, "proxy-sendchunks")
+ || apr_table_get(r->subprocess_env, "proxy-sendchunked"))
+ && !apr_table_get(r->subprocess_env, "proxy-sendcl")) {
+ rb_method = RB_STREAM_CHUNKED;
+ }
+ else {
+ rb_method = RB_SPOOL_CL;
+ }
+ }
+ else {
+ /* This is an appropriate default; very efficient for no-body
+ * requests, and has the behavior that it will not add any C-L
+ * when the old_cl_val is NULL.
+ */
+ rb_method = RB_SPOOL_CL;
+ }
+
+/* Yes I hate gotos. This is the subrequest shortcut */
+skip_body:
+ /* Handle Connection: header */
+ if (!force10 && p_conn->close) {
+ buf = apr_pstrdup(p, "Connection: close" CRLF);
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+ }
+
+ /* send the request body, if any. */
+ switch(rb_method) {
+ case RB_STREAM_CHUNKED:
+ status = stream_reqbody_chunked(p, r, p_conn, origin, header_brigade,
+ input_brigade);
+ break;
+ case RB_STREAM_CL:
+ status = stream_reqbody_cl(p, r, p_conn, origin, header_brigade,
+ input_brigade, old_cl_val);
+ break;
+ case RB_SPOOL_CL:
+ status = spool_reqbody_cl(p, r, p_conn, origin, header_brigade,
+ input_brigade, (old_cl_val != NULL)
+ || (old_te_val != NULL)
+ || (bytes_read > 0));
+ break;
+ default:
+ ap_assert(1 != 1);
+ break;
+ }
+
+ if (status != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
+ "proxy: pass request body failed to %pI (%s)"
+ " from %s (%s)",
+ p_conn->addr, p_conn->name ? p_conn->name: "",
+ c->remote_ip, c->remote_host ? c->remote_host: "");
+ return status;
+ }
+
+ return APR_SUCCESS;
+}
+
+static int addit_dammit(void *v, const char *key, const char *val)
+{
+ apr_table_addn(v, key, val);
+ return 1;
+}
+
+/*
+ * Limit the number of interim responses we send back to the client. Otherwise
+ * we suffer from a memory build-up. Besides, there is NO sense in sending back
+ * an unlimited number of interim responses to the client. Thus, if we cross
+ * this limit, send back a 502 (Bad Gateway).
+ */
+#ifndef AP_MAX_INTERIM_RESPONSES
+#define AP_MAX_INTERIM_RESPONSES 10
+#endif
+
+static
+apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
+ proxy_http_conn_t *p_conn,
+ conn_rec *origin,
+ proxy_conn_rec *backend,
+ proxy_server_conf *conf,
+ apr_bucket_brigade *bb,
+ char *server_portstr) {
+ conn_rec *c = r->connection;
+ char buffer[HUGE_STRING_LEN];
+ const char *buf;
+ char keepchar;
+ request_rec *rp;
+ apr_bucket *e;
+ apr_table_t *save_table;
+ int len, backasswards;
+ int received_continue = 1; /* flag to indicate if we should
+ * loop over response parsing logic
+ * in the case that the origin told us
+ * to HTTP_CONTINUE
+ */
+
+ /* Get response from the remote server, and pass it up the
+ * filter chain
+ */
+
+ rp = ap_proxy_make_fake_req(origin, r);
+ /* In case anyone needs to know, this is a fake request that is really a
+ * response.
+ */
+ rp->proxyreq = PROXYREQ_RESPONSE;
+
+ while (received_continue && (received_continue <= AP_MAX_INTERIM_RESPONSES)) {
+ apr_brigade_cleanup(bb);
+
+ len = ap_getline(buffer, sizeof(buffer), rp, 0);
+ if (len == 0) {
+ /* handle one potential stray CRLF */
+ len = ap_getline(buffer, sizeof(buffer), rp, 0);
+ }
+ if (len <= 0) {
+ apr_socket_close(p_conn->sock);
+ backend->connection = NULL;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "proxy: error reading status line from remote "
+ "server %s", p_conn->name);
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Error reading from remote server");
+ }
+
+ /* Is it an HTTP/1 response?
+ * This is buggy if we ever see an HTTP/1.10
+ */
+ if (apr_date_checkmask(buffer, "HTTP/#.# ###*")) {
+ int major, minor;
+
+ if (2 != sscanf(buffer, "HTTP/%u.%u", &major, &minor)) {
+ major = 1;
+ minor = 1;
+ }
+ /* If not an HTTP/1 message or
+ * if the status line was > 8192 bytes
+ */
+ else if ((buffer[5] != '1') || (len >= sizeof(buffer)-1)) {
+ apr_socket_close(p_conn->sock);
+ backend->connection = NULL;
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ apr_pstrcat(p, "Corrupt status line returned by remote "
+ "server: ", buffer, NULL));
+ }
+ backasswards = 0;
+
+ keepchar = buffer[12];
+ buffer[12] = '\0';
+ r->status = atoi(&buffer[9]);
+
+ if (keepchar != '\0') {
+ buffer[12] = keepchar;
+ } else {
+ /* 2616 requires the space in Status-Line; the origin
+ * server may have sent one but ap_rgetline_core will
+ * have stripped it. */
+ buffer[12] = ' ';
+ buffer[13] = '\0';
+ }
+ r->status_line = apr_pstrdup(p, &buffer[9]);
+
+ /* read the headers. */
+ /* N.B. for HTTP/1.0 clients, we have to fold line-wrapped headers*/
+            /* Also, take care with headers with multiple occurrences. */
+
+ /* First, tuck away all already existing cookies */
+ save_table = apr_table_make(r->pool, 2);
+ apr_table_do(addit_dammit, save_table, r->headers_out,
+ "Set-Cookie", NULL);
+
+ r->headers_out = ap_proxy_read_headers(r, rp, buffer,
+ sizeof(buffer), origin);
+ if (r->headers_out == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0,
+ r->server, "proxy: bad HTTP/%d.%d header "
+ "returned by %s (%s)", major, minor, r->uri,
+ r->method);
+ p_conn->close += 1;
+ /*
+ * ap_send_error relies on a headers_out to be present. we
+ * are in a bad position here.. so force everything we send out
+ * to have nothing to do with the incoming packet
+ */
+ r->headers_out = apr_table_make(r->pool,1);
+ r->status = HTTP_BAD_GATEWAY;
+ r->status_line = "bad gateway";
+ return r->status;
+ }
+
+ /* Now, add in the just read cookies */
+ apr_table_do(addit_dammit, save_table, r->headers_out,
+ "Set-Cookie", NULL);
+
+ /* and now load 'em all in */
+ if (!apr_is_empty_table(save_table)) {
+ apr_table_unset(r->headers_out, "Set-Cookie");
+ r->headers_out = apr_table_overlay(r->pool,
+ r->headers_out,
+ save_table);
+ }
+
+ /* can't have both Content-Length and Transfer-Encoding */
+ if (apr_table_get(r->headers_out, "Transfer-Encoding")
+ && apr_table_get(r->headers_out, "Content-Length")) {
+ /* 2616 section 4.4, point 3: "if both Transfer-Encoding
+ * and Content-Length are received, the latter MUST be
+ * ignored"; so unset it here to prevent any confusion
+ * later. */
+ apr_table_unset(r->headers_out, "Content-Length");
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
+ r->server,
+ "proxy: server %s returned Transfer-Encoding and Content-Length",
+ p_conn->name);
+ p_conn->close += 1;
+ }
+
+ /* strip connection listed hop-by-hop headers from response */
+ p_conn->close += ap_proxy_liststr(apr_table_get(r->headers_out,
+ "Connection"),
+ "close");
+ ap_proxy_clear_connection(p, r->headers_out);
+ if ((buf = apr_table_get(r->headers_out, "Content-Type"))) {
+ ap_set_content_type(r, apr_pstrdup(p, buf));
+ }
+ if (!ap_is_HTTP_INFO(r->status)) {
+ ap_proxy_pre_http_request(origin, rp);
+ }
+
+ /* handle Via header in response */
+ if (conf->viaopt != via_off && conf->viaopt != via_block) {
+ const char *server_name = ap_get_server_name(r);
+ /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host,
+ * then the server name returned by ap_get_server_name() is the
+                 * origin server name (which does not make much sense with Via: headers)
+ * so we use the proxy vhost's name instead.
+ */
+ if (server_name == r->hostname)
+ server_name = r->server->server_hostname;
+
+ /* create a "Via:" response header entry and merge it */
+ apr_table_mergen(r->headers_out, "Via",
+ (conf->viaopt == via_full)
+ ? apr_psprintf(p, "%d.%d %s%s (%s)",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name,
+ server_portstr,
+ AP_SERVER_BASEVERSION)
+ : apr_psprintf(p, "%d.%d %s%s",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name,
+ server_portstr)
+ );
+ }
+
+ /* cancel keepalive if HTTP/1.0 or less */
+ if ((major < 1) || (minor < 1)) {
+ p_conn->close += 1;
+ origin->keepalive = AP_CONN_CLOSE;
+ }
+ } else {
+ /* an http/0.9 response */
+ backasswards = 1;
+ r->status = 200;
+ r->status_line = "200 OK";
+ p_conn->close += 1;
+ }
+
+ if ( r->status != HTTP_CONTINUE ) {
+ received_continue = 0;
+ } else {
+ received_continue++;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL,
+ "proxy: HTTP: received 100 CONTINUE");
+ }
+
+ /* we must accept 3 kinds of date, but generate only 1 kind of date */
+ if ((buf = apr_table_get(r->headers_out, "Date")) != NULL) {
+ apr_table_set(r->headers_out, "Date",
+ ap_proxy_date_canon(p, buf));
+ }
+ if ((buf = apr_table_get(r->headers_out, "Expires")) != NULL) {
+ apr_table_set(r->headers_out, "Expires",
+ ap_proxy_date_canon(p, buf));
+ }
+ if ((buf = apr_table_get(r->headers_out, "Last-Modified")) != NULL) {
+ apr_table_set(r->headers_out, "Last-Modified",
+ ap_proxy_date_canon(p, buf));
+ }
+
+ /* munge the Location and URI response headers according to
+ * ProxyPassReverse
+ */
+ if ((buf = apr_table_get(r->headers_out, "Location")) != NULL) {
+ apr_table_set(r->headers_out, "Location",
+ ap_proxy_location_reverse_map(r, conf, buf));
+ }
+ if ((buf = apr_table_get(r->headers_out, "Content-Location")) != NULL) {
+ apr_table_set(r->headers_out, "Content-Location",
+ ap_proxy_location_reverse_map(r, conf, buf));
+ }
+ if ((buf = apr_table_get(r->headers_out, "URI")) != NULL) {
+ apr_table_set(r->headers_out, "URI",
+ ap_proxy_location_reverse_map(r, conf, buf));
+ }
+
+ if ((r->status == 401) && (conf->error_override != 0)) {
+ const char *wa = "WWW-Authenticate";
+ if ((buf = apr_table_get(r->headers_out, wa))) {
+ apr_table_set(r->err_headers_out, wa, buf);
+ } else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: origin server sent 401 without WWW-Authenticate header");
+ }
+ }
+
+ r->sent_bodyct = 1;
+ /* Is it an HTTP/0.9 response? If so, send the extra data */
+ if (backasswards) {
+ apr_ssize_t cntr = len;
+ e = apr_bucket_heap_create(buffer, cntr, NULL, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ }
+
+ /* send body - but only if a body is expected */
+ if ((!r->header_only) && /* not HEAD request */
+ (r->status > 199) && /* not any 1xx response */
+ (r->status != HTTP_NO_CONTENT) && /* not 204 */
+ (r->status != HTTP_RESET_CONTENT) && /* not 205 */
+ (r->status != HTTP_NOT_MODIFIED)) { /* not 304 */
+
+ /* We need to copy the output headers and treat them as input
+ * headers as well. BUT, we need to do this before we remove
+ * TE, so that they are preserved accordingly for
+ * ap_http_filter to know where to end.
+ */
+ rp->headers_in = apr_table_copy(r->pool, r->headers_out);
+
+ apr_table_unset(r->headers_out,"Transfer-Encoding");
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: start body send");
+
+ /*
+ * if we are overriding the errors, we can't put the content
+ * of the page into the brigade
+ */
+ if ( (conf->error_override ==0) || r->status < 400 ) {
+
+ /* read the body, pass it to the output filters */
+ int finish = FALSE;
+ while (ap_get_brigade(rp->input_filters,
+ bb,
+ AP_MODE_READBYTES,
+ APR_BLOCK_READ,
+ conf->io_buffer_size) == APR_SUCCESS) {
+#if DEBUGGING
+ {
+ apr_off_t readbytes;
+ apr_brigade_length(bb, 0, &readbytes);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
+ r->server, "proxy (PID %d): readbytes: %#x",
+ getpid(), readbytes);
+ }
+#endif
+ /* sanity check */
+ if (APR_BRIGADE_EMPTY(bb)) {
+ apr_brigade_cleanup(bb);
+ break;
+ }
+
+ /* found the last brigade? */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
+ /* if this is the last brigade, cleanup the
+ * backend connection first to prevent the
+ * backend server from hanging around waiting
+ * for a slow client to eat these bytes
+ */
+ ap_proxy_http_cleanup(r, p_conn, backend);
+ /* signal that we must leave */
+ finish = TRUE;
+ }
+
+ /* try send what we read */
+ if (ap_pass_brigade(r->output_filters, bb) != APR_SUCCESS
+ || c->aborted) {
+ /* Ack! Phbtt! Die! User aborted! */
+ p_conn->close = 1; /* this causes socket close below */
+ finish = TRUE;
+ }
+
+ /* make sure we always clean up after ourselves */
+ apr_brigade_cleanup(bb);
+
+ /* if we are done, leave */
+ if (TRUE == finish) {
+ break;
+ }
+ }
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: end body send");
+ } else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: header only");
+ }
+ }
+
+ /* See define of AP_MAX_INTERIM_RESPONSES for why */
+ if (received_continue > AP_MAX_INTERIM_RESPONSES) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ apr_psprintf(p,
+ "Too many (%d) interim responses from origin server",
+ received_continue));
+ }
+
+ if ( conf->error_override ) {
+ /* the code above this checks for 'OK' which is what the hook expects */
+ if ( r->status == HTTP_OK )
+ return OK;
+ else {
+ /* clear r->status for override error, otherwise ErrorDocument
+ * thinks that this is a recursive error, and doesn't find the
+ * custom error page
+ */
+ int status = r->status;
+ r->status = HTTP_OK;
+ /* Discard body, if one is expected */
+ if ((status > 199) && /* not any 1xx response */
+ (status != HTTP_NO_CONTENT) && /* not 204 */
+ (status != HTTP_RESET_CONTENT) && /* not 205 */
+ (status != HTTP_NOT_MODIFIED)) { /* not 304 */
+ ap_discard_request_body(rp);
+ }
+ return status;
+ }
+ } else
+ return OK;
+}
+
+static
+apr_status_t ap_proxy_http_cleanup(request_rec *r, proxy_http_conn_t *p_conn,
+ proxy_conn_rec *backend) {
+ /* If there are no KeepAlives, or if the connection has been signalled
+ * to close, close the socket and clean up
+ */
+
+ /* if the connection is < HTTP/1.1, or Connection: close,
+ * we close the socket, otherwise we leave it open for KeepAlive support
+ */
+ if (p_conn->close || (r->proto_num < HTTP_VERSION(1,1))) {
+ if (p_conn->sock) {
+ apr_socket_close(p_conn->sock);
+ p_conn->sock = NULL;
+ backend->connection = NULL;
+ }
+ }
+ return OK;
+}
+
+/*
+ * This handles http:// URLs, and other URLs using a remote proxy over http
+ * If proxyhost is NULL, then contact the server directly, otherwise
+ * go via the proxy.
+ * Note that if a proxy is used, then URLs other than http: can be accessed.
+ * Also, if we have trouble which is clearly specific to the proxy, then
+ * we return DECLINED so that we can try another proxy (or the direct
+ * route).
+ */
+int ap_proxy_http_handler(request_rec *r, proxy_server_conf *conf,
+ char *url, const char *proxyname,
+ apr_port_t proxyport)
+{
+ int status;
+ char server_portstr[32];
+ conn_rec *origin = NULL;
+ proxy_conn_rec *backend = NULL;
+ int is_ssl = 0;
+
+ /* Note: Memory pool allocation.
+     * A downstream keepalive connection is always tied to the existence
+     * (or absence) of an upstream keepalive connection. If this is not done then
+ * load balancing against multiple backend servers breaks (one backend
+ * server ends up taking 100% of the load), and the risk is run of
+ * downstream keepalive connections being kept open unnecessarily. This
+ * keeps webservers busy and ties up resources.
+ *
+ * As a result, we allocate all sockets out of the upstream connection
+ * pool, and when we want to reuse a socket, we check first whether the
+ * connection ID of the current upstream connection is the same as that
+ * of the connection when the socket was opened.
+ */
+ apr_pool_t *p = r->connection->pool;
+ conn_rec *c = r->connection;
+ apr_bucket_brigade *bb = apr_brigade_create(p, c->bucket_alloc);
+ apr_uri_t *uri = apr_palloc(r->connection->pool, sizeof(*uri));
+ proxy_http_conn_t *p_conn = apr_pcalloc(r->connection->pool,
+ sizeof(*p_conn));
+
+ /* is it for us? */
+ if (strncasecmp(url, "https:", 6) == 0) {
+ if (!ap_proxy_ssl_enable(NULL)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: HTTPS: declining URL %s"
+ " (mod_ssl not configured?)", url);
+ return DECLINED;
+ }
+ is_ssl = 1;
+ }
+ else if (!(strncasecmp(url, "http:", 5)==0 || (strncasecmp(url, "ftp:", 4)==0 && proxyname))) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: HTTP: declining URL %s", url);
+ return DECLINED; /* only interested in HTTP, or FTP via proxy */
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: HTTP: serving URL %s", url);
+
+
+ /* only use stored info for top-level pages. Sub requests don't share
+ * in keepalives
+ */
+ if (!r->main) {
+ backend = (proxy_conn_rec *) ap_get_module_config(c->conn_config,
+ &proxy_http_module);
+ }
+ /* create space for state information */
+ if (!backend) {
+ backend = apr_pcalloc(c->pool, sizeof(proxy_conn_rec));
+ backend->connection = NULL;
+ backend->hostname = NULL;
+ backend->port = 0;
+ if (!r->main) {
+ ap_set_module_config(c->conn_config, &proxy_http_module, backend);
+ }
+ }
+
+ backend->is_ssl = is_ssl;
+
+ /* Step One: Determine Who To Connect To */
+ status = ap_proxy_http_determine_connection(p, r, p_conn, c, conf, uri,
+ &url, proxyname, proxyport,
+ server_portstr,
+ sizeof(server_portstr));
+ if ( status != OK ) {
+ return status;
+ }
+
+ /* Step Two: Make the Connection */
+ status = ap_proxy_http_create_connection(p, r, p_conn, c, &origin, backend,
+ conf, proxyname);
+ if ( status != OK ) {
+ return status;
+ }
+
+ /* Step Three: Send the Request */
+ status = ap_proxy_http_request(p, r, p_conn, origin, conf, uri, url, bb,
+ server_portstr);
+ if ( status != OK ) {
+ return status;
+ }
+
+ /* Step Four: Receive the Response */
+ status = ap_proxy_http_process_response(p, r, p_conn, origin, backend, conf,
+ bb, server_portstr);
+ if ( status != OK ) {
+ /* clean up even if there is an error */
+ ap_proxy_http_cleanup(r, p_conn, backend);
+ return status;
+ }
+
+ /* Step Five: Clean Up */
+ status = ap_proxy_http_cleanup(r, p_conn, backend);
+ if ( status != OK ) {
+ return status;
+ }
+
+ return OK;
+}
+
+static void ap_proxy_http_register_hook(apr_pool_t *p)
+{
+ proxy_hook_scheme_handler(ap_proxy_http_handler, NULL, NULL, APR_HOOK_FIRST);
+ proxy_hook_canon_handler(ap_proxy_http_canon, NULL, NULL, APR_HOOK_FIRST);
+}
+
+module AP_MODULE_DECLARE_DATA proxy_http_module = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ NULL, /* command apr_table_t */
+ ap_proxy_http_register_hook/* register hooks */
+};
+
diff --git a/rubbos/app/httpd-2.0.64/modules/proxy/proxy_util.c b/rubbos/app/httpd-2.0.64/modules/proxy/proxy_util.c
new file mode 100644
index 00000000..bab945f5
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/proxy/proxy_util.c
@@ -0,0 +1,1120 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Utility routines for Apache proxy */
+#include "mod_proxy.h"
+
+
+static int proxy_match_ipaddr(struct dirconn_entry *This, request_rec *r);
+static int proxy_match_domainname(struct dirconn_entry *This, request_rec *r);
+static int proxy_match_hostname(struct dirconn_entry *This, request_rec *r);
+static int proxy_match_word(struct dirconn_entry *This, request_rec *r);
+
+APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(proxy, PROXY, int, create_req,
+ (request_rec *r, request_rec *pr), (r, pr),
+ OK, DECLINED)
+
+/* already called in the knowledge that the characters are hex digits */
+PROXY_DECLARE(int) ap_proxy_hex2c(const char *x)
+{
+ int i, ch;
+
+#if !APR_CHARSET_EBCDIC
+ ch = x[0];
+ if (apr_isdigit(ch))
+ i = ch - '0';
+ else if (apr_isupper(ch))
+ i = ch - ('A' - 10);
+ else
+ i = ch - ('a' - 10);
+ i <<= 4;
+
+ ch = x[1];
+ if (apr_isdigit(ch))
+ i += ch - '0';
+ else if (apr_isupper(ch))
+ i += ch - ('A' - 10);
+ else
+ i += ch - ('a' - 10);
+ return i;
+#else /*APR_CHARSET_EBCDIC*/
+ /* we assume that the hex value refers to an ASCII character
+ * so convert to EBCDIC so that it makes sense locally;
+ *
+ * example:
+ *
+ * client specifies %20 in URL to refer to a space char;
+ * at this point we're called with EBCDIC "20"; after turning
+ * EBCDIC "20" into binary 0x20, we then need to assume that 0x20
+ * represents an ASCII char and convert 0x20 to EBCDIC, yielding
+ * 0x40
+ */
+ char buf[1];
+
+ if (1 == sscanf(x, "%2x", &i)) {
+ buf[0] = i & 0xFF;
+ ap_xlate_proto_from_ascii(buf, 1);
+ return buf[0];
+ }
+ else {
+ return 0;
+ }
+#endif /*APR_CHARSET_EBCDIC*/
+}
+
+PROXY_DECLARE(void) ap_proxy_c2hex(int ch, char *x)
+{
+#if !APR_CHARSET_EBCDIC
+ int i;
+
+ x[0] = '%';
+ i = (ch & 0xF0) >> 4;
+ if (i >= 10)
+ x[1] = ('A' - 10) + i;
+ else
+ x[1] = '0' + i;
+
+ i = ch & 0x0F;
+ if (i >= 10)
+ x[2] = ('A' - 10) + i;
+ else
+ x[2] = '0' + i;
+#else /*APR_CHARSET_EBCDIC*/
+ static const char ntoa[] = { "0123456789ABCDEF" };
+ char buf[1];
+
+ ch &= 0xFF;
+
+ buf[0] = ch;
+ ap_xlate_proto_to_ascii(buf, 1);
+
+ x[0] = '%';
+ x[1] = ntoa[(buf[0] >> 4) & 0x0F];
+ x[2] = ntoa[buf[0] & 0x0F];
+ x[3] = '\0';
+#endif /*APR_CHARSET_EBCDIC*/
+}
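+
+/* Example of the two helpers above: ap_proxy_c2hex(' ', x) stores "%20" in
+ * x, and ap_proxy_hex2c("20") returns the space character again (on EBCDIC
+ * builds both go through an ASCII translation step first).
+ */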
+
+/*
+ * canonicalise a URL-encoded string
+ */
+
+/*
+ * Convert a URL-encoded string to canonical form.
+ * It decodes characters which need not be encoded,
+ * and encodes those which must be encoded, and does not touch
+ * those which must not be touched.
+ */
+PROXY_DECLARE(char *)ap_proxy_canonenc(apr_pool_t *p, const char *x, int len, enum enctype t,
+ int isenc)
+{
+ int i, j, ch;
+ char *y;
+ char *allowed; /* characters which should not be encoded */
+    char *reserved;    /* characters which must not be en/de-coded */
+
+/* N.B. in addition to :@&=, this allows ';' in an http path
+ * and '?' in an ftp path -- this may be revised
+ *
+ * Also, it makes a '+' character in a search string reserved, as
+ * it may be form-encoded. (Although RFC 1738 doesn't allow this -
+ * it only permits ; / ? : @ = & as reserved chars.)
+ */
+ if (t == enc_path)
+ allowed = "$-_.+!*'(),;:@&=";
+ else if (t == enc_search)
+ allowed = "$-_.!*'(),;:@&=";
+ else if (t == enc_user)
+ allowed = "$-_.+!*'(),;@&=";
+ else if (t == enc_fpath)
+ allowed = "$-_.+!*'(),?:@&=";
+ else /* if (t == enc_parm) */
+ allowed = "$-_.+!*'(),?/:@&=";
+
+ if (t == enc_path)
+ reserved = "/";
+ else if (t == enc_search)
+ reserved = "+";
+ else
+ reserved = "";
+
+ y = apr_palloc(p, 3 * len + 1);
+
+ for (i = 0, j = 0; i < len; i++, j++) {
+/* always handle '/' first */
+ ch = x[i];
+ if (strchr(reserved, ch)) {
+ y[j] = ch;
+ continue;
+ }
+/* decode it if not already done */
+ if (isenc && (isenc != PROXYREQ_REVERSE) && (ch == '%')) {
+ if (!apr_isxdigit(x[i + 1]) || !apr_isxdigit(x[i + 2]))
+ return NULL;
+ ch = ap_proxy_hex2c(&x[i + 1]);
+ i += 2;
+ if (ch != 0 && strchr(reserved, ch)) { /* keep it encoded */
+ ap_proxy_c2hex(ch, &y[j]);
+ j += 2;
+ continue;
+ }
+ }
+/* recode it, if necessary */
+ if (!apr_isalnum(ch) && !strchr(allowed, ch)) {
+ ap_proxy_c2hex(ch, &y[j]);
+ j += 2;
+ }
+ else
+ y[j] = ch;
+ }
+ y[j] = '\0';
+ return y;
+}
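+
+/* Illustrative example (enc_path with isenc set): "a%41%2Fb c" becomes
+ * "aA%2Fb%20c" -- "%41" is decoded to 'A' since it needs no encoding,
+ * "%2F" stays encoded because '/' is reserved in paths, and the literal
+ * space is encoded to "%20" because it is neither alphanumeric nor in the
+ * allowed list.
+ */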
+
+/*
+ * Parses network-location.
+ * urlp on input the URL; on output the path, after the leading /
+ * user NULL if no user/password permitted
+ * password holder for password
+ * host holder for host
+ * port port number; only set if one is supplied.
+ *
+ * Returns an error string.
+ */
+PROXY_DECLARE(char *)
+ ap_proxy_canon_netloc(apr_pool_t *p, char **const urlp, char **userp,
+ char **passwordp, char **hostp, apr_port_t *port)
+{
+ char *addr, *scope_id, *strp, *host, *url = *urlp;
+ char *user = NULL, *password = NULL;
+ apr_port_t tmp_port;
+ apr_status_t rv;
+
+ if (url[0] != '/' || url[1] != '/')
+ return "Malformed URL";
+ host = url + 2;
+ url = strchr(host, '/');
+ if (url == NULL)
+ url = "";
+ else
+        *(url++) = '\0';        /* skip separating '/' */
+
+ /* find _last_ '@' since it might occur in user/password part */
+ strp = strrchr(host, '@');
+
+ if (strp != NULL) {
+ *strp = '\0';
+ user = host;
+ host = strp + 1;
+
+/* find password */
+ strp = strchr(user, ':');
+ if (strp != NULL) {
+ *strp = '\0';
+ password = ap_proxy_canonenc(p, strp + 1, strlen(strp + 1), enc_user, 1);
+ if (password == NULL)
+ return "Bad %-escape in URL (password)";
+ }
+
+ user = ap_proxy_canonenc(p, user, strlen(user), enc_user, 1);
+ if (user == NULL)
+ return "Bad %-escape in URL (username)";
+ }
+ if (userp != NULL) {
+ *userp = user;
+ }
+ if (passwordp != NULL) {
+ *passwordp = password;
+ }
+
+ /* Parse the host string to separate host portion from optional port.
+ * Perform range checking on port.
+ */
+ rv = apr_parse_addr_port(&addr, &scope_id, &tmp_port, host, p);
+ if (rv != APR_SUCCESS || addr == NULL || scope_id != NULL) {
+ return "Invalid host/port";
+ }
+ if (tmp_port != 0) { /* only update caller's port if port was specified */
+ *port = tmp_port;
+ }
+
+ ap_str_tolower(addr); /* DNS names are case-insensitive */
+
+ *urlp = url;
+ *hostp = addr;
+
+ return NULL;
+}
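+
+/* Illustrative example (hypothetical URL): for
+ * *urlp == "//joe:secret@backend.example.com:8081/docs" the routine returns
+ * NULL (success) and leaves *userp == "joe", *passwordp == "secret",
+ * *hostp == "backend.example.com", *port == 8081, and *urlp == "docs"
+ * (the text after the separating '/').
+ */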
+
+/*
+ * If the date is a valid RFC 850 date or asctime() date, then it
+ * is converted to the RFC 1123 format.
+ */
+PROXY_DECLARE(const char *)
+ ap_proxy_date_canon(apr_pool_t *p, const char *date)
+{
+ apr_status_t rv;
+ char* ndate;
+
+ apr_time_t time = apr_date_parse_http(date);
+ if (!time) {
+ return date;
+ }
+
+ ndate = apr_palloc(p, APR_RFC822_DATE_LEN);
+ rv = apr_rfc822_date(ndate, time);
+ if (rv != APR_SUCCESS) {
+ return date;
+ }
+
+ return ndate;
+}
+
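For illustration, a hypothetical RFC 850 date run through the helper above, assuming a request_rec *r in scope:

    const char *in  = "Sunday, 06-Nov-94 08:49:37 GMT";
    const char *out = ap_proxy_date_canon(r->pool, in);
    /* out == "Sun, 06 Nov 1994 08:49:37 GMT"; a date that cannot be
     * parsed is returned unchanged */
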
+PROXY_DECLARE(request_rec *)ap_proxy_make_fake_req(conn_rec *c, request_rec *r)
+{
+ request_rec *rp = apr_pcalloc(c->pool, sizeof(*r));
+
+ rp->pool = c->pool;
+ rp->status = HTTP_OK;
+
+ rp->headers_in = apr_table_make(c->pool, 50);
+ rp->subprocess_env = apr_table_make(c->pool, 50);
+ rp->headers_out = apr_table_make(c->pool, 12);
+ rp->err_headers_out = apr_table_make(c->pool, 5);
+ rp->notes = apr_table_make(c->pool, 5);
+
+ rp->server = r->server;
+ rp->proxyreq = r->proxyreq;
+ rp->request_time = r->request_time;
+ rp->connection = c;
+ rp->output_filters = c->output_filters;
+ rp->input_filters = c->input_filters;
+ rp->proto_output_filters = c->output_filters;
+ rp->proto_input_filters = c->input_filters;
+
+ rp->request_config = ap_create_request_config(c->pool);
+ proxy_run_create_req(r, rp);
+
+ return rp;
+}
+
+/*
+ * Reads headers from a buffer and returns them in a table.
+ * Returns NULL on error.
+ * This routine tries to deal with overly long lines and continuation lines.
+ *
+ * Note: Currently the headers are passed through unmerged. This has to be
+ * done so that headers which react badly to merging (such as Set-Cookie
+ * headers, which contain commas within the date field) do not get stuffed
+ * up.
+ */
+PROXY_DECLARE(apr_table_t *)ap_proxy_read_headers(request_rec *r, request_rec *rr, char *buffer, int size, conn_rec *c)
+{
+ apr_table_t *headers_out;
+ int len;
+ char *value, *end;
+ char field[MAX_STRING_LEN];
+ int saw_headers = 0;
+ void *sconf = r->server->module_config;
+ proxy_server_conf *psc;
+
+ psc = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
+
+ headers_out = apr_table_make(r->pool, 20);
+
+ /*
+ * Read header lines until we get the empty separator line, a read error,
+ * the connection closes (EOF), or we timeout.
+ */
+ while ((len = ap_getline(buffer, size, rr, 1)) > 0) {
+
+ if (!(value = strchr(buffer, ':'))) { /* Find the colon separator */
+
+ /* We may encounter invalid headers, usually from buggy
+ * MS IIS servers, so we need to determine just how to handle
+ * them. We can either ignore them, assume that they mark the
+ * start-of-body (eg: a missing CRLF) or (the default) mark
+ * the headers as totally bogus and return a 500. The sole
+ * exception is an extra "HTTP/1.0 200, OK" line sprinkled
+ * in between the usual MIME headers, which is a favorite
+ * IIS bug.
+ */
+ /* XXX: The mask check is buggy if we ever see an HTTP/1.10 */
+
+ if (!apr_date_checkmask(buffer, "HTTP/#.# ###*")) {
+ if (psc->badopt == bad_error) {
+ /* Nope, it wasn't even an extra HTTP header. Give up. */
+ return NULL;
+ }
+ else if (psc->badopt == bad_body) {
+ /* if we've already started loading headers_out, then
+ * return what we've accumulated so far, in the hopes
+ * that they are useful. Otherwise, we completely bail.
+ */
+ /* FIXME: We've already scarfed the supposed 1st line of
+ * the body, so the actual content may end up being bogus
+ * as well. If the content is HTML, we may be lucky.
+ */
+ if (saw_headers) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, r->server,
+ "proxy: Starting body due to bogus non-header in headers "
+ "returned by %s (%s)", r->uri, r->method);
+ return headers_out;
+ } else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, r->server,
+ "proxy: No HTTP headers "
+ "returned by %s (%s)", r->uri, r->method);
+ return NULL;
+ }
+ }
+ }
+ /* this is the psc->badopt == bad_ignore case */
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, r->server,
+ "proxy: Ignoring bogus HTTP header "
+ "returned by %s (%s)", r->uri, r->method);
+ continue;
+ }
+
+ *value = '\0';
+ ++value;
+ /* XXX: RFC2068 defines only SP and HT as whitespace, this test is
+ * wrong... and so are many others probably.
+ */
+ while (apr_isspace(*value))
+ ++value; /* Skip to start of value */
+
+ /* should strip trailing whitespace as well */
+ for (end = &value[strlen(value)-1]; end > value && apr_isspace(*end); --end)
+ *end = '\0';
+
+ /* make sure we add so as not to destroy duplicated headers */
+ apr_table_add(headers_out, buffer, value);
+ saw_headers = 1;
+
+ /* the header was too long; at the least we should skip extra data */
+ if (len >= size - 1) {
+ while ((len = ap_getline(field, MAX_STRING_LEN, rr, 1))
+ >= MAX_STRING_LEN - 1) {
+ /* soak up the extra data */
+ }
+ if (len == 0) /* time to exit the larger loop as well */
+ break;
+ }
+ }
+ return headers_out;
+}
+
+
+/*
+ * list is a comma-separated list of case-insensitive tokens, with
+ * optional whitespace around the tokens.
+ * Returns 1 if the token val is found in the list, or 0
+ * otherwise.
+ */
+PROXY_DECLARE(int) ap_proxy_liststr(const char *list, const char *val)
+{
+ int len, i;
+ const char *p;
+
+ len = strlen(val);
+
+ while (list != NULL) {
+ p = ap_strchr_c(list, ',');
+ if (p != NULL) {
+ i = p - list;
+ do
+ p++;
+ while (apr_isspace(*p));
+ }
+ else
+ i = strlen(list);
+
+ while (i > 0 && apr_isspace(list[i - 1]))
+ i--;
+ if (i == len && strncasecmp(list, val, len) == 0)
+ return 1;
+ list = p;
+ }
+ return 0;
+}
+
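An illustrative membership test against a made-up Connection header value; the match is case-insensitive and ignores the whitespace around the tokens:

    if (ap_proxy_liststr("Keep-Alive, Upgrade", "upgrade")) {
        /* taken: "upgrade" matches the second token */
    }
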
+/*
+ * list is a comma-separated list of case-insensitive tokens, with
+ * optional whitespace around the tokens.
+ * If val appears in the list of tokens, it is removed from the list,
+ * and the new list is returned.
+ */
+PROXY_DECLARE(char *)ap_proxy_removestr(apr_pool_t *pool, const char *list, const char *val)
+{
+ int len, i;
+ const char *p;
+ char *new = NULL;
+
+ len = strlen(val);
+
+ while (list != NULL) {
+ p = ap_strchr_c(list, ',');
+ if (p != NULL) {
+ i = p - list;
+ do
+ p++;
+ while (apr_isspace(*p));
+ }
+ else
+ i = strlen(list);
+
+ while (i > 0 && apr_isspace(list[i - 1]))
+ i--;
+ if (i == len && strncasecmp(list, val, len) == 0) {
+ /* do nothing */
+ }
+ else {
+ if (new)
+ new = apr_pstrcat(pool, new, ",", apr_pstrndup(pool, list, i), NULL);
+ else
+ new = apr_pstrndup(pool, list, i);
+ }
+ list = p;
+ }
+ return new;
+}
+
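A small sketch with a made-up header value, assuming a request_rec *r in scope; note that the rebuilt list drops the whitespace after the commas:

    char *rest = ap_proxy_removestr(r->pool, "close, TE, Keep-Alive", "te");
    /* rest == "close,Keep-Alive"; NULL would mean every token matched */
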
+/*
+ * Converts 8 hex digits to a time integer
+ */
+PROXY_DECLARE(int) ap_proxy_hex2sec(const char *x)
+{
+ int i, ch;
+ unsigned int j;
+
+ for (i = 0, j = 0; i < 8; i++) {
+ ch = x[i];
+ j <<= 4;
+ if (apr_isdigit(ch))
+ j |= ch - '0';
+ else if (apr_isupper(ch))
+ j |= ch - ('A' - 10);
+ else
+ j |= ch - ('a' - 10);
+ }
+ if (j == 0xffffffff)
+ return -1; /* so that it works with 8-byte ints */
+ else
+ return j;
+}
+
+/*
+ * Converts a time integer to 8 hex digits
+ */
+PROXY_DECLARE(void) ap_proxy_sec2hex(int t, char *y)
+{
+ int i, ch;
+ unsigned int j = t;
+
+ for (i = 7; i >= 0; i--) {
+ ch = j & 0xF;
+ j >>= 4;
+ if (ch >= 10)
+ y[i] = ch + ('A' - 10);
+ else
+ y[i] = ch + '0';
+ }
+ y[8] = '\0';
+}
+
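A hypothetical round trip through the two helpers above; the fixed eight-digit width lets a stored value be rewritten in place without changing its size:

    char buf[9];
    int secs;

    ap_proxy_sec2hex(3600, buf);        /* buf  == "00000E10" */
    secs = ap_proxy_hex2sec(buf);       /* secs == 3600       */
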
+PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *message)
+{
+ apr_table_setn(r->notes, "error-notes",
+ apr_pstrcat(r->pool,
+ "The proxy server could not handle the request "
+ "<em><a href=\"", ap_escape_uri(r->pool, r->uri),
+ "\">", ap_escape_html(r->pool, r->method),
+ "&nbsp;",
+ ap_escape_html(r->pool, r->uri), "</a></em>.<p>\n"
+ "Reason: <strong>",
+ ap_escape_html(r->pool, message),
+ "</strong></p>", NULL));
+
+ /* Allow "error-notes" string to be printed by ap_send_error_response() */
+ apr_table_setn(r->notes, "verbose-error-to", apr_pstrdup(r->pool, "*"));
+
+ r->status_line = apr_psprintf(r->pool, "%3.3u Proxy Error", statuscode);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "proxy: %s returned by %s", message, r->uri);
+ return statuscode;
+}
+
+static const char *
+ proxy_get_host_of_request(request_rec *r)
+{
+ char *url, *user = NULL, *password = NULL, *err, *host;
+ apr_port_t port;
+
+ if (r->hostname != NULL)
+ return r->hostname;
+
+ /* Set url to the first char after "scheme://" */
+ if ((url = strchr(r->uri, ':')) == NULL
+ || url[1] != '/' || url[2] != '/')
+ return NULL;
+
+ url = apr_pstrdup(r->pool, &url[1]); /* make it point to "//", which is what proxy_canon_netloc expects */
+
+ err = ap_proxy_canon_netloc(r->pool, &url, &user, &password, &host, &port);
+
+ if (err != NULL)
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "%s", err);
+
+ r->hostname = host;
+
+ return host; /* ought to return the port, too */
+}
+
+/* Return TRUE if addr represents an IP address (or an IP network address) */
+PROXY_DECLARE(int) ap_proxy_is_ipaddr(struct dirconn_entry *This, apr_pool_t *p)
+{
+ const char *addr = This->name;
+ long ip_addr[4];
+ int i, quads;
+ long bits;
+
+ /* if the address is given with an explicit netmask, use that */
+ /* Due to a deficiency in apr_inet_addr(), it is impossible to parse */
+ /* "partial" addresses (with less than 4 quads) correctly, i.e. */
+ /* 192.168.123 is parsed as 192.168.0.123, which is not what I want. */
+ /* I therefore have to parse the IP address manually: */
+ /*if (proxy_readmask(This->name, &This->addr.s_addr, &This->mask.s_addr) == 0) */
+ /* addr and mask were set by proxy_readmask() */
+ /*return 1; */
+
+ /* Parse IP addr manually, optionally allowing */
+ /* abbreviated net addresses like 192.168. */
+
+ /* Iterate over up to 4 (dotted) quads. */
+ for (quads = 0; quads < 4 && *addr != '\0'; ++quads) {
+ char *tmp;
+
+ if (*addr == '/' && quads > 0) /* netmask starts here. */
+ break;
+
+ if (!apr_isdigit(*addr))
+ return 0; /* no digit at start of quad */
+
+ ip_addr[quads] = strtol(addr, &tmp, 0);
+
+ if (tmp == addr) /* expected a digit, found something else */
+ return 0;
+
+ if (ip_addr[quads] < 0 || ip_addr[quads] > 255) {
+ /* invalid octet */
+ return 0;
+ }
+
+ addr = tmp;
+
+ if (*addr == '.' && quads != 3)
+ ++addr; /* after the 4th quad, a dot would be illegal */
+ }
+
+ for (This->addr.s_addr = 0, i = 0; i < quads; ++i)
+ This->addr.s_addr |= htonl(ip_addr[i] << (24 - 8 * i));
+
+ if (addr[0] == '/' && apr_isdigit(addr[1])) { /* net mask follows: */
+ char *tmp;
+
+ ++addr;
+
+ bits = strtol(addr, &tmp, 0);
+
+ if (tmp == addr) /* expected a digit, found something else */
+ return 0;
+
+ addr = tmp;
+
+ if (bits < 0 || bits > 32) /* netmask must be between 0 and 32 */
+ return 0;
+
+ }
+ else {
+ /* Determine (i.e., "guess") netmask by counting the */
+ /* number of trailing .0's; reduce #quads appropriately */
+ /* (so that 192.168.0.0 is equivalent to 192.168.) */
+ while (quads > 0 && ip_addr[quads - 1] == 0)
+ --quads;
+
+ /* "IP Address should be given in dotted-quad form, optionally followed by a netmask (e.g., 192.168.111.0/24)"; */
+ if (quads < 1)
+ return 0;
+
+ /* every zero-byte counts as 8 zero-bits */
+ bits = 8 * quads;
+
+ if (bits != 32) /* no warning for fully qualified IP address */
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "Warning: NetMask not supplied with IP-Addr; guessing: %s/%ld",
+ inet_ntoa(This->addr), bits);
+ }
+
+ This->mask.s_addr = htonl(APR_INADDR_NONE << (32 - bits));
+
+ if (*addr == '\0' && (This->addr.s_addr & ~This->mask.s_addr) != 0) {
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "Warning: NetMask and IP-Addr disagree in %s/%ld",
+ inet_ntoa(This->addr), bits);
+ This->addr.s_addr &= This->mask.s_addr;
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ " Set to %s/%ld",
+ inet_ntoa(This->addr), bits);
+ }
+
+ if (*addr == '\0') {
+ This->matcher = proxy_match_ipaddr;
+ return 1;
+ }
+ else
+ return (*addr == '\0'); /* okay iff we've parsed the whole string */
+}
+
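Two illustrative entries of the kind a NoProxy-style directive might hand over; the dirconn_entry and the apr_pool_t *p are hypothetical:

    struct dirconn_entry ent;

    ent.name = apr_pstrdup(p, "192.168.");
    ap_proxy_is_ipaddr(&ent, p);     /* netmask guessed: 192.168.0.0/16 */

    ent.name = apr_pstrdup(p, "10.0.0.0/8");
    ap_proxy_is_ipaddr(&ent, p);     /* explicit netmask, nothing guessed */
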
+/* Return TRUE if the host of the request matches the IP/netmask stored in This */
+static int proxy_match_ipaddr(struct dirconn_entry *This, request_rec *r)
+{
+ int i, ip_addr[4];
+ struct in_addr addr, *ip;
+ const char *host = proxy_get_host_of_request(r);
+
+ if (host == NULL) /* oops! */
+ return 0;
+
+ memset(&addr, '\0', sizeof addr);
+ memset(ip_addr, '\0', sizeof ip_addr);
+
+ if (4 == sscanf(host, "%d.%d.%d.%d", &ip_addr[0], &ip_addr[1], &ip_addr[2], &ip_addr[3])) {
+ for (addr.s_addr = 0, i = 0; i < 4; ++i)
+ addr.s_addr |= htonl(ip_addr[i] << (24 - 8 * i));
+
+ if (This->addr.s_addr == (addr.s_addr & This->mask.s_addr)) {
+#if DEBUGGING
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "1)IP-Match: %s[%s] <-> ", host, inet_ntoa(addr));
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "%s/", inet_ntoa(This->addr));
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "%s", inet_ntoa(This->mask));
+#endif
+ return 1;
+ }
+#if DEBUGGING
+ else {
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "1)IP-NoMatch: %s[%s] <-> ", host, inet_ntoa(addr));
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "%s/", inet_ntoa(This->addr));
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "%s", inet_ntoa(This->mask));
+ }
+#endif
+ }
+ else {
+ struct apr_sockaddr_t *reqaddr;
+
+ if (apr_sockaddr_info_get(&reqaddr, host, APR_UNSPEC, 0, 0, r->pool)
+ != APR_SUCCESS) {
+#if DEBUGGING
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "2)IP-NoMatch: hostname=%s msg=Host not found",
+ host);
+#endif
+ return 0;
+ }
+
+ /* Try to deal with multiple IP addr's for a host */
+ /* FIXME: This needs to be able to deal with IPv6 */
+ while (reqaddr) {
+ ip = (struct in_addr *) reqaddr->ipaddr_ptr;
+ if (This->addr.s_addr == (ip->s_addr & This->mask.s_addr)) {
+#if DEBUGGING
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "3)IP-Match: %s[%s] <-> ", host,
+ inet_ntoa(*ip));
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "%s/", inet_ntoa(This->addr));
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "%s", inet_ntoa(This->mask));
+#endif
+ return 1;
+ }
+#if DEBUGGING
+ else {
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "3)IP-NoMatch: %s[%s] <-> ", host,
+ inet_ntoa(*ip));
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "%s/", inet_ntoa(This->addr));
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "%s", inet_ntoa(This->mask));
+ }
+#endif
+ reqaddr = reqaddr->next;
+ }
+ }
+
+ return 0;
+}
+
+/* Return TRUE if addr represents a domain name */
+PROXY_DECLARE(int) ap_proxy_is_domainname(struct dirconn_entry *This, apr_pool_t *p)
+{
+ char *addr = This->name;
+ int i;
+
+ /* Domain name must start with a '.' */
+ if (addr[0] != '.')
+ return 0;
+
+ /* rfc1035 says DNS names must consist of "[-a-zA-Z0-9]" and '.' */
+ for (i = 0; apr_isalnum(addr[i]) || addr[i] == '-' || addr[i] == '.'; ++i)
+ continue;
+
+#if 0
+ if (addr[i] == ':') {
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
+ "@@@@ handle optional port in proxy_is_domainname()");
+ /* @@@@ handle optional port */
+ }
+#endif
+
+ if (addr[i] != '\0')
+ return 0;
+
+ /* Strip trailing dots */
+ for (i = strlen(addr) - 1; i > 0 && addr[i] == '.'; --i)
+ addr[i] = '\0';
+
+ This->matcher = proxy_match_domainname;
+ return 1;
+}
+
+/* Return TRUE if host "host" is in domain "domain" */
+static int proxy_match_domainname(struct dirconn_entry *This, request_rec *r)
+{
+ const char *host = proxy_get_host_of_request(r);
+ int d_len = strlen(This->name), h_len;
+
+ if (host == NULL) /* some error was logged already */
+ return 0;
+
+ h_len = strlen(host);
+
+ /* @@@ do this within the setup? */
+ /* Ignore trailing dots in domain comparison: */
+ while (d_len > 0 && This->name[d_len - 1] == '.')
+ --d_len;
+ while (h_len > 0 && host[h_len - 1] == '.')
+ --h_len;
+ return h_len > d_len
+ && strncasecmp(&host[h_len - d_len], This->name, d_len) == 0;
+}
+
+/* Return TRUE if host represents a host name */
+PROXY_DECLARE(int) ap_proxy_is_hostname(struct dirconn_entry *This, apr_pool_t *p)
+{
+ struct apr_sockaddr_t *addr;
+ char *host = This->name;
+ int i;
+
+ /* Host names must not start with a '.' */
+ if (host[0] == '.')
+ return 0;
+
+ /* rfc1035 says DNS names must consist of "[-a-zA-Z0-9]" and '.' */
+ for (i = 0; apr_isalnum(host[i]) || host[i] == '-' || host[i] == '.'; ++i);
+
+ if (host[i] != '\0' || apr_sockaddr_info_get(&addr, host, APR_UNSPEC, 0, 0, p) != APR_SUCCESS)
+ return 0;
+
+ This->hostaddr = addr;
+
+ /* Strip trailing dots */
+ for (i = strlen(host) - 1; i > 0 && host[i] == '.'; --i)
+ host[i] = '\0';
+
+ This->matcher = proxy_match_hostname;
+ return 1;
+}
+
+/* Return TRUE if the configured host name equals the host of the request */
+static int proxy_match_hostname(struct dirconn_entry *This, request_rec *r)
+{
+ char *host = This->name;
+ const char *host2 = proxy_get_host_of_request(r);
+ int h2_len;
+ int h1_len;
+
+ if (host == NULL || host2 == NULL)
+ return 0; /* oops! */
+
+ h2_len = strlen(host2);
+ h1_len = strlen(host);
+
+#if 0
+ struct apr_sockaddr_t *addr = *This->hostaddr;
+
+ /* Try to deal with multiple IP addr's for a host */
+ while (addr) {
+ if (addr->ipaddr_ptr == ? ? ? ? ? ? ? ? ? ? ? ? ?)
+ return 1;
+ addr = addr->next;
+ }
+#endif
+
+ /* Ignore trailing dots in host2 comparison: */
+ while (h2_len > 0 && host2[h2_len - 1] == '.')
+ --h2_len;
+ while (h1_len > 0 && host[h1_len - 1] == '.')
+ --h1_len;
+ return h1_len == h2_len
+ && strncasecmp(host, host2, h1_len) == 0;
+}
+
+/* Return TRUE if addr is to be matched as a word */
+PROXY_DECLARE(int) ap_proxy_is_word(struct dirconn_entry *This, apr_pool_t *p)
+{
+ This->matcher = proxy_match_word;
+ return 1;
+}
+
+/* Return TRUE if the configured string (This->name) occurs literally in the request's host name */
+static int proxy_match_word(struct dirconn_entry *This, request_rec *r)
+{
+ const char *host = proxy_get_host_of_request(r);
+ return host != NULL && ap_strstr_c(host, This->name) != NULL;
+}
+
+/* Checks whether a host in uri_addr matches the ProxyBlock list */
+PROXY_DECLARE(int) ap_proxy_checkproxyblock(request_rec *r, proxy_server_conf *conf,
+ apr_sockaddr_t *uri_addr)
+{
+ int j;
+ apr_sockaddr_t * src_uri_addr = uri_addr;
+ /* XXX FIXME: conf->noproxies->elts is part of an opaque structure */
+ for (j = 0; j < conf->noproxies->nelts; j++) {
+ struct noproxy_entry *npent = (struct noproxy_entry *) conf->noproxies->elts;
+ struct apr_sockaddr_t *conf_addr = npent[j].addr;
+ uri_addr = src_uri_addr;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: checking remote machine [%s] against [%s]", uri_addr->hostname, npent[j].name);
+ if ((npent[j].name && ap_strstr_c(uri_addr->hostname, npent[j].name))
+ || npent[j].name[0] == '*') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, r->server,
+ "proxy: connect to remote machine %s blocked: name %s matched", uri_addr->hostname, npent[j].name);
+ return HTTP_FORBIDDEN;
+ }
+ while (conf_addr) {
+ while (uri_addr) {
+ char *conf_ip;
+ char *uri_ip;
+ apr_sockaddr_ip_get(&conf_ip, conf_addr);
+ apr_sockaddr_ip_get(&uri_ip, uri_addr);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: ProxyBlock comparing %s and %s", conf_ip, uri_ip);
+ if (!apr_strnatcasecmp(conf_ip, uri_ip)) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, r->server,
+ "proxy: connect to remote machine %s blocked: IP %s matched", uri_addr->hostname, conf_ip);
+ return HTTP_FORBIDDEN;
+ }
+ uri_addr = uri_addr->next;
+ }
+ conf_addr = conf_addr->next;
+ }
+ }
+ return OK;
+}
+
+/* set up the minimal filter set */
+PROXY_DECLARE(int) ap_proxy_pre_http_request(conn_rec *c, request_rec *r)
+{
+ ap_add_input_filter("HTTP_IN", NULL, r, c);
+ return OK;
+}
+
+/* converts a series of buckets into a string
+ * XXX: BillS says this function performs essentially the same function as
+ * ap_rgetline() in protocol.c. Deprecate this function and use ap_rgetline()
+ * instead? I think ap_proxy_string_read() will not work properly on non-ASCII
+ * (EBCDIC) machines either.
+ */
+PROXY_DECLARE(apr_status_t) ap_proxy_string_read(conn_rec *c, apr_bucket_brigade *bb,
+ char *buff, apr_size_t bufflen, int *eos)
+{
+ apr_bucket *e;
+ apr_status_t rv;
+ char *pos = buff;
+ char *response;
+ int found = 0;
+ apr_size_t len;
+
+ /* start with an empty string */
+ buff[0] = 0;
+ *eos = 0;
+
+ /* loop through each brigade */
+ while (!found) {
+ /* get brigade from network one line at a time */
+ if (APR_SUCCESS != (rv = ap_get_brigade(c->input_filters, bb,
+ AP_MODE_GETLINE,
+ APR_BLOCK_READ,
+ 0))) {
+ return rv;
+ }
+ /* loop through each bucket */
+ while (!found) {
+ if (*eos || APR_BRIGADE_EMPTY(bb)) {
+ /* The connection aborted or timed out */
+ return APR_ECONNABORTED;
+ }
+ e = APR_BRIGADE_FIRST(bb);
+ if (APR_BUCKET_IS_EOS(e)) {
+ *eos = 1;
+ }
+ else {
+ if (APR_SUCCESS != (rv = apr_bucket_read(e, (const char **)&response, &len, APR_BLOCK_READ))) {
+ return rv;
+ }
+ /* is string LF terminated?
+ * XXX: This check can be made more efficient by simply checking
+ * if the last character in the 'response' buffer is an ASCII_LF.
+ * See ap_rgetline() for an example.
+ */
+ if (memchr(response, APR_ASCII_LF, len)) {
+ found = 1;
+ }
+ /* concat strings until buff is full - then throw the data away */
+ if (len > ((bufflen-1)-(pos-buff))) {
+ len = (bufflen-1)-(pos-buff);
+ }
+ if (len > 0) {
+ pos = apr_cpystrn(pos, response, len);
+ }
+ }
+ APR_BUCKET_REMOVE(e);
+ apr_bucket_destroy(e);
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+/* unmerge an element in the table */
+PROXY_DECLARE(void) ap_proxy_table_unmerge(apr_pool_t *p, apr_table_t *t, char *key)
+{
+ apr_off_t offset = 0;
+ apr_off_t count = 0;
+ char *value = NULL;
+
+ /* get the value to unmerge */
+ const char *initial = apr_table_get(t, key);
+ if (!initial) {
+ return;
+ }
+ value = apr_pstrdup(p, initial);
+
+ /* remove the value from the headers */
+ apr_table_unset(t, key);
+
+ /* find each comma */
+ while (value[count]) {
+ if (value[count] == ',') {
+ value[count] = 0;
+ apr_table_add(t, key, value + offset);
+ offset = count + 1;
+ }
+ count++;
+ }
+ apr_table_add(t, key, value + offset);
+}
+
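A small sketch with a hypothetical merged header, assuming a request_rec *r in scope; after the call the table holds one entry per comma-separated value:

    apr_table_set(r->headers_out, "Cache-Control", "no-cache,private");
    ap_proxy_table_unmerge(r->pool, r->headers_out, "Cache-Control");
    /* now two entries: "Cache-Control: no-cache" and
     * "Cache-Control: private" */
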
+PROXY_DECLARE(int) ap_proxy_connect_to_backend(apr_socket_t **newsock,
+ const char *proxy_function,
+ apr_sockaddr_t *backend_addr,
+ const char *backend_name,
+ proxy_server_conf *conf,
+ server_rec *s,
+ apr_pool_t *p)
+{
+ apr_status_t rv;
+ int connected = 0;
+ int loglevel;
+
+ while (backend_addr && !connected) {
+ if ((rv = apr_socket_create(newsock, backend_addr->family,
+ SOCK_STREAM, p)) != APR_SUCCESS) {
+ loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
+ ap_log_error(APLOG_MARK, loglevel, rv, s,
+ "proxy: %s: error creating fam %d socket for target %s",
+ proxy_function,
+ backend_addr->family,
+ backend_name);
+ /* this could be an IPv6 address from the DNS but the
+ * local machine won't give us an IPv6 socket; hopefully the
+ * DNS returned an additional address to try
+ */
+ backend_addr = backend_addr->next;
+ continue;
+ }
+
+#if !defined(TPF) && !defined(BEOS)
+ if (conf->recv_buffer_size > 0 &&
+ (rv = apr_socket_opt_set(*newsock, APR_SO_RCVBUF,
+ conf->recv_buffer_size))) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "apr_socket_opt_set(SO_RCVBUF): Failed to set "
+ "ProxyReceiveBufferSize, using default");
+ }
+#endif
+
+ /* Set a timeout on the socket */
+ if (conf->timeout_set == 1) {
+ apr_socket_timeout_set(*newsock, conf->timeout);
+ }
+ else {
+ apr_socket_timeout_set(*newsock, s->timeout);
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "proxy: %s: fam %d socket created to connect to %s",
+ proxy_function, backend_addr->family, backend_name);
+
+ /* make the connection out of the socket */
+ rv = apr_connect(*newsock, backend_addr);
+
+ /* if an error occurred, loop round and try again */
+ if (rv != APR_SUCCESS) {
+ apr_socket_close(*newsock);
+ loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
+ ap_log_error(APLOG_MARK, loglevel, rv, s,
+ "proxy: %s: attempt to connect to %pI (%s) failed",
+ proxy_function,
+ backend_addr,
+ backend_name);
+ backend_addr = backend_addr->next;
+ continue;
+ }
+ connected = 1;
+ }
+ return connected ? 0 : 1;
+}
+
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/.deps b/rubbos/app/httpd-2.0.64/modules/ssl/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/Makefile b/rubbos/app/httpd-2.0.64/modules/ssl/Makefile
new file mode 100644
index 00000000..b624d817
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/Makefile
@@ -0,0 +1,43 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/ssl
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/ssl
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/ssl
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# standard stuff
+#
+
+include $(top_srcdir)/build/special.mk
+
+#
+# developer stuff
+# (we really don't expect end users to use these targets!)
+#
+
+ssl_expr_scan.c: $(top_srcdir)/modules/ssl/ssl_expr_scan.l ssl_expr_parse.h
+ flex -Pssl_expr_yy -s -B $(top_srcdir)/modules/ssl/ssl_expr_scan.l
+ sed -e '/$$Header:/d' <lex.ssl_expr_yy.c >ssl_expr_scan.c && rm -f lex.ssl_expr_yy.c
+
+ssl_expr_parse.c ssl_expr_parse.h: $(top_srcdir)/modules/ssl/ssl_expr_parse.y
+ yacc -d $(top_srcdir)/modules/ssl/ssl_expr_parse.y
+ sed -e 's;yy;ssl_expr_yy;g' \
+ -e '/#if defined(c_plusplus) || defined(__cplusplus)/,/#endif/d' \
+ <y.tab.c >ssl_expr_parse.c && rm -f y.tab.c
+ sed -e 's;yy;ssl_expr_yy;g' \
+ <y.tab.h >ssl_expr_parse.h && rm -f y.tab.h
+
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/Makefile.in b/rubbos/app/httpd-2.0.64/modules/ssl/Makefile.in
new file mode 100644
index 00000000..a5153f3a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/Makefile.in
@@ -0,0 +1,38 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# standard stuff
+#
+
+include $(top_srcdir)/build/special.mk
+
+#
+# developer stuff
+# (we really don't expect end users to use these targets!)
+#
+
+ssl_expr_scan.c: $(top_srcdir)/modules/ssl/ssl_expr_scan.l ssl_expr_parse.h
+ flex -Pssl_expr_yy -s -B $(top_srcdir)/modules/ssl/ssl_expr_scan.l
+ sed -e '/$$Header:/d' <lex.ssl_expr_yy.c >ssl_expr_scan.c && rm -f lex.ssl_expr_yy.c
+
+ssl_expr_parse.c ssl_expr_parse.h: $(top_srcdir)/modules/ssl/ssl_expr_parse.y
+ yacc -d $(top_srcdir)/modules/ssl/ssl_expr_parse.y
+ sed -e 's;yy;ssl_expr_yy;g' \
+ -e '/#if defined(c_plusplus) || defined(__cplusplus)/,/#endif/d' \
+ <y.tab.c >ssl_expr_parse.c && rm -f y.tab.c
+ sed -e 's;yy;ssl_expr_yy;g' \
+ <y.tab.h >ssl_expr_parse.h && rm -f y.tab.h
+
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/README b/rubbos/app/httpd-2.0.64/modules/ssl/README
new file mode 100644
index 00000000..b24af26f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/README
@@ -0,0 +1,129 @@
+SYNOPSIS
+
+ This Apache module provides strong cryptography for the Apache 2.0 webserver
+ via the Secure Sockets Layer (SSL v2/v3) and Transport Layer Security (TLS
+ v1) protocols with the help of the SSL/TLS implementation library OpenSSL,
+ which is based on SSLeay from Eric A. Young and Tim J. Hudson.
+
+ The mod_ssl package was created in April 1998 by Ralf S. Engelschall
+ and was originally derived from software developed by Ben Laurie for
+ use in the Apache-SSL HTTP server project. The mod_ssl implementation
+ for Apache 1.3 continues to be supported by the modssl project
+ <http://www.modssl.org/>.
+
+SOURCES
+
+ See the top-level LAYOUT file in httpd-2.0 for file descriptions.
+
+ The source files are written in clean ANSI C and pass the ``gcc -O -g
+ -ggdb3 -Wall -Wshadow -Wpointer-arith -Wcast-align -Wmissing-prototypes
+ -Wmissing-declarations -Wnested-externs -Winline'' compiler test
+ (assuming `gcc' is GCC 2.95.2 or newer) without any complaints. When
+ you make changes or additions, make sure the source still passes this
+ compiler test.
+
+FUNCTIONS
+
+ Inside the source code you will be confronted with the following types of
+ functions which can be identified by their prefixes:
+
+ ap_xxxx() ............... Apache API function
+ ssl_xxxx() .............. mod_ssl function
+ SSL_xxxx() .............. OpenSSL function (SSL library)
+ OpenSSL_xxxx() .......... OpenSSL function (SSL library)
+ X509_xxxx() ............. OpenSSL function (Crypto library)
+ PEM_xxxx() .............. OpenSSL function (Crypto library)
+ EVP_xxxx() .............. OpenSSL function (Crypto library)
+ RSA_xxxx() .............. OpenSSL function (Crypto library)
+
+DATA STRUCTURES
+
+ Inside the source code you will be confronted with the following
+ data structures:
+
+ server_rec .............. Apache (Virtual) Server
+ conn_rec ................ Apache Connection
+ request_rec ............. Apache Request
+ SSLModConfig ............ mod_ssl (Global) Module Configuration
+ SSLSrvConfig ............ mod_ssl (Virtual) Server Configuration
+ SSLDirConfig ............ mod_ssl Directory Configuration
+ SSLConnConfig ........... mod_ssl Connection Configuration
+ SSLFilterRec ............ mod_ssl Filter Context
+ SSL_CTX ................. OpenSSL Context
+ SSL_METHOD .............. OpenSSL Protocol Method
+ SSL_CIPHER .............. OpenSSL Cipher
+ SSL_SESSION ............. OpenSSL Session
+ SSL ..................... OpenSSL Connection
+ BIO ..................... OpenSSL Connection Buffer
+
+ For an overview of how these are related and chained together, have a look at
+ the page in README.dsov.{fig,ps}. It contains overview diagrams for those data
+ structures. It's designed for DIN A4 paper size, but you can easily generate
+ a smaller version inside XFig by specifying a magnification on the Export
+ panel.
+
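 As a rough illustration of how the per-server and per-directory records are
 reached from the Apache structures (the diagrams show the same calls; the
 variable names below are made up, and a server_rec *s and request_rec *r are
 assumed to be in scope):

     SSLSrvConfigRec *sc = (SSLSrvConfigRec *)
         ap_get_module_config(s->module_config, &ssl_module);
     SSLDirConfigRec *dc = (SSLDirConfigRec *)
         ap_get_module_config(r->per_dir_config, &ssl_module);
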
+EXPERIMENTAL CODE
+
+ Experimental code is always encapsulated as follows:
+
+ | #ifdef SSL_EXPERIMENTAL_xxxx
+ | ...
+ | #endif
+
+ This way it is only compiled in when this define is enabled with
+ the APACI --enable-rule=SSL_EXPERIMENTAL option and as long as the
+ C pre-processor variable SSL_EXPERIMENTAL_xxxx_IGNORE is _NOT_
+ defined (via CFLAGS). Or in other words: SSL_EXPERIMENTAL enables all
+ SSL_EXPERIMENTAL_xxxx variables, except if SSL_EXPERIMENTAL_xxxx_IGNORE
+ is already defined. Currently the following features are experimental:
+
+ o SSL_EXPERIMENTAL_ENGINE
+ The ability to support the forthcoming OpenSSL ENGINE facility.
+ Until this development branch of OpenSSL is merged into the main
+ stream, you have to use openssl-engine-0.9.x.tar.gz for this.
+ mod_ssl automatically recognizes this OpenSSL variant and can then
+ activate external crypto devices through the SSLCryptoDevice directive.
+
+INCOMPATIBILITIES
+
+ The following intentional incompatibilities exist between mod_ssl 2.x
+ from Apache 1.3 and this mod_ssl version for Apache 2.0:
+
+ o The complete EAPI-based SSL_VENDOR stuff was removed.
+ o The complete EAPI-based SSL_COMPAT stuff was removed.
+ o The <IfDefine> variable MOD_SSL is no longer provided automatically.
+
+MAJOR CHANGES
+
+ For a complete history of changes for Apache 2.0 mod_ssl, see the
+ CHANGES file in the top-level httpd-2.0 directory. The following
+ is a condensed summary of the major changes made between
+ mod_ssl 2.x from Apache 1.3 and this mod_ssl version for Apache 2.0:
+
+ o The DBM based session cache is now based on APR's DBM API only.
+ o The shared memory based session cache is now based on APR's APIs.
+ o SSL I/O is now implemented in terms of filters rather than BUFF
+ o Eliminated ap_global_ctx. Persistent information is now stored in
+ process_rec->pool->user_data. The ssl_pphrase_Handle_CB() and
+ ssl_config_global_*() functions now have an extra parameter,
+ "server_rec *", which is used to retrieve the SSLModConfigRec.
+ o Properly support restarts, allowing mod_ssl to be added to a server
+ that is already running and to change server certs/keys on restart
+ o Various performance enhancements
+ o proxy support is no longer an "extension"; much of the mod_ssl core
+ was re-written (ssl_engine_{init,kernel,config}.c) to be generic so
+ it could be re-used in proxy mode.
+ - the optional function ssl_proxy_enable is provided for mod_proxy
+ to enable proxy support
+ - proxy support now requires 'SSLProxyEngine on' to be configured
+ - proxy now supports SSLProxyCARevocation{Path,File} in addition to
+ the original SSLProxy* directives
+ o per-directory SSLCACertificate{File,Path} is now thread-safe but
+ requires the SSL_set_cert_store patch to OpenSSL
+ o RSA sslc is supported via ssl_toolkit_compat.h
+ o the ssl_engine_{ds,ext}.c source files are obsolete and no longer
+ exist
+
+TODO
+
+ See the top-level STATUS file in httpd-2.0 for current efforts and goals.
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/README.dsov.fig b/rubbos/app/httpd-2.0.64/modules/ssl/README.dsov.fig
new file mode 100644
index 00000000..d8d03db2
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/README.dsov.fig
@@ -0,0 +1,346 @@
+#FIG 3.2
+Landscape
+Center
+Metric
+Letter
+100.00
+Single
+-2
+1200 2
+0 32 #616561
+0 33 #b6b2b6
+0 34 #f7f3f7
+0 35 #cfcfcf
+0 36 #ffffff
+6 6345 2835 7155 3150
+6 6345 2970 7110 3150
+4 0 0 200 0 20 8 0.0000 4 120 585 6345 3105 "ssl_module")\001
+-6
+4 0 0 200 0 20 8 0.0000 4 120 660 6345 2970 ap_ctx_get(...,\001
+-6
+6 10800 2610 12240 3060
+4 0 0 200 0 20 8 0.0000 4 120 1170 10800 2745 ap_get_module_config(...\001
+4 0 0 200 0 20 8 0.0000 4 120 795 10800 2880 ->per_dir_config,\001
+4 0 0 200 0 20 8 0.0000 4 120 585 10800 3015 &ssl_module)\001
+-6
+6 7920 4770 9135 4995
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 9135 4995 7920 4995 7920 4770 9135 4770 9135 4995
+4 0 0 100 0 18 12 0.0000 4 180 1065 8010 4950 request_rec\001
+-6
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6975 3330 7425 2520
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7200 4230 9450 2520
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7875 4905 7200 5220
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6750 5130 6750 4545
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6705 5445 7155 6120
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7875 4815 7200 4590
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 9585 2565 11475 4230
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 10170 5130 11835 4545
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7920 6075 9855 5400
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 9990 5445 10935 5625
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 10215 5310 10935 5310
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 11925 4590 11925 5085
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 9810 5490 9810 6840
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 9945 5445 10935 6030
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 8865 4725 10800 2565
+2 1 0 3 0 34 200 0 20 0.000 0 0 -1 0 0 2
+ 675 6075 5850 6075
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 675 6525 675 6075
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 0 1.00 60.00 120.00
+ 5850 6075 5850 6525
+2 1 0 3 0 34 200 0 20 0.000 0 0 -1 0 0 2
+ 900 5625 5625 5625
+2 1 0 3 0 34 200 0 20 0.000 0 0 -1 0 0 2
+ 1125 5175 5400 5175
+2 1 0 3 0 34 200 0 20 0.000 0 0 -1 0 0 2
+ 1350 4725 5175 4725
+2 1 0 3 0 34 200 0 20 0.000 0 0 -1 0 0 2
+ 1575 4275 4950 4275
+2 1 0 3 0 34 200 0 20 0.000 0 0 -1 0 0 2
+ 1800 3825 4725 3825
+2 1 0 3 0 34 200 0 20 0.000 0 0 -1 0 0 2
+ 2025 3375 4500 3375
+2 1 0 3 0 34 200 0 20 0.000 0 0 -1 0 0 2
+ 2250 2925 4275 2925
+2 1 0 3 0 34 200 0 20 0.000 0 0 -1 0 0 2
+ 2475 2475 4050 2475
+2 1 0 3 0 34 200 0 20 0.000 0 0 -1 0 0 2
+ 2700 2025 3825 2025
+2 1 0 3 0 34 200 0 20 0.000 0 0 -1 0 0 2
+ 2925 1575 3600 1575
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 900 6075 900 5625
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1125 6525 1125 5175
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1350 5175 1350 4725
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1575 4725 1575 4275
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1800 6525 1800 3825
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2025 3825 2025 3375
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2250 3375 2250 2925
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2475 2925 2475 2475
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 0 1.00 60.00 120.00
+ 5625 5625 5625 6075
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 0 1.00 60.00 120.00
+ 5400 5175 5400 6525
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 0 1.00 60.00 120.00
+ 5175 4725 5175 5175
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 0 1.00 60.00 120.00
+ 4950 4275 4950 4725
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 0 1.00 60.00 120.00
+ 4725 3825 4725 6525
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 0 1.00 60.00 120.00
+ 4500 3375 4500 3825
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 0 1.00 60.00 120.00
+ 4275 2925 4275 3375
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 0 1.00 60.00 120.00
+ 4050 2475 4050 2925
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2700 6525 2700 2025
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 0 1.00 60.00 120.00
+ 3825 2025 3825 6525
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 0 1.00 60.00 120.00
+ 3600 1575 3600 2025
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2925 2025 2925 1575
+2 1 0 4 0 0 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 4.00 60.00 120.00
+ 540 6525 6300 6525
+2 3 0 1 7 7 800 0 20 0.000 0 0 -1 0 0 9
+ 675 6525 5850 6525 5850 6075 5625 6075 5625 5625 900 5625
+ 900 6075 675 6075 675 6525
+2 3 0 1 34 34 700 0 20 0.000 0 0 -1 0 0 13
+ 1125 6525 5355 6525 5400 5175 5175 5175 5175 4725 4950 4725
+ 4950 4275 1575 4275 1575 4725 1350 4725 1350 5175 1125 5175
+ 1125 6525
+2 3 0 1 35 35 500 0 20 0.000 0 0 -1 0 0 17
+ 1800 6525 4725 6525 4725 3825 4500 3825 4500 3375 4275 3375
+ 4275 2925 4050 2925 4050 2475 2475 2475 2475 2925 2250 2925
+ 2250 3375 2025 3375 2025 3825 1800 3825 1800 6525
+2 3 0 1 33 33 400 0 20 0.000 0 0 -1 0 0 9
+ 2700 6525 3825 6525 3825 2025 3600 2025 3600 1575 2925 1575
+ 2925 2025 2700 2025 2700 6525
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 1 2
+ 2 0 1.00 60.00 120.00
+ 2 0 1.00 60.00 120.00
+ 2700 6750 3825 6750
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 1 2
+ 2 0 1.00 60.00 120.00
+ 2 0 1.00 60.00 120.00
+ 1125 7200 5400 7200
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 1 2
+ 2 0 1.00 60.00 120.00
+ 2 0 1.00 60.00 120.00
+ 1800 6975 4725 6975
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 1 2
+ 2 0 1.00 60.00 120.00
+ 2 0 1.00 60.00 120.00
+ 675 7425 5850 7425
+2 1 2 1 0 34 200 0 20 3.000 0 1 -1 0 0 2
+ 675 6570 675 7650
+2 1 2 1 0 34 200 0 20 3.000 0 1 -1 0 0 2
+ 1125 6570 1125 7650
+2 1 2 1 0 34 200 0 20 3.000 0 1 -1 0 0 2
+ 1800 6570 1800 7650
+2 1 2 1 0 34 200 0 20 3.000 0 1 -1 0 0 2
+ 2700 6570 2700 7650
+2 1 2 1 0 34 200 0 20 3.000 0 1 -1 0 0 2
+ 3825 6570 3825 7650
+2 1 2 1 0 34 200 0 20 3.000 0 1 -1 0 0 2
+ 4725 6570 4725 7650
+2 1 2 1 0 34 200 0 20 3.000 0 1 -1 0 0 2
+ 5400 6570 5400 7650
+2 1 2 1 0 34 200 0 20 3.000 0 1 -1 0 0 2
+ 5850 6570 5850 7650
+2 4 0 2 0 7 100 0 -1 0.000 0 0 20 0 0 5
+ 12600 8550 450 8550 450 225 12600 225 12600 8550
+2 4 0 1 0 34 200 0 20 0.000 0 0 20 0 0 5
+ 12600 1350 450 1350 450 225 12600 225 12600 1350
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 10170 2475 8775 2475 8775 2250 10170 2250 10170 2475
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 11925 2475 10575 2475 10575 2250 11925 2250 11925 2475
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 12375 4500 11430 4500 11430 4275 12375 4275 12375 4500
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 12375 5400 10980 5400 10980 5175 12375 5175 12375 5400
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 10170 5400 9675 5400 9675 5175 10170 5175 10170 5400
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 7875 6300 7200 6300 7200 6075 7875 6075 7875 6300
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 8190 2475 6750 2475 6750 2250 8190 2250 8190 2475
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 7605 3600 6300 3600 6300 3375 7605 3375 7605 3600
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 7335 4500 6300 4500 6300 4275 7335 4275 7335 4500
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 7200 5400 6300 5400 6300 5175 7200 5175 7200 5400
+2 1 0 6 7 7 600 0 -1 0.000 0 0 -1 0 0 2
+ 9450 4500 6075 1935
+2 1 0 6 7 7 600 0 -1 0.000 0 0 4 0 0 2
+ 9450 4500 12465 2205
+2 1 0 6 7 7 600 0 -1 0.000 0 0 4 0 0 2
+ 9450 4500 9450 7785
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 9630 5310 7245 5310
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 11385 4365 7380 4365
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 12240 5805 10980 5805 10980 5580 12240 5580 12240 5805
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 12375 6210 10980 6210 10980 5985 12375 5985 12375 6210
+2 1 0 1 0 34 200 0 20 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 11205 6885 9900 5445
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 12285 7155 10530 7155 10530 6930 12285 6930 12285 7155
+2 4 0 1 35 35 200 0 20 0.000 0 0 4 0 0 5
+ 10170 7155 9630 7155 9630 6930 10170 6930 10170 7155
+2 1 0 6 7 7 600 0 -1 0.000 0 0 4 0 0 2
+ 12510 6435 9450 6435
+2 1 0 1 0 34 300 0 20 0.000 0 0 7 1 0 4
+ 1 1 1.00 60.00 120.00
+ 12375 4455 12510 4635 12510 6210 11970 6885
+2 1 2 1 0 34 200 0 20 1.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 9850 5143 9175 4918
+3 1 0 1 34 34 800 0 20 0.000 0 0 0 41
+ 7380 1710 6390 2115 5535 2115 6075 3015 5670 3465 6165 3915
+ 5715 4410 6030 5040 6030 5310 6480 5715 6390 6255 6975 6300
+ 7065 6975 7965 6750 8100 7560 8955 7290 9360 7740 9720 7560
+ 10755 8145 12060 8280 12375 7650 12420 7200 12510 7065 12330 6660
+ 12510 6390 12420 5940 12375 5400 12510 5220 12510 4725 12600 4275
+ 12375 3645 12105 3240 12150 2745 12375 2700 12330 1980 11790 1575
+ 11250 1935 10125 1485 8955 2070 7785 1620 7695 1575
+ 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000
+ 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000
+ 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000
+ 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000
+ 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000
+ 1.000
+4 0 0 100 0 0 12 0.0000 4 180 1440 10575 675 Ralf S. Engelschall\001
+4 0 0 100 0 18 20 0.0000 4 270 3840 4275 675 Apache+mod_ssl+OpenSSL\001
+4 0 0 100 0 0 10 0.0000 4 135 1320 10575 855 rse@engelschall.com\001
+4 0 0 100 0 0 10 0.0000 4 135 1410 10575 1035 www.engelschall.com\001
+4 0 0 100 0 0 12 0.0000 4 135 870 900 675 Version 1.3\001
+4 0 0 100 0 0 12 0.0000 4 180 1035 900 855 12-Apr-1999\001
+4 0 0 200 0 20 8 0.0000 4 60 390 6210 4680 ->server\001
+4 0 0 200 0 20 8 0.0000 4 120 855 8280 6120 ap_ctx_get(...,"ssl")\001
+4 0 0 200 0 20 8 0.0000 4 120 1170 7740 2700 ap_get_module_config(...\001
+4 0 0 200 0 20 8 0.0000 4 120 810 7740 2835 ->module_config,\001
+4 0 0 200 0 20 8 0.0000 4 120 585 7740 2970 &ssl_module)\001
+4 0 0 100 0 18 20 0.0000 4 270 1200 9000 8100 Chaining\001
+4 0 0 100 0 18 20 0.0000 4 210 1095 2745 8100 Lifetime\001
+4 0 0 100 0 18 12 0.0000 4 180 1215 810 6255 ap_global_ctx\001
+4 0 0 100 0 18 12 0.0000 4 180 1305 990 5805 SSLModConfig\001
+4 0 0 100 0 18 12 0.0000 4 180 840 4050 4455 SSL_CTX\001
+4 0 0 100 0 18 12 0.0000 4 150 975 4455 5355 server_rec\001
+4 0 0 100 0 18 12 0.0000 4 180 1260 3870 4905 SSLSrvConfig\001
+4 0 0 100 0 18 12 0.0000 4 135 480 1845 4005 BUFF\001
+4 0 0 100 0 18 12 0.0000 4 150 810 2070 3555 conn_rec\001
+4 0 0 100 0 18 12 0.0000 4 135 345 2295 3105 BIO\001
+4 0 0 100 0 18 12 0.0000 4 135 375 2565 2655 SSL\001
+4 0 0 100 0 18 12 0.0000 4 180 1185 3645 1620 SSLDirConfig\001
+4 0 0 100 0 18 12 0.0000 4 180 1065 3915 2070 request_rec\001
+4 0 0 200 0 0 8 0.0000 4 120 1440 900 7560 Startup, Runtime, Shutdown\001
+4 0 0 200 0 0 8 0.0000 4 105 975 1350 7335 Configuration Time\001
+4 0 0 200 0 0 8 0.0000 4 90 1050 2025 7110 Connection Duration\001
+4 0 0 200 0 0 8 0.0000 4 120 885 2835 6885 Request Duration\001
+4 0 0 200 0 18 20 0.0000 4 195 90 6345 6795 t\001
+4 0 0 200 0 20 8 0.0000 4 90 345 7110 5985 ->client\001
+4 0 0 100 0 18 12 0.0000 4 180 1305 6795 2430 SSLModConfig\001
+4 0 0 100 0 18 12 0.0000 4 180 1260 8865 2430 SSLSrvConfig\001
+4 0 0 100 0 18 12 0.0000 4 180 1215 6345 3555 ap_global_ctx\001
+4 0 0 100 0 18 12 0.0000 4 150 975 6345 4455 server_rec\001
+4 0 0 100 0 18 12 0.0000 4 150 810 6345 5355 conn_rec\001
+4 0 0 100 0 18 12 0.0000 4 135 375 9720 5355 SSL\001
+4 0 0 100 0 18 12 0.0000 4 180 1185 10665 2430 SSLDirConfig\001
+4 0 0 100 0 18 12 0.0000 4 135 480 7290 6255 BUFF\001
+4 0 0 100 0 18 12 0.0000 4 180 1305 11025 5355 SSL_METHOD\001
+4 0 0 100 0 18 12 0.0000 4 180 840 11475 4455 SSL_CTX\001
+4 0 0 100 0 18 24 0.0000 4 285 4365 3915 1080 Data Structure Overview\001
+4 0 0 200 0 20 8 0.0000 4 90 615 7065 5085 ->connection\001
+4 0 0 200 0 20 8 0.0000 4 60 390 7065 4770 ->server\001
+4 0 0 200 0 20 8 0.0000 4 120 960 8010 5445 SSL_get_app_data()\001
+4 0 0 200 0 20 8 0.0000 4 120 510 10530 4050 ->pSSLCtx\001
+4 0 0 200 0 20 8 0.0000 4 120 1215 7875 4275 SSL_CTX_get_app_data()\001
+4 0 0 200 0 20 8 0.0000 4 120 1155 10305 5535 SSL_get_current_cipher()\001
+4 0 0 100 0 18 12 0.0000 4 180 1170 11025 5760 SSL_CIPHER\001
+4 0 0 100 0 18 12 0.0000 4 180 1350 10980 6165 SSL_SESSION\001
+4 0 0 200 0 20 8 0.0000 4 120 840 10440 5940 SSL_get_session()\001
+4 0 0 100 0 18 12 0.0000 4 180 1665 10575 7110 X509_STORE_CTX\001
+4 0 0 100 0 18 12 0.0000 4 135 345 9720 7110 BIO\001
+4 0 0 200 0 20 8 0.0000 4 120 840 9540 7335 SSL_get_{r,w}bio()\001
+4 0 0 100 0 18 20 0.0000 4 270 1170 8730 3465 mod_ssl\001
+4 0 0 100 0 18 20 0.0000 4 270 1050 8145 6750 Apache\001
+4 0 0 200 0 20 8 0.0000 4 120 945 10125 4680 SSL_get_SSL_CTX()\001
+4 0 0 200 0 20 8 0.0000 4 120 1170 10350 5175 SSL_get_SSL_METHOD()\001
+4 0 0 200 0 20 8 0.0000 4 90 465 11745 4770 ->method\001
+4 0 0 200 0 20 8 0.0000 4 120 1665 9945 6480 X509_STORE_CTX_get_app_data()\001
+4 0 0 200 0 20 8 0.0000 4 120 1215 10980 6705 SSL_CTX_get_cert_store()\001
+4 0 0 200 0 20 8 0.0000 4 120 1020 8280 5130 SSL_get_app_data2()\001
+4 0 0 100 0 18 20 0.0000 4 270 1290 10710 7605 OpenSSL\001
+4 0 0 100 0 18 12 0.0000 4 180 720 10710 7785 [Crypto]\001
+4 0 0 100 0 18 20 0.0000 4 270 1290 10935 3645 OpenSSL\001
+4 0 0 100 0 18 12 0.0000 4 180 495 10935 3825 [SSL]\001
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/README.dsov.ps b/rubbos/app/httpd-2.0.64/modules/ssl/README.dsov.ps
new file mode 100644
index 00000000..def19dbe
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/README.dsov.ps
@@ -0,0 +1,1138 @@
+%!PS-Adobe-2.0
+%%Title: README.dsov.ps
+%%Creator: fig2dev Version 3.2 Patchlevel 1
+%%CreationDate: Mon Apr 12 17:09:11 1999
+%%For: rse@en1.engelschall.com (Ralf S. Engelschall)
+%%Orientation: Landscape
+%%BoundingBox: 59 37 553 755
+%%Pages: 1
+%%BeginSetup
+%%IncludeFeature: *PageSize Letter
+%%EndSetup
+%%Magnification: 0.9340
+%%EndComments
+/$F2psDict 200 dict def
+$F2psDict begin
+$F2psDict /mtrx matrix put
+/col-1 {0 setgray} bind def
+/col0 {0.000 0.000 0.000 srgb} bind def
+/col1 {0.000 0.000 1.000 srgb} bind def
+/col2 {0.000 1.000 0.000 srgb} bind def
+/col3 {0.000 1.000 1.000 srgb} bind def
+/col4 {1.000 0.000 0.000 srgb} bind def
+/col5 {1.000 0.000 1.000 srgb} bind def
+/col6 {1.000 1.000 0.000 srgb} bind def
+/col7 {1.000 1.000 1.000 srgb} bind def
+/col8 {0.000 0.000 0.560 srgb} bind def
+/col9 {0.000 0.000 0.690 srgb} bind def
+/col10 {0.000 0.000 0.820 srgb} bind def
+/col11 {0.530 0.810 1.000 srgb} bind def
+/col12 {0.000 0.560 0.000 srgb} bind def
+/col13 {0.000 0.690 0.000 srgb} bind def
+/col14 {0.000 0.820 0.000 srgb} bind def
+/col15 {0.000 0.560 0.560 srgb} bind def
+/col16 {0.000 0.690 0.690 srgb} bind def
+/col17 {0.000 0.820 0.820 srgb} bind def
+/col18 {0.560 0.000 0.000 srgb} bind def
+/col19 {0.690 0.000 0.000 srgb} bind def
+/col20 {0.820 0.000 0.000 srgb} bind def
+/col21 {0.560 0.000 0.560 srgb} bind def
+/col22 {0.690 0.000 0.690 srgb} bind def
+/col23 {0.820 0.000 0.820 srgb} bind def
+/col24 {0.500 0.190 0.000 srgb} bind def
+/col25 {0.630 0.250 0.000 srgb} bind def
+/col26 {0.750 0.380 0.000 srgb} bind def
+/col27 {1.000 0.500 0.500 srgb} bind def
+/col28 {1.000 0.630 0.630 srgb} bind def
+/col29 {1.000 0.750 0.750 srgb} bind def
+/col30 {1.000 0.880 0.880 srgb} bind def
+/col31 {1.000 0.840 0.000 srgb} bind def
+/col32 {0.380 0.396 0.380 srgb} bind def
+/col33 {0.714 0.698 0.714 srgb} bind def
+/col34 {0.969 0.953 0.969 srgb} bind def
+/col35 {0.812 0.812 0.812 srgb} bind def
+/col36 {1.000 1.000 1.000 srgb} bind def
+
+end
+save
+48.0 12.0 translate
+ 90 rotate
+1 -1 scale
+
+/cp {closepath} bind def
+/ef {eofill} bind def
+/gr {grestore} bind def
+/gs {gsave} bind def
+/sa {save} bind def
+/rs {restore} bind def
+/l {lineto} bind def
+/m {moveto} bind def
+/rm {rmoveto} bind def
+/n {newpath} bind def
+/s {stroke} bind def
+/sh {show} bind def
+/slc {setlinecap} bind def
+/slj {setlinejoin} bind def
+/slw {setlinewidth} bind def
+/srgb {setrgbcolor} bind def
+/rot {rotate} bind def
+/sc {scale} bind def
+/sd {setdash} bind def
+/ff {findfont} bind def
+/sf {setfont} bind def
+/scf {scalefont} bind def
+/sw {stringwidth} bind def
+/tr {translate} bind def
+/tnt {dup dup currentrgbcolor
+ 4 -2 roll dup 1 exch sub 3 -1 roll mul add
+ 4 -2 roll dup 1 exch sub 3 -1 roll mul add
+ 4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb}
+ bind def
+/shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll mul
+ 4 -2 roll mul srgb} bind def
+/reencdict 12 dict def /ReEncode { reencdict begin
+/newcodesandnames exch def /newfontname exch def /basefontname exch def
+/basefontdict basefontname findfont def /newfont basefontdict maxlength dict def
+basefontdict { exch dup /FID ne { dup /Encoding eq
+{ exch dup length array copy newfont 3 1 roll put }
+{ exch newfont 3 1 roll put } ifelse } { pop pop } ifelse } forall
+newfont /FontName newfontname put newcodesandnames aload pop
+128 1 255 { newfont /Encoding get exch /.notdef put } for
+newcodesandnames length 2 idiv { newfont /Encoding get 3 1 roll put } repeat
+newfontname newfont definefont pop end } def
+/isovec [
+8#200 /grave 8#201 /acute 8#202 /circumflex 8#203 /tilde
+8#204 /macron 8#205 /breve 8#206 /dotaccent 8#207 /dieresis
+8#210 /ring 8#211 /cedilla 8#212 /hungarumlaut 8#213 /ogonek 8#214 /caron
+8#220 /dotlessi 8#230 /oe 8#231 /OE
+8#240 /space 8#241 /exclamdown 8#242 /cent 8#243 /sterling
+8#244 /currency 8#245 /yen 8#246 /brokenbar 8#247 /section 8#250 /dieresis
+8#251 /copyright 8#252 /ordfeminine 8#253 /guillemotleft 8#254 /logicalnot
+8#255 /endash 8#256 /registered 8#257 /macron 8#260 /degree 8#261 /plusminus
+8#262 /twosuperior 8#263 /threesuperior 8#264 /acute 8#265 /mu 8#266 /paragraph
+8#267 /periodcentered 8#270 /cedilla 8#271 /onesuperior 8#272 /ordmasculine
+8#273 /guillemotright 8#274 /onequarter 8#275 /onehalf
+8#276 /threequarters 8#277 /questiondown 8#300 /Agrave 8#301 /Aacute
+8#302 /Acircumflex 8#303 /Atilde 8#304 /Adieresis 8#305 /Aring
+8#306 /AE 8#307 /Ccedilla 8#310 /Egrave 8#311 /Eacute
+8#312 /Ecircumflex 8#313 /Edieresis 8#314 /Igrave 8#315 /Iacute
+8#316 /Icircumflex 8#317 /Idieresis 8#320 /Eth 8#321 /Ntilde 8#322 /Ograve
+8#323 /Oacute 8#324 /Ocircumflex 8#325 /Otilde 8#326 /Odieresis 8#327 /multiply
+8#330 /Oslash 8#331 /Ugrave 8#332 /Uacute 8#333 /Ucircumflex
+8#334 /Udieresis 8#335 /Yacute 8#336 /Thorn 8#337 /germandbls 8#340 /agrave
+8#341 /aacute 8#342 /acircumflex 8#343 /atilde 8#344 /adieresis 8#345 /aring
+8#346 /ae 8#347 /ccedilla 8#350 /egrave 8#351 /eacute
+8#352 /ecircumflex 8#353 /edieresis 8#354 /igrave 8#355 /iacute
+8#356 /icircumflex 8#357 /idieresis 8#360 /eth 8#361 /ntilde 8#362 /ograve
+8#363 /oacute 8#364 /ocircumflex 8#365 /otilde 8#366 /odieresis 8#367 /divide
+8#370 /oslash 8#371 /ugrave 8#372 /uacute 8#373 /ucircumflex
+8#374 /udieresis 8#375 /yacute 8#376 /thorn 8#377 /ydieresis] def
+/Times-Roman /Times-Roman-iso isovec ReEncode
+/Helvetica-Bold /Helvetica-Bold-iso isovec ReEncode
+/Helvetica-Narrow /Helvetica-Narrow-iso isovec ReEncode
+/$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def
+/$F2psEnd {$F2psEnteredState restore end} def
+%%EndProlog
+
+$F2psBegin
+10 setmiterlimit
+n -1000 9572 m -1000 -1000 l 13622 -1000 l 13622 9572 l cp clip
+ 0.05883 0.05883 sc
+%%Page: 1 1
+% Polyline
+7.500 slw
+n 6413 2048 m 6380 2054 l 6348 2061 l 6315 2067 l 6283 2073 l 6250 2079 l
+ 6217 2084 l 6185 2090 l 6152 2095 l 6120 2101 l 6088 2107 l
+ 6057 2113 l 6027 2120 l 5998 2126 l 5970 2134 l 5943 2141 l
+ 5918 2149 l 5894 2158 l 5873 2167 l 5853 2177 l 5835 2187 l
+ 5819 2198 l 5805 2210 l 5793 2222 l 5782 2235 l 5774 2250 l
+ 5768 2265 l 5763 2281 l 5760 2299 l 5759 2318 l 5759 2339 l
+ 5761 2360 l 5764 2383 l 5768 2408 l 5774 2433 l 5780 2460 l
+ 5788 2488 l 5797 2516 l 5806 2546 l 5815 2575 l 5825 2606 l
+ 5836 2636 l 5846 2666 l 5856 2696 l 5866 2726 l 5875 2755 l
+ 5884 2784 l 5892 2812 l 5899 2839 l 5905 2866 l 5910 2891 l
+ 5915 2916 l 5918 2940 l 5919 2968 l 5920 2995 l 5919 3022 l
+ 5916 3048 l 5912 3075 l 5908 3101 l 5902 3127 l 5895 3153 l
+ 5887 3179 l 5880 3205 l 5871 3230 l 5863 3254 l 5855 3278 l
+ 5848 3302 l 5841 3324 l 5834 3346 l 5829 3367 l 5824 3388 l
+ 5821 3408 l 5819 3427 l 5819 3446 l 5820 3465 l 5823 3484 l
+ 5827 3503 l 5833 3522 l 5840 3542 l 5848 3562 l 5858 3582 l
+ 5868 3603 l 5880 3625 l 5891 3647 l 5904 3669 l 5916 3691 l
+ 5929 3713 l 5941 3736 l 5953 3758 l 5964 3779 l 5974 3801 l
+ 5983 3822 l 5991 3843 l 5997 3863 l 6002 3883 l 6006 3903 l
+ 6008 3923 l 6008 3942 l 6006 3962 l 6003 3983 l 5998 4004 l
+ 5992 4025 l 5985 4048 l 5977 4070 l 5968 4094 l 5958 4118 l
+ 5947 4142 l 5936 4167 l 5925 4192 l 5913 4216 l 5902 4241 l
+ 5892 4266 l 5882 4291 l 5872 4315 l 5864 4339 l 5857 4362 l
+ 5851 4386 l 5846 4409 l 5843 4433 l 5840 4456 l 5840 4480 l
+ 5840 4505 l 5842 4530 l 5845 4556 l 5849 4582 l 5854 4609 l
+ 5860 4636 l 5867 4664 l 5875 4692 l 5883 4720 l 5892 4747 l
+ 5901 4774 l 5910 4801 l 5920 4827 l 5929 4852 l 5938 4875 l
+ 5947 4898 l 5955 4920 l 5963 4941 l 5971 4961 l 5978 4980 l
+ 5985 5002 l 5992 5024 l 5999 5046 l 6005 5067 l 6010 5088 l
+ 6016 5109 l 6022 5129 l 6027 5150 l 6033 5170 l 6039 5190 l
+ 6045 5209 l 6052 5228 l 6059 5246 l 6067 5264 l 6075 5281 l
+ 6084 5298 l 6094 5315 l 6105 5333 l 6115 5347 l 6125 5361 l
+ 6137 5376 l 6149 5392 l 6162 5408 l 6176 5425 l 6191 5443 l
+ 6206 5461 l 6221 5480 l 6237 5499 l 6253 5519 l 6269 5539 l
+ 6284 5559 l 6299 5579 l 6313 5599 l 6327 5619 l 6340 5639 l
+ 6352 5659 l 6363 5679 l 6373 5698 l 6382 5718 l 6390 5738 l
+ 6398 5759 l 6404 5782 l 6410 5805 l 6415 5828 l 6420 5852 l
+ 6424 5877 l 6428 5902 l 6431 5927 l 6435 5952 l 6438 5977 l
+ 6442 6001 l 6446 6025 l 6450 6048 l 6455 6069 l 6461 6090 l
+ 6467 6109 l 6474 6127 l 6483 6143 l 6492 6159 l 6503 6173 l
+ 6515 6185 l 6528 6197 l 6543 6209 l 6560 6220 l 6578 6230 l
+ 6598 6240 l 6619 6250 l 6641 6260 l 6663 6270 l 6687 6281 l
+ 6710 6291 l 6733 6302 l 6757 6312 l 6779 6324 l 6801 6335 l
+ 6821 6348 l 6841 6361 l 6859 6374 l 6876 6389 l 6893 6405 l
+ 6906 6421 l 6919 6437 l 6932 6455 l 6944 6475 l 6955 6495 l
+ 6967 6516 l 6979 6538 l 6991 6561 l 7003 6584 l 7015 6608 l
+ 7027 6631 l 7040 6654 l 7053 6677 l 7067 6699 l 7081 6720 l
+ 7096 6739 l 7111 6758 l 7127 6774 l 7144 6789 l 7161 6803 l
+ 7180 6815 l 7200 6825 l 7220 6833 l 7240 6840 l 7263 6845 l
+ 7286 6850 l 7311 6854 l 7338 6857 l 7365 6859 l 7394 6861 l
+ 7424 6862 l 7454 6864 l 7485 6865 l 7516 6866 l 7547 6867 l
+ 7578 6868 l 7609 6870 l 7639 6872 l 7668 6875 l 7696 6879 l
+ 7723 6883 l 7748 6889 l 7773 6895 l 7795 6903 l 7817 6912 l
+ 7838 6923 l 7857 6934 l 7875 6948 l 7892 6963 l 7909 6980 l
+ 7926 6998 l 7941 7017 l 7957 7038 l 7972 7060 l 7987 7083 l
+ 8002 7106 l 8017 7130 l 8031 7154 l 8046 7178 l 8061 7202 l
+ 8075 7225 l 8090 7247 l 8105 7269 l 8120 7289 l 8135 7308 l
+ 8151 7326 l 8167 7342 l 8184 7356 l 8202 7369 l 8220 7380 l
+ 8239 7390 l 8260 7397 l 8282 7404 l 8305 7409 l 8330 7413 l
+ 8356 7416 l 8383 7418 l 8412 7420 l 8441 7420 l 8471 7419 l
+ 8502 7418 l 8534 7417 l 8565 7415 l 8597 7413 l 8629 7411 l
+ 8660 7409 l 8690 7407 l 8720 7405 l 8749 7404 l 8777 7404 l
+ 8804 7404 l 8830 7405 l 8856 7407 l 8880 7410 l 8906 7414 l
+ 8931 7420 l 8956 7427 l 8981 7435 l 9005 7444 l 9029 7455 l
+ 9053 7466 l 9077 7478 l 9100 7491 l 9123 7504 l 9146 7517 l
+ 9168 7531 l 9190 7544 l 9210 7557 l 9230 7570 l 9250 7582 l
+ 9268 7593 l 9286 7604 l 9304 7613 l 9320 7621 l 9336 7629 l
+ 9353 7635 l 9370 7641 l 9388 7645 l 9406 7648 l 9425 7650 l
+ 9444 7652 l 9464 7653 l 9485 7653 l 9508 7653 l 9531 7653 l
+ 9555 7653 l 9579 7653 l 9605 7654 l 9631 7655 l 9658 7656 l
+ 9685 7659 l 9713 7662 l 9742 7666 l 9771 7672 l 9801 7679 l
+ 9833 7688 l 9853 7694 l 9874 7700 l 9895 7708 l 9918 7716 l
+ 9941 7725 l 9966 7734 l 9991 7745 l 10017 7755 l 10045 7767 l
+ 10073 7779 l 10102 7791 l 10132 7804 l 10163 7818 l 10194 7831 l
+ 10227 7845 l 10259 7860 l 10293 7874 l 10326 7889 l 10360 7903 l
+ 10394 7918 l 10429 7932 l 10463 7947 l 10497 7961 l 10531 7974 l
+ 10565 7988 l 10599 8001 l 10633 8013 l 10667 8025 l 10700 8037 l
+ 10733 8049 l 10767 8059 l 10800 8070 l 10834 8080 l 10868 8090 l
+ 10902 8099 l 10937 8108 l 10973 8117 l 11009 8125 l 11045 8133 l
+ 11083 8141 l 11120 8148 l 11158 8155 l 11197 8161 l 11236 8167 l
+ 11275 8172 l 11313 8177 l 11352 8181 l 11391 8184 l 11429 8187 l
+ 11467 8190 l 11504 8191 l 11540 8192 l 11576 8192 l 11610 8192 l
+ 11644 8191 l 11676 8189 l 11707 8187 l 11738 8184 l 11767 8180 l
+ 11794 8176 l 11821 8171 l 11847 8165 l 11871 8159 l 11895 8153 l
+ 11923 8143 l 11950 8133 l 11976 8122 l 12001 8109 l 12025 8096 l
+ 12048 8081 l 12071 8065 l 12092 8048 l 12113 8031 l 12133 8012 l
+ 12153 7992 l 12171 7972 l 12188 7951 l 12205 7930 l 12220 7909 l
+ 12235 7887 l 12248 7865 l 12260 7843 l 12272 7822 l 12282 7800 l
+ 12292 7779 l 12301 7759 l 12309 7739 l 12316 7719 l 12323 7699 l
+ 12330 7680 l 12338 7655 l 12345 7631 l 12352 7607 l 12359 7582 l
+ 12365 7558 l 12371 7533 l 12377 7508 l 12382 7484 l 12388 7460 l
+ 12392 7436 l 12397 7414 l 12401 7391 l 12405 7370 l 12409 7350 l
+ 12412 7331 l 12415 7313 l 12418 7297 l 12421 7281 l 12424 7266 l
+ 12428 7253 l 12432 7234 l 12437 7216 l 12442 7199 l 12446 7183 l
+ 12451 7166 l 12456 7150 l 12460 7134 l 12463 7117 l 12466 7101 l
+ 12468 7086 l 12469 7070 l 12469 7054 l 12467 7037 l 12465 7020 l
+ 12462 7006 l 12459 6991 l 12455 6975 l 12450 6958 l 12445 6940 l
+ 12440 6921 l 12434 6901 l 12428 6880 l 12422 6859 l 12416 6838 l
+ 12411 6817 l 12406 6796 l 12401 6776 l 12397 6756 l 12394 6736 l
+ 12392 6718 l 12390 6700 l 12390 6683 l 12390 6665 l 12392 6649 l
+ 12394 6631 l 12397 6614 l 12401 6597 l 12406 6579 l 12411 6561 l
+ 12416 6542 l 12422 6524 l 12428 6505 l 12434 6487 l 12440 6468 l
+ 12445 6450 l 12450 6432 l 12455 6414 l 12459 6396 l 12462 6378 l
+ 12465 6360 l 12467 6343 l 12468 6326 l 12469 6308 l 12469 6289 l
+ 12468 6269 l 12468 6249 l 12466 6227 l 12464 6205 l 12462 6182 l
+ 12460 6159 l 12457 6135 l 12454 6111 l 12451 6087 l 12447 6063 l
+ 12444 6040 l 12441 6016 l 12437 5993 l 12434 5970 l 12431 5948 l
+ 12428 5925 l 12424 5902 l 12421 5879 l 12419 5855 l 12416 5831 l
+ 12413 5806 l 12411 5781 l 12408 5755 l 12406 5729 l 12404 5702 l
+ 12403 5676 l 12401 5651 l 12400 5625 l 12400 5601 l 12399 5578 l
+ 12399 5555 l 12400 5534 l 12401 5514 l 12402 5495 l 12403 5477 l
+ 12405 5460 l 12408 5440 l 12411 5421 l 12416 5402 l 12420 5384 l
+ 12426 5365 l 12431 5347 l 12437 5329 l 12444 5311 l 12450 5293 l
+ 12456 5275 l 12462 5258 l 12468 5240 l 12474 5222 l 12479 5205 l
+ 12483 5186 l 12488 5168 l 12490 5152 l 12493 5135 l 12496 5117 l
+ 12498 5099 l 12500 5079 l 12502 5058 l 12504 5036 l 12506 5014 l
+ 12507 4990 l 12509 4966 l 12510 4942 l 12512 4918 l 12513 4893 l
+ 12515 4869 l 12516 4845 l 12518 4822 l 12520 4799 l 12521 4776 l
+ 12523 4754 l 12525 4733 l 12527 4713 l 12529 4693 l 12531 4673 l
+ 12534 4653 l 12536 4632 l 12539 4610 l 12541 4588 l 12543 4566 l
+ 12546 4543 l 12548 4520 l 12550 4497 l 12552 4473 l 12553 4450 l
+ 12554 4426 l 12555 4403 l 12555 4380 l 12555 4357 l 12555 4334 l
+ 12554 4312 l 12552 4290 l 12550 4267 l 12548 4245 l 12545 4224 l
+ 12541 4203 l 12537 4181 l 12533 4159 l 12528 4136 l 12523 4112 l
+ 12517 4088 l 12510 4064 l 12503 4038 l 12496 4013 l 12488 3987 l
+ 12479 3961 l 12471 3935 l 12462 3909 l 12452 3884 l 12443 3859 l
+ 12434 3835 l 12424 3811 l 12415 3788 l 12405 3766 l 12396 3744 l
+ 12386 3723 l 12377 3702 l 12368 3683 l 12357 3661 l 12347 3640 l
+ 12336 3619 l 12325 3598 l 12314 3576 l 12303 3555 l 12291 3533 l
+ 12280 3511 l 12269 3489 l 12257 3467 l 12246 3446 l 12235 3424 l
+ 12225 3402 l 12215 3381 l 12206 3360 l 12197 3340 l 12189 3320 l
+ 12181 3301 l 12174 3281 l 12168 3262 l 12162 3244 l 12158 3225 l
+ 12153 3204 l 12149 3183 l 12145 3162 l 12142 3139 l 12140 3117 l
+ 12138 3094 l 12137 3071 l 12137 3047 l 12138 3024 l 12139 3001 l
+ 12141 2978 l 12143 2956 l 12146 2935 l 12150 2915 l 12154 2896 l
+ 12158 2879 l 12163 2862 l 12168 2847 l 12174 2833 l 12180 2820 l
+ 12188 2805 l 12197 2792 l 12206 2779 l 12216 2766 l 12227 2754 l
+ 12238 2742 l 12249 2730 l 12260 2717 l 12272 2704 l 12282 2691 l
+ 12292 2676 l 12302 2661 l 12310 2645 l 12318 2627 l 12324 2608 l
+ 12330 2588 l 12334 2571 l 12336 2553 l 12339 2534 l 12341 2513 l
+ 12342 2491 l 12343 2467 l 12343 2442 l 12342 2416 l 12340 2389 l
+ 12338 2360 l 12335 2332 l 12331 2303 l 12326 2273 l 12320 2244 l
+ 12314 2215 l 12307 2187 l 12299 2159 l 12290 2132 l 12280 2106 l
+ 12270 2081 l 12259 2056 l 12248 2033 l 12236 2011 l 12224 1990 l
+ 12210 1970 l 12196 1949 l 12181 1929 l 12164 1910 l 12147 1890 l
+ 12129 1871 l 12110 1853 l 12090 1835 l 12070 1818 l 12049 1802 l
+ 12027 1787 l 12005 1773 l 11983 1761 l 11961 1749 l 11939 1739 l
+ 11917 1730 l 11895 1722 l 11874 1716 l 11852 1710 l 11831 1707 l
+ 11811 1704 l 11790 1703 l 11769 1702 l 11748 1703 l 11727 1705 l
+ 11706 1708 l 11683 1711 l 11660 1716 l 11636 1721 l 11612 1727 l
+ 11587 1733 l 11560 1740 l 11534 1747 l 11506 1754 l 11479 1761 l
+ 11450 1768 l 11422 1774 l 11393 1780 l 11364 1786 l 11334 1791 l
+ 11305 1795 l 11275 1798 l 11245 1800 l 11215 1801 l 11184 1801 l
+ 11153 1800 l 11128 1798 l 11104 1796 l 11078 1793 l 11052 1790 l
+ 11025 1785 l 10997 1781 l 10968 1776 l 10939 1770 l 10908 1764 l
+ 10877 1758 l 10844 1751 l 10811 1744 l 10778 1737 l 10743 1730 l
+ 10708 1722 l 10673 1715 l 10637 1708 l 10601 1701 l 10565 1695 l
+ 10530 1688 l 10494 1682 l 10458 1677 l 10422 1672 l 10387 1668 l
+ 10352 1664 l 10318 1661 l 10284 1658 l 10250 1657 l 10216 1656 l
+ 10183 1655 l 10150 1656 l 10118 1658 l 10087 1660 l 10055 1663 l
+ 10024 1666 l 9992 1671 l 9960 1676 l 9927 1682 l 9894 1688 l
+ 9861 1695 l 9827 1703 l 9792 1711 l 9757 1720 l 9721 1729 l
+ 9685 1738 l 9649 1748 l 9613 1757 l 9576 1767 l 9539 1778 l
+ 9502 1788 l 9465 1798 l 9429 1807 l 9392 1817 l 9356 1826 l
+ 9320 1835 l 9285 1844 l 9250 1852 l 9216 1860 l 9182 1867 l
+ 9148 1873 l 9115 1879 l 9082 1884 l 9050 1889 l 9018 1892 l
+ 8987 1895 l 8955 1898 l 8919 1899 l 8883 1900 l 8847 1899 l
+ 8811 1898 l 8774 1896 l 8737 1893 l 8699 1889 l 8661 1884 l
+ 8623 1878 l 8585 1872 l 8546 1865 l 8508 1857 l 8470 1849 l
+ 8432 1840 l 8395 1830 l 8358 1821 l 8322 1811 l 8287 1801 l
+ 8254 1790 l 8221 1780 l 8189 1770 l 8159 1760 l 8130 1750 l
+ 8102 1740 l 8076 1730 l 8051 1721 l 8028 1712 l 8006 1703 l
+ 7985 1695 l 7965 1688 l 7931 1674 l 7899 1662 l 7871 1650 l
+ 7844 1640 l 7820 1631 l 7798 1623 l 7778 1617 l 7760 1611 l
+ 7743 1607 l 7728 1603 l 7715 1601 l 7702 1600 l 7691 1600 l
+ 7680 1601 l 7669 1603 l 7658 1605 l 7648 1607 l 7638 1610 l
+ 7627 1613 l 7615 1617 l 7601 1621 l 7587 1626 l 7571 1632 l
+ 7554 1638 l 7536 1645 l 7517 1653 l 7496 1661 l 7474 1670 l
+ 7452 1679 l 7428 1689 l 7403 1699 l 7378 1709 l 7352 1720 l
+ 7325 1731 l 7297 1743 l 7268 1755 l 7247 1763 l 7226 1772 l
+ 7204 1781 l 7182 1790 l 7158 1800 l 7133 1810 l 7108 1820 l
+ 7081 1831 l 7053 1842 l 7025 1853 l 6996 1864 l 6966 1875 l
+ 6935 1886 l 6904 1898 l 6873 1909 l 6841 1921 l 6809 1932 l
+ 6776 1943 l 6744 1954 l 6712 1964 l 6680 1974 l 6649 1984 l
+ 6618 1994 l 6587 2003 l 6557 2011 l 6527 2019 l 6498 2027 l
+ 6469 2034 l 6441 2041 l cp gs col34 1.00 shd ef gr gs col34 s gr
+% Polyline
+n 675 6525 m 5850 6525 l 5850 6075 l 5625 6075 l 5625 5625 l 900 5625 l
+ 900 6075 l 675 6075 l cp gs col7 1.00 shd ef gr gs col7 s gr
+% Polyline
+n 1125 6525 m 5355 6525 l 5400 5175 l 5175 5175 l 5175 4725 l 4950 4725 l
+ 4950 4275 l 1575 4275 l 1575 4725 l 1350 4725 l 1350 5175 l
+ 1125 5175 l cp gs col34 1.00 shd ef gr gs col34 s gr
+% Polyline
+75.000 slw
+n 9450 4500 m 12465 2205 l gs col7 s gr
+% Polyline
+n 9450 4500 m 9450 7785 l gs col7 s gr
+% Polyline
+n 9450 4500 m 6075 1935 l gs col7 s gr
+% Polyline
+n 12510 6435 m 9450 6435 l gs col7 s gr
+% Polyline
+7.500 slw
+n 1800 6525 m 4725 6525 l 4725 3825 l 4500 3825 l 4500 3375 l 4275 3375 l
+ 4275 2925 l 4050 2925 l 4050 2475 l 2475 2475 l 2475 2925 l
+ 2250 2925 l 2250 3375 l 2025 3375 l 2025 3825 l 1800 3825 l
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+n 2700 6525 m 3825 6525 l 3825 2025 l 3600 2025 l 3600 1575 l 2925 1575 l
+ 2925 2025 l 2700 2025 l cp gs col33 1.00 shd ef gr gs col33 s gr
+% Polyline
+gs clippath
+12068 6810 m 11970 6885 l 12022 6773 l 11937 6878 l 11984 6915 l cp
+clip
+n 12375 4455 m 12510 4635 l 12510 6210 l 11970 6885 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 12068 6810 m 11970 6885 l 12022 6773 l 12045 6791 l 12068 6810 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+7113 6004 m 7155 6120 l 7063 6037 l 7138 6149 l 7188 6116 l cp
+clip
+n 6705 5445 m 7155 6120 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 7113 6004 m 7155 6120 l 7063 6037 l 7088 6020 l 7113 6004 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+7304 4656 m 7200 4590 l 7323 4599 l 7195 4557 l 7176 4614 l cp
+clip
+n 7875 4815 m 7200 4590 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 7304 4656 m 7200 4590 l 7323 4599 l 7314 4628 l 7304 4656 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+11405 4128 m 11475 4230 l 11365 4173 l 11466 4262 l 11506 4217 l cp
+clip
+n 9585 2565 m 11475 4230 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 11405 4128 m 11475 4230 l 11365 4173 l 11385 4151 l 11405 4128 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+11712 4556 m 11835 4545 l 11732 4613 l 11859 4568 l 11839 4512 l cp
+clip
+n 10170 5130 m 11835 4545 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 11712 4556 m 11835 4545 l 11732 4613 l 11722 4585 l 11712 4556 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+9732 5411 m 9855 5400 l 9752 5468 l 9879 5423 l 9859 5367 l cp
+clip
+n 7920 6075 m 9855 5400 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 9732 5411 m 9855 5400 l 9752 5468 l 9742 5440 l 9732 5411 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+10823 5573 m 10935 5625 l 10812 5632 l 10944 5657 l 10955 5598 l cp
+clip
+n 9990 5445 m 10935 5625 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 10823 5573 m 10935 5625 l 10812 5632 l 10817 5603 l 10823 5573 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+10815 5280 m 10935 5310 l 10815 5340 l 10950 5340 l 10950 5280 l cp
+clip
+n 10215 5310 m 10935 5310 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 10815 5280 m 10935 5310 l 10815 5340 l 10815 5310 l 10815 5280 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+11955 4965 m 11925 5085 l 11895 4965 l 11895 5100 l 11955 5100 l cp
+clip
+n 11925 4590 m 11925 5085 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 11955 4965 m 11925 5085 l 11895 4965 l 11925 4965 l 11955 4965 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+9840 6720 m 9810 6840 l 9780 6720 l 9780 6855 l 9840 6855 l cp
+clip
+n 9810 5490 m 9810 6840 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 9840 6720 m 9810 6840 l 9780 6720 l 9810 6720 l 9840 6720 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+10847 5943 m 10935 6030 l 10816 5995 l 10933 6063 l 10963 6012 l cp
+clip
+n 9945 5445 m 10935 6030 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 10847 5943 m 10935 6030 l 10816 5995 l 10832 5969 l 10847 5943 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+10698 2634 m 10800 2565 l 10742 2674 l 10832 2574 l 10788 2534 l cp
+clip
+n 8865 4725 m 10800 2565 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 10698 2634 m 10800 2565 l 10742 2674 l 10720 2654 l 10698 2634 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+30.000 slw
+n 675 6075 m 5850 6075 l gs col34 1.00 shd ef gr gs col0 s gr
+% Polyline
+7.500 slw
+ [15 15] 15 sd
+gs clippath
+645 6195 m 675 6075 l 705 6195 l 705 6060 l 645 6060 l cp
+clip
+n 675 6525 m 675 6075 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 645 6195 m 675 6075 l 705 6195 l 675 6195 l 645 6195 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+5880 6405 m 5850 6525 l 5820 6405 l 5820 6540 l 5880 6540 l cp
+clip
+n 5850 6075 m 5850 6525 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 5880 6405 m 5850 6525 l 5820 6405 l 5850 6405 l 5880 6405 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+30.000 slw
+n 900 5625 m 5625 5625 l gs col34 1.00 shd ef gr gs col0 s gr
+% Polyline
+n 1125 5175 m 5400 5175 l gs col34 1.00 shd ef gr gs col0 s gr
+% Polyline
+n 1350 4725 m 5175 4725 l gs col34 1.00 shd ef gr gs col0 s gr
+% Polyline
+n 1575 4275 m 4950 4275 l gs col34 1.00 shd ef gr gs col0 s gr
+% Polyline
+n 1800 3825 m 4725 3825 l gs col34 1.00 shd ef gr gs col0 s gr
+% Polyline
+n 2025 3375 m 4500 3375 l gs col34 1.00 shd ef gr gs col0 s gr
+% Polyline
+n 2250 2925 m 4275 2925 l gs col34 1.00 shd ef gr gs col0 s gr
+% Polyline
+n 2475 2475 m 4050 2475 l gs col34 1.00 shd ef gr gs col0 s gr
+% Polyline
+n 2700 2025 m 3825 2025 l gs col34 1.00 shd ef gr gs col0 s gr
+% Polyline
+n 2925 1575 m 3600 1575 l gs col34 1.00 shd ef gr gs col0 s gr
+% Polyline
+7.500 slw
+ [15 15] 15 sd
+gs clippath
+870 5745 m 900 5625 l 930 5745 l 930 5610 l 870 5610 l cp
+clip
+n 900 6075 m 900 5625 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 870 5745 m 900 5625 l 930 5745 l 900 5745 l 870 5745 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+1095 5295 m 1125 5175 l 1155 5295 l 1155 5160 l 1095 5160 l cp
+clip
+n 1125 6525 m 1125 5175 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 1095 5295 m 1125 5175 l 1155 5295 l 1125 5295 l 1095 5295 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+1320 4845 m 1350 4725 l 1380 4845 l 1380 4710 l 1320 4710 l cp
+clip
+n 1350 5175 m 1350 4725 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 1320 4845 m 1350 4725 l 1380 4845 l 1350 4845 l 1320 4845 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+1545 4395 m 1575 4275 l 1605 4395 l 1605 4260 l 1545 4260 l cp
+clip
+n 1575 4725 m 1575 4275 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 1545 4395 m 1575 4275 l 1605 4395 l 1575 4395 l 1545 4395 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+1770 3945 m 1800 3825 l 1830 3945 l 1830 3810 l 1770 3810 l cp
+clip
+n 1800 6525 m 1800 3825 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 1770 3945 m 1800 3825 l 1830 3945 l 1800 3945 l 1770 3945 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+1995 3495 m 2025 3375 l 2055 3495 l 2055 3360 l 1995 3360 l cp
+clip
+n 2025 3825 m 2025 3375 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 1995 3495 m 2025 3375 l 2055 3495 l 2025 3495 l 1995 3495 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+2220 3045 m 2250 2925 l 2280 3045 l 2280 2910 l 2220 2910 l cp
+clip
+n 2250 3375 m 2250 2925 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 2220 3045 m 2250 2925 l 2280 3045 l 2250 3045 l 2220 3045 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+2445 2595 m 2475 2475 l 2505 2595 l 2505 2460 l 2445 2460 l cp
+clip
+n 2475 2925 m 2475 2475 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 2445 2595 m 2475 2475 l 2505 2595 l 2475 2595 l 2445 2595 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+5655 5955 m 5625 6075 l 5595 5955 l 5595 6090 l 5655 6090 l cp
+clip
+n 5625 5625 m 5625 6075 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 5655 5955 m 5625 6075 l 5595 5955 l 5625 5955 l 5655 5955 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+5430 6405 m 5400 6525 l 5370 6405 l 5370 6540 l 5430 6540 l cp
+clip
+n 5400 5175 m 5400 6525 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 5430 6405 m 5400 6525 l 5370 6405 l 5400 6405 l 5430 6405 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+5205 5055 m 5175 5175 l 5145 5055 l 5145 5190 l 5205 5190 l cp
+clip
+n 5175 4725 m 5175 5175 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 5205 5055 m 5175 5175 l 5145 5055 l 5175 5055 l 5205 5055 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+4980 4605 m 4950 4725 l 4920 4605 l 4920 4740 l 4980 4740 l cp
+clip
+n 4950 4275 m 4950 4725 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 4980 4605 m 4950 4725 l 4920 4605 l 4950 4605 l 4980 4605 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+4755 6405 m 4725 6525 l 4695 6405 l 4695 6540 l 4755 6540 l cp
+clip
+n 4725 3825 m 4725 6525 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 4755 6405 m 4725 6525 l 4695 6405 l 4725 6405 l 4755 6405 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+4530 3705 m 4500 3825 l 4470 3705 l 4470 3840 l 4530 3840 l cp
+clip
+n 4500 3375 m 4500 3825 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 4530 3705 m 4500 3825 l 4470 3705 l 4500 3705 l 4530 3705 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+4305 3255 m 4275 3375 l 4245 3255 l 4245 3390 l 4305 3390 l cp
+clip
+n 4275 2925 m 4275 3375 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 4305 3255 m 4275 3375 l 4245 3255 l 4275 3255 l 4305 3255 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+4080 2805 m 4050 2925 l 4020 2805 l 4020 2940 l 4080 2940 l cp
+clip
+n 4050 2475 m 4050 2925 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 4080 2805 m 4050 2925 l 4020 2805 l 4050 2805 l 4080 2805 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+2670 2145 m 2700 2025 l 2730 2145 l 2730 2010 l 2670 2010 l cp
+clip
+n 2700 6525 m 2700 2025 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 2670 2145 m 2700 2025 l 2730 2145 l 2700 2145 l 2670 2145 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+3855 6405 m 3825 6525 l 3795 6405 l 3795 6540 l 3855 6540 l cp
+clip
+n 3825 2025 m 3825 6525 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 3855 6405 m 3825 6525 l 3795 6405 l 3825 6405 l 3855 6405 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+3630 1905 m 3600 2025 l 3570 1905 l 3570 2040 l 3630 2040 l cp
+clip
+n 3600 1575 m 3600 2025 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 3630 1905 m 3600 2025 l 3570 1905 l 3600 1905 l 3630 1905 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+2895 1695 m 2925 1575 l 2955 1695 l 2955 1560 l 2895 1560 l cp
+clip
+n 2925 2025 m 2925 1575 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 2895 1695 m 2925 1575 l 2955 1695 l 2925 1695 l 2895 1695 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+45.000 slw
+gs clippath
+6087 6495 m 6207 6525 l 6087 6555 l 6360 6555 l 6360 6495 l cp
+clip
+n 540 6525 m 6300 6525 l gs 0.00 setgray ef gr gs col0 s gr gr
+
+% arrowhead
+n 6087 6495 m 6207 6525 l 6087 6555 l 6087 6525 l 6087 6495 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+7.500 slw
+gs clippath
+3681 6720 m 3825 6750 l 3681 6780 l 3840 6780 l 3840 6720 l cp
+2844 6780 m 2700 6750 l 2844 6720 l 2685 6720 l 2685 6780 l cp
+clip
+n 2700 6750 m 3825 6750 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 2844 6780 m 2700 6750 l 2844 6720 l 2820 6750 l 2844 6780 l cp gs col7 1.00 shd ef gr col0 s
+% arrowhead
+n 3681 6720 m 3825 6750 l 3681 6780 l 3705 6750 l 3681 6720 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+gs clippath
+5256 7170 m 5400 7200 l 5256 7230 l 5415 7230 l 5415 7170 l cp
+1269 7230 m 1125 7200 l 1269 7170 l 1110 7170 l 1110 7230 l cp
+clip
+n 1125 7200 m 5400 7200 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 1269 7230 m 1125 7200 l 1269 7170 l 1245 7200 l 1269 7230 l cp gs col7 1.00 shd ef gr col0 s
+% arrowhead
+n 5256 7170 m 5400 7200 l 5256 7230 l 5280 7200 l 5256 7170 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+gs clippath
+4581 6945 m 4725 6975 l 4581 7005 l 4740 7005 l 4740 6945 l cp
+1944 7005 m 1800 6975 l 1944 6945 l 1785 6945 l 1785 7005 l cp
+clip
+n 1800 6975 m 4725 6975 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 1944 7005 m 1800 6975 l 1944 6945 l 1920 6975 l 1944 7005 l cp gs col7 1.00 shd ef gr col0 s
+% arrowhead
+n 4581 6945 m 4725 6975 l 4581 7005 l 4605 6975 l 4581 6945 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+gs clippath
+5706 7395 m 5850 7425 l 5706 7455 l 5865 7455 l 5865 7395 l cp
+819 7455 m 675 7425 l 819 7395 l 660 7395 l 660 7455 l cp
+clip
+n 675 7425 m 5850 7425 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 819 7455 m 675 7425 l 819 7395 l 795 7425 l 819 7455 l cp gs col7 1.00 shd ef gr col0 s
+% arrowhead
+n 5706 7395 m 5850 7425 l 5706 7455 l 5730 7425 l 5706 7395 l cp gs col7 1.00 shd ef gr col0 s
+% Polyline
+1 slc
+ [15 45] 45 sd
+n 675 6570 m 675 7650 l gs col34 1.00 shd ef gr gs col0 s gr [] 0 sd
+% Polyline
+ [15 45] 45 sd
+n 1125 6570 m 1125 7650 l gs col34 1.00 shd ef gr gs col0 s gr [] 0 sd
+% Polyline
+ [15 45] 45 sd
+n 1800 6570 m 1800 7650 l gs col34 1.00 shd ef gr gs col0 s gr [] 0 sd
+% Polyline
+ [15 45] 45 sd
+n 2700 6570 m 2700 7650 l gs col34 1.00 shd ef gr gs col0 s gr [] 0 sd
+% Polyline
+ [15 45] 45 sd
+n 3825 6570 m 3825 7650 l gs col34 1.00 shd ef gr gs col0 s gr [] 0 sd
+% Polyline
+ [15 45] 45 sd
+n 4725 6570 m 4725 7650 l gs col34 1.00 shd ef gr gs col0 s gr [] 0 sd
+% Polyline
+ [15 45] 45 sd
+n 5400 6570 m 5400 7650 l gs col34 1.00 shd ef gr gs col0 s gr [] 0 sd
+% Polyline
+ [15 45] 45 sd
+n 5850 6570 m 5850 7650 l gs col34 1.00 shd ef gr gs col0 s gr [] 0 sd
+% Polyline
+0 slc
+n 750 225 m 450 225 450 1050 300 arcto 4 {pop} repeat
+ 450 1350 12300 1350 300 arcto 4 {pop} repeat
+ 12600 1350 12600 525 300 arcto 4 {pop} repeat
+ 12600 225 750 225 300 arcto 4 {pop} repeat
+ cp gs col34 1.00 shd ef gr gs col0 s gr
+% Polyline
+n 8835 2250 m 8775 2250 8775 2415 60 arcto 4 {pop} repeat
+ 8775 2475 10110 2475 60 arcto 4 {pop} repeat
+ 10170 2475 10170 2310 60 arcto 4 {pop} repeat
+ 10170 2250 8835 2250 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+n 10635 2250 m 10575 2250 10575 2415 60 arcto 4 {pop} repeat
+ 10575 2475 11865 2475 60 arcto 4 {pop} repeat
+ 11925 2475 11925 2310 60 arcto 4 {pop} repeat
+ 11925 2250 10635 2250 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+n 11490 4275 m 11430 4275 11430 4440 60 arcto 4 {pop} repeat
+ 11430 4500 12315 4500 60 arcto 4 {pop} repeat
+ 12375 4500 12375 4335 60 arcto 4 {pop} repeat
+ 12375 4275 11490 4275 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+n 11040 5175 m 10980 5175 10980 5340 60 arcto 4 {pop} repeat
+ 10980 5400 12315 5400 60 arcto 4 {pop} repeat
+ 12375 5400 12375 5235 60 arcto 4 {pop} repeat
+ 12375 5175 11040 5175 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+n 9735 5175 m 9675 5175 9675 5340 60 arcto 4 {pop} repeat
+ 9675 5400 10110 5400 60 arcto 4 {pop} repeat
+ 10170 5400 10170 5235 60 arcto 4 {pop} repeat
+ 10170 5175 9735 5175 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+n 7260 6075 m 7200 6075 7200 6240 60 arcto 4 {pop} repeat
+ 7200 6300 7815 6300 60 arcto 4 {pop} repeat
+ 7875 6300 7875 6135 60 arcto 4 {pop} repeat
+ 7875 6075 7260 6075 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+n 6810 2250 m 6750 2250 6750 2415 60 arcto 4 {pop} repeat
+ 6750 2475 8130 2475 60 arcto 4 {pop} repeat
+ 8190 2475 8190 2310 60 arcto 4 {pop} repeat
+ 8190 2250 6810 2250 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+n 6360 3375 m 6300 3375 6300 3540 60 arcto 4 {pop} repeat
+ 6300 3600 7545 3600 60 arcto 4 {pop} repeat
+ 7605 3600 7605 3435 60 arcto 4 {pop} repeat
+ 7605 3375 6360 3375 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+n 6360 4275 m 6300 4275 6300 4440 60 arcto 4 {pop} repeat
+ 6300 4500 7275 4500 60 arcto 4 {pop} repeat
+ 7335 4500 7335 4335 60 arcto 4 {pop} repeat
+ 7335 4275 6360 4275 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+n 6360 5175 m 6300 5175 6300 5340 60 arcto 4 {pop} repeat
+ 6300 5400 7140 5400 60 arcto 4 {pop} repeat
+ 7200 5400 7200 5235 60 arcto 4 {pop} repeat
+ 7200 5175 6360 5175 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+gs clippath
+7365 5340 m 7245 5310 l 7365 5280 l 7230 5280 l 7230 5340 l cp
+clip
+n 9630 5310 m 7245 5310 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 7365 5340 m 7245 5310 l 7365 5280 l 7365 5310 l 7365 5340 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+7500 4395 m 7380 4365 l 7500 4335 l 7365 4335 l 7365 4395 l cp
+clip
+n 11385 4365 m 7380 4365 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 7500 4395 m 7380 4365 l 7500 4335 l 7500 4365 l 7500 4395 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+n 11040 5580 m 10980 5580 10980 5745 60 arcto 4 {pop} repeat
+ 10980 5805 12180 5805 60 arcto 4 {pop} repeat
+ 12240 5805 12240 5640 60 arcto 4 {pop} repeat
+ 12240 5580 11040 5580 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+n 11040 5985 m 10980 5985 10980 6150 60 arcto 4 {pop} repeat
+ 10980 6210 12315 6210 60 arcto 4 {pop} repeat
+ 12375 6210 12375 6045 60 arcto 4 {pop} repeat
+ 12375 5985 11040 5985 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+gs clippath
+9958 5554 m 9900 5445 l 10003 5514 l 9912 5414 l 9868 5454 l cp
+clip
+n 11205 6885 m 9900 5445 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 9958 5554 m 9900 5445 l 10003 5514 l 9981 5534 l 9958 5554 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+n 10590 6930 m 10530 6930 10530 7095 60 arcto 4 {pop} repeat
+ 10530 7155 12225 7155 60 arcto 4 {pop} repeat
+ 12285 7155 12285 6990 60 arcto 4 {pop} repeat
+ 12285 6930 10590 6930 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+n 9690 6930 m 9630 6930 9630 7095 60 arcto 4 {pop} repeat
+ 9630 7155 10110 7155 60 arcto 4 {pop} repeat
+ 10170 7155 10170 6990 60 arcto 4 {pop} repeat
+ 10170 6930 9690 6930 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+/Times-Roman-iso ff 120.00 scf sf
+900 7560 m
+gs 1 -1 sc (Startup, Runtime, Shutdown) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+6345 2970 m
+gs 1 -1 sc (ap_ctx_get\(...,) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+10800 2745 m
+gs 1 -1 sc (ap_get_module_config\(...) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+10800 2880 m
+gs 1 -1 sc (->per_dir_config,) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+10800 3015 m
+gs 1 -1 sc (&ssl_module\)) col0 sh gr
+% Polyline
+n 7980 4770 m 7920 4770 7920 4935 60 arcto 4 {pop} repeat
+ 7920 4995 9075 4995 60 arcto 4 {pop} repeat
+ 9135 4995 9135 4830 60 arcto 4 {pop} repeat
+ 9135 4770 7980 4770 60 arcto 4 {pop} repeat
+ cp gs col35 1.00 shd ef gr gs col35 s gr
+% Polyline
+gs clippath
+7340 2610 m 7425 2520 l 7393 2639 l 7459 2521 l 7406 2492 l cp
+clip
+n 6975 3330 m 7425 2520 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 7340 2610 m 7425 2520 l 7393 2639 l 7367 2625 l 7340 2610 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+9336 2569 m 9450 2520 l 9373 2616 l 9480 2535 l 9444 2487 l cp
+clip
+n 7200 4230 m 9450 2520 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 9336 2569 m 9450 2520 l 9373 2616 l 9354 2593 l 9336 2569 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+7321 5196 m 7200 5220 l 7296 5142 l 7174 5199 l 7199 5254 l cp
+clip
+n 7875 4905 m 7200 5220 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 7321 5196 m 7200 5220 l 7296 5142 l 7309 5169 l 7321 5196 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+gs clippath
+6720 4665 m 6750 4545 l 6780 4665 l 6780 4530 l 6720 4530 l cp
+clip
+n 6750 5130 m 6750 4545 l gs col34 1.00 shd ef gr gs col0 s gr gr
+
+% arrowhead
+n 6720 4665 m 6750 4545 l 6780 4665 l 6750 4665 l 6720 4665 l cp gs 0.00 setgray ef gr col0 s
+% Polyline
+ [15 15] 15 sd
+gs clippath
+9279 4984 m 9175 4918 l 9298 4927 l 9170 4885 l 9151 4942 l cp
+clip
+n 9850 5143 m 9175 4918 l gs col34 1.00 shd ef gr gs col0 s gr gr
+ [] 0 sd
+% arrowhead
+n 9279 4984 m 9175 4918 l 9298 4927 l 9289 4956 l 9279 4984 l cp gs 0.00 setgray ef gr col0 s
+/Helvetica-Narrow-iso ff 120.00 scf sf
+6210 4680 m
+gs 1 -1 sc (->server) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+8280 6120 m
+gs 1 -1 sc (ap_ctx_get\(...,"ssl"\)) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+7740 2700 m
+gs 1 -1 sc (ap_get_module_config\(...) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+7740 2835 m
+gs 1 -1 sc (->module_config,) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+7740 2970 m
+gs 1 -1 sc (&ssl_module\)) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+6345 3105 m
+gs 1 -1 sc ("ssl_module"\)) col0 sh gr
+/Times-Roman-iso ff 120.00 scf sf
+1350 7335 m
+gs 1 -1 sc (Configuration Time) col0 sh gr
+/Times-Roman-iso ff 120.00 scf sf
+2025 7110 m
+gs 1 -1 sc (Connection Duration) col0 sh gr
+/Times-Roman-iso ff 120.00 scf sf
+2835 6885 m
+gs 1 -1 sc (Request Duration) col0 sh gr
+/Helvetica-Bold-iso ff 300.00 scf sf
+6345 6795 m
+gs 1 -1 sc (t) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+7110 5985 m
+gs 1 -1 sc (->client) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+7065 5085 m
+gs 1 -1 sc (->connection) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+7065 4770 m
+gs 1 -1 sc (->server) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+8010 5445 m
+gs 1 -1 sc (SSL_get_app_data\(\)) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+10530 4050 m
+gs 1 -1 sc (->pSSLCtx) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+7875 4275 m
+gs 1 -1 sc (SSL_CTX_get_app_data\(\)) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+10305 5535 m
+gs 1 -1 sc (SSL_get_current_cipher\(\)) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+10440 5940 m
+gs 1 -1 sc (SSL_get_session\(\)) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+9540 7335 m
+gs 1 -1 sc (SSL_get_{r,w}bio\(\)) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+10125 4680 m
+gs 1 -1 sc (SSL_get_SSL_CTX\(\)) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+10350 5175 m
+gs 1 -1 sc (SSL_get_SSL_METHOD\(\)) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+11745 4770 m
+gs 1 -1 sc (->method) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+9945 6480 m
+gs 1 -1 sc (X509_STORE_CTX_get_app_data\(\)) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+10980 6705 m
+gs 1 -1 sc (SSL_CTX_get_cert_store\(\)) col0 sh gr
+/Helvetica-Narrow-iso ff 120.00 scf sf
+8280 5130 m
+gs 1 -1 sc (SSL_get_app_data2\(\)) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+3645 1620 m
+gs 1 -1 sc (SSLDirConfig) col0 sh gr
+/Helvetica-Bold-iso ff 300.00 scf sf
+10935 3645 m
+gs 1 -1 sc (OpenSSL) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+10935 3825 m
+gs 1 -1 sc ([SSL]) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+11025 5760 m
+gs 1 -1 sc (SSL_CIPHER) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+10980 6165 m
+gs 1 -1 sc (SSL_SESSION) col0 sh gr
+/Helvetica-Bold-iso ff 300.00 scf sf
+10710 7605 m
+gs 1 -1 sc (OpenSSL) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+10575 7110 m
+gs 1 -1 sc (X509_STORE_CTX) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+6795 2430 m
+gs 1 -1 sc (SSLModConfig) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+8865 2430 m
+gs 1 -1 sc (SSLSrvConfig) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+6345 3555 m
+gs 1 -1 sc (ap_global_ctx) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+6345 4455 m
+gs 1 -1 sc (server_rec) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+6345 5355 m
+gs 1 -1 sc (conn_rec) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+9720 5355 m
+gs 1 -1 sc (SSL) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+10665 2430 m
+gs 1 -1 sc (SSLDirConfig) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+7290 6255 m
+gs 1 -1 sc (BUFF) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+11025 5355 m
+gs 1 -1 sc (SSL_METHOD) col0 sh gr
+% Polyline
+15.000 slw
+n 750 225 m 450 225 450 8250 300 arcto 4 {pop} repeat
+ 450 8550 12300 8550 300 arcto 4 {pop} repeat
+ 12600 8550 12600 525 300 arcto 4 {pop} repeat
+ 12600 225 750 225 300 arcto 4 {pop} repeat
+ cp gs col0 s gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+11475 4455 m
+gs 1 -1 sc (SSL_CTX) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+8010 4950 m
+gs 1 -1 sc (request_rec) col0 sh gr
+/Times-Roman-iso ff 180.00 scf sf
+10575 675 m
+gs 1 -1 sc (Ralf S. Engelschall) col0 sh gr
+/Helvetica-Bold-iso ff 300.00 scf sf
+4275 675 m
+gs 1 -1 sc (Apache+mod_ssl+OpenSSL) col0 sh gr
+/Times-Roman-iso ff 150.00 scf sf
+10575 855 m
+gs 1 -1 sc (rse@engelschall.com) col0 sh gr
+/Times-Roman-iso ff 150.00 scf sf
+10575 1035 m
+gs 1 -1 sc (www.engelschall.com) col0 sh gr
+/Times-Roman-iso ff 180.00 scf sf
+900 675 m
+gs 1 -1 sc (Version 1.3) col0 sh gr
+/Times-Roman-iso ff 180.00 scf sf
+900 855 m
+gs 1 -1 sc (12-Apr-1999) col0 sh gr
+/Helvetica-Bold-iso ff 360.00 scf sf
+3915 1080 m
+gs 1 -1 sc (Data Structure Overview) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+9720 7110 m
+gs 1 -1 sc (BIO) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+10710 7785 m
+gs 1 -1 sc ([Crypto]) col0 sh gr
+/Helvetica-Bold-iso ff 300.00 scf sf
+8730 3465 m
+gs 1 -1 sc (mod_ssl) col0 sh gr
+/Helvetica-Bold-iso ff 300.00 scf sf
+8145 6750 m
+gs 1 -1 sc (Apache) col0 sh gr
+/Helvetica-Bold-iso ff 300.00 scf sf
+9000 8100 m
+gs 1 -1 sc (Chaining) col0 sh gr
+/Helvetica-Bold-iso ff 300.00 scf sf
+2745 8100 m
+gs 1 -1 sc (Lifetime) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+810 6255 m
+gs 1 -1 sc (ap_global_ctx) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+990 5805 m
+gs 1 -1 sc (SSLModConfig) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+4050 4455 m
+gs 1 -1 sc (SSL_CTX) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+4455 5355 m
+gs 1 -1 sc (server_rec) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+3870 4905 m
+gs 1 -1 sc (SSLSrvConfig) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+1845 4005 m
+gs 1 -1 sc (BUFF) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+2070 3555 m
+gs 1 -1 sc (conn_rec) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+2295 3105 m
+gs 1 -1 sc (BIO) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+2565 2655 m
+gs 1 -1 sc (SSL) col0 sh gr
+/Helvetica-Bold-iso ff 180.00 scf sf
+3915 2070 m
+gs 1 -1 sc (request_rec) col0 sh gr
+$F2psEnd
+rs
+showpage
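The PostScript above is the figure source for the "Apache+mod_ssl+OpenSSL" "Data Structure Overview" diagram; its labels trace how code reaches the OpenSSL objects from Apache's records (ap_get_module_config(), SSL_get_app_data(), SSL_get_SSL_CTX(), SSL_get_session(), ...). A minimal C sketch of that chaining, assuming the SSLSrvConfigRec/SSLConnRec types and the mySrvConfig()/myConnConfig() accessors from mod_ssl.h, as used in mod_ssl.c below:

    /* Sketch only, not part of the patch: walking the structures from the
     * "Data Structure Overview" figure.  Assumes mod_ssl.h is included so
     * that SSLConnRec, SSLSrvConfigRec, myConnConfig() and mySrvConfig()
     * are available, as they are in mod_ssl.c below. */
    static void walk_ssl_structures(conn_rec *c)
    {
        SSLSrvConfigRec *sc      = mySrvConfig(c->base_server); /* server_rec -> SSLSrvConfig */
        SSLConnRec      *sslconn = myConnConfig(c);             /* conn_rec   -> SSLConnRec   */

        if (sslconn && sslconn->ssl) {
            SSL         *ssl  = sslconn->ssl;
            SSL_CTX     *ctx  = SSL_get_SSL_CTX(ssl);              /* SSL -> SSL_CTX     */
            SSL_SESSION *sess = SSL_get_session(ssl);              /* SSL -> SSL_SESSION */
            conn_rec    *back = (conn_rec *)SSL_get_app_data(ssl); /* SSL -> conn_rec    */

            (void)ctx; (void)sess; (void)back;                     /* unused in this sketch */
        }
        (void)sc;
    }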
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/config.m4 b/rubbos/app/httpd-2.0.64/modules/ssl/config.m4
new file mode 100644
index 00000000..8cb4b42e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/config.m4
@@ -0,0 +1,54 @@
+dnl Licensed to the Apache Software Foundation (ASF) under one or more
+dnl contributor license agreements. See the NOTICE file distributed with
+dnl this work for additional information regarding copyright ownership.
+dnl The ASF licenses this file to You under the Apache License, Version 2.0
+dnl (the "License"); you may not use this file except in compliance with
+dnl the License. You may obtain a copy of the License at
+dnl
+dnl http://www.apache.org/licenses/LICENSE-2.0
+dnl
+dnl Unless required by applicable law or agreed to in writing, software
+dnl distributed under the License is distributed on an "AS IS" BASIS,
+dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+dnl See the License for the specific language governing permissions and
+dnl limitations under the License.
+
+dnl # start of module specific part
+APACHE_MODPATH_INIT(ssl)
+
+dnl # list of module object files
+ssl_objs="dnl
+mod_ssl.lo dnl
+ssl_engine_config.lo dnl
+ssl_engine_dh.lo dnl
+ssl_engine_init.lo dnl
+ssl_engine_io.lo dnl
+ssl_engine_kernel.lo dnl
+ssl_engine_log.lo dnl
+ssl_engine_mutex.lo dnl
+ssl_engine_pphrase.lo dnl
+ssl_engine_rand.lo dnl
+ssl_engine_vars.lo dnl
+ssl_expr.lo dnl
+ssl_expr_eval.lo dnl
+ssl_expr_parse.lo dnl
+ssl_expr_scan.lo dnl
+ssl_scache.lo dnl
+ssl_scache_dbm.lo dnl
+ssl_scache_shmcb.lo dnl
+ssl_scache_shmht.lo dnl
+ssl_util.lo dnl
+ssl_util_ssl.lo dnl
+ssl_util_table.lo dnl
+"
+dnl # hook module into the Autoconf mechanism (--enable-ssl option)
+APACHE_MODULE(ssl, [SSL/TLS support (mod_ssl)], $ssl_objs, , no, [
+ APACHE_CHECK_SSL_TOOLKIT
+ APR_SETVAR(MOD_SSL_LDADD, [\$(SSL_LIBS)])
+ AC_CHECK_FUNCS(SSL_set_state)
+ AC_CHECK_FUNCS(SSL_set_cert_store)
+])
+
+dnl # end of module specific part
+APACHE_MODPATH_FINISH
+
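The two AC_CHECK_FUNCS probes above become HAVE_SSL_SET_STATE and HAVE_SSL_SET_CERT_STORE preprocessor defines when the functions are found (the Win32 project file further down hard-codes HAVE_SSL_SET_STATE=1), so the module sources can guard toolkit-specific calls. A minimal sketch of such a guard, assuming an OpenSSL of this era that provides SSL_set_state() and SSL_ST_ACCEPT:

    /* Sketch only, not part of the patch: feature guard derived from
     * AC_CHECK_FUNCS(SSL_set_state); autoconf names the define HAVE_<FUNC>. */
    #include <openssl/ssl.h>

    static void reset_handshake_state(SSL *ssl)
    {
    #ifdef HAVE_SSL_SET_STATE
        SSL_set_state(ssl, SSL_ST_ACCEPT);  /* toolkit exposes SSL_set_state() */
    #else
        (void)ssl;                          /* toolkit without it: nothing to do */
    #endif
    }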
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/mod_ssl.c b/rubbos/app/httpd-2.0.64/modules/ssl/mod_ssl.c
new file mode 100644
index 00000000..dd22ec9a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/mod_ssl.c
@@ -0,0 +1,428 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * mod_ssl.c
+ * Apache API interface structures
+ */
+
+#include "mod_ssl.h"
+#include "util_md5.h"
+#include <assert.h>
+
+/*
+ * the table of configuration directives we provide
+ */
+
+#define SSL_CMD_ALL(name, args, desc) \
+ AP_INIT_##args("SSL"#name, ssl_cmd_SSL##name, \
+ NULL, RSRC_CONF|OR_AUTHCFG, desc),
+
+#define SSL_CMD_SRV(name, args, desc) \
+ AP_INIT_##args("SSL"#name, ssl_cmd_SSL##name, \
+ NULL, RSRC_CONF, desc),
+
+#define SSL_CMD_DIR(name, type, args, desc) \
+ AP_INIT_##args("SSL"#name, ssl_cmd_SSL##name, \
+ NULL, OR_##type, desc),
+
+#define AP_END_CMD { NULL }
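Each SSL_CMD_* helper wraps one of Apache's AP_INIT_* macros, gluing the "SSL" prefix onto the directive name and its ssl_cmd_SSL* handler and fixing the allowed context. As an illustration (not literal source), the SSLEngine entry in the table below expands roughly to:

    /* Illustration only: expansion of SSL_CMD_SRV(Engine, FLAG, ...) */
    AP_INIT_FLAG("SSLEngine", ssl_cmd_SSLEngine,
                 NULL, RSRC_CONF,
                 "SSL switch for the protocol engine "
                 "(`on', `off')"),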
+
+const char ssl_valid_ssl_mutex_string[] =
+ "Valid SSLMutex mechanisms are: `none', `default'"
+#if APR_HAS_FLOCK_SERIALIZE
+ ", `flock:/path/to/file'"
+#endif
+#if APR_HAS_FCNTL_SERIALIZE
+ ", `fcntl:/path/to/file'"
+#endif
+#if APR_HAS_SYSVSEM_SERIALIZE && !defined(PERCHILD_MPM)
+ ", `sysvsem'"
+#endif
+#if APR_HAS_POSIXSEM_SERIALIZE
+ ", `posixsem'"
+#endif
+#if APR_HAS_PROC_PTHREAD_SERIALIZE
+ ", `pthread'"
+#endif
+#if APR_HAS_FLOCK_SERIALIZE || APR_HAS_FCNTL_SERIALIZE
+ ", `file:/path/to/file'"
+#endif
+#if (APR_HAS_SYSVSEM_SERIALIZE && !defined(PERCHILD_MPM)) || APR_HAS_POSIXSEM_SERIALIZE
+ ", `sem'"
+#endif
+ " ";
+
+static const command_rec ssl_config_cmds[] = {
+ /*
+ * Global (main-server) context configuration directives
+ */
+ SSL_CMD_SRV(Mutex, TAKE1, ssl_valid_ssl_mutex_string)
+ SSL_CMD_SRV(PassPhraseDialog, TAKE1,
+ "SSL dialog mechanism for the pass phrase query "
+                "(`builtin', `|/path/to/pipe_program', "
+ "or `exec:/path/to/cgi_program')")
+ SSL_CMD_SRV(SessionCache, TAKE1,
+ "SSL Session Cache storage "
+ "(`none', `dbm:/path/to/file')")
+#ifdef SSL_EXPERIMENTAL_ENGINE
+ SSL_CMD_SRV(CryptoDevice, TAKE1,
+ "SSL external Crypto Device usage "
+ "(`builtin', `...')")
+#endif
+ SSL_CMD_SRV(RandomSeed, TAKE23,
+ "SSL Pseudo Random Number Generator (PRNG) seeding source "
+ "(`startup|connect builtin|file:/path|exec:/path [bytes]')")
+
+ /*
+ * Per-server context configuration directives
+ */
+ SSL_CMD_SRV(Engine, FLAG,
+ "SSL switch for the protocol engine "
+ "(`on', `off')")
+ SSL_CMD_ALL(CipherSuite, TAKE1,
+ "Colon-delimited list of permitted SSL Ciphers "
+ "(`XXX:...:XXX' - see manual)")
+ SSL_CMD_SRV(CertificateFile, TAKE1,
+ "SSL Server Certificate file "
+ "(`/path/to/file' - PEM or DER encoded)")
+ SSL_CMD_SRV(CertificateKeyFile, TAKE1,
+ "SSL Server Private Key file "
+ "(`/path/to/file' - PEM or DER encoded)")
+ SSL_CMD_SRV(CertificateChainFile, TAKE1,
+ "SSL Server CA Certificate Chain file "
+ "(`/path/to/file' - PEM encoded)")
+ SSL_CMD_ALL(CACertificatePath, TAKE1,
+ "SSL CA Certificate path "
+ "(`/path/to/dir' - contains PEM encoded files)")
+ SSL_CMD_ALL(CACertificateFile, TAKE1,
+ "SSL CA Certificate file "
+ "(`/path/to/file' - PEM encoded)")
+ SSL_CMD_SRV(CARevocationPath, TAKE1,
+ "SSL CA Certificate Revocation List (CRL) path "
+ "(`/path/to/dir' - contains PEM encoded files)")
+ SSL_CMD_SRV(CARevocationFile, TAKE1,
+ "SSL CA Certificate Revocation List (CRL) file "
+ "(`/path/to/file' - PEM encoded)")
+ SSL_CMD_ALL(VerifyClient, TAKE1,
+ "SSL Client verify type "
+ "(`none', `optional', `require', `optional_no_ca')")
+ SSL_CMD_ALL(VerifyDepth, TAKE1,
+ "SSL Client verify depth "
+ "(`N' - number of intermediate certificates)")
+ SSL_CMD_SRV(SessionCacheTimeout, TAKE1,
+ "SSL Session Cache object lifetime "
+ "(`N' - number of seconds)")
+ SSL_CMD_SRV(Protocol, RAW_ARGS,
+                "Enable or disable various SSL protocols "
+ "(`[+-][SSLv2|SSLv3|TLSv1] ...' - see manual)")
+ SSL_CMD_ALL(UserName, TAKE1,
+ "Set user name to SSL variable value")
+ SSL_CMD_SRV(InsecureRenegotiation, FLAG,
+ "Enable support for insecure renegotiation")
+
+ /*
+ * Proxy configuration for remote SSL connections
+ */
+ SSL_CMD_SRV(ProxyEngine, FLAG,
+ "SSL switch for the proxy protocol engine "
+ "(`on', `off')")
+ SSL_CMD_SRV(ProxyProtocol, RAW_ARGS,
+ "SSL Proxy: enable or disable SSL protocol flavors "
+ "(`[+-][SSLv2|SSLv3|TLSv1] ...' - see manual)")
+ SSL_CMD_SRV(ProxyCipherSuite, TAKE1,
+ "SSL Proxy: colon-delimited list of permitted SSL ciphers "
+ "(`XXX:...:XXX' - see manual)")
+ SSL_CMD_SRV(ProxyVerify, TAKE1,
+ "SSL Proxy: whether to verify the remote certificate "
+ "(`on' or `off')")
+ SSL_CMD_SRV(ProxyVerifyDepth, TAKE1,
+ "SSL Proxy: maximum certificate verification depth "
+ "(`N' - number of intermediate certificates)")
+ SSL_CMD_SRV(ProxyCACertificateFile, TAKE1,
+ "SSL Proxy: file containing server certificates "
+ "(`/path/to/file' - PEM encoded certificates)")
+ SSL_CMD_SRV(ProxyCACertificatePath, TAKE1,
+ "SSL Proxy: directory containing server certificates "
+ "(`/path/to/dir' - contains PEM encoded certificates)")
+ SSL_CMD_SRV(ProxyCARevocationPath, TAKE1,
+ "SSL Proxy: CA Certificate Revocation List (CRL) path "
+ "(`/path/to/dir' - contains PEM encoded files)")
+ SSL_CMD_SRV(ProxyCARevocationFile, TAKE1,
+ "SSL Proxy: CA Certificate Revocation List (CRL) file "
+ "(`/path/to/file' - PEM encoded)")
+ SSL_CMD_SRV(ProxyMachineCertificateFile, TAKE1,
+ "SSL Proxy: file containing client certificates "
+ "(`/path/to/file' - PEM encoded certificates)")
+ SSL_CMD_SRV(ProxyMachineCertificatePath, TAKE1,
+ "SSL Proxy: directory containing client certificates "
+ "(`/path/to/dir' - contains PEM encoded certificates)")
+
+ /*
+ * Per-directory context configuration directives
+ */
+ SSL_CMD_DIR(Options, OPTIONS, RAW_ARGS,
+                "Set one or more options to configure the SSL engine "
+ "(`[+-]option[=value] ...' - see manual)")
+ SSL_CMD_DIR(RequireSSL, AUTHCFG, NO_ARGS,
+ "Require the SSL protocol for the per-directory context "
+ "(no arguments)")
+ SSL_CMD_DIR(Require, AUTHCFG, RAW_ARGS,
+                "Require a boolean expression to evaluate to true for granting access "
+                "(arbitrarily complex boolean expression - see manual)")
+
+ /* Deprecated directives. */
+ AP_INIT_RAW_ARGS("SSLLog", ap_set_deprecated, NULL, OR_ALL,
+ "SSLLog directive is no longer supported - use ErrorLog."),
+ AP_INIT_RAW_ARGS("SSLLogLevel", ap_set_deprecated, NULL, OR_ALL,
+ "SSLLogLevel directive is no longer supported - use LogLevel."),
+
+ AP_END_CMD
+};
+
+/*
+ * the various processing hooks
+ */
+
+static int ssl_hook_pre_config(apr_pool_t *pconf,
+ apr_pool_t *plog,
+ apr_pool_t *ptemp)
+{
+ /* Preregister the malloc callbacks so cmds can make library calls */
+ CRYPTO_malloc_init();
+
+ /* Register us to handle mod_log_config %c/%x variables */
+ ssl_var_log_config_register(pconf);
+#if 0 /* XXX */
+ /* XXX: Register us to handle mod_status extensions that don't exist yet */
+ ssl_scache_status_register(pconf);
+#endif /* -0- */
+
+ return OK;
+}
+
+static SSLConnRec *ssl_init_connection_ctx(conn_rec *c)
+{
+ SSLConnRec *sslconn = myConnConfig(c);
+
+ if (sslconn) {
+ return sslconn;
+ }
+
+ sslconn = apr_pcalloc(c->pool, sizeof(*sslconn));
+
+ myConnConfigSet(c, sslconn);
+
+ return sslconn;
+}
+
+int ssl_proxy_enable(conn_rec *c)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(c->base_server);
+
+ SSLConnRec *sslconn = ssl_init_connection_ctx(c);
+
+ if (!sc->proxy_enabled) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, c->base_server,
+ "SSL Proxy requested for %s but not enabled "
+ "[Hint: SSLProxyEngine]", sc->vhost_id);
+
+ return 0;
+ }
+
+ sslconn->is_proxy = 1;
+ sslconn->disabled = 0;
+
+ return 1;
+}
+
+int ssl_engine_disable(conn_rec *c)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(c->base_server);
+
+ SSLConnRec *sslconn;
+
+ if (!sc->enabled) {
+ return 0;
+ }
+
+ sslconn = ssl_init_connection_ctx(c);
+
+ sslconn->disabled = 1;
+
+ return 1;
+}
+
+static int ssl_hook_pre_connection(conn_rec *c, void *csd)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(c->base_server);
+ SSL *ssl;
+ SSLConnRec *sslconn = myConnConfig(c);
+ char *vhost_md5;
+ modssl_ctx_t *mctx;
+
+ /*
+ * Immediately stop processing if SSL is disabled for this connection
+ */
+ if (!(sc && (sc->enabled ||
+ (sslconn && sslconn->is_proxy))))
+ {
+ return DECLINED;
+ }
+
+ /*
+ * Create SSL context
+ */
+ if (!sslconn) {
+ sslconn = ssl_init_connection_ctx(c);
+ }
+
+ if (sslconn->disabled) {
+ return DECLINED;
+ }
+
+ /*
+ * Remember the connection information for
+ * later access inside callback functions
+ */
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, c->base_server,
+ "Connection to child %ld established "
+ "(server %s, client %s)", c->id, sc->vhost_id,
+ c->remote_ip ? c->remote_ip : "unknown");
+
+ /*
+ * Seed the Pseudo Random Number Generator (PRNG)
+ */
+ ssl_rand_seed(c->base_server, c->pool, SSL_RSCTX_CONNECT, "");
+
+ mctx = sslconn->is_proxy ? sc->proxy : sc->server;
+
+ /*
+ * Create a new SSL connection with the configured server SSL context and
+ * attach this to the socket. Additionally we register this attachment
+ * so we can detach later.
+ */
+ if (!(ssl = SSL_new(mctx->ssl_ctx))) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, c->base_server,
+ "Unable to create a new SSL connection from the SSL "
+ "context");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, c->base_server);
+
+ c->aborted = 1;
+
+ return DECLINED; /* XXX */
+ }
+
+ vhost_md5 = ap_md5_binary(c->pool, (unsigned char *)sc->vhost_id,
+ sc->vhost_id_len);
+
+ if (!SSL_set_session_id_context(ssl, (unsigned char *)vhost_md5,
+ MD5_DIGESTSIZE*2))
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, c->base_server,
+ "Unable to set session id context to `%s'", vhost_md5);
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, c->base_server);
+
+ c->aborted = 1;
+
+ return DECLINED; /* XXX */
+ }
+
+ SSL_set_app_data(ssl, c);
+ SSL_set_app_data2(ssl, NULL); /* will be request_rec */
+
+ sslconn->ssl = ssl;
+
+ /*
+ * Configure callbacks for SSL connection
+ */
+ SSL_set_tmp_rsa_callback(ssl, ssl_callback_TmpRSA);
+ SSL_set_tmp_dh_callback(ssl, ssl_callback_TmpDH);
+
+ SSL_set_verify_result(ssl, X509_V_OK);
+
+ ssl_io_filter_init(c, ssl);
+
+ return APR_SUCCESS;
+}
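ssl_hook_pre_connection() stores the conn_rec in the SSL object via SSL_set_app_data() and reserves the second slot for the request_rec, so later callbacks can work their way back from OpenSSL to Apache, which is exactly the chaining shown in the figure earlier. A minimal sketch of the reverse lookup, assuming mod_ssl's SSL_get_app_data2() helper from ssl_util_ssl.h:

    /* Sketch only, not part of the patch: recovering Apache records inside an
     * OpenSSL callback.  SSL_get_app_data() is the stock OpenSSL macro;
     * SSL_get_app_data2() is mod_ssl's companion helper (ssl_util_ssl.h). */
    static void callback_sketch(SSL *ssl)
    {
        conn_rec    *c = (conn_rec *)   SSL_get_app_data(ssl);  /* set in pre_connection */
        request_rec *r = (request_rec *)SSL_get_app_data2(ssl); /* NULL until a request is bound */

        if (r == NULL) {
            /* connection-level callback: only c and its configs are usable */
        }
        (void)c;
    }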
+
+static const char *ssl_hook_http_method(const request_rec *r)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(r->server);
+
+ if (sc->enabled == FALSE) {
+ return NULL;
+ }
+
+ return "https";
+}
+
+static apr_port_t ssl_hook_default_port(const request_rec *r)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(r->server);
+
+ if (sc->enabled == FALSE) {
+ return 0;
+ }
+
+ return 443;
+}
+
+/*
+ * the module registration phase
+ */
+
+static void ssl_register_hooks(apr_pool_t *p)
+{
+    /* ssl_hook_ReadReq needs to use the BrowserMatch settings, so it must
+ * run after mod_setenvif's post_read_request hook. */
+ static const char *pre_prr[] = { "mod_setenvif.c", NULL };
+
+ ssl_io_filter_register(p);
+
+ ap_hook_pre_connection(ssl_hook_pre_connection,NULL,NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_config (ssl_init_Module, NULL,NULL, APR_HOOK_MIDDLE);
+ ap_hook_http_method (ssl_hook_http_method, NULL,NULL, APR_HOOK_MIDDLE);
+ ap_hook_default_port (ssl_hook_default_port, NULL,NULL, APR_HOOK_MIDDLE);
+ ap_hook_pre_config (ssl_hook_pre_config, NULL,NULL, APR_HOOK_MIDDLE);
+ ap_hook_child_init (ssl_init_Child, NULL,NULL, APR_HOOK_MIDDLE);
+ ap_hook_check_user_id (ssl_hook_UserCheck, NULL,NULL, APR_HOOK_FIRST);
+ ap_hook_fixups (ssl_hook_Fixup, NULL,NULL, APR_HOOK_MIDDLE);
+ ap_hook_access_checker(ssl_hook_Access, NULL,NULL, APR_HOOK_MIDDLE);
+ ap_hook_auth_checker (ssl_hook_Auth, NULL,NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_read_request(ssl_hook_ReadReq, pre_prr,NULL, APR_HOOK_MIDDLE);
+
+ ssl_var_register();
+
+ APR_REGISTER_OPTIONAL_FN(ssl_proxy_enable);
+ APR_REGISTER_OPTIONAL_FN(ssl_engine_disable);
+}
+
+module AP_MODULE_DECLARE_DATA ssl_module = {
+ STANDARD20_MODULE_STUFF,
+ ssl_config_perdir_create, /* create per-dir config structures */
+ ssl_config_perdir_merge, /* merge per-dir config structures */
+ ssl_config_server_create, /* create per-server config structures */
+ ssl_config_server_merge, /* merge per-server config structures */
+ ssl_config_cmds, /* table of configuration directives */
+ ssl_register_hooks /* register hooks */
+};
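ssl_register_hooks() also exports ssl_proxy_enable() and ssl_engine_disable() through APR's optional-function registry, so a module such as mod_proxy can call into mod_ssl without a hard link-time dependency. A sketch of the consumer side, using the stock apr_optional.h macros (the surrounding consumer module is assumed):

    /* Sketch only, not part of the patch: consuming the optional functions
     * registered by ssl_register_hooks() above. */
    #include "httpd.h"
    #include "apr_optional.h"

    APR_DECLARE_OPTIONAL_FN(int, ssl_proxy_enable, (conn_rec *));

    static APR_OPTIONAL_FN_TYPE(ssl_proxy_enable) *proxy_ssl_enable = NULL;

    static void lookup_mod_ssl(void)
    {
        /* typically called from a post_config or optional_fn_retrieve hook */
        proxy_ssl_enable = APR_RETRIEVE_OPTIONAL_FN(ssl_proxy_enable);
    }

    static int enable_backend_ssl(conn_rec *c)
    {
        /* returns 0 when mod_ssl is absent or SSLProxyEngine is off */
        return proxy_ssl_enable ? proxy_ssl_enable(c) : 0;
    }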
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/mod_ssl.dsp b/rubbos/app/httpd-2.0.64/modules/ssl/mod_ssl.dsp
new file mode 100644
index 00000000..cf3df14e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/mod_ssl.dsp
@@ -0,0 +1,328 @@
+# Microsoft Developer Studio Project File - Name="mod_ssl" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_ssl - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_ssl.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_ssl.mak" CFG="mod_ssl - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_ssl - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_ssl - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_ssl - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/openssl/inc32/openssl" /I "../../srclib/openssl/inc32" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "WIN32_LEAN_AND_MEAN" /D "NO_IDEA" /D "NO_RC5" /D "NO_MDC2" /D "OPENSSL_NO_IDEA" /D "OPENSSL_NO_RC5" /D "OPENSSL_NO_MDC2" /D "HAVE_SSL_SET_STATE=1" /Fd"Release\mod_ssl_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_ssl.so" /base:@..\..\os\win32\BaseAddr.ref,mod_ssl.so
+# ADD LINK32 kernel32.lib user32.lib wsock32.lib ws2_32.lib advapi32.lib gdi32.lib ssleay32.lib libeay32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_ssl.so" /libpath:"../../srclib/openssl/out32dll" /libpath:"../../srclib/openssl/out32" /base:@..\..\os\win32\BaseAddr.ref,mod_ssl.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_ssl - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/openssl/inc32/openssl" /I "../../srclib/openssl/inc32" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "WIN32_LEAN_AND_MEAN" /D "NO_IDEA" /D "NO_RC5" /D "NO_MDC2" /D "OPENSSL_NO_IDEA" /D "OPENSSL_NO_RC5" /D "OPENSSL_NO_MDC2" /D "HAVE_SSL_SET_STATE=1" /Fd"Debug\mod_ssl_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_ssl.so" /base:@..\..\os\win32\BaseAddr.ref,mod_ssl.so
+# ADD LINK32 kernel32.lib user32.lib wsock32.lib ws2_32.lib advapi32.lib gdi32.lib ssleay32.lib libeay32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_ssl.so" /libpath:"../../srclib/openssl/out32dll.dbg" /libpath:"../../srclib/openssl/out32.dbg" /base:@..\..\os\win32\BaseAddr.ref,mod_ssl.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_ssl - Win32 Release"
+# Name "mod_ssl - Win32 Debug"
+# Begin Group "Source Files"
+
+# PROP Default_Filter "*.c"
+# Begin Source File
+
+SOURCE=.\mod_ssl.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_engine_config.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_engine_dh.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_engine_init.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_engine_io.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_engine_kernel.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_engine_log.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_engine_mutex.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_engine_pphrase.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_engine_rand.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_engine_vars.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_expr.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_expr_eval.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_expr_parse.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_expr_scan.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_scache.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_scache_dbm.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_scache_shmcb.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_scache_shmht.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_util_ssl.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_util_table.c
+# End Source File
+# End Group
+# Begin Group "Header Files"
+
+# PROP Default_Filter "*.h"
+# Begin Source File
+
+SOURCE=.\mod_ssl.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_expr.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_expr_parse.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_toolkit_compat.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_util_ssl.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_util_table.h
+# End Source File
+# End Group
+# Begin Group "Generated Files"
+
+# PROP Default_Filter ""
+# Begin Source File
+
+SOURCE=.\ssl_expr_parse.y
+
+!IF "$(CFG)" == "mod_ssl - Win32 Release"
+
+# Begin Custom Build - Generating ssl_expr_parse.c/.h from ssl_expr_parse.y
+InputPath=.\ssl_expr_parse.y
+
+BuildCmds= \
+ bison -y -d ssl_expr_parse.y \
+ sed -e "s;yy;ssl_expr_yy;g" -e "/#if defined(c_plusplus) || defined(__cplusplus)/,/#endif/d" <y.tab.c >ssl_expr_parse.c \
+ del y.tab.c \
+ sed -e "s;yy;ssl_expr_yy;g" <y.tab.h >ssl_expr_parse.h \
+ del y.tab.h \
+
+
+"ssl_expr_parse.c" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ $(BuildCmds)
+
+"ssl_expr_parse.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ $(BuildCmds)
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_ssl - Win32 Debug"
+
+# Begin Custom Build - Generating ssl_expr_parse.c/.h from ssl_expr_parse.y
+InputPath=.\ssl_expr_parse.y
+
+BuildCmds= \
+ bison -y -d ssl_expr_parse.y \
+ sed -e "s;yy;ssl_expr_yy;g" -e "/#if defined(c_plusplus) || defined(__cplusplus)/,/#endif/d" <y.tab.c >ssl_expr_parse.c \
+ del y.tab.c \
+ sed -e "s;yy;ssl_expr_yy;g" <y.tab.h >ssl_expr_parse.h \
+ del y.tab.h \
+
+
+"ssl_expr_parse.c" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ $(BuildCmds)
+
+"ssl_expr_parse.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ $(BuildCmds)
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# Begin Source File
+
+SOURCE=.\ssl_expr_scan.l
+
+!IF "$(CFG)" == "mod_ssl - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Generating ssl_expr_scan.c from ssl_expr_scan.l
+InputPath=.\ssl_expr_scan.l
+
+"ssl_expr_scan.c" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ flex -Pssl_expr_yy -s -B ssl_expr_scan.l
+ sed -e "/$$Header:/d" <lex.ssl_expr_yy.c >ssl_expr_scan.c
+ del lex.ssl_expr_yy.c
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_ssl - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Generating ssl_expr_scan.c from ssl_expr_scan.l
+InputPath=.\ssl_expr_scan.l
+
+"ssl_expr_scan.c" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ flex -Pssl_expr_yy -s -B ssl_expr_scan.l
+ sed -e "/$$Header:/d" <lex.ssl_expr_yy.c >ssl_expr_scan.c
+ del lex.ssl_expr_yy.c
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Group
+# Begin Source File
+
+SOURCE=.\mod_ssl.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_ssl - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_ssl.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_ssl.so "ssl_module for Apache" ../../include/ap_release.h > .\mod_ssl.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_ssl - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_ssl.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_ssl.so "ssl_module for Apache" ../../include/ap_release.h > .\mod_ssl.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/mod_ssl.h b/rubbos/app/httpd-2.0.64/modules/ssl/mod_ssl.h
new file mode 100644
index 00000000..6f69c26f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/mod_ssl.h
@@ -0,0 +1,724 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * mod_ssl.h
+ * Global header
+ */
+ /* ``The Apache Group: a collection
+ of talented individuals who are
+ trying to perfect the art of
+ never finishing something.''
+ -- Rob Hartill */
+#ifndef __MOD_SSL_H__
+#define __MOD_SSL_H__
+
+/*
+ * Optionally enable the experimental stuff, but allow the user to
+ * override the decision which experimental parts are included by using
+ * CFLAGS="-DSSL_EXPERIMENTAL_xxxx_IGNORE".
+ */
+#ifdef SSL_EXPERIMENTAL
+#ifdef SSL_ENGINE
+#ifndef SSL_EXPERIMENTAL_ENGINE_IGNORE
+#define SSL_EXPERIMENTAL_ENGINE
+#endif
+#endif
+#endif /* SSL_EXPERIMENTAL */
+
+/*
+ * Power up our brain...
+ */
+
+/* Apache headers */
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "http_connection.h"
+#include "http_request.h"
+#include "http_protocol.h"
+#include "util_script.h"
+#include "util_filter.h"
+#include "mpm.h"
+#include "apr.h"
+#include "apr_strings.h"
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+#include "apr_tables.h"
+#include "apr_lib.h"
+#include "apr_fnmatch.h"
+#include "apr_strings.h"
+#include "apr_dbm.h"
+#include "apr_rmm.h"
+#include "apr_shm.h"
+#include "apr_global_mutex.h"
+#include "apr_optional.h"
+
+#define MOD_SSL_VERSION AP_SERVER_BASEREVISION
+
+#ifdef HAVE_SSLC
+
+#include <bio.h>
+#include <ssl.h>
+#include <err.h>
+#include <x509.h>
+#include <pem.h>
+#include <evp.h>
+#include <objects.h>
+#include <sslc.h>
+
+#else /* !HAVE_SSLC (implicit HAVE_OPENSSL) */
+
+#include <ssl.h>
+#include <err.h>
+#include <x509.h>
+#include <pem.h>
+#include <crypto.h>
+#include <evp.h>
+#include <rand.h>
+#ifdef SSL_EXPERIMENTAL_ENGINE
+#include <engine.h>
+#endif
+#ifdef HAVE_SSL_X509V3_H
+#include <x509v3.h>
+#endif
+
+#endif /* !HAVE_SSLC (implicit HAVE_OPENSSL) */
+
+
+/* mod_ssl headers */
+#include "ssl_toolkit_compat.h"
+#include "ssl_expr.h"
+#include "ssl_util_ssl.h"
+#include "ssl_util_table.h"
+
+/* The #ifdef macros are only defined AFTER including the above
+ * therefore we cannot include these system files at the top :-(
+ */
+#if APR_HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h> /* needed for STDIN_FILENO et al., at least on FreeBSD */
+#endif
+
+/*
+ * Provide reasonable default for some defines
+ */
+#ifndef FALSE
+#define FALSE (0)
+#endif
+#ifndef TRUE
+#define TRUE (!FALSE)
+#endif
+#ifndef PFALSE
+#define PFALSE ((void *)FALSE)
+#endif
+#ifndef PTRUE
+#define PTRUE ((void *)TRUE)
+#endif
+#ifndef UNSET
+#define UNSET (-1)
+#endif
+#ifndef NUL
+#define NUL '\0'
+#endif
+#ifndef RAND_MAX
+#include <limits.h>
+#define RAND_MAX INT_MAX
+#endif
+
+/*
+ * Provide reasonable defines for some types
+ */
+#ifndef BOOL
+#define BOOL unsigned int
+#endif
+#ifndef UCHAR
+#define UCHAR unsigned char
+#endif
+
+/*
+ * Provide useful shorthands
+ */
+#define strEQ(s1,s2) (strcmp(s1,s2) == 0)
+#define strNE(s1,s2) (strcmp(s1,s2) != 0)
+#define strEQn(s1,s2,n) (strncmp(s1,s2,n) == 0)
+#define strNEn(s1,s2,n) (strncmp(s1,s2,n) != 0)
+
+#define strcEQ(s1,s2) (strcasecmp(s1,s2) == 0)
+#define strcNE(s1,s2) (strcasecmp(s1,s2) != 0)
+#define strcEQn(s1,s2,n) (strncasecmp(s1,s2,n) == 0)
+#define strcNEn(s1,s2,n) (strncasecmp(s1,s2,n) != 0)
+
+#define strIsEmpty(s) (s == NULL || s[0] == NUL)
+
+#define myConnConfig(c) \
+(SSLConnRec *)ap_get_module_config(c->conn_config, &ssl_module)
+#define myCtxConfig(sslconn, sc) (sslconn->is_proxy ? sc->proxy : sc->server)
+#define myConnConfigSet(c, val) \
+ap_set_module_config(c->conn_config, &ssl_module, val)
+#define mySrvConfig(srv) (SSLSrvConfigRec *)ap_get_module_config(srv->module_config, &ssl_module)
+#define myDirConfig(req) (SSLDirConfigRec *)ap_get_module_config(req->per_dir_config, &ssl_module)
+#define myModConfig(srv) (mySrvConfig((srv)))->mc
+
+#define myCtxVarSet(mc,num,val) mc->rCtx.pV##num = val
+#define myCtxVarGet(mc,num,type) (type)(mc->rCtx.pV##num)
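+/* Typical usage, given a request_rec *r:
+ *   SSLSrvConfigRec *sc      = mySrvConfig(r->server);
+ *   SSLConnRec      *sslconn = myConnConfig(r->connection);
+ *   SSLDirConfigRec *dc      = myDirConfig(r);
+ */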
+
+/*
+ * Defaults for the configuration
+ */
+#ifndef SSL_SESSION_CACHE_TIMEOUT
+#define SSL_SESSION_CACHE_TIMEOUT 300
+#endif
+
+/*
+ * Support for MM library
+ */
+#define SSL_MM_FILE_MODE ( APR_UREAD | APR_UWRITE | APR_GREAD | APR_WREAD )
+
+/*
+ * Support for DBM library
+ */
+#define SSL_DBM_FILE_MODE ( APR_UREAD | APR_UWRITE | APR_GREAD | APR_WREAD )
+
+#if !defined(SSL_DBM_FILE_SUFFIX_DIR) && !defined(SSL_DBM_FILE_SUFFIX_PAG)
+#if defined(DBM_SUFFIX)
+#define SSL_DBM_FILE_SUFFIX_DIR DBM_SUFFIX
+#define SSL_DBM_FILE_SUFFIX_PAG DBM_SUFFIX
+#elif defined(__FreeBSD__) || (defined(DB_LOCK) && defined(DB_SHMEM))
+#define SSL_DBM_FILE_SUFFIX_DIR ".db"
+#define SSL_DBM_FILE_SUFFIX_PAG ".db"
+#else
+#define SSL_DBM_FILE_SUFFIX_DIR ".dir"
+#define SSL_DBM_FILE_SUFFIX_PAG ".pag"
+#endif
+#endif
+
+/*
+ * Define the certificate algorithm types
+ */
+
+typedef int ssl_algo_t;
+
+#define SSL_ALGO_UNKNOWN (0)
+#define SSL_ALGO_RSA (1<<0)
+#define SSL_ALGO_DSA (1<<1)
+#define SSL_ALGO_ALL (SSL_ALGO_RSA|SSL_ALGO_DSA)
+
+#define SSL_AIDX_RSA (0)
+#define SSL_AIDX_DSA (1)
+#define SSL_AIDX_MAX (2)
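+/* The SSL_AIDX_* values index the per-algorithm certificate/key arrays
+ * (cert_files[], key_files[], certs[] and keys[] in modssl_pk_server_t
+ * below): slot 0 holds the RSA pair, slot 1 the DSA pair. */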
+
+
+/*
+ * Define IDs for the temporary RSA keys and DH params
+ */
+
+#define SSL_TMP_KEY_RSA_512 (0)
+#define SSL_TMP_KEY_RSA_1024 (1)
+#define SSL_TMP_KEY_DH_512 (2)
+#define SSL_TMP_KEY_DH_1024 (3)
+#define SSL_TMP_KEY_MAX (4)
+
+/*
+ * Define the SSL options
+ */
+#define SSL_OPT_NONE (0)
+#define SSL_OPT_RELSET (1<<0)
+#define SSL_OPT_STDENVVARS (1<<1)
+#define SSL_OPT_COMPATENVVARS (1<<2)
+#define SSL_OPT_EXPORTCERTDATA (1<<3)
+#define SSL_OPT_FAKEBASICAUTH (1<<4)
+#define SSL_OPT_STRICTREQUIRE (1<<5)
+#define SSL_OPT_OPTRENEGOTIATE (1<<6)
+#define SSL_OPT_ALL (SSL_OPT_STDENVVARS|SSL_OPT_COMPATENVVARS|SSL_OPT_EXPORTCERTDATA|SSL_OPT_FAKEBASICAUTH|SSL_OPT_STRICTREQUIRE|SSL_OPT_OPTRENEGOTIATE)
+typedef int ssl_opt_t;
+
+/*
+ * Define the SSL Protocol options
+ */
+#define SSL_PROTOCOL_NONE (0)
+#define SSL_PROTOCOL_SSLV2 (1<<0)
+#define SSL_PROTOCOL_SSLV3 (1<<1)
+#define SSL_PROTOCOL_TLSV1 (1<<2)
+#define SSL_PROTOCOL_ALL (SSL_PROTOCOL_SSLV2|SSL_PROTOCOL_SSLV3|SSL_PROTOCOL_TLSV1)
+typedef int ssl_proto_t;
+
+/*
+ * Define the SSL verify levels
+ */
+typedef enum {
+ SSL_CVERIFY_UNSET = UNSET,
+ SSL_CVERIFY_NONE = 0,
+ SSL_CVERIFY_OPTIONAL = 1,
+ SSL_CVERIFY_REQUIRE = 2,
+ SSL_CVERIFY_OPTIONAL_NO_CA = 3
+} ssl_verify_t;
+
+#define SSL_VERIFY_PEER_STRICT \
+ (SSL_VERIFY_PEER|SSL_VERIFY_FAIL_IF_NO_PEER_CERT)
+
+#ifndef X509_V_ERR_CERT_UNTRUSTED
+#define X509_V_ERR_CERT_UNTRUSTED 27
+#endif
+
+#define ssl_verify_error_is_optional(errnum) \
+ ((errnum == X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT) \
+ || (errnum == X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN) \
+ || (errnum == X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY) \
+ || (errnum == X509_V_ERR_CERT_UNTRUSTED) \
+ || (errnum == X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE))
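+/* (These are the verification failures tolerated by the verify callback
+ *  when the client verification mode is SSL_CVERIFY_OPTIONAL_NO_CA,
+ *  i.e. "SSLVerifyClient optional_no_ca".) */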
+
+/*
+ * Define the SSL pass phrase dialog types
+ */
+typedef enum {
+ SSL_PPTYPE_UNSET = UNSET,
+ SSL_PPTYPE_BUILTIN = 0,
+ SSL_PPTYPE_FILTER = 1,
+ SSL_PPTYPE_PIPE = 2
+} ssl_pphrase_t;
+
+/*
+ * Define the Path Checking modes
+ */
+#define SSL_PCM_EXISTS 1
+#define SSL_PCM_ISREG 2
+#define SSL_PCM_ISDIR 4
+#define SSL_PCM_ISNONZERO 8
+typedef unsigned int ssl_pathcheck_t;
+
+/*
+ * Define the SSL session cache modes and structures
+ */
+typedef enum {
+ SSL_SCMODE_UNSET = UNSET,
+ SSL_SCMODE_NONE = 0,
+ SSL_SCMODE_DBM = 1,
+ SSL_SCMODE_SHMHT = 2,
+ SSL_SCMODE_SHMCB = 3
+} ssl_scmode_t;
+
+/*
+ * Define the SSL mutex modes
+ */
+typedef enum {
+ SSL_MUTEXMODE_UNSET = UNSET,
+ SSL_MUTEXMODE_NONE = 0,
+ SSL_MUTEXMODE_USED = 1
+} ssl_mutexmode_t;
+
+/*
+ * Define the SSL requirement structure
+ */
+typedef struct {
+ char *cpExpr;
+ ssl_expr *mpExpr;
+} ssl_require_t;
+
+/*
+ * Define the SSL random number generator seeding source
+ */
+typedef enum {
+ SSL_RSCTX_STARTUP = 1,
+ SSL_RSCTX_CONNECT = 2
+} ssl_rsctx_t;
+typedef enum {
+ SSL_RSSRC_BUILTIN = 1,
+ SSL_RSSRC_FILE = 2,
+ SSL_RSSRC_EXEC = 3,
+ SSL_RSSRC_EGD = 4
+} ssl_rssrc_t;
+typedef struct {
+ ssl_rsctx_t nCtx;
+ ssl_rssrc_t nSrc;
+ char *cpPath;
+ int nBytes;
+} ssl_randseed_t;
+
+/*
+ * Define the structure of an ASN.1 anything
+ */
+typedef struct {
+ long int nData;
+ unsigned char *cpData;
+ apr_time_t source_mtime;
+} ssl_asn1_t;
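+/* (ssl_asn1_t entries hold DER-encoded certificates and private keys and
+ *  are kept in the tPublicCert/tPrivateKey hashes of SSLModConfigRec so
+ *  that they survive server restarts.) */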
+
+/*
+ * Define the mod_ssl per-module configuration structure
+ * (i.e. the global configuration for each httpd process)
+ */
+
+typedef enum {
+ SSL_SHUTDOWN_TYPE_UNSET,
+ SSL_SHUTDOWN_TYPE_STANDARD,
+ SSL_SHUTDOWN_TYPE_UNCLEAN,
+ SSL_SHUTDOWN_TYPE_ACCURATE
+} ssl_shutdown_type_e;
+
+typedef struct {
+ SSL *ssl;
+ const char *client_dn;
+ X509 *client_cert;
+ ssl_shutdown_type_e shutdown_type;
+ const char *verify_info;
+ const char *verify_error;
+ int verify_depth;
+ int is_proxy;
+ int disabled;
+ int non_ssl_request;
+
+ /* Track the handshake/renegotiation state for the connection so
+ * that all client-initiated renegotiations can be rejected, as a
+ * partial fix for CVE-2009-3555. */
+ enum {
+ RENEG_INIT = 0, /* Before initial handshake */
+ RENEG_REJECT, /* After initial handshake; any client-initiated
+ * renegotiation should be rejected */
+        RENEG_ALLOW,      /* A server-initiated renegotiation is taking
+ * place (as dictated by configuration) */
+ RENEG_ABORT /* Renegotiation initiated by client, abort the
+ * connection */
+ } reneg_state;
+} SSLConnRec;
+
+typedef struct {
+ pid_t pid;
+ apr_pool_t *pPool;
+ BOOL bFixed;
+ int nSessionCacheMode;
+ char *szSessionCacheDataFile;
+ int nSessionCacheDataSize;
+ apr_shm_t *pSessionCacheDataMM;
+ apr_rmm_t *pSessionCacheDataRMM;
+ apr_table_t *tSessionCacheDataTable;
+ ssl_mutexmode_t nMutexMode;
+ apr_lockmech_e nMutexMech;
+ const char *szMutexFile;
+ apr_global_mutex_t *pMutex;
+ apr_array_header_t *aRandSeed;
+ apr_hash_t *tVHostKeys;
+ void *pTmpKeys[SSL_TMP_KEY_MAX];
+ apr_hash_t *tPublicCert;
+ apr_hash_t *tPrivateKey;
+#ifdef SSL_EXPERIMENTAL_ENGINE
+ char *szCryptoDevice;
+#endif
+ struct {
+ void *pV1, *pV2, *pV3, *pV4, *pV5, *pV6, *pV7, *pV8, *pV9, *pV10;
+ } rCtx;
+} SSLModConfigRec;
+
+/* public cert/private key */
+typedef struct {
+ /*
+ * server only has 1-2 certs/keys
+ * 1 RSA and/or 1 DSA
+ */
+ const char *cert_files[SSL_AIDX_MAX];
+ const char *key_files[SSL_AIDX_MAX];
+ X509 *certs[SSL_AIDX_MAX];
+ EVP_PKEY *keys[SSL_AIDX_MAX];
+} modssl_pk_server_t;
+
+typedef struct {
+ /* proxy can have any number of cert/key pairs */
+ const char *cert_file;
+ const char *cert_path;
+ STACK_OF(X509_INFO) *certs;
+} modssl_pk_proxy_t;
+
+/* stuff related to authentication that can also be per-dir */
+typedef struct {
+ /* known/trusted CAs */
+ const char *ca_cert_path;
+ const char *ca_cert_file;
+
+ const char *cipher_suite;
+
+ /* for client or downstream server authentication */
+ int verify_depth;
+ ssl_verify_t verify_mode;
+} modssl_auth_ctx_t;
+
+typedef struct SSLSrvConfigRec SSLSrvConfigRec;
+
+typedef struct {
+ SSLSrvConfigRec *sc; /* pointer back to server config */
+ SSL_CTX *ssl_ctx;
+
+ /* we are one or the other */
+ modssl_pk_server_t *pks;
+ modssl_pk_proxy_t *pkp;
+
+ ssl_proto_t protocol;
+
+ /* config for handling encrypted keys */
+ ssl_pphrase_t pphrase_dialog_type;
+ const char *pphrase_dialog_path;
+
+ const char *cert_chain;
+
+ /* certificate revocation list */
+ const char *crl_path;
+ const char *crl_file;
+ X509_STORE *crl;
+
+ modssl_auth_ctx_t auth;
+} modssl_ctx_t;
+
+struct SSLSrvConfigRec {
+ SSLModConfigRec *mc;
+ BOOL enabled;
+ BOOL proxy_enabled;
+ const char *vhost_id;
+ int vhost_id_len;
+ int session_cache_timeout;
+ BOOL insecure_reneg;
+ modssl_ctx_t *server;
+ modssl_ctx_t *proxy;
+};
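+/* The "server" context carries the SSL parameters for connections this
+ * virtual host terminates, while the "proxy" context carries the
+ * client-side parameters used for outgoing proxy requests (the SSLProxy*
+ * directives); myCtxConfig() selects between them via sslconn->is_proxy. */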
+
+/*
+ * Define the mod_ssl per-directory configuration structure
+ * (i.e. the local configuration for all <Directory>
+ * and .htaccess contexts)
+ */
+typedef struct {
+ BOOL bSSLRequired;
+ apr_array_header_t *aRequirement;
+ ssl_opt_t nOptions;
+ ssl_opt_t nOptionsAdd;
+ ssl_opt_t nOptionsDel;
+ const char *szCipherSuite;
+ ssl_verify_t nVerifyClient;
+ int nVerifyDepth;
+ const char *szCACertificatePath;
+ const char *szCACertificateFile;
+ const char *szUserName;
+} SSLDirConfigRec;
+
+/*
+ * function prototypes
+ */
+
+/* API glue structures */
+extern module AP_MODULE_DECLARE_DATA ssl_module;
+
+/* "global" stuff */
+extern const char ssl_valid_ssl_mutex_string[];
+
+/* configuration handling */
+SSLModConfigRec *ssl_config_global_create(server_rec *);
+void ssl_config_global_fix(SSLModConfigRec *);
+BOOL ssl_config_global_isfixed(SSLModConfigRec *);
+void *ssl_config_server_create(apr_pool_t *, server_rec *);
+void *ssl_config_server_merge(apr_pool_t *, void *, void *);
+void *ssl_config_perdir_create(apr_pool_t *, char *);
+void *ssl_config_perdir_merge(apr_pool_t *, void *, void *);
+const char *ssl_cmd_SSLMutex(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLPassPhraseDialog(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLCryptoDevice(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLRandomSeed(cmd_parms *, void *, const char *, const char *, const char *);
+const char *ssl_cmd_SSLEngine(cmd_parms *, void *, int);
+const char *ssl_cmd_SSLCipherSuite(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLCertificateFile(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLCertificateKeyFile(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLCertificateChainFile(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLCACertificatePath(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLCACertificateFile(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLCARevocationPath(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLCARevocationFile(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLVerifyClient(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLVerifyDepth(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLSessionCache(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLSessionCacheTimeout(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLProtocol(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLOptions(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLRequireSSL(cmd_parms *, void *);
+const char *ssl_cmd_SSLRequire(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLUserName(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLInsecureRenegotiation(cmd_parms *cmd, void *dcfg, int flag);
+
+const char *ssl_cmd_SSLProxyEngine(cmd_parms *cmd, void *dcfg, int flag);
+const char *ssl_cmd_SSLProxyProtocol(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLProxyCipherSuite(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLProxyVerify(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLProxyVerifyDepth(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLProxyCACertificatePath(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLProxyCACertificateFile(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLProxyCARevocationPath(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLProxyCARevocationFile(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLProxyMachineCertificatePath(cmd_parms *, void *, const char *);
+const char *ssl_cmd_SSLProxyMachineCertificateFile(cmd_parms *, void *, const char *);
+
+/* module initialization */
+int ssl_init_Module(apr_pool_t *, apr_pool_t *, apr_pool_t *, server_rec *);
+void ssl_init_Engine(server_rec *, apr_pool_t *);
+void ssl_init_ConfigureServer(server_rec *, apr_pool_t *, apr_pool_t *, SSLSrvConfigRec *);
+void ssl_init_CheckServers(server_rec *, apr_pool_t *);
+STACK_OF(X509_NAME)
+ *ssl_init_FindCAList(server_rec *, apr_pool_t *, const char *, const char *);
+void ssl_init_Child(apr_pool_t *, server_rec *);
+apr_status_t ssl_init_ModuleKill(void *data);
+
+/* Apache API hooks */
+int ssl_hook_Auth(request_rec *);
+int ssl_hook_UserCheck(request_rec *);
+int ssl_hook_Access(request_rec *);
+int ssl_hook_Fixup(request_rec *);
+int ssl_hook_ReadReq(request_rec *);
+
+/* OpenSSL callbacks */
+RSA *ssl_callback_TmpRSA(SSL *, int, int);
+DH *ssl_callback_TmpDH(SSL *, int, int);
+int ssl_callback_SSLVerify(int, X509_STORE_CTX *);
+int ssl_callback_SSLVerify_CRL(int, X509_STORE_CTX *, conn_rec *);
+int ssl_callback_proxy_cert(SSL *ssl, MODSSL_CLIENT_CERT_CB_ARG_TYPE **x509, EVP_PKEY **pkey);
+int ssl_callback_NewSessionCacheEntry(SSL *, SSL_SESSION *);
+SSL_SESSION *ssl_callback_GetSessionCacheEntry(SSL *, unsigned char *, int, int *);
+void ssl_callback_DelSessionCacheEntry(SSL_CTX *, SSL_SESSION *);
+void ssl_callback_Info(MODSSL_INFO_CB_ARG_TYPE, int, int);
+
+/* Session Cache Support */
+void ssl_scache_init(server_rec *, apr_pool_t *);
+#if 0 /* XXX */
+void ssl_scache_status_register(apr_pool_t *p);
+#endif
+void ssl_scache_kill(server_rec *);
+BOOL ssl_scache_store(server_rec *, UCHAR *, int, time_t, SSL_SESSION *);
+SSL_SESSION *ssl_scache_retrieve(server_rec *, UCHAR *, int);
+void ssl_scache_remove(server_rec *, UCHAR *, int);
+void ssl_scache_expire(server_rec *);
+void ssl_scache_status(server_rec *, apr_pool_t *, void (*)(char *, void *), void *);
+char *ssl_scache_id2sz(UCHAR *, int);
+void ssl_scache_dbm_init(server_rec *, apr_pool_t *);
+void ssl_scache_dbm_kill(server_rec *);
+BOOL ssl_scache_dbm_store(server_rec *, UCHAR *, int, time_t, SSL_SESSION *);
+SSL_SESSION *ssl_scache_dbm_retrieve(server_rec *, UCHAR *, int);
+void ssl_scache_dbm_remove(server_rec *, UCHAR *, int);
+void ssl_scache_dbm_expire(server_rec *);
+void ssl_scache_dbm_status(server_rec *, apr_pool_t *, void (*)(char *, void *), void *);
+
+void ssl_scache_shmht_init(server_rec *, apr_pool_t *);
+void ssl_scache_shmht_kill(server_rec *);
+BOOL ssl_scache_shmht_store(server_rec *, UCHAR *, int, time_t, SSL_SESSION *);
+SSL_SESSION *ssl_scache_shmht_retrieve(server_rec *, UCHAR *, int);
+void ssl_scache_shmht_remove(server_rec *, UCHAR *, int);
+void ssl_scache_shmht_expire(server_rec *);
+void ssl_scache_shmht_status(server_rec *, apr_pool_t *, void (*)(char *, void *), void *);
+
+void ssl_scache_shmcb_init(server_rec *, apr_pool_t *);
+void ssl_scache_shmcb_kill(server_rec *);
+BOOL ssl_scache_shmcb_store(server_rec *, UCHAR *, int, time_t, SSL_SESSION *);
+SSL_SESSION *ssl_scache_shmcb_retrieve(server_rec *, UCHAR *, int);
+void ssl_scache_shmcb_remove(server_rec *, UCHAR *, int);
+void ssl_scache_shmcb_expire(server_rec *);
+void ssl_scache_shmcb_status(server_rec *, apr_pool_t *, void (*)(char *, void *), void *);
+
+/* Pass Phrase Support */
+void ssl_pphrase_Handle(server_rec *, apr_pool_t *);
+
+/* Diffie-Hellman Parameter Support */
+DH *ssl_dh_GetTmpParam(int);
+DH *ssl_dh_GetParamFromFile(char *);
+
+unsigned char *ssl_asn1_table_set(apr_hash_t *table,
+ const char *key,
+ long int length);
+
+ssl_asn1_t *ssl_asn1_table_get(apr_hash_t *table,
+ const char *key);
+
+void ssl_asn1_table_unset(apr_hash_t *table,
+ const char *key);
+
+const char *ssl_asn1_keystr(int keytype);
+
+const char *ssl_asn1_table_keyfmt(apr_pool_t *p,
+ const char *id,
+ int keytype);
+/* Mutex Support */
+int ssl_mutex_init(server_rec *, apr_pool_t *);
+int ssl_mutex_reinit(server_rec *, apr_pool_t *);
+int ssl_mutex_on(server_rec *);
+int ssl_mutex_off(server_rec *);
+
+/* Logfile Support */
+void ssl_die(void);
+void ssl_log_ssl_error(const char *, int, int, server_rec *);
+
+/* Variables */
+void ssl_var_register(void);
+char *ssl_var_lookup(apr_pool_t *, server_rec *, conn_rec *, request_rec *, char *);
+void ssl_var_log_config_register(apr_pool_t *p);
+
+APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup,
+ (apr_pool_t *, server_rec *,
+ conn_rec *, request_rec *,
+ char *));
+
+/* An optional function which returns non-zero if the given connection
+ * is using SSL/TLS. */
+APR_DECLARE_OPTIONAL_FN(int, ssl_is_https, (conn_rec *));
+
+/* Proxy Support */
+int ssl_proxy_enable(conn_rec *c);
+int ssl_engine_disable(conn_rec *c);
+
+APR_DECLARE_OPTIONAL_FN(int, ssl_proxy_enable, (conn_rec *));
+
+APR_DECLARE_OPTIONAL_FN(int, ssl_engine_disable, (conn_rec *));
+
+/* I/O */
+void ssl_io_filter_init(conn_rec *, SSL *);
+void ssl_io_filter_register(apr_pool_t *);
+long ssl_io_data_cb(BIO *, int, MODSSL_BIO_CB_ARG_TYPE *, int, long, long);
+
+/* ssl_io_buffer_fill fills the setaside buffering of the HTTP request
+ * to allow an SSL renegotiation to take place. */
+int ssl_io_buffer_fill(request_rec *r);
+
+/* PRNG */
+int ssl_rand_seed(server_rec *, apr_pool_t *, ssl_rsctx_t, char *);
+
+/* Utility Functions */
+char *ssl_util_vhostid(apr_pool_t *, server_rec *);
+void ssl_util_strupper(char *);
+void ssl_util_uuencode(char *, const char *, BOOL);
+void ssl_util_uuencode_binary(unsigned char *, const unsigned char *, int, BOOL);
+apr_file_t *ssl_util_ppopen(server_rec *, apr_pool_t *, const char *,
+ const char * const *);
+void ssl_util_ppclose(server_rec *, apr_pool_t *, apr_file_t *);
+char *ssl_util_readfilter(server_rec *, apr_pool_t *, const char *,
+ const char * const *);
+BOOL ssl_util_path_check(ssl_pathcheck_t, const char *, apr_pool_t *);
+ssl_algo_t ssl_util_algotypeof(X509 *, EVP_PKEY *);
+char *ssl_util_algotypestr(ssl_algo_t);
+char *ssl_util_ptxtsub(apr_pool_t *, const char *, const char *, char *);
+void ssl_util_thread_setup(apr_pool_t *);
+
+#define APR_SHM_MAXSIZE (64 * 1024 * 1024)
+#endif /* __MOD_SSL_H__ */
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/modules.mk b/rubbos/app/httpd-2.0.64/modules/ssl/modules.mk
new file mode 100644
index 00000000..ceb52a1b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/modules.mk
@@ -0,0 +1,3 @@
+DISTCLEAN_TARGETS = modules.mk
+static =
+shared =
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_config.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_config.c
new file mode 100644
index 00000000..f597d2a4
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_config.c
@@ -0,0 +1,1420 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_engine_config.c
+ * Apache Configuration Directives
+ */
+ /* ``Damned if you do,
+ damned if you don't.''
+ -- Unknown */
+#include "mod_ssl.h"
+
+/* _________________________________________________________________
+**
+** Support for Global Configuration
+** _________________________________________________________________
+*/
+
+#define SSL_MOD_CONFIG_KEY "ssl_module"
+
+SSLModConfigRec *ssl_config_global_create(server_rec *s)
+{
+ apr_pool_t *pool = s->process->pool;
+ SSLModConfigRec *mc;
+ void *vmc;
+
+ apr_pool_userdata_get(&vmc, SSL_MOD_CONFIG_KEY, pool);
+ if (vmc) {
+ return vmc; /* reused for lifetime of the server */
+ }
+
+    /*
+     * allocate the configuration record from the process pool,
+     * which survives server restarts
+     */
+ mc = (SSLModConfigRec *)apr_palloc(pool, sizeof(*mc));
+ mc->pPool = pool;
+ mc->bFixed = FALSE;
+
+ /*
+ * initialize per-module configuration
+ */
+ mc->nSessionCacheMode = SSL_SCMODE_UNSET;
+ mc->szSessionCacheDataFile = NULL;
+ mc->nSessionCacheDataSize = 0;
+ mc->pSessionCacheDataMM = NULL;
+ mc->pSessionCacheDataRMM = NULL;
+ mc->tSessionCacheDataTable = NULL;
+ mc->nMutexMode = SSL_MUTEXMODE_UNSET;
+ mc->nMutexMech = APR_LOCK_DEFAULT;
+ mc->szMutexFile = NULL;
+ mc->pMutex = NULL;
+ mc->aRandSeed = apr_array_make(pool, 4,
+ sizeof(ssl_randseed_t));
+ mc->tVHostKeys = apr_hash_make(pool);
+ mc->tPrivateKey = apr_hash_make(pool);
+ mc->tPublicCert = apr_hash_make(pool);
+#ifdef SSL_EXPERIMENTAL_ENGINE
+ mc->szCryptoDevice = NULL;
+#endif
+
+ memset(mc->pTmpKeys, 0, sizeof(mc->pTmpKeys));
+
+ apr_pool_userdata_set(mc, SSL_MOD_CONFIG_KEY,
+ apr_pool_cleanup_null,
+ pool);
+
+ return mc;
+}
+
+void ssl_config_global_fix(SSLModConfigRec *mc)
+{
+ mc->bFixed = TRUE;
+}
+
+BOOL ssl_config_global_isfixed(SSLModConfigRec *mc)
+{
+ return mc->bFixed;
+}
+
+/* _________________________________________________________________
+**
+** Configuration handling
+** _________________________________________________________________
+*/
+
+static void modssl_ctx_init(modssl_ctx_t *mctx)
+{
+ mctx->sc = NULL; /* set during module init */
+
+ mctx->ssl_ctx = NULL; /* set during module init */
+
+ mctx->pks = NULL;
+ mctx->pkp = NULL;
+
+ mctx->protocol = SSL_PROTOCOL_ALL;
+
+ mctx->pphrase_dialog_type = SSL_PPTYPE_UNSET;
+ mctx->pphrase_dialog_path = NULL;
+
+ mctx->cert_chain = NULL;
+
+ mctx->crl_path = NULL;
+ mctx->crl_file = NULL;
+ mctx->crl = NULL; /* set during module init */
+
+ mctx->auth.ca_cert_path = NULL;
+ mctx->auth.ca_cert_file = NULL;
+ mctx->auth.cipher_suite = NULL;
+ mctx->auth.verify_depth = UNSET;
+ mctx->auth.verify_mode = SSL_CVERIFY_UNSET;
+}
+
+static void modssl_ctx_init_proxy(SSLSrvConfigRec *sc,
+ apr_pool_t *p)
+{
+ modssl_ctx_t *mctx;
+
+ mctx = sc->proxy = apr_palloc(p, sizeof(*sc->proxy));
+
+ modssl_ctx_init(mctx);
+
+ mctx->pkp = apr_palloc(p, sizeof(*mctx->pkp));
+
+ mctx->pkp->cert_file = NULL;
+ mctx->pkp->cert_path = NULL;
+ mctx->pkp->certs = NULL;
+}
+
+static void modssl_ctx_init_server(SSLSrvConfigRec *sc,
+ apr_pool_t *p)
+{
+ modssl_ctx_t *mctx;
+
+ mctx = sc->server = apr_palloc(p, sizeof(*sc->server));
+
+ modssl_ctx_init(mctx);
+
+ mctx->pks = apr_palloc(p, sizeof(*mctx->pks));
+
+ memset((void*)mctx->pks->cert_files, 0, sizeof(mctx->pks->cert_files));
+
+ memset((void*)mctx->pks->key_files, 0, sizeof(mctx->pks->key_files));
+
+ /* certs/keys are set during module init */
+
+ memset(mctx->pks->certs, 0, sizeof(mctx->pks->certs));
+
+ memset(mctx->pks->keys, 0, sizeof(mctx->pks->keys));
+}
+
+static SSLSrvConfigRec *ssl_config_server_new(apr_pool_t *p)
+{
+ SSLSrvConfigRec *sc = apr_palloc(p, sizeof(*sc));
+
+ sc->mc = NULL;
+ sc->enabled = UNSET;
+ sc->proxy_enabled = UNSET;
+ sc->vhost_id = NULL; /* set during module init */
+ sc->vhost_id_len = 0; /* set during module init */
+ sc->session_cache_timeout = UNSET;
+ sc->insecure_reneg = UNSET;
+
+ modssl_ctx_init_proxy(sc, p);
+
+ modssl_ctx_init_server(sc, p);
+
+ return sc;
+}
+
+/*
+ * Create per-server SSL configuration
+ */
+void *ssl_config_server_create(apr_pool_t *p, server_rec *s)
+{
+ SSLSrvConfigRec *sc = ssl_config_server_new(p);
+
+ sc->mc = ssl_config_global_create(s);
+
+ return sc;
+}
+
+#define cfgMerge(el,unset) mrg->el = (add->el == (unset)) ? base->el : add->el
+#define cfgMergeArray(el) mrg->el = apr_array_append(p, add->el, base->el)
+#define cfgMergeString(el) cfgMerge(el, NULL)
+#define cfgMergeBool(el) cfgMerge(el, UNSET)
+#define cfgMergeInt(el) cfgMerge(el, UNSET)
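+/* For example, cfgMergeBool(enabled) expands to
+ *   mrg->enabled = (add->enabled == (UNSET)) ? base->enabled : add->enabled;
+ * so a value set in the more specific ("add") configuration wins and
+ * anything left UNSET is inherited from the base configuration. */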
+
+static void modssl_ctx_cfg_merge(modssl_ctx_t *base,
+ modssl_ctx_t *add,
+ modssl_ctx_t *mrg)
+{
+ cfgMerge(protocol, SSL_PROTOCOL_ALL);
+
+ cfgMerge(pphrase_dialog_type, SSL_PPTYPE_UNSET);
+ cfgMergeString(pphrase_dialog_path);
+
+ cfgMergeString(cert_chain);
+
+ cfgMerge(crl_path, NULL);
+ cfgMerge(crl_file, NULL);
+
+ cfgMergeString(auth.ca_cert_path);
+ cfgMergeString(auth.ca_cert_file);
+ cfgMergeString(auth.cipher_suite);
+ cfgMergeInt(auth.verify_depth);
+ cfgMerge(auth.verify_mode, SSL_CVERIFY_UNSET);
+}
+
+static void modssl_ctx_cfg_merge_proxy(modssl_ctx_t *base,
+ modssl_ctx_t *add,
+ modssl_ctx_t *mrg)
+{
+ modssl_ctx_cfg_merge(base, add, mrg);
+
+ cfgMergeString(pkp->cert_file);
+ cfgMergeString(pkp->cert_path);
+}
+
+static void modssl_ctx_cfg_merge_server(modssl_ctx_t *base,
+ modssl_ctx_t *add,
+ modssl_ctx_t *mrg)
+{
+ int i;
+
+ modssl_ctx_cfg_merge(base, add, mrg);
+
+ for (i = 0; i < SSL_AIDX_MAX; i++) {
+ cfgMergeString(pks->cert_files[i]);
+ cfgMergeString(pks->key_files[i]);
+ }
+}
+
+/*
+ * Merge per-server SSL configurations
+ */
+void *ssl_config_server_merge(apr_pool_t *p, void *basev, void *addv)
+{
+ SSLSrvConfigRec *base = (SSLSrvConfigRec *)basev;
+ SSLSrvConfigRec *add = (SSLSrvConfigRec *)addv;
+ SSLSrvConfigRec *mrg = ssl_config_server_new(p);
+
+ cfgMerge(mc, NULL);
+ cfgMergeBool(enabled);
+ cfgMergeBool(proxy_enabled);
+ cfgMergeInt(session_cache_timeout);
+ cfgMergeBool(insecure_reneg);
+
+ modssl_ctx_cfg_merge_proxy(base->proxy, add->proxy, mrg->proxy);
+
+ modssl_ctx_cfg_merge_server(base->server, add->server, mrg->server);
+
+ return mrg;
+}
+
+/*
+ * Create per-directory SSL configuration
+ */
+void *ssl_config_perdir_create(apr_pool_t *p, char *dir)
+{
+ SSLDirConfigRec *dc = apr_palloc(p, sizeof(*dc));
+
+ dc->bSSLRequired = FALSE;
+ dc->aRequirement = apr_array_make(p, 4, sizeof(ssl_require_t));
+ dc->nOptions = SSL_OPT_NONE|SSL_OPT_RELSET;
+ dc->nOptionsAdd = SSL_OPT_NONE;
+ dc->nOptionsDel = SSL_OPT_NONE;
+
+ dc->szCipherSuite = NULL;
+ dc->nVerifyClient = SSL_CVERIFY_UNSET;
+ dc->nVerifyDepth = UNSET;
+
+ dc->szCACertificatePath = NULL;
+ dc->szCACertificateFile = NULL;
+ dc->szUserName = NULL;
+
+ return dc;
+}
+
+/*
+ * Merge per-directory SSL configurations
+ */
+void *ssl_config_perdir_merge(apr_pool_t *p, void *basev, void *addv)
+{
+ SSLDirConfigRec *base = (SSLDirConfigRec *)basev;
+ SSLDirConfigRec *add = (SSLDirConfigRec *)addv;
+ SSLDirConfigRec *mrg = (SSLDirConfigRec *)apr_palloc(p, sizeof(*mrg));
+
+ cfgMerge(bSSLRequired, FALSE);
+ cfgMergeArray(aRequirement);
+
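+    /* If the more specific config used the relative "+opt"/"-opt" syntax
+     * (flagged by SSL_OPT_RELSET), fold its additions and deletions into
+     * the inherited options; an absolute SSLOptions setting simply
+     * replaces the inherited value. */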
+ if (add->nOptions & SSL_OPT_RELSET) {
+ mrg->nOptionsAdd =
+ (base->nOptionsAdd & ~(add->nOptionsDel)) | add->nOptionsAdd;
+ mrg->nOptionsDel =
+ (base->nOptionsDel & ~(add->nOptionsAdd)) | add->nOptionsDel;
+ mrg->nOptions =
+ (base->nOptions & ~(mrg->nOptionsDel)) | mrg->nOptionsAdd;
+ }
+ else {
+ mrg->nOptions = add->nOptions;
+ mrg->nOptionsAdd = add->nOptionsAdd;
+ mrg->nOptionsDel = add->nOptionsDel;
+ }
+
+ cfgMergeString(szCipherSuite);
+ cfgMerge(nVerifyClient, SSL_CVERIFY_UNSET);
+ cfgMergeInt(nVerifyDepth);
+
+ cfgMergeString(szCACertificatePath);
+ cfgMergeString(szCACertificateFile);
+ cfgMergeString(szUserName);
+
+ return mrg;
+}
+
+/*
+ * Configuration functions for particular directives
+ */
+
+const char *ssl_cmd_SSLMutex(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ const char *err;
+ SSLModConfigRec *mc = myModConfig(cmd->server);
+
+ if ((err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) {
+ return err;
+ }
+
+ if (ssl_config_global_isfixed(mc)) {
+ return NULL;
+ }
+
+ if (strcEQ(arg, "none") || strcEQ(arg, "no")) {
+ mc->nMutexMode = SSL_MUTEXMODE_NONE;
+ }
+ /* NOTE: previously, 'yes' implied 'sem' */
+ else if (strcEQ(arg, "default") || strcEQ(arg, "yes")) {
+ mc->nMutexMode = SSL_MUTEXMODE_USED;
+ mc->nMutexMech = APR_LOCK_DEFAULT;
+ mc->szMutexFile = NULL; /* APR determines temporary filename */
+ }
+#if APR_HAS_FLOCK_SERIALIZE
+ else if (strlen(arg) > 6 && strcEQn(arg, "flock:", 6)) {
+ const char *file = ap_server_root_relative(cmd->pool, arg+6);
+ if (!file) {
+ return apr_pstrcat(cmd->pool, "Invalid SSLMutex flock: path ",
+ arg+6, NULL);
+ }
+ mc->nMutexMode = SSL_MUTEXMODE_USED;
+ mc->nMutexMech = APR_LOCK_FLOCK;
+ mc->szMutexFile = apr_psprintf(mc->pPool, "%s.%lu",
+ file, (unsigned long)getpid());
+ }
+#endif
+#if APR_HAS_FCNTL_SERIALIZE
+ else if (strlen(arg) > 6 && strcEQn(arg, "fcntl:", 6)) {
+ const char *file = ap_server_root_relative(cmd->pool, arg+6);
+ if (!file) {
+ return apr_pstrcat(cmd->pool, "Invalid SSLMutex fcntl: path ",
+ arg+6, NULL);
+ }
+ mc->nMutexMode = SSL_MUTEXMODE_USED;
+ mc->nMutexMech = APR_LOCK_FCNTL;
+ mc->szMutexFile = apr_psprintf(mc->pPool, "%s.%lu",
+ file, (unsigned long)getpid());
+ }
+#endif
+#if APR_HAS_SYSVSEM_SERIALIZE && !defined(PERCHILD_MPM)
+ else if (strcEQ(arg, "sysvsem")) {
+ mc->nMutexMode = SSL_MUTEXMODE_USED;
+ mc->nMutexMech = APR_LOCK_SYSVSEM;
+ mc->szMutexFile = NULL; /* APR determines temporary filename */
+ }
+#endif
+#if APR_HAS_POSIXSEM_SERIALIZE
+ else if (strcEQ(arg, "posixsem")) {
+ mc->nMutexMode = SSL_MUTEXMODE_USED;
+ mc->nMutexMech = APR_LOCK_POSIXSEM;
+ mc->szMutexFile = NULL; /* APR determines temporary filename */
+ }
+#endif
+#if APR_HAS_PROC_PTHREAD_SERIALIZE
+ else if (strcEQ(arg, "pthread")) {
+ mc->nMutexMode = SSL_MUTEXMODE_USED;
+ mc->nMutexMech = APR_LOCK_PROC_PTHREAD;
+ mc->szMutexFile = NULL; /* APR determines temporary filename */
+ }
+#endif
+#if APR_HAS_FLOCK_SERIALIZE || APR_HAS_FCNTL_SERIALIZE
+ else if (strlen(arg) > 5 && strcEQn(arg, "file:", 5)) {
+ const char *file = ap_server_root_relative(cmd->pool, arg+5);
+ if (!file) {
+ return apr_pstrcat(cmd->pool, "Invalid SSLMutex file: path ",
+ arg+5, NULL);
+ }
+ mc->nMutexMode = SSL_MUTEXMODE_USED;
+#if APR_HAS_FLOCK_SERIALIZE
+ mc->nMutexMech = APR_LOCK_FLOCK;
+#endif
+#if APR_HAS_FCNTL_SERIALIZE
+ mc->nMutexMech = APR_LOCK_FCNTL;
+#endif
+ mc->szMutexFile =
+ apr_psprintf(mc->pPool, "%s.%lu",
+ file, (unsigned long)getpid());
+ }
+#endif
+#if (APR_HAS_SYSVSEM_SERIALIZE && !defined(PERCHILD_MPM)) || APR_HAS_POSIXSEM_SERIALIZE
+ else if (strcEQ(arg, "sem")) {
+ mc->nMutexMode = SSL_MUTEXMODE_USED;
+#if APR_HAS_SYSVSEM_SERIALIZE && !defined(PERCHILD_MPM)
+ mc->nMutexMech = APR_LOCK_SYSVSEM;
+#endif
+#if APR_HAS_POSIXSEM_SERIALIZE
+ mc->nMutexMech = APR_LOCK_POSIXSEM;
+#endif
+ mc->szMutexFile = NULL; /* APR determines temporary filename */
+ }
+#endif
+ else {
+ return apr_pstrcat(cmd->pool, "Invalid SSLMutex argument ",
+ arg, " (", ssl_valid_ssl_mutex_string, ")", NULL);
+ }
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLPassPhraseDialog(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ const char *err;
+ int arglen = strlen(arg);
+
+ if ((err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) {
+ return err;
+ }
+
+ if (strcEQ(arg, "builtin")) {
+ sc->server->pphrase_dialog_type = SSL_PPTYPE_BUILTIN;
+ sc->server->pphrase_dialog_path = NULL;
+ }
+ else if ((arglen > 5) && strEQn(arg, "exec:", 5)) {
+ sc->server->pphrase_dialog_type = SSL_PPTYPE_FILTER;
+ /* ### This is broken, exec: may contain args, no? */
+ sc->server->pphrase_dialog_path =
+ ap_server_root_relative(cmd->pool, arg+5);
+ if (!sc->server->pphrase_dialog_path) {
+ return apr_pstrcat(cmd->pool,
+ "Invalid SSLPassPhraseDialog exec: path ",
+ arg+5, NULL);
+ }
+ if (!ssl_util_path_check(SSL_PCM_EXISTS,
+ sc->server->pphrase_dialog_path,
+ cmd->pool))
+ {
+ return apr_pstrcat(cmd->pool,
+ "SSLPassPhraseDialog: file '",
+ sc->server->pphrase_dialog_path,
+ "' does not exist", NULL);
+ }
+
+ }
+ else if ((arglen > 1) && (arg[0] == '|')) {
+ sc->server->pphrase_dialog_type = SSL_PPTYPE_PIPE;
+ sc->server->pphrase_dialog_path = arg + 1;
+ }
+ else {
+ return "SSLPassPhraseDialog: Invalid argument";
+ }
+
+ return NULL;
+}
+
+#ifdef SSL_EXPERIMENTAL_ENGINE
+const char *ssl_cmd_SSLCryptoDevice(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLModConfigRec *mc = myModConfig(cmd->server);
+ const char *err;
+ ENGINE *e;
+#if SSL_LIBRARY_VERSION >= 0x00907000
+ static int loaded_engines = FALSE;
+
+    /* load the builtin engines early to make sure they are already
+       available for the ENGINE_by_id() call below */
+ if (!loaded_engines) {
+ ENGINE_load_builtin_engines();
+ loaded_engines = TRUE;
+ }
+#endif
+ if ((err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) {
+ return err;
+ }
+
+ if (strcEQ(arg, "builtin")) {
+ mc->szCryptoDevice = NULL;
+ }
+ else if ((e = ENGINE_by_id(arg))) {
+ mc->szCryptoDevice = arg;
+ ENGINE_free(e);
+ }
+ else {
+ return "SSLCryptoDevice: Invalid argument";
+ }
+
+ return NULL;
+}
+#endif
+
+const char *ssl_cmd_SSLRandomSeed(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg1,
+ const char *arg2,
+ const char *arg3)
+{
+ SSLModConfigRec *mc = myModConfig(cmd->server);
+ const char *err;
+ ssl_randseed_t *seed;
+ int arg2len = strlen(arg2);
+
+ if ((err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) {
+ return err;
+ }
+
+ if (ssl_config_global_isfixed(mc)) {
+ return NULL;
+ }
+
+ seed = apr_array_push(mc->aRandSeed);
+
+ if (strcEQ(arg1, "startup")) {
+ seed->nCtx = SSL_RSCTX_STARTUP;
+ }
+ else if (strcEQ(arg1, "connect")) {
+ seed->nCtx = SSL_RSCTX_CONNECT;
+ }
+ else {
+ return apr_pstrcat(cmd->pool, "SSLRandomSeed: "
+ "invalid context: `", arg1, "'",
+ NULL);
+ }
+
+ if ((arg2len > 5) && strEQn(arg2, "file:", 5)) {
+ seed->nSrc = SSL_RSSRC_FILE;
+ seed->cpPath = ap_server_root_relative(mc->pPool, arg2+5);
+ }
+ else if ((arg2len > 5) && strEQn(arg2, "exec:", 5)) {
+ seed->nSrc = SSL_RSSRC_EXEC;
+ seed->cpPath = ap_server_root_relative(mc->pPool, arg2+5);
+ }
+ else if ((arg2len > 4) && strEQn(arg2, "egd:", 4)) {
+#ifdef HAVE_SSL_RAND_EGD
+ seed->nSrc = SSL_RSSRC_EGD;
+ seed->cpPath = ap_server_root_relative(mc->pPool, arg2+4);
+#else
+ return "egd not supported with this SSL toolkit";
+#endif
+ }
+ else if (strcEQ(arg2, "builtin")) {
+ seed->nSrc = SSL_RSSRC_BUILTIN;
+ seed->cpPath = NULL;
+ }
+ else {
+ seed->nSrc = SSL_RSSRC_FILE;
+ seed->cpPath = ap_server_root_relative(mc->pPool, arg2);
+ }
+
+ if (seed->nSrc != SSL_RSSRC_BUILTIN) {
+ if (!seed->cpPath) {
+ return apr_pstrcat(cmd->pool,
+ "Invalid SSLRandomSeed path ",
+ arg2, NULL);
+ }
+ if (!ssl_util_path_check(SSL_PCM_EXISTS, seed->cpPath, cmd->pool)) {
+ return apr_pstrcat(cmd->pool,
+ "SSLRandomSeed: source path '",
+ seed->cpPath, "' does not exist", NULL);
+ }
+ }
+
+ if (!arg3) {
+ seed->nBytes = 0; /* read whole file */
+ }
+ else {
+ if (seed->nSrc == SSL_RSSRC_BUILTIN) {
+ return "SSLRandomSeed: byte specification not "
+ "allowed for builtin seed source";
+ }
+
+ seed->nBytes = atoi(arg3);
+
+ if (seed->nBytes < 0) {
+ return "SSLRandomSeed: invalid number of bytes specified";
+ }
+ }
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLEngine(cmd_parms *cmd, void *dcfg, int flag)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+
+ sc->enabled = flag ? TRUE : FALSE;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLCipherSuite(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ SSLDirConfigRec *dc = (SSLDirConfigRec *)dcfg;
+
+ if (cmd->path) {
+ dc->szCipherSuite = arg;
+ }
+ else {
+ sc->server->auth.cipher_suite = arg;
+ }
+
+ return NULL;
+}
+
+#define SSL_FLAGS_CHECK_FILE \
+ (SSL_PCM_EXISTS|SSL_PCM_ISREG|SSL_PCM_ISNONZERO)
+
+#define SSL_FLAGS_CHECK_DIR \
+ (SSL_PCM_EXISTS|SSL_PCM_ISDIR)
+
+static const char *ssl_cmd_check_file(cmd_parms *parms,
+ const char **file)
+{
+ const char *filepath = ap_server_root_relative(parms->pool, *file);
+
+ if (!filepath) {
+ return apr_pstrcat(parms->pool, parms->cmd->name,
+ ": Invalid file path ", *file, NULL);
+ }
+ *file = filepath;
+
+ if (ssl_util_path_check(SSL_FLAGS_CHECK_FILE, *file, parms->pool)) {
+ return NULL;
+ }
+
+ return apr_pstrcat(parms->pool, parms->cmd->name,
+ ": file '", *file,
+ "' does not exist or is empty", NULL);
+
+}
+
+const char *ssl_cmd_SSLInsecureRenegotiation(cmd_parms *cmd, void *dcfg, int flag)
+{
+#ifdef SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+    sc->insecure_reneg = flag ? TRUE : FALSE;
+ return NULL;
+#else
+ return "The SSLInsecureRenegotiation directive is not available "
+ "with this SSL library";
+#endif
+}
+
+
+static const char *ssl_cmd_check_dir(cmd_parms *parms,
+ const char **dir)
+{
+ const char *dirpath = ap_server_root_relative(parms->pool, *dir);
+
+ if (!dirpath) {
+ return apr_pstrcat(parms->pool, parms->cmd->name,
+ ": Invalid dir path ", *dir, NULL);
+ }
+ *dir = dirpath;
+
+ if (ssl_util_path_check(SSL_FLAGS_CHECK_DIR, *dir, parms->pool)) {
+ return NULL;
+ }
+
+ return apr_pstrcat(parms->pool, parms->cmd->name,
+ ": directory '", *dir,
+ "' does not exist", NULL);
+
+}
+
+#define SSL_AIDX_CERTS 1
+#define SSL_AIDX_KEYS 2
+
+static const char *ssl_cmd_check_aidx_max(cmd_parms *parms,
+ const char *arg,
+ int idx)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(parms->server);
+ const char *err, *desc=NULL, **files=NULL;
+ int i;
+
+ if ((err = ssl_cmd_check_file(parms, &arg))) {
+ return err;
+ }
+
+ switch (idx) {
+ case SSL_AIDX_CERTS:
+ desc = "certificates";
+ files = sc->server->pks->cert_files;
+ break;
+ case SSL_AIDX_KEYS:
+ desc = "private keys";
+ files = sc->server->pks->key_files;
+ break;
+ }
+
+ for (i = 0; i < SSL_AIDX_MAX; i++) {
+ if (!files[i]) {
+ files[i] = arg;
+ return NULL;
+ }
+ }
+
+ return apr_psprintf(parms->pool,
+ "%s: only up to %d "
+ "different %s per virtual host allowed",
+ parms->cmd->name, SSL_AIDX_MAX, desc);
+}
+
+const char *ssl_cmd_SSLCertificateFile(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+
+ const char *err;
+
+ if ((err = ssl_cmd_check_aidx_max(cmd, arg, SSL_AIDX_CERTS))) {
+ return err;
+ }
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLCertificateKeyFile(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ const char *err;
+
+ if ((err = ssl_cmd_check_aidx_max(cmd, arg, SSL_AIDX_KEYS))) {
+ return err;
+ }
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLCertificateChainFile(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ const char *err;
+
+ if ((err = ssl_cmd_check_file(cmd, &arg))) {
+ return err;
+ }
+
+ sc->server->cert_chain = arg;
+
+ return NULL;
+}
+
+#define NO_PER_DIR_SSL_CA \
+ "Your ssl library does not have support for per-directory CA"
+
+#ifdef HAVE_SSL_SET_CERT_STORE
+# define MODSSL_HAVE_SSL_SET_CERT_STORE 1
+#else
+# define MODSSL_HAVE_SSL_SET_CERT_STORE 0
+#endif
+
+#define MODSSL_SET_CA(f) \
+ if (cmd->path) \
+ if (MODSSL_HAVE_SSL_SET_CERT_STORE) \
+ dc->f = arg; \
+ else \
+ return NO_PER_DIR_SSL_CA; \
+ else \
+ sc->f = arg \
+
+const char *ssl_cmd_SSLCACertificatePath(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ /*SSLDirConfigRec *dc = (SSLDirConfigRec *)dcfg;*/
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ const char *err;
+
+ if ((err = ssl_cmd_check_dir(cmd, &arg))) {
+ return err;
+ }
+
+ /* XXX: bring back per-dir */
+ sc->server->auth.ca_cert_path = arg;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLCACertificateFile(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ /*SSLDirConfigRec *dc = (SSLDirConfigRec *)dcfg;*/
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ const char *err;
+
+ if ((err = ssl_cmd_check_file(cmd, &arg))) {
+ return err;
+ }
+
+ /* XXX: bring back per-dir */
+ sc->server->auth.ca_cert_file = arg;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLCARevocationPath(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ const char *err;
+
+ if ((err = ssl_cmd_check_dir(cmd, &arg))) {
+ return err;
+ }
+
+ sc->server->crl_path = arg;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLCARevocationFile(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ const char *err;
+
+ if ((err = ssl_cmd_check_file(cmd, &arg))) {
+ return err;
+ }
+
+ sc->server->crl_file = arg;
+
+ return NULL;
+}
+
+static const char *ssl_cmd_verify_parse(cmd_parms *parms,
+ const char *arg,
+ ssl_verify_t *id)
+{
+ if (strcEQ(arg, "none") || strcEQ(arg, "off")) {
+ *id = SSL_CVERIFY_NONE;
+ }
+ else if (strcEQ(arg, "optional")) {
+ *id = SSL_CVERIFY_OPTIONAL;
+ }
+ else if (strcEQ(arg, "require") || strcEQ(arg, "on")) {
+ *id = SSL_CVERIFY_REQUIRE;
+ }
+ else if (strcEQ(arg, "optional_no_ca")) {
+ *id = SSL_CVERIFY_OPTIONAL_NO_CA;
+ }
+ else {
+ return apr_pstrcat(parms->temp_pool, parms->cmd->name,
+ ": Invalid argument '", arg, "'",
+ NULL);
+ }
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLVerifyClient(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLDirConfigRec *dc = (SSLDirConfigRec *)dcfg;
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ ssl_verify_t mode;
+ const char *err;
+
+ if ((err = ssl_cmd_verify_parse(cmd, arg, &mode))) {
+ return err;
+ }
+
+ if (cmd->path) {
+ dc->nVerifyClient = mode;
+ }
+ else {
+ sc->server->auth.verify_mode = mode;
+ }
+
+ return NULL;
+}
+
+static const char *ssl_cmd_verify_depth_parse(cmd_parms *parms,
+ const char *arg,
+ int *depth)
+{
+ if ((*depth = atoi(arg)) >= 0) {
+ return NULL;
+ }
+
+ return apr_pstrcat(parms->temp_pool, parms->cmd->name,
+ ": Invalid argument '", arg, "'",
+ NULL);
+}
+
+const char *ssl_cmd_SSLVerifyDepth(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLDirConfigRec *dc = (SSLDirConfigRec *)dcfg;
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ int depth;
+ const char *err;
+
+ if ((err = ssl_cmd_verify_depth_parse(cmd, arg, &depth))) {
+ return err;
+ }
+
+ if (cmd->path) {
+ dc->nVerifyDepth = depth;
+ }
+ else {
+ sc->server->auth.verify_depth = depth;
+ }
+
+ return NULL;
+}
+
+#define MODSSL_NO_SHARED_MEMORY_ERROR \
+ "SSLSessionCache: shared memory cache not useable on this platform"
+
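+/* Accepted arguments: "none", "dbm:/path/to/datafile",
+ * "shmht:/path/to/datafile(bytes)" and "shm:" / "shmcb:/path/to/datafile(bytes)";
+ * the optional "(bytes)" suffix sizes the shared-memory segment and
+ * defaults to 512KB. */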
+const char *ssl_cmd_SSLSessionCache(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLModConfigRec *mc = myModConfig(cmd->server);
+ const char *err, *colon;
+ char *cp, *cp2;
+ int arglen = strlen(arg);
+
+ if ((err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) {
+ return err;
+ }
+
+ if (ssl_config_global_isfixed(mc)) {
+ return NULL;
+ }
+
+ if (strcEQ(arg, "none")) {
+ mc->nSessionCacheMode = SSL_SCMODE_NONE;
+ mc->szSessionCacheDataFile = NULL;
+ }
+ else if ((arglen > 4) && strcEQn(arg, "dbm:", 4)) {
+ mc->nSessionCacheMode = SSL_SCMODE_DBM;
+ mc->szSessionCacheDataFile = ap_server_root_relative(mc->pPool, arg+4);
+ if (!mc->szSessionCacheDataFile) {
+ return apr_psprintf(cmd->pool,
+ "SSLSessionCache: Invalid cache file path %s",
+ arg+4);
+ }
+ }
+ else if ((arglen > 6) && strcEQn(arg, "shmht:", 6)) {
+#if !APR_HAS_SHARED_MEMORY
+ return MODSSL_NO_SHARED_MEMORY_ERROR;
+#endif
+ mc->nSessionCacheMode = SSL_SCMODE_SHMHT;
+ colon = ap_strchr_c(arg, ':');
+ mc->szSessionCacheDataFile =
+ ap_server_root_relative(mc->pPool, colon+1);
+ if (!mc->szSessionCacheDataFile) {
+ return apr_psprintf(cmd->pool,
+ "SSLSessionCache: Invalid cache file path %s",
+ colon+1);
+ }
+ mc->tSessionCacheDataTable = NULL;
+ mc->nSessionCacheDataSize = 1024*512; /* 512KB */
+
+ if ((cp = strchr(mc->szSessionCacheDataFile, '('))) {
+ *cp++ = NUL;
+
+ if (!(cp2 = strchr(cp, ')'))) {
+ return "SSLSessionCache: Invalid argument: "
+ "no closing parenthesis";
+ }
+
+ *cp2 = NUL;
+
+ mc->nSessionCacheDataSize = atoi(cp);
+
+ if (mc->nSessionCacheDataSize < 8192) {
+ return "SSLSessionCache: Invalid argument: "
+ "size has to be >= 8192 bytes";
+ }
+
+ if (mc->nSessionCacheDataSize >= APR_SHM_MAXSIZE) {
+ return apr_psprintf(cmd->pool,
+ "SSLSessionCache: Invalid argument: "
+ "size has to be < %d bytes on this "
+ "platform", APR_SHM_MAXSIZE);
+ }
+ }
+ }
+ else if (((arglen > 4) && strcEQn(arg, "shm:", 4)) ||
+ ((arglen > 6) && strcEQn(arg, "shmcb:", 6))) {
+#if !APR_HAS_SHARED_MEMORY
+ return MODSSL_NO_SHARED_MEMORY_ERROR;
+#endif
+ mc->nSessionCacheMode = SSL_SCMODE_SHMCB;
+ colon = ap_strchr_c(arg, ':');
+ mc->szSessionCacheDataFile =
+ ap_server_root_relative(mc->pPool, colon+1);
+ if (!mc->szSessionCacheDataFile) {
+ return apr_psprintf(cmd->pool,
+ "SSLSessionCache: Invalid cache file path %s",
+ colon+1);
+ }
+ mc->tSessionCacheDataTable = NULL;
+ mc->nSessionCacheDataSize = 1024*512; /* 512KB */
+
+ if ((cp = strchr(mc->szSessionCacheDataFile, '('))) {
+ *cp++ = NUL;
+
+ if (!(cp2 = strchr(cp, ')'))) {
+ return "SSLSessionCache: Invalid argument: "
+ "no closing parenthesis";
+ }
+
+ *cp2 = NUL;
+
+ mc->nSessionCacheDataSize = atoi(cp);
+
+ if (mc->nSessionCacheDataSize < 8192) {
+ return "SSLSessionCache: Invalid argument: "
+ "size has to be >= 8192 bytes";
+
+ }
+
+ if (mc->nSessionCacheDataSize >= APR_SHM_MAXSIZE) {
+ return apr_psprintf(cmd->pool,
+ "SSLSessionCache: Invalid argument: "
+ "size has to be < %d bytes on this "
+ "platform", APR_SHM_MAXSIZE);
+
+ }
+ }
+ }
+ else {
+ return "SSLSessionCache: Invalid argument";
+ }
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLSessionCacheTimeout(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+
+ sc->session_cache_timeout = atoi(arg);
+
+ if (sc->session_cache_timeout < 0) {
+ return "SSLSessionCacheTimeout: Invalid argument";
+ }
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLOptions(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLDirConfigRec *dc = (SSLDirConfigRec *)dcfg;
+ ssl_opt_t opt;
+ int first = TRUE;
+ char action, *w;
+
+ while (*arg) {
+ w = ap_getword_conf(cmd->pool, &arg);
+ action = NUL;
+
+ if ((*w == '+') || (*w == '-')) {
+ action = *(w++);
+ }
+ else if (first) {
+ dc->nOptions = SSL_OPT_NONE;
+ first = FALSE;
+ }
+
+ if (strcEQ(w, "StdEnvVars")) {
+ opt = SSL_OPT_STDENVVARS;
+ }
+ else if (strcEQ(w, "CompatEnvVars")) {
+ opt = SSL_OPT_COMPATENVVARS;
+ }
+ else if (strcEQ(w, "ExportCertData")) {
+ opt = SSL_OPT_EXPORTCERTDATA;
+ }
+ else if (strcEQ(w, "FakeBasicAuth")) {
+ opt = SSL_OPT_FAKEBASICAUTH;
+ }
+ else if (strcEQ(w, "StrictRequire")) {
+ opt = SSL_OPT_STRICTREQUIRE;
+ }
+ else if (strcEQ(w, "OptRenegotiate")) {
+ opt = SSL_OPT_OPTRENEGOTIATE;
+ }
+ else {
+ return apr_pstrcat(cmd->pool,
+ "SSLOptions: Illegal option '", w, "'",
+ NULL);
+ }
+
+ if (action == '-') {
+ dc->nOptionsAdd &= ~opt;
+ dc->nOptionsDel |= opt;
+ dc->nOptions &= ~opt;
+ }
+ else if (action == '+') {
+ dc->nOptionsAdd |= opt;
+ dc->nOptionsDel &= ~opt;
+ dc->nOptions |= opt;
+ }
+ else {
+ dc->nOptions = opt;
+ dc->nOptionsAdd = opt;
+ dc->nOptionsDel = SSL_OPT_NONE;
+ }
+ }
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLRequireSSL(cmd_parms *cmd, void *dcfg)
+{
+ SSLDirConfigRec *dc = (SSLDirConfigRec *)dcfg;
+
+ dc->bSSLRequired = TRUE;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLRequire(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLDirConfigRec *dc = (SSLDirConfigRec *)dcfg;
+ ssl_expr *expr;
+ ssl_require_t *require;
+
+ if (!(expr = ssl_expr_comp(cmd->pool, (char *)arg))) {
+ return apr_pstrcat(cmd->pool, "SSLRequire: ",
+ ssl_expr_get_error(), NULL);
+ }
+
+ require = apr_array_push(dc->aRequirement);
+ require->cpExpr = apr_pstrdup(cmd->pool, arg);
+ require->mpExpr = expr;
+
+ return NULL;
+}
+
+static const char *ssl_cmd_protocol_parse(cmd_parms *parms,
+ const char *arg,
+ ssl_proto_t *options)
+{
+ ssl_proto_t thisopt;
+
+ *options = SSL_PROTOCOL_NONE;
+
+ while (*arg) {
+ char *w = ap_getword_conf(parms->temp_pool, &arg);
+ char action = '\0';
+
+ if ((*w == '+') || (*w == '-')) {
+ action = *(w++);
+ }
+
+ if (strcEQ(w, "SSLv2")) {
+ thisopt = SSL_PROTOCOL_SSLV2;
+ }
+ else if (strcEQ(w, "SSLv3")) {
+ thisopt = SSL_PROTOCOL_SSLV3;
+ }
+ else if (strcEQ(w, "TLSv1")) {
+ thisopt = SSL_PROTOCOL_TLSV1;
+ }
+ else if (strcEQ(w, "all")) {
+ thisopt = SSL_PROTOCOL_ALL;
+ }
+ else {
+ return apr_pstrcat(parms->temp_pool,
+ parms->cmd->name,
+ ": Illegal protocol '",
+ w, "'", NULL);
+ }
+
+ if (action == '-') {
+ *options &= ~thisopt;
+ }
+ else if (action == '+') {
+ *options |= thisopt;
+ }
+ else {
+ *options = thisopt;
+ }
+ }
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLProtocol(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+
+ return ssl_cmd_protocol_parse(cmd, arg, &sc->server->protocol);
+}
+
+const char *ssl_cmd_SSLProxyEngine(cmd_parms *cmd, void *dcfg, int flag)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+
+ sc->proxy_enabled = flag ? TRUE : FALSE;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLProxyProtocol(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+
+ return ssl_cmd_protocol_parse(cmd, arg, &sc->proxy->protocol);
+}
+
+const char *ssl_cmd_SSLProxyCipherSuite(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+
+ sc->proxy->auth.cipher_suite = arg;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLProxyVerify(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ ssl_verify_t mode;
+ const char *err;
+
+ if ((err = ssl_cmd_verify_parse(cmd, arg, &mode))) {
+ return err;
+ }
+
+ sc->proxy->auth.verify_mode = mode;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLProxyVerifyDepth(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ int depth;
+ const char *err;
+
+ if ((err = ssl_cmd_verify_depth_parse(cmd, arg, &depth))) {
+ return err;
+ }
+
+ sc->proxy->auth.verify_depth = depth;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLProxyCACertificateFile(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ const char *err;
+
+ if ((err = ssl_cmd_check_file(cmd, &arg))) {
+ return err;
+ }
+
+ sc->proxy->auth.ca_cert_file = arg;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLProxyCACertificatePath(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ const char *err;
+
+ if ((err = ssl_cmd_check_dir(cmd, &arg))) {
+ return err;
+ }
+
+ sc->proxy->auth.ca_cert_path = arg;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLProxyCARevocationPath(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ const char *err;
+
+ if ((err = ssl_cmd_check_dir(cmd, &arg))) {
+ return err;
+ }
+
+ sc->proxy->crl_path = arg;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLProxyCARevocationFile(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ const char *err;
+
+ if ((err = ssl_cmd_check_file(cmd, &arg))) {
+ return err;
+ }
+
+ sc->proxy->crl_file = arg;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLProxyMachineCertificateFile(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ const char *err;
+
+ if ((err = ssl_cmd_check_file(cmd, &arg))) {
+ return err;
+ }
+
+ sc->proxy->pkp->cert_file = arg;
+
+ return NULL;
+}
+
+const char *ssl_cmd_SSLProxyMachineCertificatePath(cmd_parms *cmd,
+ void *dcfg,
+ const char *arg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+ const char *err;
+
+ if ((err = ssl_cmd_check_dir(cmd, &arg))) {
+ return err;
+ }
+
+ sc->proxy->pkp->cert_path = arg;
+
+ return NULL;
+}
+
+
+const char *ssl_cmd_SSLUserName(cmd_parms *cmd, void *dcfg,
+ const char *arg)
+{
+ SSLDirConfigRec *dc = (SSLDirConfigRec *)dcfg;
+ dc->szUserName = arg;
+ return NULL;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_dh.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_dh.c
new file mode 100644
index 00000000..ec66f050
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_dh.c
@@ -0,0 +1,207 @@
+#if 0
+=pod
+#endif
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_engine_dh.c
+ * Diffie-Hellman Built-in Temporary Parameters
+ */
+
+#include "mod_ssl.h"
+
+/* ----BEGIN GENERATED SECTION-------- */
+
+/*
+** Diffie-Hellman-Parameters: (512 bit)
+** prime:
+** 00:d4:bc:d5:24:06:f6:9b:35:99:4b:88:de:5d:b8:
+** 96:82:c8:15:7f:62:d8:f3:36:33:ee:57:72:f1:1f:
+** 05:ab:22:d6:b5:14:5b:9f:24:1e:5a:cc:31:ff:09:
+** 0a:4b:c7:11:48:97:6f:76:79:50:94:e7:1e:79:03:
+** 52:9f:5a:82:4b
+** generator: 2 (0x2)
+** Diffie-Hellman-Parameters: (1024 bit)
+** prime:
+** 00:e6:96:9d:3d:49:5b:e3:2c:7c:f1:80:c3:bd:d4:
+** 79:8e:91:b7:81:82:51:bb:05:5e:2a:20:64:90:4a:
+** 79:a7:70:fa:15:a2:59:cb:d5:23:a6:a6:ef:09:c4:
+** 30:48:d5:a2:2f:97:1f:3c:20:12:9b:48:00:0e:6e:
+** dd:06:1c:bc:05:3e:37:1d:79:4e:53:27:df:61:1e:
+** bb:be:1b:ac:9b:5c:60:44:cf:02:3d:76:e0:5e:ea:
+** 9b:ad:99:1b:13:a6:3c:97:4e:9e:f1:83:9e:b5:db:
+** 12:51:36:f7:26:2e:56:a8:87:15:38:df:d8:23:c6:
+** 50:50:85:e2:1f:0d:d5:c8:6b
+** generator: 2 (0x2)
+*/
+
+static unsigned char dh512_p[] =
+{
+ 0xD4, 0xBC, 0xD5, 0x24, 0x06, 0xF6, 0x9B, 0x35, 0x99, 0x4B, 0x88, 0xDE,
+ 0x5D, 0xB8, 0x96, 0x82, 0xC8, 0x15, 0x7F, 0x62, 0xD8, 0xF3, 0x36, 0x33,
+ 0xEE, 0x57, 0x72, 0xF1, 0x1F, 0x05, 0xAB, 0x22, 0xD6, 0xB5, 0x14, 0x5B,
+ 0x9F, 0x24, 0x1E, 0x5A, 0xCC, 0x31, 0xFF, 0x09, 0x0A, 0x4B, 0xC7, 0x11,
+ 0x48, 0x97, 0x6F, 0x76, 0x79, 0x50, 0x94, 0xE7, 0x1E, 0x79, 0x03, 0x52,
+ 0x9F, 0x5A, 0x82, 0x4B,
+};
+static unsigned char dh512_g[] =
+{
+ 0x02,
+};
+
+static DH *get_dh512(void)
+{
+ return modssl_dh_configure(dh512_p, sizeof(dh512_p),
+ dh512_g, sizeof(dh512_g));
+}
+
+static unsigned char dh1024_p[] =
+{
+ 0xE6, 0x96, 0x9D, 0x3D, 0x49, 0x5B, 0xE3, 0x2C, 0x7C, 0xF1, 0x80, 0xC3,
+ 0xBD, 0xD4, 0x79, 0x8E, 0x91, 0xB7, 0x81, 0x82, 0x51, 0xBB, 0x05, 0x5E,
+ 0x2A, 0x20, 0x64, 0x90, 0x4A, 0x79, 0xA7, 0x70, 0xFA, 0x15, 0xA2, 0x59,
+ 0xCB, 0xD5, 0x23, 0xA6, 0xA6, 0xEF, 0x09, 0xC4, 0x30, 0x48, 0xD5, 0xA2,
+ 0x2F, 0x97, 0x1F, 0x3C, 0x20, 0x12, 0x9B, 0x48, 0x00, 0x0E, 0x6E, 0xDD,
+ 0x06, 0x1C, 0xBC, 0x05, 0x3E, 0x37, 0x1D, 0x79, 0x4E, 0x53, 0x27, 0xDF,
+ 0x61, 0x1E, 0xBB, 0xBE, 0x1B, 0xAC, 0x9B, 0x5C, 0x60, 0x44, 0xCF, 0x02,
+ 0x3D, 0x76, 0xE0, 0x5E, 0xEA, 0x9B, 0xAD, 0x99, 0x1B, 0x13, 0xA6, 0x3C,
+ 0x97, 0x4E, 0x9E, 0xF1, 0x83, 0x9E, 0xB5, 0xDB, 0x12, 0x51, 0x36, 0xF7,
+ 0x26, 0x2E, 0x56, 0xA8, 0x87, 0x15, 0x38, 0xDF, 0xD8, 0x23, 0xC6, 0x50,
+ 0x50, 0x85, 0xE2, 0x1F, 0x0D, 0xD5, 0xC8, 0x6B,
+};
+static unsigned char dh1024_g[] =
+{
+ 0x02,
+};
+
+static DH *get_dh1024(void)
+{
+ return modssl_dh_configure(dh1024_p, sizeof(dh1024_p),
+ dh1024_g, sizeof(dh1024_g));
+}
+/* ----END GENERATED SECTION---------- */
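+
+/*
+ * For illustration only: modssl_dh_configure() is provided elsewhere in
+ * mod_ssl.  Conceptually it wraps the raw p/g byte arrays above into an
+ * OpenSSL DH structure, roughly as sketched here against the OpenSSL
+ * 0.9.x API (where the DH members are directly accessible).
+ */
+#if 0
+static DH *example_dh_from_raw(unsigned char *p, int plen,
+                               unsigned char *g, int glen)
+{
+    DH *dh = DH_new();
+
+    if (!dh) {
+        return NULL;
+    }
+
+    dh->p = BN_bin2bn(p, plen, NULL);  /* prime modulus      */
+    dh->g = BN_bin2bn(g, glen, NULL);  /* generator (here 2) */
+
+    if (!dh->p || !dh->g) {
+        DH_free(dh);
+        return NULL;
+    }
+
+    return dh;
+}
+#endif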
+
+DH *ssl_dh_GetTmpParam(int nKeyLen)
+{
+ DH *dh;
+
+ if (nKeyLen == 512)
+ dh = get_dh512();
+ else if (nKeyLen == 1024)
+ dh = get_dh1024();
+ else
+ dh = get_dh1024();
+ return dh;
+}
+
+DH *ssl_dh_GetParamFromFile(char *file)
+{
+ DH *dh = NULL;
+ BIO *bio;
+
+ if ((bio = BIO_new_file(file, "r")) == NULL)
+ return NULL;
+#if SSL_LIBRARY_VERSION < 0x00904000
+ dh = PEM_read_bio_DHparams(bio, NULL, NULL);
+#else
+ dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL);
+#endif
+ BIO_free(bio);
+ return (dh);
+}
+
+/*
+=cut
+##
+## Embedded Perl script for generating the temporary DH parameters
+##
+
+require 5.003;
+use strict;
+
+# configuration
+my $file = $0;
+my $begin = '----BEGIN GENERATED SECTION--------';
+my $end = '----END GENERATED SECTION----------';
+
+# read ourself and keep a backup
+open(FP, "<$file") || die;
+my $source = '';
+$source .= $_ while (<FP>);
+close(FP);
+open(FP, ">$file.bak") || die;
+print FP $source;
+close(FP);
+
+# generate the DH parameters
+print "1. Generate 512 and 1024 bit Diffie-Hellman parameters (p, g)\n";
+my $rand = '';
+foreach $file (qw(/var/log/messages /var/adm/messages
+ /kernel /vmunix /vmlinuz /etc/hosts /etc/resolv.conf)) {
+ if (-f $file) {
+ $rand = $file if ($rand eq '');
+ $rand .= ":$file" if ($rand ne '');
+ }
+}
+$rand = "-rand $rand" if ($rand ne '');
+system("openssl gendh $rand -out dh512.pem 512");
+system("openssl gendh $rand -out dh1024.pem 1024");
+
+# generate DH param info
+my $dhinfo = '';
+open(FP, "openssl dh -noout -text -in dh512.pem |") || die;
+$dhinfo .= $_ while (<FP>);
+close(FP);
+open(FP, "openssl dh -noout -text -in dh1024.pem |") || die;
+$dhinfo .= $_ while (<FP>);
+close(FP);
+$dhinfo =~ s|^|** |mg;
+$dhinfo = "\n\/\*\n$dhinfo\*\/\n\n";
+
+# generate C source from DH params
+my $dhsource = '';
+open(FP, "openssl dh -noout -C -in dh512.pem | indent | expand |") || die;
+$dhsource .= $_ while (<FP>);
+close(FP);
+open(FP, "openssl dh -noout -C -in dh1024.pem | indent | expand |") || die;
+$dhsource .= $_ while (<FP>);
+close(FP);
+$dhsource =~ s|(DH\s+\*get_dh)|static $1|sg;
+
+# generate output
+my $o = $dhinfo . $dhsource;
+
+# insert the generated code at the target location
+$source =~ s|(\/\* $begin.+?\n).*\n(.*?\/\* $end)|$1$o$2|s;
+
+# and update the source on disk
+print "Updating file `$file'\n";
+open(FP, ">$file") || die;
+print FP $source;
+close(FP);
+
+# cleanup
+unlink("dh512.pem");
+unlink("dh1024.pem");
+
+=pod
+*/
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_init.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_init.c
new file mode 100644
index 00000000..92c3395d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_init.c
@@ -0,0 +1,1243 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_engine_init.c
+ * Initialization of Servers
+ */
+ /* ``Recursive, adj.;
+ see Recursive.''
+ -- Unknown */
+#include "mod_ssl.h"
+
+/* _________________________________________________________________
+**
+** Module Initialization
+** _________________________________________________________________
+*/
+
+
+static void ssl_add_version_components(apr_pool_t *p,
+ server_rec *s)
+{
+ char *modver = ssl_var_lookup(p, s, NULL, NULL, "SSL_VERSION_INTERFACE");
+ char *libver = ssl_var_lookup(p, s, NULL, NULL, "SSL_VERSION_LIBRARY");
+ char *incver = ssl_var_lookup(p, s, NULL, NULL,
+ "SSL_VERSION_LIBRARY_INTERFACE");
+
+ ap_add_version_component(p, modver);
+ ap_add_version_component(p, libver);
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "%s compiled against Server: %s, Library: %s",
+ modver, AP_SERVER_BASEVERSION, incver);
+}
+
+
+/*
+ * Initialize SSL library
+ */
+static void ssl_init_SSLLibrary(server_rec *s)
+{
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Init: Initializing %s library", SSL_LIBRARY_NAME);
+
+ SSL_load_error_strings();
+ SSL_library_init();
+ OpenSSL_add_all_algorithms(); /* Required for eg SHA256 client certs */
+}
+
+/*
+ * Handle the Temporary RSA Keys and DH Params
+ */
+
+#define MODSSL_TMP_KEY_FREE(mc, type, idx) \
+ if (mc->pTmpKeys[idx]) { \
+ type##_free((type *)mc->pTmpKeys[idx]); \
+ mc->pTmpKeys[idx] = NULL; \
+ }
+
+#define MODSSL_TMP_KEYS_FREE(mc, type) \
+ MODSSL_TMP_KEY_FREE(mc, type, SSL_TMP_KEY_##type##_512); \
+ MODSSL_TMP_KEY_FREE(mc, type, SSL_TMP_KEY_##type##_1024)
+
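+/*
+ * For reference, MODSSL_TMP_KEYS_FREE(mc, RSA) expands via token
+ * pasting into two guarded frees of the form:
+ *
+ *   if (mc->pTmpKeys[SSL_TMP_KEY_RSA_512]) {
+ *       RSA_free((RSA *)mc->pTmpKeys[SSL_TMP_KEY_RSA_512]);
+ *       mc->pTmpKeys[SSL_TMP_KEY_RSA_512] = NULL;
+ *   }
+ *
+ * and the same again for SSL_TMP_KEY_RSA_1024.
+ */
+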
+static void ssl_tmp_keys_free(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+
+ MODSSL_TMP_KEYS_FREE(mc, RSA);
+ MODSSL_TMP_KEYS_FREE(mc, DH);
+}
+
+static int ssl_tmp_key_init_rsa(server_rec *s,
+ int bits, int idx)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+
+ if (!(mc->pTmpKeys[idx] =
+ RSA_generate_key(bits, RSA_F4, NULL, NULL)))
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Init: Failed to generate temporary "
+ "%d bit RSA private key", bits);
+ return !OK;
+ }
+
+ return OK;
+}
+
+static int ssl_tmp_key_init_dh(server_rec *s,
+ int bits, int idx)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+
+ if (!(mc->pTmpKeys[idx] =
+ ssl_dh_GetTmpParam(bits)))
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Init: Failed to generate temporary "
+ "%d bit DH parameters", bits);
+ return !OK;
+ }
+
+ return OK;
+}
+
+#define MODSSL_TMP_KEY_INIT_RSA(s, bits) \
+ ssl_tmp_key_init_rsa(s, bits, SSL_TMP_KEY_RSA_##bits)
+
+#define MODSSL_TMP_KEY_INIT_DH(s, bits) \
+ ssl_tmp_key_init_dh(s, bits, SSL_TMP_KEY_DH_##bits)
+
+static int ssl_tmp_keys_init(server_rec *s)
+{
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Init: Generating temporary RSA private keys (512/1024 bits)");
+
+ if (MODSSL_TMP_KEY_INIT_RSA(s, 512) ||
+ MODSSL_TMP_KEY_INIT_RSA(s, 1024)) {
+ return !OK;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Init: Generating temporary DH parameters (512/1024 bits)");
+
+ if (MODSSL_TMP_KEY_INIT_DH(s, 512) ||
+ MODSSL_TMP_KEY_INIT_DH(s, 1024)) {
+ return !OK;
+ }
+
+ return OK;
+}
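+
+/*
+ * Note: these pre-generated 512/1024 bit RSA keys and DH parameters are
+ * handed out at handshake time through the temporary-key callbacks
+ * registered in ssl_init_ctx_callbacks() below (SSL_CTX_set_tmp_rsa_callback
+ * and SSL_CTX_set_tmp_dh_callback), the 512 bit variants typically being
+ * used for export-grade ciphers.
+ */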
+
+/*
+ * Per-module initialization
+ */
+int ssl_init_Module(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp,
+ server_rec *base_server)
+{
+ SSLModConfigRec *mc = myModConfig(base_server);
+ SSLSrvConfigRec *sc;
+ server_rec *s;
+
+ /*
+     * Let us clean up on restarts and exits
+ */
+ apr_pool_cleanup_register(p, base_server,
+ ssl_init_ModuleKill,
+ apr_pool_cleanup_null);
+
+ /*
+ * Any init round fixes the global config
+ */
+ ssl_config_global_create(base_server); /* just to avoid problems */
+ ssl_config_global_fix(mc);
+
+ /*
+ * try to fix the configuration and open the dedicated SSL
+ * logfile as early as possible
+ */
+ for (s = base_server; s; s = s->next) {
+ sc = mySrvConfig(s);
+
+ if (sc->server) {
+ sc->server->sc = sc;
+ }
+
+ if (sc->proxy) {
+ sc->proxy->sc = sc;
+ }
+
+ /*
+ * Create the server host:port string because we need it a lot
+ */
+ sc->vhost_id = ssl_util_vhostid(p, s);
+ sc->vhost_id_len = strlen(sc->vhost_id);
+
+ /* Fix up stuff that may not have been set */
+ if (sc->enabled == UNSET) {
+ sc->enabled = FALSE;
+ }
+
+ if (sc->proxy_enabled == UNSET) {
+ sc->proxy_enabled = FALSE;
+ }
+
+ if (sc->session_cache_timeout == UNSET) {
+ sc->session_cache_timeout = SSL_SESSION_CACHE_TIMEOUT;
+ }
+
+ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_UNSET) {
+ sc->server->pphrase_dialog_type = SSL_PPTYPE_BUILTIN;
+ }
+
+ }
+
+#if APR_HAS_THREADS
+ ssl_util_thread_setup(p);
+#endif
+
+ /*
+ * SSL external crypto device ("engine") support
+ */
+#ifdef SSL_EXPERIMENTAL_ENGINE
+ ssl_init_Engine(base_server, p);
+#endif
+
+ ssl_init_SSLLibrary(base_server);
+
+ /*
+     * Seed the Pseudo Random Number Generator (PRNG).
+     * Only ptemp is needed here; nothing allocated from the pool inside
+     * ssl_rand_seed() needs to live once we return from it.
+ */
+ ssl_rand_seed(base_server, ptemp, SSL_RSCTX_STARTUP, "Init: ");
+
+ /*
+     * Read server private keys/public certs into memory, decrypting any
+     * encrypted keys via the configured SSLPassPhraseDialog.
+     * Anything that needs to live longer than ptemp must also survive
+     * restarts, in which case it lives inside s->process->pool.
+ */
+ ssl_pphrase_Handle(base_server, ptemp);
+
+ if (ssl_tmp_keys_init(base_server)) {
+ return !OK;
+ }
+
+ /*
+ * initialize the mutex handling
+ */
+ if (!ssl_mutex_init(base_server, p)) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /*
+ * initialize session caching
+ */
+ ssl_scache_init(base_server, p);
+
+ /*
+ * initialize servers
+ */
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, base_server,
+ "Init: Initializing (virtual) servers for SSL");
+
+ for (s = base_server; s; s = s->next) {
+ sc = mySrvConfig(s);
+ /*
+ * Either now skip this server when SSL is disabled for
+ * it or give out some information about what we're
+ * configuring.
+ */
+
+ /*
+ * Read the server certificate and key
+ */
+ ssl_init_ConfigureServer(s, p, ptemp, sc);
+ }
+
+ /*
+ * Configuration consistency checks
+ */
+ ssl_init_CheckServers(base_server, ptemp);
+
+ /*
+ * Announce mod_ssl and SSL library in HTTP Server field
+ * as ``mod_ssl/X.X.X OpenSSL/X.X.X''
+ */
+ ssl_add_version_components(p, base_server);
+
+ SSL_init_app_data2_idx(); /* for SSL_get_app_data2() at request time */
+
+ return OK;
+}
+
+/*
+ * Support for an external Crypto Device ("engine"), usually
+ * a hardware accelerator card for crypto operations.
+ */
+#ifdef SSL_EXPERIMENTAL_ENGINE
+void ssl_init_Engine(server_rec *s, apr_pool_t *p)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ ENGINE *e;
+
+ if (mc->szCryptoDevice) {
+ if (!(e = ENGINE_by_id(mc->szCryptoDevice))) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Init: Failed to load Crypto Device API `%s'",
+ mc->szCryptoDevice);
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ ssl_die();
+ }
+
+ if (strEQ(mc->szCryptoDevice, "chil")) {
+ ENGINE_ctrl(e, ENGINE_CTRL_CHIL_SET_FORKCHECK, 1, 0, 0);
+ }
+
+ if (!ENGINE_set_default(e, ENGINE_METHOD_ALL)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Init: Failed to enable Crypto Device API `%s'",
+ mc->szCryptoDevice);
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ ssl_die();
+ }
+
+ ENGINE_free(e);
+ }
+}
+#endif
+
+static void ssl_init_server_check(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ modssl_ctx_t *mctx)
+{
+ /*
+ * check for important parameters and the
+ * possibility that the user forgot to set them.
+ */
+ if (!mctx->pks->cert_files[0]) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "No SSL Certificate set [hint: SSLCertificateFile]");
+ ssl_die();
+ }
+
+ /*
+ * Check for problematic re-initializations
+ */
+ if (mctx->pks->certs[SSL_AIDX_RSA] ||
+ mctx->pks->certs[SSL_AIDX_DSA])
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Illegal attempt to re-initialise SSL for server "
+ "(theoretically shouldn't happen!)");
+ ssl_die();
+ }
+}
+
+static void ssl_init_ctx_protocol(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ modssl_ctx_t *mctx)
+{
+ SSL_CTX *ctx = NULL;
+ SSL_METHOD *method = NULL;
+ char *cp;
+ int protocol = mctx->protocol;
+ SSLSrvConfigRec *sc = mySrvConfig(s);
+
+ /*
+ * Create the new per-server SSL context
+ */
+ if (protocol == SSL_PROTOCOL_NONE) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "No SSL protocols available [hint: SSLProtocol]");
+ ssl_die();
+ }
+
+ cp = apr_pstrcat(p,
+ (protocol & SSL_PROTOCOL_SSLV2 ? "SSLv2, " : ""),
+ (protocol & SSL_PROTOCOL_SSLV3 ? "SSLv3, " : ""),
+ (protocol & SSL_PROTOCOL_TLSV1 ? "TLSv1, " : ""),
+ NULL);
+ cp[strlen(cp)-2] = NUL;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "Creating new SSL context (protocols: %s)", cp);
+
+ if (protocol == SSL_PROTOCOL_SSLV2) {
+ method = mctx->pkp ?
+ SSLv2_client_method() : /* proxy */
+ SSLv2_server_method(); /* server */
+ ctx = SSL_CTX_new(method); /* only SSLv2 is left */
+ }
+ else {
+ method = mctx->pkp ?
+ SSLv23_client_method() : /* proxy */
+ SSLv23_server_method(); /* server */
+ ctx = SSL_CTX_new(method); /* be more flexible */
+ }
+
+ mctx->ssl_ctx = ctx;
+
+ SSL_CTX_set_options(ctx, SSL_OP_ALL);
+
+ if (!(protocol & SSL_PROTOCOL_SSLV2)) {
+ SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv2);
+ }
+
+ if (!(protocol & SSL_PROTOCOL_SSLV3)) {
+ SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv3);
+ }
+
+ if (!(protocol & SSL_PROTOCOL_TLSV1)) {
+ SSL_CTX_set_options(ctx, SSL_OP_NO_TLSv1);
+ }
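+
+    /*
+     * Example: with "SSLProtocol all -SSLv2" the mask contains SSLv3 and
+     * TLSv1 but not SSLv2, so the flexible SSLv23 method was chosen above
+     * and, of the three exclusion options, only SSL_OP_NO_SSLv2 is set;
+     * SSLv3 and TLSv1 handshakes remain possible.
+     */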
+
+#ifdef SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION
+ if (sc->insecure_reneg == TRUE) {
+ SSL_CTX_set_options(ctx, SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION);
+ }
+#endif
+
+ SSL_CTX_set_app_data(ctx, s);
+
+ /*
+ * Configure additional context ingredients
+ */
+ SSL_CTX_set_options(ctx, SSL_OP_SINGLE_DH_USE);
+
+#ifdef SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION
+ /*
+ * Disallow a session from being resumed during a renegotiation,
+ * so that an acceptable cipher suite can be negotiated.
+ */
+ SSL_CTX_set_options(ctx, SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION);
+#endif
+}
+
+static void ssl_init_ctx_session_cache(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ modssl_ctx_t *mctx)
+{
+ SSL_CTX *ctx = mctx->ssl_ctx;
+ SSLModConfigRec *mc = myModConfig(s);
+ long cache_mode = SSL_SESS_CACHE_OFF;
+
+ if (mc->nSessionCacheMode != SSL_SCMODE_NONE) {
+ /* SSL_SESS_CACHE_NO_INTERNAL will force OpenSSL
+         * to ignore process-local caching and
+ * to always get/set/delete sessions using mod_ssl's callbacks.
+ */
+ cache_mode = SSL_SESS_CACHE_SERVER|SSL_SESS_CACHE_NO_INTERNAL;
+ }
+
+ SSL_CTX_set_session_cache_mode(ctx, cache_mode);
+
+ SSL_CTX_sess_set_new_cb(ctx, ssl_callback_NewSessionCacheEntry);
+ SSL_CTX_sess_set_get_cb(ctx, ssl_callback_GetSessionCacheEntry);
+ SSL_CTX_sess_set_remove_cb(ctx, ssl_callback_DelSessionCacheEntry);
+}
+
+static void ssl_init_ctx_callbacks(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ modssl_ctx_t *mctx)
+{
+ SSL_CTX *ctx = mctx->ssl_ctx;
+
+ SSL_CTX_set_tmp_rsa_callback(ctx, ssl_callback_TmpRSA);
+ SSL_CTX_set_tmp_dh_callback(ctx, ssl_callback_TmpDH);
+
+ SSL_CTX_set_info_callback(ctx, ssl_callback_Info);
+}
+
+static void ssl_init_ctx_verify(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ modssl_ctx_t *mctx)
+{
+ SSL_CTX *ctx = mctx->ssl_ctx;
+
+ int verify = SSL_VERIFY_NONE;
+ STACK_OF(X509_NAME) *ca_list;
+
+ if (mctx->auth.verify_mode == SSL_CVERIFY_UNSET) {
+ mctx->auth.verify_mode = SSL_CVERIFY_NONE;
+ }
+
+ if (mctx->auth.verify_depth == UNSET) {
+ mctx->auth.verify_depth = 1;
+ }
+
+ /*
+ * Configure callbacks for SSL context
+ */
+ if (mctx->auth.verify_mode == SSL_CVERIFY_REQUIRE) {
+ verify |= SSL_VERIFY_PEER_STRICT;
+ }
+
+ if ((mctx->auth.verify_mode == SSL_CVERIFY_OPTIONAL) ||
+ (mctx->auth.verify_mode == SSL_CVERIFY_OPTIONAL_NO_CA))
+ {
+ verify |= SSL_VERIFY_PEER;
+ }
+
+ SSL_CTX_set_verify(ctx, verify, ssl_callback_SSLVerify);
+
+ /*
+ * Configure Client Authentication details
+ */
+ if (mctx->auth.ca_cert_file || mctx->auth.ca_cert_path) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "Configuring client authentication");
+
+ if (!SSL_CTX_load_verify_locations(ctx,
+ MODSSL_PCHAR_CAST mctx->auth.ca_cert_file,
+ MODSSL_PCHAR_CAST mctx->auth.ca_cert_path))
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Unable to configure verify locations "
+ "for client authentication");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ ssl_die();
+ }
+
+ ca_list = ssl_init_FindCAList(s, ptemp,
+ mctx->auth.ca_cert_file,
+ mctx->auth.ca_cert_path);
+ if (!ca_list) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Unable to determine list of available "
+ "CA certificates for client authentication");
+ ssl_die();
+ }
+
+ SSL_CTX_set_client_CA_list(ctx, (STACK *)ca_list);
+ }
+
+ /*
+ * Give a warning when no CAs were configured but client authentication
+ * should take place. This cannot work.
+ */
+ if (mctx->auth.verify_mode == SSL_CVERIFY_REQUIRE) {
+ ca_list = (STACK_OF(X509_NAME) *)SSL_CTX_get_client_CA_list(ctx);
+
+ if (sk_X509_NAME_num(ca_list) == 0) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "Init: Oops, you want to request client "
+ "authentication, but no CAs are known for "
+ "verification!? [Hint: SSLCACertificate*]");
+ }
+ }
+}
+
+static void ssl_init_ctx_cipher_suite(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ modssl_ctx_t *mctx)
+{
+ SSL_CTX *ctx = mctx->ssl_ctx;
+ const char *suite = mctx->auth.cipher_suite;
+
+ /*
+ * Configure SSL Cipher Suite
+ */
+ if (!suite) {
+ return;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "Configuring permitted SSL ciphers [%s]",
+ suite);
+
+ if (!SSL_CTX_set_cipher_list(ctx, MODSSL_PCHAR_CAST suite)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Unable to configure permitted SSL ciphers");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ ssl_die();
+ }
+}
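+
+/*
+ * Example: a cipher string in standard OpenSSL syntax such as
+ * "HIGH:MEDIUM:!aNULL" is passed through to the library verbatim;
+ * SSL_CTX_set_cipher_list() returns 0 when none of the requested
+ * ciphers can be selected, which is treated above as a fatal
+ * configuration error.
+ */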
+
+static void ssl_init_ctx_crl(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ modssl_ctx_t *mctx)
+{
+ /*
+ * Configure Certificate Revocation List (CRL) Details
+ */
+
+ if (!(mctx->crl_file || mctx->crl_path)) {
+ return;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "Configuring certificate revocation facility");
+
+ mctx->crl =
+ SSL_X509_STORE_create((char *)mctx->crl_file,
+ (char *)mctx->crl_path);
+
+ if (!mctx->crl) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Unable to configure X.509 CRL storage "
+ "for certificate revocation");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ ssl_die();
+ }
+}
+
+static void ssl_init_ctx_cert_chain(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ modssl_ctx_t *mctx)
+{
+ BOOL skip_first = FALSE;
+ int i, n;
+ const char *chain = mctx->cert_chain;
+
+ /*
+ * Optionally configure extra server certificate chain certificates.
+     * This is usually done automatically by OpenSSL when one of the
+     * server cert issuers is found under SSLCACertificatePath or in
+     * SSLCACertificateFile. But because those locations are intended
+     * for client authentication, this can conflict. For instance, when
+     * you use a Global ID server certificate you have to send out the
+     * intermediate CA certificate, too. If you just configured it with
+     * SSLCACertificateFile and also used client authentication, mod_ssl
+     * would accept all clients issued by this CA as well. Obviously this
+     * isn't what we want in this situation. So this feature exists to
+     * allow one to explicitly configure CA certificates which are used
+     * only for the server certificate chain.
+ */
+ if (!chain) {
+ return;
+ }
+
+ for (i = 0; (i < SSL_AIDX_MAX) && mctx->pks->cert_files[i]; i++) {
+ if (strEQ(mctx->pks->cert_files[i], chain)) {
+ skip_first = TRUE;
+ break;
+ }
+ }
+
+ n = SSL_CTX_use_certificate_chain(mctx->ssl_ctx,
+ (char *)chain,
+ skip_first, NULL);
+ if (n < 0) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Failed to configure CA certificate chain!");
+ ssl_die();
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "Configuring server certificate chain "
+ "(%d CA certificate%s)",
+ n, n == 1 ? "" : "s");
+}
+
+static void ssl_init_ctx(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ modssl_ctx_t *mctx)
+{
+ ssl_init_ctx_protocol(s, p, ptemp, mctx);
+
+ ssl_init_ctx_session_cache(s, p, ptemp, mctx);
+
+ ssl_init_ctx_callbacks(s, p, ptemp, mctx);
+
+ ssl_init_ctx_verify(s, p, ptemp, mctx);
+
+ ssl_init_ctx_cipher_suite(s, p, ptemp, mctx);
+
+ ssl_init_ctx_crl(s, p, ptemp, mctx);
+
+ if (mctx->pks) {
+ /* XXX: proxy support? */
+ ssl_init_ctx_cert_chain(s, p, ptemp, mctx);
+ }
+}
+
+static int ssl_server_import_cert(server_rec *s,
+ modssl_ctx_t *mctx,
+ const char *id,
+ int idx)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ ssl_asn1_t *asn1;
+ MODSSL_D2I_X509_CONST unsigned char *ptr;
+ const char *type = ssl_asn1_keystr(idx);
+ X509 *cert;
+
+ if (!(asn1 = ssl_asn1_table_get(mc->tPublicCert, id))) {
+ return FALSE;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "Configuring %s server certificate", type);
+
+ ptr = asn1->cpData;
+ if (!(cert = d2i_X509(NULL, &ptr, asn1->nData))) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Unable to import %s server certificate", type);
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ ssl_die();
+ }
+
+ if (SSL_CTX_use_certificate(mctx->ssl_ctx, cert) <= 0) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Unable to configure %s server certificate", type);
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ ssl_die();
+ }
+
+ mctx->pks->certs[idx] = cert;
+
+ return TRUE;
+}
+
+static int ssl_server_import_key(server_rec *s,
+ modssl_ctx_t *mctx,
+ const char *id,
+ int idx)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ ssl_asn1_t *asn1;
+ MODSSL_D2I_PrivateKey_CONST unsigned char *ptr;
+ const char *type = ssl_asn1_keystr(idx);
+ int pkey_type = (idx == SSL_AIDX_RSA) ? EVP_PKEY_RSA : EVP_PKEY_DSA;
+ EVP_PKEY *pkey;
+
+ if (!(asn1 = ssl_asn1_table_get(mc->tPrivateKey, id))) {
+ return FALSE;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "Configuring %s server private key", type);
+
+ ptr = asn1->cpData;
+ if (!(pkey = d2i_PrivateKey(pkey_type, NULL, &ptr, asn1->nData)))
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Unable to import %s server private key", type);
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ ssl_die();
+ }
+
+ if (SSL_CTX_use_PrivateKey(mctx->ssl_ctx, pkey) <= 0) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Unable to configure %s server private key", type);
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ ssl_die();
+ }
+
+ /*
+     * XXX: wonder if this is still needed; it comes from an old todo doc.
+ * (see http://www.psy.uq.edu.au/~ftp/Crypto/ssleay/TODO.html)
+ */
+ if ((pkey_type == EVP_PKEY_DSA) && mctx->pks->certs[idx]) {
+ EVP_PKEY *pubkey = X509_get_pubkey(mctx->pks->certs[idx]);
+
+ if (pubkey && EVP_PKEY_missing_parameters(pubkey)) {
+ EVP_PKEY_copy_parameters(pubkey, pkey);
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Copying DSA parameters from private key to certificate");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ EVP_PKEY_free(pubkey);
+ }
+ }
+
+ mctx->pks->keys[idx] = pkey;
+
+ return TRUE;
+}
+
+static void ssl_check_public_cert(server_rec *s,
+ apr_pool_t *ptemp,
+ X509 *cert,
+ int type)
+{
+ int is_ca, pathlen;
+ char *cn;
+
+ if (!cert) {
+ return;
+ }
+
+ /*
+ * Some information about the certificate(s)
+ */
+
+ if (SSL_X509_isSGC(cert)) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "%s server certificate enables "
+ "Server Gated Cryptography (SGC)",
+ ssl_asn1_keystr(type));
+ }
+
+ if (SSL_X509_getBC(cert, &is_ca, &pathlen)) {
+ if (is_ca) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "%s server certificate is a CA certificate "
+ "(BasicConstraints: CA == TRUE !?)",
+ ssl_asn1_keystr(type));
+ }
+
+ if (pathlen > 0) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "%s server certificate is not a leaf certificate "
+ "(BasicConstraints: pathlen == %d > 0 !?)",
+ ssl_asn1_keystr(type), pathlen);
+ }
+ }
+
+ if (SSL_X509_getCN(ptemp, cert, &cn)) {
+ int fnm_flags = FNM_PERIOD|FNM_CASE_BLIND;
+
+ if (apr_fnmatch_test(cn) &&
+ (apr_fnmatch(cn, s->server_hostname,
+ fnm_flags) == FNM_NOMATCH))
+ {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "%s server certificate wildcard CommonName (CN) `%s' "
+ "does NOT match server name!?",
+ ssl_asn1_keystr(type), cn);
+ }
+ else if (strNE(s->server_hostname, cn)) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "%s server certificate CommonName (CN) `%s' "
+ "does NOT match server name!?",
+ ssl_asn1_keystr(type), cn);
+ }
+ }
+}
+
+static void ssl_init_server_certs(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ modssl_ctx_t *mctx)
+{
+ const char *rsa_id, *dsa_id;
+ const char *vhost_id = mctx->sc->vhost_id;
+ int i;
+ int have_rsa, have_dsa;
+
+ rsa_id = ssl_asn1_table_keyfmt(ptemp, vhost_id, SSL_AIDX_RSA);
+ dsa_id = ssl_asn1_table_keyfmt(ptemp, vhost_id, SSL_AIDX_DSA);
+
+ have_rsa = ssl_server_import_cert(s, mctx, rsa_id, SSL_AIDX_RSA);
+ have_dsa = ssl_server_import_cert(s, mctx, dsa_id, SSL_AIDX_DSA);
+
+ if (!(have_rsa || have_dsa)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Oops, no RSA or DSA server certificate found?!");
+ ssl_die();
+ }
+
+ for (i = 0; i < SSL_AIDX_MAX; i++) {
+ ssl_check_public_cert(s, ptemp, mctx->pks->certs[i], i);
+ }
+
+ have_rsa = ssl_server_import_key(s, mctx, rsa_id, SSL_AIDX_RSA);
+ have_dsa = ssl_server_import_key(s, mctx, dsa_id, SSL_AIDX_DSA);
+
+ if (!(have_rsa || have_dsa)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Oops, no RSA or DSA server private key found?!");
+ ssl_die();
+ }
+}
+
+static void ssl_init_proxy_certs(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ modssl_ctx_t *mctx)
+{
+ int n, ncerts = 0;
+ STACK_OF(X509_INFO) *sk;
+ modssl_pk_proxy_t *pkp = mctx->pkp;
+
+ SSL_CTX_set_client_cert_cb(mctx->ssl_ctx,
+ ssl_callback_proxy_cert);
+
+ if (!(pkp->cert_file || pkp->cert_path)) {
+ return;
+ }
+
+ sk = sk_X509_INFO_new_null();
+
+ if (pkp->cert_file) {
+ SSL_X509_INFO_load_file(ptemp, sk, pkp->cert_file);
+ }
+
+ if (pkp->cert_path) {
+ SSL_X509_INFO_load_path(ptemp, sk, pkp->cert_path);
+ }
+
+ if ((ncerts = sk_X509_INFO_num(sk)) <= 0) {
+ sk_X509_INFO_free(sk);
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "no client certs found for SSL proxy");
+ return;
+ }
+
+    /* Check that every client cert entry carries both a certificate
+     * and a private key. */
+ for (n = 0; n < ncerts; n++) {
+ X509_INFO *inf = sk_X509_INFO_value(sk, n);
+
+ if (!inf->x509 || !inf->x_pkey) {
+ sk_X509_INFO_free(sk);
+ ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, s,
+ "incomplete client cert configured for SSL proxy "
+ "(missing or encrypted private key?)");
+ ssl_die();
+ return;
+ }
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "loaded %d client certs for SSL proxy",
+ ncerts);
+ pkp->certs = sk;
+}
+
+static void ssl_init_proxy_ctx(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ SSLSrvConfigRec *sc)
+{
+ ssl_init_ctx(s, p, ptemp, sc->proxy);
+
+ ssl_init_proxy_certs(s, p, ptemp, sc->proxy);
+}
+
+static void ssl_init_server_ctx(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ SSLSrvConfigRec *sc)
+{
+ ssl_init_server_check(s, p, ptemp, sc->server);
+
+ ssl_init_ctx(s, p, ptemp, sc->server);
+
+ ssl_init_server_certs(s, p, ptemp, sc->server);
+}
+
+/*
+ * Configure a particular server
+ */
+void ssl_init_ConfigureServer(server_rec *s,
+ apr_pool_t *p,
+ apr_pool_t *ptemp,
+ SSLSrvConfigRec *sc)
+{
+ if (sc->enabled) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Configuring server for SSL protocol");
+ ssl_init_server_ctx(s, p, ptemp, sc);
+ }
+
+ if (sc->proxy_enabled) {
+ ssl_init_proxy_ctx(s, p, ptemp, sc);
+ }
+}
+
+void ssl_init_CheckServers(server_rec *base_server, apr_pool_t *p)
+{
+ server_rec *s, *ps;
+ SSLSrvConfigRec *sc;
+ apr_hash_t *table;
+ const char *key;
+ apr_ssize_t klen;
+
+ BOOL conflict = FALSE;
+
+ /*
+ * Give out warnings when a server has HTTPS configured
+ * for the HTTP port or vice versa
+ */
+ for (s = base_server; s; s = s->next) {
+ sc = mySrvConfig(s);
+
+ if (sc->enabled && (s->port == DEFAULT_HTTP_PORT)) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0,
+ base_server,
+ "Init: (%s) You configured HTTPS(%d) "
+ "on the standard HTTP(%d) port!",
+ ssl_util_vhostid(p, s),
+ DEFAULT_HTTPS_PORT, DEFAULT_HTTP_PORT);
+ }
+
+ if (!sc->enabled && (s->port == DEFAULT_HTTPS_PORT)) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0,
+ base_server,
+ "Init: (%s) You configured HTTP(%d) "
+ "on the standard HTTPS(%d) port!",
+ ssl_util_vhostid(p, s),
+ DEFAULT_HTTP_PORT, DEFAULT_HTTPS_PORT);
+ }
+ }
+
+ /*
+ * Give out warnings when more than one SSL-aware virtual server uses the
+     * same IP:port. This doesn't work because mod_ssl will then always use
+     * just the certificate/keys of one virtual host (which one cannot easily
+     * be predicted - but that doesn't matter here).
+ */
+ table = apr_hash_make(p);
+
+ for (s = base_server; s; s = s->next) {
+ sc = mySrvConfig(s);
+
+ if (!(sc->enabled && s->addrs)) {
+ continue;
+ }
+
+ key = apr_psprintf(p, "%pA:%u",
+ &s->addrs->host_addr, s->addrs->host_port);
+ klen = strlen(key);
+
+ if ((ps = (server_rec *)apr_hash_get(table, key, klen))) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0,
+ base_server,
+ "Init: SSL server IP/port conflict: "
+ "%s (%s:%d) vs. %s (%s:%d)",
+ ssl_util_vhostid(p, s),
+ (s->defn_name ? s->defn_name : "unknown"),
+ s->defn_line_number,
+ ssl_util_vhostid(p, ps),
+ (ps->defn_name ? ps->defn_name : "unknown"),
+ ps->defn_line_number);
+ conflict = TRUE;
+ continue;
+ }
+
+ apr_hash_set(table, key, klen, s);
+ }
+
+ if (conflict) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, base_server,
+ "Init: You should not use name-based "
+ "virtual hosts in conjunction with SSL!!");
+ }
+}
+
+#ifdef SSLC_VERSION_NUMBER
+static int ssl_init_FindCAList_X509NameCmp(char **a, char **b)
+{
+ return(X509_NAME_cmp((void*)*a, (void*)*b));
+}
+#else
+static int ssl_init_FindCAList_X509NameCmp(X509_NAME **a, X509_NAME **b)
+{
+ return(X509_NAME_cmp(*a, *b));
+}
+#endif
+
+static void ssl_init_PushCAList(STACK_OF(X509_NAME) *ca_list,
+ server_rec *s, const char *file)
+{
+ int n;
+ STACK_OF(X509_NAME) *sk;
+
+ sk = (STACK_OF(X509_NAME) *)
+ SSL_load_client_CA_file(MODSSL_PCHAR_CAST file);
+
+ if (!sk) {
+ return;
+ }
+
+ for (n = 0; n < sk_X509_NAME_num(sk); n++) {
+ char name_buf[256];
+ X509_NAME *name = sk_X509_NAME_value(sk, n);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "CA certificate: %s",
+ X509_NAME_oneline(name, name_buf, sizeof(name_buf)));
+
+ /*
+ * note that SSL_load_client_CA_file() checks for duplicates,
+ * but since we call it multiple times when reading a directory
+ * we must also check for duplicates ourselves.
+ */
+
+ if (sk_X509_NAME_find(ca_list, name) < 0) {
+ /* this will be freed when ca_list is */
+ sk_X509_NAME_push(ca_list, name);
+ }
+ else {
+ /* need to free this ourselves, else it will leak */
+ X509_NAME_free(name);
+ }
+ }
+
+ sk_X509_NAME_free(sk);
+}
+
+STACK_OF(X509_NAME) *ssl_init_FindCAList(server_rec *s,
+ apr_pool_t *ptemp,
+ const char *ca_file,
+ const char *ca_path)
+{
+ STACK_OF(X509_NAME) *ca_list;
+
+ /*
+     * Start with an empty stack/list where new
+ * entries get added in sorted order.
+ */
+ ca_list = sk_X509_NAME_new(ssl_init_FindCAList_X509NameCmp);
+
+ /*
+ * Process CA certificate bundle file
+ */
+ if (ca_file) {
+ ssl_init_PushCAList(ca_list, s, ca_file);
+ }
+
+ /*
+ * Process CA certificate path files
+ */
+ if (ca_path) {
+ apr_dir_t *dir;
+ apr_finfo_t direntry;
+ apr_int32_t finfo_flags = APR_FINFO_TYPE|APR_FINFO_NAME;
+ apr_status_t rv;
+
+ if ((rv = apr_dir_open(&dir, ca_path, ptemp)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Failed to open SSLCACertificatePath `%s'",
+ ca_path);
+ ssl_die();
+ }
+
+ while ((apr_dir_read(&direntry, finfo_flags, dir)) == APR_SUCCESS) {
+ const char *file;
+ if (direntry.filetype == APR_DIR) {
+ continue; /* don't try to load directories */
+ }
+ file = apr_pstrcat(ptemp, ca_path, "/", direntry.name, NULL);
+ ssl_init_PushCAList(ca_list, s, file);
+ }
+
+ apr_dir_close(dir);
+ }
+
+ /*
+ * Cleanup
+ */
+ sk_X509_NAME_set_cmp_func(ca_list, NULL);
+
+ return ca_list;
+}
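+
+/*
+ * Minimal standalone sketch (illustrative only, not part of the module)
+ * of the underlying OpenSSL calls when only a single CA bundle file is
+ * involved: load the CA names to advertise for client authentication
+ * and attach them to a context.
+ */
+#if 0
+static void example_set_client_ca_list(SSL_CTX *ctx, const char *bundle)
+{
+    STACK_OF(X509_NAME) *names = SSL_load_client_CA_file(bundle);
+
+    if (names != NULL) {
+        /* the context takes ownership of the stack */
+        SSL_CTX_set_client_CA_list(ctx, names);
+    }
+}
+#endif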
+
+void ssl_init_Child(apr_pool_t *p, server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ mc->pid = getpid(); /* only call getpid() once per-process */
+
+ /* XXX: there should be an ap_srand() function */
+ srand((unsigned int)time(NULL));
+
+ /* open the mutex lockfile */
+ ssl_mutex_reinit(s, p);
+}
+
+#define MODSSL_CFG_ITEM_FREE(func, item) \
+ if (item) { \
+ func(item); \
+ item = NULL; \
+ }
+
+static void ssl_init_ctx_cleanup(modssl_ctx_t *mctx)
+{
+ MODSSL_CFG_ITEM_FREE(X509_STORE_free, mctx->crl);
+
+ MODSSL_CFG_ITEM_FREE(SSL_CTX_free, mctx->ssl_ctx);
+}
+
+static void ssl_init_ctx_cleanup_proxy(modssl_ctx_t *mctx)
+{
+ ssl_init_ctx_cleanup(mctx);
+
+ if (mctx->pkp->certs) {
+ sk_X509_INFO_pop_free(mctx->pkp->certs, X509_INFO_free);
+ }
+}
+
+static void ssl_init_ctx_cleanup_server(modssl_ctx_t *mctx)
+{
+ int i;
+
+ ssl_init_ctx_cleanup(mctx);
+
+ for (i=0; i < SSL_AIDX_MAX; i++) {
+ MODSSL_CFG_ITEM_FREE(X509_free,
+ mctx->pks->certs[i]);
+
+ MODSSL_CFG_ITEM_FREE(EVP_PKEY_free,
+ mctx->pks->keys[i]);
+ }
+}
+
+apr_status_t ssl_init_ModuleKill(void *data)
+{
+ SSLSrvConfigRec *sc;
+ server_rec *base_server = (server_rec *)data;
+ server_rec *s;
+
+ /*
+ * Drop the session cache and mutex
+ */
+ ssl_scache_kill(base_server);
+
+ /*
+ * Destroy the temporary keys and params
+ */
+ ssl_tmp_keys_free(base_server);
+
+ /*
+ * Free the non-pool allocated structures
+ * in the per-server configurations
+ */
+ for (s = base_server; s; s = s->next) {
+ sc = mySrvConfig(s);
+
+ ssl_init_ctx_cleanup_proxy(sc->proxy);
+
+ ssl_init_ctx_cleanup_server(sc->server);
+ }
+
+ /*
+ * Try to kill the internals of the SSL library.
+ */
+ ERR_remove_state(0);
+ EVP_cleanup();
+
+ return APR_SUCCESS;
+}
+
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_io.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_io.c
new file mode 100644
index 00000000..c5fe6b8c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_io.c
@@ -0,0 +1,1746 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_engine_io.c
+ * I/O Functions
+ */
+ /* ``MY HACK: This universe.
+ Just one little problem:
+ core keeps dumping.''
+ -- Unknown */
+#include "mod_ssl.h"
+
+/* _________________________________________________________________
+**
+** I/O Hooks
+** _________________________________________________________________
+*/
+
+/* This file is designed to be the bridge between OpenSSL and httpd.
+ * However, we really don't expect anyone (let alone ourselves) to
+ * remember what is in this file. So, first, a quick overview.
+ *
+ * In this file, you will find:
+ * - ssl_io_filter_input (Apache input filter)
+ * - ssl_io_filter_output (Apache output filter)
+ *
+ * - bio_filter_in_* (OpenSSL input filter)
+ * - bio_filter_out_* (OpenSSL output filter)
+ *
+ * The input chain is roughly:
+ *
+ * ssl_io_filter_input->ssl_io_input_read->SSL_read->...
+ * ...->bio_filter_in_read->ap_get_brigade/next-httpd-filter
+ *
+ * In mortal terminology, we do the following:
+ * - Receive a request for data to the SSL input filter
+ * - Call a helper function once we know we should perform a read
+ * - Call OpenSSL's SSL_read()
+ * - SSL_read() will then call bio_filter_in_read
+ * - bio_filter_in_read will then try to fetch data from the next httpd filter
+ * - bio_filter_in_read will flatten that data and return it to SSL_read
+ * - SSL_read will then decrypt the data
+ * - ssl_io_input_read will then receive decrypted data as a char* and
+ * ensure that there were no read errors
+ * - The char* is placed in a brigade and returned
+ *
+ * Since connection-level input filters in httpd need to be able to
+ * handle AP_MODE_GETLINE calls (namely identifying LF-terminated strings),
+ * ssl_io_input_getline handles this special case.
+ *
+ * Due to AP_MODE_GETLINE and AP_MODE_SPECULATIVE, we may sometimes have
+ * 'leftover' decoded data which must be set aside for the next read.  That
+ * is currently handled by the char_buffer_{read|write} functions. So,
+ * ssl_io_input_read may be able to fulfill reads without invoking
+ * SSL_read().
+ *
+ * Note that the filter context of ssl_io_filter_input and bio_filter_in_*
+ * are shared as bio_filter_in_ctx_t.
+ *
+ * Note that the filter is by choice limited to reading at most
+ * AP_IOBUFSIZE (8192 bytes) per call.
+ *
+ */
+
+/* this custom BIO allows us to hook SSL_write directly into
+ * an apr_bucket_brigade and use transient buckets with the SSL
+ * malloc-ed buffer, rather than copying into a mem BIO.
+ * also allows us to pass the brigade as data is being written
+ * rather than buffering up the entire response in the mem BIO.
+ *
+ * when SSL needs to flush (e.g. SSL_accept()), it will call BIO_flush()
+ * which will trigger a call to bio_filter_out_ctrl() -> bio_filter_out_flush().
+ * so we only need to flush the output ourselves if we receive an
+ * EOS or FLUSH bucket. this was not possible with the mem BIO where we
+ * had to flush all over the place not really knowing when it was required
+ * to do so.
+ */
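+
+/*
+ * Minimal standalone sketch (illustrative only) of how a custom
+ * BIO_METHOD pair such as the ones defined in this file is normally
+ * attached to an SSL object; OpenSSL then performs all of its I/O
+ * through the custom read/write callbacks instead of a socket.
+ */
+#if 0
+static void example_attach_custom_bios(SSL *ssl,
+                                        BIO_METHOD *in_method,
+                                        BIO_METHOD *out_method)
+{
+    BIO *rbio = BIO_new(in_method);   /* feeds SSL_read()            */
+    BIO *wbio = BIO_new(out_method);  /* receives SSL_write() output */
+
+    /* the SSL object takes ownership of both BIOs */
+    SSL_set_bio(ssl, rbio, wbio);
+}
+#endif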
+
+typedef struct {
+ SSL *pssl;
+ BIO *pbioRead;
+ BIO *pbioWrite;
+ ap_filter_t *pInputFilter;
+ ap_filter_t *pOutputFilter;
+ int nobuffer; /* non-zero to prevent buffering */
+ SSLConnRec *config;
+} ssl_filter_ctx_t;
+
+typedef struct {
+ ssl_filter_ctx_t *filter_ctx;
+ conn_rec *c;
+ apr_bucket_brigade *bb;
+ apr_size_t length;
+ char buffer[AP_IOBUFSIZE];
+ apr_size_t blen;
+ apr_status_t rc;
+} bio_filter_out_ctx_t;
+
+static bio_filter_out_ctx_t *bio_filter_out_ctx_new(ssl_filter_ctx_t *filter_ctx,
+ conn_rec *c)
+{
+ bio_filter_out_ctx_t *outctx = apr_palloc(c->pool, sizeof(*outctx));
+
+ outctx->filter_ctx = filter_ctx;
+ outctx->c = c;
+ outctx->bb = apr_brigade_create(c->pool, c->bucket_alloc);
+ outctx->blen = 0;
+ outctx->length = 0;
+
+ return outctx;
+}
+
+static int bio_filter_out_flush(BIO *bio)
+{
+ bio_filter_out_ctx_t *outctx = (bio_filter_out_ctx_t *)(bio->ptr);
+ apr_bucket *e;
+
+ if (!(outctx->blen || outctx->length)) {
+ outctx->rc = APR_SUCCESS;
+ return 1;
+ }
+
+ if (outctx->blen) {
+ e = apr_bucket_transient_create(outctx->buffer, outctx->blen,
+ outctx->bb->bucket_alloc);
+ /* we filled this buffer first so add it to the
+ * head of the brigade
+ */
+ APR_BRIGADE_INSERT_HEAD(outctx->bb, e);
+ outctx->blen = 0;
+ }
+
+ outctx->length = 0;
+ e = apr_bucket_flush_create(outctx->bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(outctx->bb, e);
+
+ outctx->rc = ap_pass_brigade(outctx->filter_ctx->pOutputFilter->next,
+ outctx->bb);
+ /* Fail if the connection was reset: */
+ if (outctx->rc == APR_SUCCESS && outctx->c->aborted) {
+ outctx->rc = APR_ECONNRESET;
+ }
+ return (outctx->rc == APR_SUCCESS) ? 1 : -1;
+}
+
+static int bio_filter_create(BIO *bio)
+{
+ bio->shutdown = 1;
+ bio->init = 1;
+ bio->num = -1;
+ bio->ptr = NULL;
+
+ return 1;
+}
+
+static int bio_filter_destroy(BIO *bio)
+{
+ if (bio == NULL) {
+ return 0;
+ }
+
+ /* nothing to free here.
+ * apache will destroy the bucket brigade for us
+ */
+ return 1;
+}
+
+static int bio_filter_out_read(BIO *bio, char *out, int outl)
+{
+ /* this is never called */
+ return -1;
+}
+
+static int bio_filter_out_write(BIO *bio, const char *in, int inl)
+{
+ bio_filter_out_ctx_t *outctx = (bio_filter_out_ctx_t *)(bio->ptr);
+
+ /* Abort early if the client has initiated a renegotiation. */
+ if (outctx->filter_ctx->config->reneg_state == RENEG_ABORT) {
+ outctx->rc = APR_ECONNABORTED;
+ return -1;
+ }
+
+ /* when handshaking we'll have a small number of bytes.
+ * max size SSL will pass us here is about 16k.
+ * (16413 bytes to be exact)
+ */
+ BIO_clear_retry_flags(bio);
+
+ if (!outctx->length && (inl + outctx->blen < sizeof(outctx->buffer)) &&
+ !outctx->filter_ctx->nobuffer) {
+ /* the first two SSL_writes (of 1024 and 261 bytes)
+ * need to be in the same packet (vec[0].iov_base)
+ */
+ /* XXX: could use apr_brigade_write() to make code look cleaner
+ * but this way we avoid the malloc(APR_BUCKET_BUFF_SIZE)
+ * and free() of it later
+ */
+ memcpy(&outctx->buffer[outctx->blen], in, inl);
+ outctx->blen += inl;
+ }
+ else {
+ /* pass along the encrypted data
+ * need to flush since we're using SSL's malloc-ed buffer
+ * which will be overwritten once we leave here
+ */
+ apr_bucket *bucket = apr_bucket_transient_create(in, inl,
+ outctx->bb->bucket_alloc);
+
+ outctx->length += inl;
+ APR_BRIGADE_INSERT_TAIL(outctx->bb, bucket);
+
+ if (bio_filter_out_flush(bio) < 0) {
+ return -1;
+ }
+ }
+
+ return inl;
+}
+
+static long bio_filter_out_ctrl(BIO *bio, int cmd, long num, void *ptr)
+{
+ long ret = 1;
+ char **pptr;
+
+ bio_filter_out_ctx_t *outctx = (bio_filter_out_ctx_t *)(bio->ptr);
+
+ switch (cmd) {
+ case BIO_CTRL_RESET:
+ outctx->blen = outctx->length = 0;
+ break;
+ case BIO_CTRL_EOF:
+ ret = (long)((outctx->blen + outctx->length) == 0);
+ break;
+ case BIO_C_SET_BUF_MEM_EOF_RETURN:
+ outctx->blen = outctx->length = (apr_size_t)num;
+ break;
+ case BIO_CTRL_INFO:
+ ret = (long)(outctx->blen + outctx->length);
+ if (ptr) {
+ pptr = (char **)ptr;
+ *pptr = (char *)&(outctx->buffer[0]);
+ }
+ break;
+ case BIO_CTRL_GET_CLOSE:
+ ret = (long)bio->shutdown;
+ break;
+ case BIO_CTRL_SET_CLOSE:
+ bio->shutdown = (int)num;
+ break;
+ case BIO_CTRL_WPENDING:
+ ret = 0L;
+ break;
+ case BIO_CTRL_PENDING:
+ ret = (long)(outctx->blen + outctx->length);
+ break;
+ case BIO_CTRL_FLUSH:
+ ret = bio_filter_out_flush(bio);
+ break;
+ case BIO_CTRL_DUP:
+ ret = 1;
+ break;
+ /* N/A */
+ case BIO_C_SET_BUF_MEM:
+ case BIO_C_GET_BUF_MEM_PTR:
+ /* we don't care */
+ case BIO_CTRL_PUSH:
+ case BIO_CTRL_POP:
+ default:
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static int bio_filter_out_gets(BIO *bio, char *buf, int size)
+{
+ /* this is never called */
+ return -1;
+}
+
+static int bio_filter_out_puts(BIO *bio, const char *str)
+{
+ /* this is never called */
+ return -1;
+}
+
+static BIO_METHOD bio_filter_out_method = {
+ BIO_TYPE_MEM,
+ "APR output filter",
+ bio_filter_out_write,
+ bio_filter_out_read, /* read is never called */
+ bio_filter_out_puts, /* puts is never called */
+ bio_filter_out_gets, /* gets is never called */
+ bio_filter_out_ctrl,
+ bio_filter_create,
+ bio_filter_destroy,
+#ifdef OPENSSL_VERSION_NUMBER
+ NULL /* sslc does not have the callback_ctrl field */
+#endif
+};
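+
+/*
+ * The initializer above fills the OpenSSL 0.9.x BIO_METHOD slots in
+ * declaration order: type, name, bwrite, bread, bputs, bgets, ctrl,
+ * create, destroy and (when building against OpenSSL rather than sslc)
+ * callback_ctrl.
+ */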
+
+typedef struct {
+ int length;
+ char *value;
+} char_buffer_t;
+
+typedef struct {
+ SSL *ssl;
+ BIO *bio_out;
+ ap_filter_t *f;
+ apr_status_t rc;
+ ap_input_mode_t mode;
+ apr_read_type_e block;
+ apr_bucket_brigade *bb;
+ char_buffer_t cbuf;
+ apr_pool_t *pool;
+ char buffer[AP_IOBUFSIZE];
+ ssl_filter_ctx_t *filter_ctx;
+} bio_filter_in_ctx_t;
+
+/*
+ * this char_buffer API might seem silly, but it lets us avoid copying
+ * any of this data while still remembering its length.
+ */
+
+/* Copy up to INL bytes from the char_buffer BUFFER into IN. Note
+ * that due to the strange way this API is designed/used, the
+ * char_buffer object is used to cache a segment of inctx->buffer, and
+ * then this function called to copy (part of) that segment to the
+ * beginning of inctx->buffer. So the segments to copy cannot be
+ * presumed to be non-overlapping, and memmove must be used. */
+static int char_buffer_read(char_buffer_t *buffer, char *in, int inl)
+{
+ if (!buffer->length) {
+ return 0;
+ }
+
+ if (buffer->length > inl) {
+        /* we have enough to fill the caller's buffer */
+ memmove(in, buffer->value, inl);
+ buffer->value += inl;
+ buffer->length -= inl;
+ }
+ else {
+ /* swallow remainder of the buffer */
+ memmove(in, buffer->value, buffer->length);
+ inl = buffer->length;
+ buffer->value = NULL;
+ buffer->length = 0;
+ }
+
+ return inl;
+}
+
+static int char_buffer_write(char_buffer_t *buffer, char *in, int inl)
+{
+ buffer->value = in;
+ buffer->length = inl;
+ return inl;
+}
+
+/* This function will read from a brigade and discard the read buckets as it
+ * proceeds. It will read at most *len bytes.
+ */
+static apr_status_t brigade_consume(apr_bucket_brigade *bb,
+ apr_read_type_e block,
+ char *c, apr_size_t *len)
+{
+ apr_size_t actual = 0;
+ apr_status_t status = APR_SUCCESS;
+
+ while (!APR_BRIGADE_EMPTY(bb)) {
+ apr_bucket *b = APR_BRIGADE_FIRST(bb);
+ const char *str;
+ apr_size_t str_len;
+ apr_size_t consume;
+
+ /* Justin points out this is an http-ism that might
+ * not fit if brigade_consume is added to APR. Perhaps
+ * apr_bucket_read(eos_bucket) should return APR_EOF?
+ * Then this becomes mainline instead of a one-off.
+ */
+ if (APR_BUCKET_IS_EOS(b)) {
+ status = APR_EOF;
+ break;
+ }
+
+ /* The reason I'm not offering brigade_consume yet
+ * across to apr-util is that the following call
+ * illustrates how borked that API really is. For
+ * this sort of case (caller provided buffer) it
+ * would be much more trivial for apr_bucket_consume
+ * to do all the work that follows, based on the
+ * particular characteristics of the bucket we are
+ * consuming here.
+ */
+ status = apr_bucket_read(b, &str, &str_len, block);
+
+ if (status != APR_SUCCESS) {
+ if (APR_STATUS_IS_EOF(status)) {
+ /* This stream bucket was consumed */
+ apr_bucket_delete(b);
+ continue;
+ }
+ break;
+ }
+
+ if (str_len > 0) {
+ /* Do not block once some data has been consumed */
+ block = APR_NONBLOCK_READ;
+
+ /* Assure we don't overflow. */
+ consume = (str_len + actual > *len) ? *len - actual : str_len;
+
+ memcpy(c, str, consume);
+
+ c += consume;
+ actual += consume;
+
+ if (consume >= b->length) {
+ /* This physical bucket was consumed */
+ apr_bucket_delete(b);
+ }
+ else {
+ /* Only part of this physical bucket was consumed */
+ b->start += consume;
+ b->length -= consume;
+ }
+ }
+ else if (b->length == 0) {
+ apr_bucket_delete(b);
+ }
+
+ /* This could probably be actual == *len, but be safe from stray
+ * photons. */
+ if (actual >= *len) {
+ break;
+ }
+ }
+
+ *len = actual;
+ return status;
+}
+
+/*
+ * this is the function called by SSL_read()
+ */
+static int bio_filter_in_read(BIO *bio, char *in, int inlen)
+{
+ apr_size_t inl = inlen;
+ bio_filter_in_ctx_t *inctx = (bio_filter_in_ctx_t *)(bio->ptr);
+ apr_read_type_e block = inctx->block;
+ SSLConnRec *sslconn = myConnConfig(inctx->f->c);
+
+ inctx->rc = APR_SUCCESS;
+
+ /* OpenSSL catches this case, so should we. */
+ if (!in)
+ return 0;
+
+ /* Abort early if the client has initiated a renegotiation. */
+ if (inctx->filter_ctx->config->reneg_state == RENEG_ABORT) {
+ inctx->rc = APR_ECONNABORTED;
+ return -1;
+ }
+
+ /* XXX: flush here only required for SSLv2;
+ * OpenSSL calls BIO_flush() at the appropriate times for
+ * the other protocols.
+ */
+ if ((SSL_version(inctx->ssl) == SSL2_VERSION) || sslconn->is_proxy) {
+ if (bio_filter_out_flush(inctx->bio_out) < 0) {
+ bio_filter_out_ctx_t *outctx =
+ (bio_filter_out_ctx_t *)(inctx->bio_out->ptr);
+ inctx->rc = outctx->rc;
+ return -1;
+ }
+ }
+
+ BIO_clear_retry_flags(bio);
+
+ if (!inctx->bb) {
+ inctx->rc = APR_EOF;
+ return -1;
+ }
+
+ if (APR_BRIGADE_EMPTY(inctx->bb)) {
+
+ inctx->rc = ap_get_brigade(inctx->f->next, inctx->bb,
+ AP_MODE_READBYTES, block,
+ inl);
+
+ /* Not a problem, there was simply no data ready yet.
+ */
+ if (APR_STATUS_IS_EAGAIN(inctx->rc) || APR_STATUS_IS_EINTR(inctx->rc)
+ || (inctx->rc == APR_SUCCESS && APR_BRIGADE_EMPTY(inctx->bb))) {
+ BIO_set_retry_read(bio);
+ return 0;
+ }
+
+ if (inctx->rc != APR_SUCCESS) {
+ /* Unexpected errors discard the brigade */
+ apr_brigade_cleanup(inctx->bb);
+ inctx->bb = NULL;
+ return -1;
+ }
+ }
+
+ inctx->rc = brigade_consume(inctx->bb, block, in, &inl);
+
+ if (inctx->rc == APR_SUCCESS) {
+ return (int)inl;
+ }
+
+ if (APR_STATUS_IS_EAGAIN(inctx->rc)
+ || APR_STATUS_IS_EINTR(inctx->rc)) {
+ BIO_set_retry_read(bio);
+ return (int)inl;
+ }
+
+ /* Unexpected errors and APR_EOF clean out the brigade.
+ * Subsequent calls will return APR_EOF.
+ */
+ apr_brigade_cleanup(inctx->bb);
+ inctx->bb = NULL;
+
+ if (APR_STATUS_IS_EOF(inctx->rc) && inl) {
+ /* Provide the results of this read pass,
+ * without resetting the BIO retry_read flag
+ */
+ return (int)inl;
+ }
+
+ return -1;
+}
+
+
+static BIO_METHOD bio_filter_in_method = {
+ BIO_TYPE_MEM,
+ "APR input filter",
+ NULL, /* write is never called */
+ bio_filter_in_read,
+ NULL, /* puts is never called */
+ NULL, /* gets is never called */
+ NULL, /* ctrl is never called */
+ bio_filter_create,
+ bio_filter_destroy,
+#ifdef OPENSSL_VERSION_NUMBER
+ NULL /* sslc does not have the callback_ctrl field */
+#endif
+};
+
+
+static apr_status_t ssl_io_input_read(bio_filter_in_ctx_t *inctx,
+ char *buf,
+ apr_size_t *len)
+{
+ apr_size_t wanted = *len;
+ apr_size_t bytes = 0;
+ int rc;
+
+ *len = 0;
+
+ /* If we have something leftover from last time, try that first. */
+ if ((bytes = char_buffer_read(&inctx->cbuf, buf, wanted))) {
+ *len = bytes;
+ if (inctx->mode == AP_MODE_SPECULATIVE) {
+ /* We want to rollback this read. */
+ if (inctx->cbuf.length > 0) {
+ inctx->cbuf.value -= bytes;
+ inctx->cbuf.length += bytes;
+ } else {
+ char_buffer_write(&inctx->cbuf, buf, (int)bytes);
+ }
+ return APR_SUCCESS;
+ }
+ /* This could probably be *len == wanted, but be safe from stray
+ * photons.
+ */
+ if (*len >= wanted) {
+ return APR_SUCCESS;
+ }
+ if (inctx->mode == AP_MODE_GETLINE) {
+ if (memchr(buf, APR_ASCII_LF, *len)) {
+ return APR_SUCCESS;
+ }
+ }
+ else {
+ /* Drop to a non-blocking read pattern since we already have some data
+ */
+ inctx->block = APR_NONBLOCK_READ;
+ }
+ }
+
+ while (1) {
+
+ if (!inctx->filter_ctx->pssl) {
+ /* Ensure a non-zero error code is returned */
+ if (inctx->rc == APR_SUCCESS) {
+ inctx->rc = APR_EGENERAL;
+ }
+ break;
+ }
+
+ /* SSL_read may not read because we haven't taken enough data
+ * from the stack. This is where we want to consider all of
+ * the blocking and SPECULATIVE semantics
+ */
+ rc = SSL_read(inctx->filter_ctx->pssl, buf + bytes, wanted - bytes);
+
+ if (rc > 0) {
+ *len += rc;
+ if (inctx->mode == AP_MODE_SPECULATIVE) {
+ /* We want to rollback this read. */
+ char_buffer_write(&inctx->cbuf, buf, rc);
+ }
+ return inctx->rc;
+ }
+ else if (rc == 0) {
+ /* If EAGAIN, we will loop given a blocking read,
+ * otherwise consider ourselves at EOF.
+ */
+ if (APR_STATUS_IS_EAGAIN(inctx->rc)
+ || APR_STATUS_IS_EINTR(inctx->rc)) {
+ /* Already read something, return APR_SUCCESS instead.
+ * On win32 in particular, but perhaps on other kernels,
+ * a blocking call isn't 'always' blocking.
+ */
+ if (*len > 0) {
+ inctx->rc = APR_SUCCESS;
+ break;
+ }
+ if (inctx->block == APR_NONBLOCK_READ) {
+ break;
+ }
+ }
+ else {
+ if (*len > 0) {
+ inctx->rc = APR_SUCCESS;
+ }
+ else {
+ inctx->rc = APR_EOF;
+ }
+ break;
+ }
+ }
+ else /* (rc < 0) */ {
+ int ssl_err = SSL_get_error(inctx->filter_ctx->pssl, rc);
+ conn_rec *c = (conn_rec*)SSL_get_app_data(inctx->filter_ctx->pssl);
+
+ if (ssl_err == SSL_ERROR_WANT_READ) {
+ /*
+ * If OpenSSL wants to read more, and we were nonblocking,
+ * report as an EAGAIN. Otherwise loop, pulling more
+ * data from network filter.
+ *
+ * (This is usually the case when the client forces an SSL
+ * renegotiation which is handled implicitly by OpenSSL.)
+ */
+ inctx->rc = APR_EAGAIN;
+
+ if (*len > 0) {
+ inctx->rc = APR_SUCCESS;
+ break;
+ }
+ if (inctx->block == APR_NONBLOCK_READ) {
+ break;
+ }
+ continue; /* Blocking and nothing yet? Try again. */
+ }
+ else if (ssl_err == SSL_ERROR_SYSCALL) {
+ if (APR_STATUS_IS_EAGAIN(inctx->rc)
+ || APR_STATUS_IS_EINTR(inctx->rc)) {
+ /* Already read something, return APR_SUCCESS instead. */
+ if (*len > 0) {
+ inctx->rc = APR_SUCCESS;
+ break;
+ }
+ if (inctx->block == APR_NONBLOCK_READ) {
+ break;
+ }
+ continue; /* Blocking and nothing yet? Try again. */
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_INFO, inctx->rc, c->base_server,
+ "SSL input filter read failed.");
+ }
+ }
+ else /* if (ssl_err == SSL_ERROR_SSL) */ {
+ /*
+ * Log SSL errors and any unexpected conditions.
+ */
+ ap_log_error(APLOG_MARK, APLOG_INFO, inctx->rc, c->base_server,
+ "SSL library error %d reading data", ssl_err);
+ ssl_log_ssl_error(APLOG_MARK, APLOG_INFO, c->base_server);
+
+ }
+ if (inctx->rc == APR_SUCCESS) {
+ inctx->rc = APR_EGENERAL;
+ }
+ break;
+ }
+ }
+ return inctx->rc;
+}
+
+static apr_status_t ssl_io_input_getline(bio_filter_in_ctx_t *inctx,
+ char *buf,
+ apr_size_t *len)
+{
+ const char *pos = NULL;
+ apr_status_t status;
+ apr_size_t tmplen = *len, buflen = *len, offset = 0;
+
+ *len = 0;
+
+ /*
+ * in most cases we get all the headers on the first SSL_read.
+ * however, in certain cases SSL_read will only get a partial
+ * chunk of the headers, so we try to read until LF is seen.
+ */
+
+ while (tmplen > 0) {
+ status = ssl_io_input_read(inctx, buf + offset, &tmplen);
+
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ *len += tmplen;
+
+ if ((pos = memchr(buf, APR_ASCII_LF, *len))) {
+ break;
+ }
+
+ offset += tmplen;
+ tmplen = buflen - offset;
+ }
+
+ if (pos) {
+ char *value;
+ int length;
+ apr_size_t bytes = pos - buf;
+
+ bytes += 1;
+ value = buf + bytes;
+ length = *len - bytes;
+
+ char_buffer_write(&inctx->cbuf, value, length);
+
+ *len = bytes;
+ }
+
+ return APR_SUCCESS;
+}
+
+
+static apr_status_t ssl_filter_write(ap_filter_t *f,
+ const char *data,
+ apr_size_t len)
+{
+ ssl_filter_ctx_t *filter_ctx = f->ctx;
+ bio_filter_out_ctx_t *outctx;
+ int res;
+
+ /* write SSL */
+ if (filter_ctx->pssl == NULL) {
+ return APR_EGENERAL;
+ }
+
+ outctx = (bio_filter_out_ctx_t *)filter_ctx->pbioWrite->ptr;
+ res = SSL_write(filter_ctx->pssl, (unsigned char *)data, len);
+
+ if (res < 0) {
+ int ssl_err = SSL_get_error(filter_ctx->pssl, res);
+ conn_rec *c = (conn_rec*)SSL_get_app_data(outctx->filter_ctx->pssl);
+
+ if (ssl_err == SSL_ERROR_WANT_WRITE) {
+ /*
+ * If OpenSSL wants to write more, and we were nonblocking,
+ * report as an EAGAIN. Otherwise loop, pushing more
+ * data at the network filter.
+ *
+ * (This is usually the case when the client forces an SSL
+ * renegotiation which is handled implicitly by OpenSSL.)
+ */
+ outctx->rc = APR_EAGAIN;
+ }
+ else if (ssl_err == SSL_ERROR_SYSCALL) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, outctx->rc, c->base_server,
+ "SSL output filter write failed.");
+ }
+ else /* if (ssl_err == SSL_ERROR_SSL) */ {
+ /*
+ * Log SSL errors
+ */
+ ap_log_error(APLOG_MARK, APLOG_INFO, outctx->rc, c->base_server,
+ "SSL library error %d writing data", ssl_err);
+ ssl_log_ssl_error(APLOG_MARK, APLOG_INFO, c->base_server);
+ }
+ if (outctx->rc == APR_SUCCESS) {
+ outctx->rc = APR_EGENERAL;
+ }
+ }
+ else if ((apr_size_t)res != len) {
+ conn_rec *c = f->c;
+ char *reason = "reason unknown";
+
+ /* XXX: probably a better way to determine this */
+ if (SSL_total_renegotiations(filter_ctx->pssl)) {
+ reason = "likely due to failed renegotiation";
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, outctx->rc, c->base_server,
+ "failed to write %" APR_SIZE_T_FMT " of %" APR_SIZE_T_FMT " bytes (%s)",
+ len - (apr_size_t)res, len, reason);
+
+ outctx->rc = APR_EGENERAL;
+ }
+ return outctx->rc;
+}
+
+/* Just use a simple request. Any request will work for this, because
+ * we use a flag in the conn_rec->conn_vector now. The fake request just
+ * gets the request back to the Apache core so that a response can be sent.
+ *
+ * To avoid calling back for more data from the socket, use an HTTP/0.9
+ * request, and tack on an EOS bucket.
+ */
+#define HTTP_ON_HTTPS_PORT \
+ "GET /" CRLF
+
+#define HTTP_ON_HTTPS_PORT_BUCKET(alloc) \
+ apr_bucket_immortal_create(HTTP_ON_HTTPS_PORT, \
+ sizeof(HTTP_ON_HTTPS_PORT) - 1, \
+ alloc)
+
+static void ssl_io_filter_disable(SSLConnRec *sslconn, ap_filter_t *f)
+{
+ bio_filter_in_ctx_t *inctx = f->ctx;
+ SSL_free(inctx->ssl);
+ sslconn->ssl = NULL;
+ inctx->ssl = NULL;
+ inctx->filter_ctx->pssl = NULL;
+}
+
+static apr_status_t ssl_io_filter_error(ap_filter_t *f,
+ apr_bucket_brigade *bb,
+ apr_status_t status)
+{
+ SSLConnRec *sslconn = myConnConfig(f->c);
+ apr_bucket *bucket;
+
+ switch (status) {
+ case HTTP_BAD_REQUEST:
+ /* log the situation */
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0,
+ f->c->base_server,
+ "SSL handshake failed: HTTP spoken on HTTPS port; "
+ "trying to send HTML error page");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_INFO, f->c->base_server);
+
+ sslconn->non_ssl_request = 1;
+ ssl_io_filter_disable(sslconn, f);
+
+ /* fake the request line */
+ bucket = HTTP_ON_HTTPS_PORT_BUCKET(f->c->bucket_alloc);
+ break;
+
+ default:
+ return status;
+ }
+
+ APR_BRIGADE_INSERT_TAIL(bb, bucket);
+ bucket = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, bucket);
+
+ return APR_SUCCESS;
+}
+
+static const char ssl_io_filter[] = "SSL/TLS Filter";
+static const char ssl_io_buffer[] = "SSL/TLS Buffer";
+
+/*
+ * Close the SSL part of the socket connection
+ * (called immediately _before_ the socket is closed)
+ * or called with abortive set when a fatal error requires the
+ * connection to be torn down immediately without a clean shutdown.
+ */
+static apr_status_t ssl_filter_io_shutdown(ssl_filter_ctx_t *filter_ctx,
+ conn_rec *c,
+ int abortive)
+{
+ SSL *ssl = filter_ctx->pssl;
+ const char *type = "";
+ SSLConnRec *sslconn = myConnConfig(c);
+ int shutdown_type;
+
+ if (!ssl) {
+ return APR_SUCCESS;
+ }
+
+ /*
+ * Now close the SSL layer of the connection. We have to take
+ * the TLSv1 standard into account here:
+ *
+ * | 7.2.1. Closure alerts
+ * |
+ * | The client and the server must share knowledge that the connection is
+ * | ending in order to avoid a truncation attack. Either party may
+ * | initiate the exchange of closing messages.
+ * |
+ * | close_notify
+ * | This message notifies the recipient that the sender will not send
+ * | any more messages on this connection. The session becomes
+ * | unresumable if any connection is terminated without proper
+ * | close_notify messages with level equal to warning.
+ * |
+ * | Either party may initiate a close by sending a close_notify alert.
+ * | Any data received after a closure alert is ignored.
+ * |
+ * | Each party is required to send a close_notify alert before closing
+ * | the write side of the connection. It is required that the other party
+ * | respond with a close_notify alert of its own and close down the
+ * | connection immediately, discarding any pending writes. It is not
+ * | required for the initiator of the close to wait for the responding
+ * | close_notify alert before closing the read side of the connection.
+ *
+ * This means we have to send a close_notify alert, but need not wait
+ * for the client's close_notify. In fact we cannot wait for the
+ * client's close_notify, because some clients (including Netscape
+ * 4.x) never send one, so we would hang.
+ */
+
+ /*
+ * exchange close notify messages, but allow the user
+ * to force the type of handshake via SetEnvIf directive
+ */
+ if (abortive) {
+ shutdown_type = SSL_SENT_SHUTDOWN|SSL_RECEIVED_SHUTDOWN;
+ type = "abortive";
+ }
+ else switch (sslconn->shutdown_type) {
+ case SSL_SHUTDOWN_TYPE_UNCLEAN:
+ /* perform no close notify handshake at all
+ (violates the SSL/TLS standard!) */
+ shutdown_type = SSL_SENT_SHUTDOWN|SSL_RECEIVED_SHUTDOWN;
+ type = "unclean";
+ break;
+ case SSL_SHUTDOWN_TYPE_ACCURATE:
+ /* send close notify and wait for the client's close notify
+ (standard compliant, but usually causes connection hangs) */
+ shutdown_type = 0;
+ type = "accurate";
+ break;
+ default:
+ /*
+ * case SSL_SHUTDOWN_TYPE_UNSET:
+ * case SSL_SHUTDOWN_TYPE_STANDARD:
+ */
+ /* send close notify, but don't wait for the client's close notify
+ (standard compliant and safe, so it's the DEFAULT!) */
+ shutdown_type = SSL_RECEIVED_SHUTDOWN;
+ type = "standard";
+ break;
+ }
+
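+ /* For example, the shutdown type above is normally selected with
+ * SetEnvIf; an illustrative (hypothetical) configuration for broken
+ * clients would be:
+ *
+ * SetEnvIf User-Agent ".*MSIE.*" \
+ * nokeepalive ssl-unclean-shutdown \
+ * downgrade-1.0 force-response-1.0
+ *
+ * "ssl-unclean-shutdown" selects SSL_SHUTDOWN_TYPE_UNCLEAN and
+ * "ssl-accurate-shutdown" selects SSL_SHUTDOWN_TYPE_ACCURATE; see
+ * ssl_configure_env() in ssl_engine_kernel.c.
+ */
+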
+ SSL_set_shutdown(ssl, shutdown_type);
+ SSL_smart_shutdown(ssl);
+
+ /* and finally log the fact that we've closed the connection */
+ if (c->base_server->loglevel >= APLOG_INFO) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, c->base_server,
+ "Connection to child %ld closed with %s shutdown "
+ "(server %s, client %s)",
+ c->id, type,
+ ssl_util_vhostid(c->pool, c->base_server),
+ c->remote_ip ? c->remote_ip : "unknown");
+ }
+
+ /* deallocate the SSL connection */
+ if (sslconn->client_cert) {
+ X509_free(sslconn->client_cert);
+ sslconn->client_cert = NULL;
+ }
+ SSL_free(ssl);
+ sslconn->ssl = NULL;
+ filter_ctx->pssl = NULL; /* so filters know we've been shutdown */
+
+ if (abortive) {
+ /* prevent any further I/O */
+ c->aborted = 1;
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t ssl_io_filter_cleanup(void *data)
+{
+ ssl_filter_ctx_t *filter_ctx = data;
+
+ if (filter_ctx->pssl) {
+ conn_rec *c = (conn_rec *)SSL_get_app_data(filter_ctx->pssl);
+ SSLConnRec *sslconn = myConnConfig(c);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL,
+ "SSL connection destroyed without being closed");
+
+ SSL_free(filter_ctx->pssl);
+ sslconn->ssl = filter_ctx->pssl = NULL;
+ }
+
+ return APR_SUCCESS;
+}
+
+/*
+ * This hook is NOT registered with ap_hook_process_connection. Instead, it
+ * is called manually from the I/O filters before they try to read any data.
+ * There is still an unresolved problem with accepting a conn_rec * directly;
+ * the advantage of accepting a conn_rec * would be that this function could
+ * then be hooked via ap_hook_process_connection.
+ */
+static int ssl_io_filter_connect(ssl_filter_ctx_t *filter_ctx)
+{
+ conn_rec *c = (conn_rec *)SSL_get_app_data(filter_ctx->pssl);
+ SSLConnRec *sslconn = myConnConfig(c);
+ SSLSrvConfigRec *sc = mySrvConfig(c->base_server);
+ X509 *cert;
+ int n;
+ int ssl_err;
+ long verify_result;
+
+ if (SSL_is_init_finished(filter_ctx->pssl)) {
+ return APR_SUCCESS;
+ }
+
+ if (sslconn->is_proxy) {
+ if ((n = SSL_connect(filter_ctx->pssl)) <= 0) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0,
+ c->base_server,
+ "SSL Proxy connect failed");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_INFO, c->base_server);
+ return ssl_filter_io_shutdown(filter_ctx, c, 1);
+ }
+
+ return APR_SUCCESS;
+ }
+
+ if ((n = SSL_accept(filter_ctx->pssl)) <= 0) {
+ bio_filter_in_ctx_t *inctx = (bio_filter_in_ctx_t *)
+ (filter_ctx->pbioRead->ptr);
+ bio_filter_out_ctx_t *outctx = (bio_filter_out_ctx_t *)
+ (filter_ctx->pbioWrite->ptr);
+ apr_status_t rc = inctx->rc ? inctx->rc : outctx->rc ;
+ ssl_err = SSL_get_error(filter_ctx->pssl, n);
+
+ if (ssl_err == SSL_ERROR_ZERO_RETURN) {
+ /*
+ * The case where the connection was closed before any data
+ * was transferred. That's not a real error and can occur
+ * sporadically with some clients.
+ */
+ ap_log_error(APLOG_MARK, APLOG_INFO, rc,
+ c->base_server,
+ "SSL handshake stopped: connection was closed");
+ }
+ else if (ssl_err == SSL_ERROR_WANT_READ) {
+ /*
+ * This is in addition to what was present earlier. It is
+ * borrowed from openssl_state_machine.c [mod_tls].
+ * TBD.
+ */
+ outctx->rc = APR_EAGAIN;
+ return SSL_ERROR_WANT_READ;
+ }
+ else if (ERR_GET_LIB(ERR_peek_error()) == ERR_LIB_SSL &&
+ ERR_GET_REASON(ERR_peek_error()) == SSL_R_HTTP_REQUEST) {
+ /*
+ * The case where OpenSSL has recognized a HTTP request:
+ * This means the client speaks plain HTTP on our HTTPS port.
+ * ssl_io_filter_error will disable the ssl filters when it
+ * sees this status code.
+ */
+ return HTTP_BAD_REQUEST;
+ }
+ else if (ssl_err == SSL_ERROR_SYSCALL) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, rc, c->base_server,
+ "SSL handshake interrupted by system "
+ "[Hint: Stop button pressed in browser?!]");
+ }
+ else /* if (ssl_err == SSL_ERROR_SSL) */ {
+ /*
+ * Log SSL errors and any unexpected conditions.
+ */
+ ap_log_error(APLOG_MARK, APLOG_INFO, rc, c->base_server,
+ "SSL library error %d in handshake "
+ "(server %s, client %s)", ssl_err,
+ ssl_util_vhostid(c->pool, c->base_server),
+ c->remote_ip ? c->remote_ip : "unknown");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_INFO, c->base_server);
+
+ }
+ if (inctx->rc == APR_SUCCESS) {
+ inctx->rc = APR_EGENERAL;
+ }
+
+ return ssl_filter_io_shutdown(filter_ctx, c, 1);
+ }
+
+ /*
+ * Check for failed client authentication
+ */
+ verify_result = SSL_get_verify_result(filter_ctx->pssl);
+
+ if ((verify_result != X509_V_OK) ||
+ sslconn->verify_error)
+ {
+ if (ssl_verify_error_is_optional(verify_result) &&
+ (sc->server->auth.verify_mode == SSL_CVERIFY_OPTIONAL_NO_CA))
+ {
+ /* leaving this log message as an error for the moment,
+ * according to the mod_ssl docs:
+ * "level optional_no_ca is actually against the idea
+ * of authentication (but can be used to establish
+ * SSL test pages, etc.)"
+ * optional_no_ca doesn't appear to work as advertised
+ * in 1.x
+ */
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0,
+ c->base_server,
+ "SSL client authentication failed, "
+ "accepting certificate based on "
+ "\"SSLVerifyClient optional_no_ca\" "
+ "configuration");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_INFO, c->base_server);
+ }
+ else {
+ const char *error = sslconn->verify_error ?
+ sslconn->verify_error :
+ X509_verify_cert_error_string(verify_result);
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0,
+ c->base_server,
+ "SSL client authentication failed: %s",
+ error ? error : "unknown");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_INFO, c->base_server);
+
+ return ssl_filter_io_shutdown(filter_ctx, c, 1);
+ }
+ }
+
+ /*
+ * Remember the peer certificate's DN
+ */
+ if ((cert = SSL_get_peer_certificate(filter_ctx->pssl))) {
+ if (sslconn->client_cert) {
+ X509_free(sslconn->client_cert);
+ }
+ sslconn->client_cert = cert;
+ sslconn->client_dn = NULL;
+ }
+
+ /*
+ * Make really sure that when a peer certificate
+ * is required we really got one... (be paranoid)
+ */
+ if ((sc->server->auth.verify_mode == SSL_CVERIFY_REQUIRE) &&
+ !sslconn->client_cert)
+ {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, c->base_server,
+ "No acceptable peer certificate available");
+
+ return ssl_filter_io_shutdown(filter_ctx, c, 1);
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t ssl_io_filter_input(ap_filter_t *f,
+ apr_bucket_brigade *bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ apr_status_t status;
+ bio_filter_in_ctx_t *inctx = f->ctx;
+
+ apr_size_t len = sizeof(inctx->buffer);
+ int is_init = (mode == AP_MODE_INIT);
+
+ if (f->c->aborted) {
+ /* XXX: Ok, if we aborted, we ARE at the EOS. We also have
+ * aborted. This 'double protection' is probably redundant,
+ * but also effective against just about anything.
+ */
+ apr_bucket *bucket = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, bucket);
+ return APR_ECONNABORTED;
+ }
+
+ if (!inctx->ssl) {
+ return ap_get_brigade(f->next, bb, mode, block, readbytes);
+ }
+
+ /* XXX: we don't currently support anything other than these modes. */
+ if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE &&
+ mode != AP_MODE_SPECULATIVE && mode != AP_MODE_INIT) {
+ return APR_ENOTIMPL;
+ }
+
+ inctx->mode = mode;
+ inctx->block = block;
+
+ /* XXX: we could actually move ssl_io_filter_connect to an
+ * ap_hook_process_connection but would still need to call it for
+ * AP_MODE_INIT for protocols that may upgrade the connection
+ * rather than have SSLEngine On configured.
+ */
+ if ((status = ssl_io_filter_connect(inctx->filter_ctx)) != APR_SUCCESS) {
+ return ssl_io_filter_error(f, bb, status);
+ }
+
+ if (is_init) {
+ /* protocol module needs to handshake before sending
+ * data to client (e.g. NNTP or FTP)
+ */
+ return APR_SUCCESS;
+ }
+
+ if (inctx->mode == AP_MODE_READBYTES ||
+ inctx->mode == AP_MODE_SPECULATIVE) {
+ /* Protected from truncation, readbytes < MAX_SIZE_T
+ * FIXME: No, it's *not* protected. -- jre */
+ if (readbytes < len) {
+ len = (apr_size_t)readbytes;
+ }
+ status = ssl_io_input_read(inctx, inctx->buffer, &len);
+ }
+ else if (inctx->mode == AP_MODE_GETLINE) {
+ status = ssl_io_input_getline(inctx, inctx->buffer, &len);
+ }
+ else {
+ /* We have no idea what you are talking about, so return an error. */
+ status = APR_ENOTIMPL;
+ }
+
+ /* It is possible for mod_ssl's BIO to be used outside of the
+ * direct control of mod_ssl's input or output filter -- notably,
+ * when mod_ssl initiates a renegotiation. Switching the BIO mode
+ * back to "blocking" here ensures such operations don't fail with
+ * SSL_ERROR_WANT_READ. */
+ inctx->block = APR_BLOCK_READ;
+
+ /* Handle custom errors. */
+ if (status != APR_SUCCESS) {
+ return ssl_io_filter_error(f, bb, status);
+ }
+
+ /* Create a transient bucket out of the decrypted data. */
+ if (len > 0) {
+ apr_bucket *bucket =
+ apr_bucket_transient_create(inctx->buffer, len, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, bucket);
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t ssl_io_filter_output(ap_filter_t *f,
+ apr_bucket_brigade *bb)
+{
+ apr_status_t status = APR_SUCCESS;
+ ssl_filter_ctx_t *filter_ctx = f->ctx;
+ bio_filter_in_ctx_t *inctx;
+ bio_filter_out_ctx_t *outctx;
+ apr_read_type_e rblock = APR_NONBLOCK_READ;
+
+ if (f->c->aborted) {
+ apr_brigade_cleanup(bb);
+ return APR_ECONNABORTED;
+ }
+
+ if (!filter_ctx->pssl) {
+ /* ssl_filter_io_shutdown was called */
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ inctx = (bio_filter_in_ctx_t *)filter_ctx->pbioRead->ptr;
+ outctx = (bio_filter_out_ctx_t *)filter_ctx->pbioWrite->ptr;
+
+ /* When we are the writer, we must initialize the inctx
+ * mode so that we block for any required ssl input, because
+ * output filtering is always nonblocking.
+ */
+ inctx->mode = AP_MODE_READBYTES;
+ inctx->block = APR_BLOCK_READ;
+
+ if ((status = ssl_io_filter_connect(filter_ctx)) != APR_SUCCESS) {
+ return ssl_io_filter_error(f, bb, status);
+ }
+
+ while (!APR_BRIGADE_EMPTY(bb)) {
+ apr_bucket *bucket = APR_BRIGADE_FIRST(bb);
+
+ /* If it is a flush or EOS, we need to pass this down.
+ * These types do not require translation by OpenSSL.
+ */
+ if (APR_BUCKET_IS_EOS(bucket) || APR_BUCKET_IS_FLUSH(bucket)) {
+ if (bio_filter_out_flush(filter_ctx->pbioWrite) < 0) {
+ status = outctx->rc;
+ break;
+ }
+
+ if (APR_BUCKET_IS_EOS(bucket)) {
+ /*
+ * By definition, nothing can come after EOS,
+ * which also means we can pass the rest of this brigade
+ * without creating a new one since it only contains the
+ * EOS bucket.
+ */
+
+ if ((status = ap_pass_brigade(f->next, bb)) != APR_SUCCESS) {
+ return status;
+ }
+ break;
+ }
+ else {
+ /* bio_filter_out_flush() already passed down a flush bucket
+ * if there was any data to be flushed.
+ */
+ apr_bucket_delete(bucket);
+ }
+ }
+ else if (AP_BUCKET_IS_EOC(bucket)) {
+ /* The special "EOC" bucket means a shutdown is needed;
+ * - turn off buffering in bio_filter_out_write
+ * - issue the SSL_shutdown
+ */
+ filter_ctx->nobuffer = 1;
+ status = ssl_filter_io_shutdown(filter_ctx, f->c, 0);
+ if (status != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, status, NULL,
+ "SSL filter error shutting down I/O");
+ }
+ if ((status = ap_pass_brigade(f->next, bb)) != APR_SUCCESS) {
+ return status;
+ }
+ break;
+ }
+ else {
+ /* filter output */
+ const char *data;
+ apr_size_t len;
+
+ status = apr_bucket_read(bucket, &data, &len, rblock);
+
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ /* No data available: flush... */
+ if (bio_filter_out_flush(filter_ctx->pbioWrite) < 0) {
+ status = outctx->rc;
+ break;
+ }
+ rblock = APR_BLOCK_READ;
+ continue; /* and try again with a blocking read. */
+ }
+
+ rblock = APR_NONBLOCK_READ;
+
+ if (!APR_STATUS_IS_EOF(status) && (status != APR_SUCCESS)) {
+ break;
+ }
+
+ status = ssl_filter_write(f, data, len);
+ apr_bucket_delete(bucket);
+
+ if (status != APR_SUCCESS) {
+ break;
+ }
+ }
+ }
+
+ return status;
+}
+
+/* 128K maximum buffer size by default. */
+#ifndef SSL_MAX_IO_BUFFER
+#define SSL_MAX_IO_BUFFER (128 * 1024)
+#endif
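+/* An illustrative build-time override (an assumption, not a documented
+ * directive): e.g.
+ *
+ *   CFLAGS="-DSSL_MAX_IO_BUFFER=262144" ./configure --enable-ssl ...
+ *
+ * ssl_io_buffer_fill() below rejects the request with
+ * HTTP_REQUEST_ENTITY_TOO_LARGE once the buffered body exceeds this limit.
+ */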
+
+struct modssl_buffer_ctx {
+ apr_bucket_brigade *bb;
+ apr_pool_t *pool;
+};
+
+int ssl_io_buffer_fill(request_rec *r)
+{
+ conn_rec *c = r->connection;
+ struct modssl_buffer_ctx *ctx;
+ apr_bucket_brigade *tempb;
+ apr_off_t total = 0; /* total length buffered */
+ int eos = 0; /* non-zero once EOS is seen */
+
+ /* Create the context which will be passed to the input filter;
+ * containing a setaside pool and a brigade which constrain the
+ * lifetime of the buffered data. */
+ ctx = apr_palloc(r->pool, sizeof *ctx);
+ apr_pool_create(&ctx->pool, r->pool);
+ ctx->bb = apr_brigade_create(ctx->pool, c->bucket_alloc);
+
+ /* ... and a temporary brigade. */
+ tempb = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "filling buffer");
+
+ do {
+ apr_status_t rv;
+ apr_bucket *e, *next;
+
+ /* The request body is read from the protocol-level input
+ * filters; the buffering filter will reinject it from that
+ * level, allowing content/resource filters to run later, if
+ * necessary. */
+
+ rv = ap_get_brigade(r->proto_input_filters, tempb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, 8192);
+ if (rv) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "could not read request body for SSL buffer");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /* Iterate through the returned brigade: setaside each bucket
+ * into the context's pool and move it into the brigade. */
+ for (e = APR_BRIGADE_FIRST(tempb);
+ e != APR_BRIGADE_SENTINEL(tempb) && !eos; e = next) {
+ const char *data;
+ apr_size_t len;
+
+ next = APR_BUCKET_NEXT(e);
+
+ if (APR_BUCKET_IS_EOS(e)) {
+ eos = 1;
+ } else if (!APR_BUCKET_IS_METADATA(e)) {
+ rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "could not read bucket for SSL buffer");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ total += len;
+ }
+
+ rv = apr_bucket_setaside(e, ctx->pool);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "could not setaside bucket for SSL buffer");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, e);
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "total of %" APR_OFF_T_FMT " bytes in buffer, eos=%d",
+ total, eos);
+
+ /* Fail if this exceeds the maximum buffer size. */
+ if (total > SSL_MAX_IO_BUFFER) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "request body exceeds maximum size for SSL buffer");
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
+ } while (!eos);
+
+ apr_brigade_destroy(tempb);
+
+ /* Insert the filter which will supply the buffered data. */
+ ap_add_input_filter(ssl_io_buffer, ctx, r, c);
+
+ return 0;
+}
+
+/* This input filter supplies the buffered request body to the caller
+ * from the brigade stored in f->ctx. */
+static apr_status_t ssl_io_filter_buffer(ap_filter_t *f,
+ apr_bucket_brigade *bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t bytes)
+{
+ struct modssl_buffer_ctx *ctx = f->ctx;
+ apr_status_t rv;
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
+ "read from buffered SSL brigade, mode %d, "
+ "%" APR_OFF_T_FMT " bytes",
+ mode, bytes);
+
+ if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) {
+ return APR_ENOTIMPL;
+ }
+
+ if (mode == AP_MODE_READBYTES) {
+ apr_bucket *e;
+
+ /* Partition the buffered brigade. */
+ rv = apr_brigade_partition(ctx->bb, bytes, &e);
+ if (rv && rv != APR_INCOMPLETE) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r,
+ "could not partition buffered SSL brigade");
+ ap_remove_input_filter(f);
+ return rv;
+ }
+
+ /* If the buffered brigade contains less than the requested
+ * length, just pass it all back. */
+ if (rv == APR_INCOMPLETE) {
+ APR_BRIGADE_CONCAT(bb, ctx->bb);
+ } else {
+ apr_bucket *d = APR_BRIGADE_FIRST(ctx->bb);
+
+ e = APR_BUCKET_PREV(e);
+
+ /* Unsplice the partitioned segment and move it into the
+ * passed-in brigade; no convenient way to do this with
+ * the APR_BRIGADE_* macros. */
+ APR_RING_UNSPLICE(d, e, link);
+ APR_RING_SPLICE_HEAD(&bb->list, d, e, apr_bucket, link);
+
+ APR_BRIGADE_CHECK_CONSISTENCY(bb);
+ APR_BRIGADE_CHECK_CONSISTENCY(ctx->bb);
+ }
+ }
+ else {
+ /* Split a line into the passed-in brigade. */
+ rv = apr_brigade_split_line(bb, ctx->bb, mode, bytes);
+
+ if (rv) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r,
+ "could not split line from buffered SSL brigade");
+ ap_remove_input_filter(f);
+ return rv;
+ }
+ }
+
+ if (APR_BRIGADE_EMPTY(ctx->bb)) {
+ apr_bucket *e = APR_BRIGADE_LAST(bb);
+
+ /* Ensure that the brigade is terminated by an EOS if the
+ * buffered request body has been entirely consumed. */
+ if (e == APR_BRIGADE_SENTINEL(bb) || !APR_BUCKET_IS_EOS(e)) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
+ "buffered SSL brigade now exhausted; removing filter");
+ ap_remove_input_filter(f);
+ }
+
+ return APR_SUCCESS;
+}
+
+static void ssl_io_input_add_filter(ssl_filter_ctx_t *filter_ctx, conn_rec *c,
+ SSL *ssl)
+{
+ bio_filter_in_ctx_t *inctx;
+
+ inctx = apr_palloc(c->pool, sizeof(*inctx));
+
+ filter_ctx->pInputFilter = ap_add_input_filter(ssl_io_filter, inctx, NULL, c);
+
+ filter_ctx->pbioRead = BIO_new(&bio_filter_in_method);
+ filter_ctx->pbioRead->ptr = (void *)inctx;
+
+ inctx->ssl = ssl;
+ inctx->bio_out = filter_ctx->pbioWrite;
+ inctx->f = filter_ctx->pInputFilter;
+ inctx->rc = APR_SUCCESS;
+ inctx->mode = AP_MODE_READBYTES;
+ inctx->cbuf.length = 0;
+ inctx->bb = apr_brigade_create(c->pool, c->bucket_alloc);
+ inctx->block = APR_BLOCK_READ;
+ inctx->pool = c->pool;
+ inctx->filter_ctx = filter_ctx;
+}
+
+void ssl_io_filter_init(conn_rec *c, SSL *ssl)
+{
+ ssl_filter_ctx_t *filter_ctx;
+
+ filter_ctx = apr_palloc(c->pool, sizeof(ssl_filter_ctx_t));
+
+ filter_ctx->config = myConnConfig(c);
+
+ filter_ctx->nobuffer = 0;
+ filter_ctx->pOutputFilter = ap_add_output_filter(ssl_io_filter,
+ filter_ctx, NULL, c);
+
+ filter_ctx->pbioWrite = BIO_new(&bio_filter_out_method);
+ filter_ctx->pbioWrite->ptr = (void *)bio_filter_out_ctx_new(filter_ctx, c);
+
+ ssl_io_input_add_filter(filter_ctx, c, ssl);
+
+ SSL_set_bio(ssl, filter_ctx->pbioRead, filter_ctx->pbioWrite);
+ filter_ctx->pssl = ssl;
+
+ apr_pool_cleanup_register(c->pool, (void*)filter_ctx,
+ ssl_io_filter_cleanup, apr_pool_cleanup_null);
+
+ if (c->base_server->loglevel >= APLOG_DEBUG) {
+ BIO_set_callback(SSL_get_rbio(ssl), ssl_io_data_cb);
+ BIO_set_callback_arg(SSL_get_rbio(ssl), (void *)ssl);
+ }
+
+ return;
+}
+
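+/* Filter ordering note (descriptive, based on the AP_FTYPE_* levels in
+ * util_filter.h): AP_FTYPE_CONNECTION + 5 places the SSL/TLS filters
+ * between the connection- and network-level filters, i.e. as close to the
+ * socket as possible, while AP_FTYPE_PROTOCOL - 1 places the buffering
+ * filter just below the HTTP protocol filters so it can re-inject the
+ * buffered request body to them.
+ */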
+void ssl_io_filter_register(apr_pool_t *p)
+{
+ ap_register_input_filter (ssl_io_filter, ssl_io_filter_input, NULL, AP_FTYPE_CONNECTION + 5);
+ ap_register_output_filter (ssl_io_filter, ssl_io_filter_output, NULL, AP_FTYPE_CONNECTION + 5);
+
+ ap_register_input_filter (ssl_io_buffer, ssl_io_filter_buffer, NULL, AP_FTYPE_PROTOCOL - 1);
+
+ return;
+}
+
+/* _________________________________________________________________
+**
+** I/O Data Debugging
+** _________________________________________________________________
+*/
+
+#define DUMP_WIDTH 16
+
+static void ssl_io_data_dump(server_rec *srvr,
+ MODSSL_BIO_CB_ARG_TYPE *s,
+ long len)
+{
+ char buf[256];
+ char tmp[64];
+ int i, j, rows, trunc;
+ unsigned char ch;
+
+ trunc = 0;
+ for(; (len > 0) && ((s[len-1] == ' ') || (s[len-1] == '\0')); len--)
+ trunc++;
+ rows = (len / DUMP_WIDTH);
+ if ((rows * DUMP_WIDTH) < len)
+ rows++;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, srvr,
+ "+-------------------------------------------------------------------------+");
+ for(i = 0 ; i< rows; i++) {
+ apr_snprintf(tmp, sizeof(tmp), "| %04x: ", i * DUMP_WIDTH);
+ apr_cpystrn(buf, tmp, sizeof(buf));
+ for (j = 0; j < DUMP_WIDTH; j++) {
+ if (((i * DUMP_WIDTH) + j) >= len)
+ apr_cpystrn(buf+strlen(buf), " ", sizeof(buf)-strlen(buf));
+ else {
+ ch = ((unsigned char)*((char *)(s) + i * DUMP_WIDTH + j)) & 0xff;
+ apr_snprintf(tmp, sizeof(tmp), "%02x%c", ch , j==7 ? '-' : ' ');
+ apr_cpystrn(buf+strlen(buf), tmp, sizeof(buf)-strlen(buf));
+ }
+ }
+ apr_cpystrn(buf+strlen(buf), " ", sizeof(buf)-strlen(buf));
+ for (j = 0; j < DUMP_WIDTH; j++) {
+ if (((i * DUMP_WIDTH) + j) >= len)
+ apr_cpystrn(buf+strlen(buf), " ", sizeof(buf)-strlen(buf));
+ else {
+ ch = ((unsigned char)*((char *)(s) + i * DUMP_WIDTH + j)) & 0xff;
+ apr_snprintf(tmp, sizeof(tmp), "%c", ((ch >= ' ') && (ch <= '~')) ? ch : '.');
+ apr_cpystrn(buf+strlen(buf), tmp, sizeof(buf)-strlen(buf));
+ }
+ }
+ apr_cpystrn(buf+strlen(buf), " |", sizeof(buf)-strlen(buf));
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, srvr,
+ "%s", buf);
+ }
+ if (trunc > 0)
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, srvr,
+ "| %04ld - <SPACES/NULS>", len + trunc);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, srvr,
+ "+-------------------------------------------------------------------------+");
+ return;
+}
+
+long ssl_io_data_cb(BIO *bio, int cmd,
+ MODSSL_BIO_CB_ARG_TYPE *argp,
+ int argi, long argl, long rc)
+{
+ SSL *ssl;
+ conn_rec *c;
+ server_rec *s;
+
+ if ((ssl = (SSL *)BIO_get_callback_arg(bio)) == NULL)
+ return rc;
+ if ((c = (conn_rec *)SSL_get_app_data(ssl)) == NULL)
+ return rc;
+ s = c->base_server;
+
+ if ( cmd == (BIO_CB_WRITE|BIO_CB_RETURN)
+ || cmd == (BIO_CB_READ |BIO_CB_RETURN) ) {
+ if (rc >= 0) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "%s: %s %ld/%d bytes %s BIO#%pp [mem: %pp] %s",
+ SSL_LIBRARY_NAME,
+ (cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "write" : "read"),
+ rc, argi, (cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "to" : "from"),
+ bio, argp,
+ (argp != NULL ? "(BIO dump follows)" : "(Oops, no memory buffer?)"));
+ if (argp != NULL)
+ ssl_io_data_dump(s, argp, rc);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "%s: I/O error, %d bytes expected to %s on BIO#%pp [mem: %pp]",
+ SSL_LIBRARY_NAME, argi,
+ (cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "write" : "read"),
+ bio, argp);
+ }
+ }
+ return rc;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_kernel.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_kernel.c
new file mode 100644
index 00000000..60133f7c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_kernel.c
@@ -0,0 +1,1876 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_engine_kernel.c
+ * The SSL engine kernel
+ */
+ /* ``It took me fifteen years to discover
+ I had no talent for programming, but
+ I couldn't give it up because by that
+ time I was too famous.''
+ -- Unknown */
+#include "mod_ssl.h"
+
+static void ssl_configure_env(request_rec *r, SSLConnRec *sslconn);
+
+/* Perform a speculative (and non-blocking) read from the connection
+ * filters for the given request, to determine whether there is any
+ * pending data to read. Return non-zero if there is, else zero. */
+static int has_buffered_data(request_rec *r)
+{
+ apr_bucket_brigade *bb;
+ apr_off_t len;
+ apr_status_t rv;
+ int result;
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+
+ rv = ap_get_brigade(r->connection->input_filters, bb, AP_MODE_SPECULATIVE,
+ APR_NONBLOCK_READ, 1);
+ result = rv == APR_SUCCESS
+ && apr_brigade_length(bb, 1, &len) == APR_SUCCESS
+ && len > 0;
+
+ apr_brigade_destroy(bb);
+
+ return result;
+}
+
+/*
+ * Post Read Request Handler
+ */
+int ssl_hook_ReadReq(request_rec *r)
+{
+ SSLConnRec *sslconn = myConnConfig(r->connection);
+ SSL *ssl;
+
+ if (!sslconn) {
+ return DECLINED;
+ }
+
+ if (sslconn->non_ssl_request) {
+ const char *errmsg;
+ char *thisurl;
+ char *thisport = "";
+ int port = ap_get_server_port(r);
+
+ if (!ap_is_default_port(port, r)) {
+ thisport = apr_psprintf(r->pool, ":%u", port);
+ }
+
+ thisurl = ap_escape_html(r->pool,
+ apr_psprintf(r->pool, "https://%s%s/",
+ ap_get_server_name(r),
+ thisport));
+
+ errmsg = apr_psprintf(r->pool,
+ "Reason: You're speaking plain HTTP "
+ "to an SSL-enabled server port.<br />\n"
+ "Instead use the HTTPS scheme to access "
+ "this URL, please.<br />\n"
+ "<blockquote>Hint: "
+ "<a href=\"%s\"><b>%s</b></a></blockquote>",
+ thisurl, thisurl);
+
+ apr_table_setn(r->notes, "error-notes", errmsg);
+
+ /* Now that we have caught this error, forget it; we are done
+ * with using SSL on this request.
+ */
+ sslconn->non_ssl_request = 0;
+
+
+ return HTTP_BAD_REQUEST;
+ }
+
+ /*
+ * Get the SSL connection structure and perform the
+ * delayed interlinking from SSL back to request_rec
+ */
+ ssl = sslconn->ssl;
+ if (!ssl) {
+ return DECLINED;
+ }
+ SSL_set_app_data2(ssl, r);
+
+ /*
+ * Log information about incoming HTTPS requests
+ */
+ if (r->server->loglevel >= APLOG_INFO && ap_is_initial_req(r)) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ "%s HTTPS request received for child %ld (server %s)",
+ (r->connection->keepalives <= 0 ?
+ "Initial (No.1)" :
+ apr_psprintf(r->pool, "Subsequent (No.%d)",
+ r->connection->keepalives+1)),
+ r->connection->id,
+ ssl_util_vhostid(r->pool, r->server));
+ }
+
+ /* SetEnvIf ssl-*-shutdown flags can only be per-server,
+ * so they won't change across keepalive requests
+ */
+ if (sslconn->shutdown_type == SSL_SHUTDOWN_TYPE_UNSET) {
+ ssl_configure_env(r, sslconn);
+ }
+
+ return DECLINED;
+}
+
+/*
+ * Move SetEnvIf information from request_rec to conn_rec/BUFF
+ * to allow the close connection handler to use them.
+ */
+
+static void ssl_configure_env(request_rec *r, SSLConnRec *sslconn)
+{
+ int i;
+ const apr_array_header_t *arr = apr_table_elts(r->subprocess_env);
+ const apr_table_entry_t *elts = (const apr_table_entry_t *)arr->elts;
+
+ sslconn->shutdown_type = SSL_SHUTDOWN_TYPE_STANDARD;
+
+ for (i = 0; i < arr->nelts; i++) {
+ const char *key = elts[i].key;
+
+ switch (*key) {
+ case 's':
+ /* We are case-sensitive here,
+ * and do not check for the "-shutdown" suffix since these are the only
+ * SetEnvIf "flags" we support.
+ */
+ if (!strncmp(key+1, "sl-", 3)) {
+ key += 4;
+ if (!strncmp(key, "unclean", 7)) {
+ sslconn->shutdown_type = SSL_SHUTDOWN_TYPE_UNCLEAN;
+ }
+ else if (!strncmp(key, "accurate", 8)) {
+ sslconn->shutdown_type = SSL_SHUTDOWN_TYPE_ACCURATE;
+ }
+ return; /* should only ever be one ssl-*-shutdown */
+ }
+ break;
+ }
+ }
+}
+
+/*
+ * Access Handler
+ */
+int ssl_hook_Access(request_rec *r)
+{
+ SSLDirConfigRec *dc = myDirConfig(r);
+ SSLSrvConfigRec *sc = mySrvConfig(r->server);
+ SSLConnRec *sslconn = myConnConfig(r->connection);
+ SSL *ssl = sslconn ? sslconn->ssl : NULL;
+ SSL_CTX *ctx = NULL;
+ apr_array_header_t *requires;
+ ssl_require_t *ssl_requires;
+ char *cp;
+ int ok, i;
+ BOOL renegotiate = FALSE, renegotiate_quick = FALSE;
+ X509 *cert;
+ X509 *peercert;
+ X509_STORE *cert_store = NULL;
+ X509_STORE_CTX cert_store_ctx;
+ STACK_OF(SSL_CIPHER) *cipher_list_old = NULL, *cipher_list = NULL;
+ SSL_CIPHER *cipher = NULL;
+ int depth, verify_old, verify, n;
+
+ if (ssl) {
+ ctx = SSL_get_SSL_CTX(ssl);
+ }
+
+ /*
+ * Support for SSLRequireSSL directive
+ */
+ if (dc->bSSLRequired && !ssl) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "access to %s failed, reason: %s",
+ r->filename, "SSL connection required");
+
+ /* remember forbidden access for strict require option */
+ apr_table_setn(r->notes, "ssl-access-forbidden", "1");
+
+ return HTTP_FORBIDDEN;
+ }
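+
+ /* This check backs configurations such as (illustrative, hypothetical
+ * paths):
+ *
+ * <Directory /var/www/secure>
+ * SSLRequireSSL
+ * SSLOptions +StrictRequire
+ * </Directory>
+ *
+ * The "ssl-access-forbidden" note set above is what
+ * SSLOptions +StrictRequire re-checks later in ssl_hook_UserCheck().
+ */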
+
+ /*
+ * Check to see whether SSL is in use; if it's not, then no
+ * further access control checks are relevant. (the test for
+ * sc->enabled is probably strictly unnecessary)
+ */
+ if (!sc->enabled || !ssl) {
+ return DECLINED;
+ }
+
+ /*
+ * Support for per-directory reconfigured SSL connection parameters.
+ *
+ * This is implemented by forcing an SSL renegotiation with the
+ * reconfigured parameter suite. But Apache's internal API processing
+ * makes our life very hard here, because when internal sub-requests occur
+ * we nevertheless should avoid multiple unnecessary SSL handshakes (they
+ * require extra network I/O and especially time to perform).
+ *
+ * But the optimization for filtering out the unnecessary handshakes isn't
+ * obvious and trivial. Especially because while Apache is in its
+ * sub-request processing the client could force additional handshakes,
+ * too. And these take place perhaps without our notice. So the only
+ * possibility is to explicitly _ask_ OpenSSL whether the renegotiation
+ * has to be performed or not. It has to be performed when some parameters
+ * which were previously known (by us) are not those we've now
+ * reconfigured (as known by OpenSSL) or (in optimized way) at least when
+ * the reconfigured parameter suite is stronger (more restrictions) than
+ * the currently active one.
+ */
+
+ /*
+ * Override of SSLCipherSuite
+ *
+ * We provide two options here:
+ *
+ * o The paranoid and default approach where we force a renegotiation when
+ * the cipher suite changed in _any_ way (which is straight-forward but
+ * often forces renegotiations too often and is perhaps not what the
+ * user actually wanted).
+ *
+ * o The optimized and still secure way where we force a renegotiation
+ * only if the currently active cipher is no longer contained in the
+ * reconfigured/new cipher suite. Any other changes are not important
+ * because it's the server's choice to select a cipher from the ones the
+ * client supports. So as long as the current cipher is still in the new
+ * cipher suite we're happy, because we can assume we would have
+ * selected it again even when other (better) ciphers exist now in the
+ * new cipher suite. This approach is fine because the user explicitly
+ * has to enable this via ``SSLOptions +OptRenegotiate''. So we do no
+ * implicit optimizations.
+ */
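+ /* An illustrative (hypothetical) per-directory override that takes the
+ * optimized path described above:
+ *
+ * <Location /strong-crypto>
+ * SSLCipherSuite HIGH
+ * SSLOptions +OptRenegotiate
+ * </Location>
+ *
+ * Without +OptRenegotiate any change to the cipher suite forces a
+ * renegotiation; with it, renegotiation is forced only when the
+ * currently active cipher is no longer in the reconfigured suite.
+ */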
+ if (dc->szCipherSuite) {
+ /* remember old state */
+
+ if (dc->nOptions & SSL_OPT_OPTRENEGOTIATE) {
+ cipher = SSL_get_current_cipher(ssl);
+ }
+ else {
+ cipher_list_old = (STACK_OF(SSL_CIPHER) *)SSL_get_ciphers(ssl);
+
+ if (cipher_list_old) {
+ cipher_list_old = sk_SSL_CIPHER_dup(cipher_list_old);
+ }
+ }
+
+ /* configure new state */
+ if (!modssl_set_cipher_list(ssl, dc->szCipherSuite)) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0,
+ r->server,
+ "Unable to reconfigure (per-directory) "
+ "permitted SSL ciphers");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, r->server);
+
+ if (cipher_list_old) {
+ sk_SSL_CIPHER_free(cipher_list_old);
+ }
+
+ return HTTP_FORBIDDEN;
+ }
+
+ /* determine whether a renegotiation has to be forced */
+ cipher_list = (STACK_OF(SSL_CIPHER) *)SSL_get_ciphers(ssl);
+
+ if (dc->nOptions & SSL_OPT_OPTRENEGOTIATE) {
+ /* optimized way */
+ if ((!cipher && cipher_list) ||
+ (cipher && !cipher_list))
+ {
+ renegotiate = TRUE;
+ }
+ else if (cipher && cipher_list &&
+ (sk_SSL_CIPHER_find(cipher_list, cipher) < 0))
+ {
+ renegotiate = TRUE;
+ }
+ }
+ else {
+ /* paranoid way */
+ if ((!cipher_list_old && cipher_list) ||
+ (cipher_list_old && !cipher_list))
+ {
+ renegotiate = TRUE;
+ }
+ else if (cipher_list_old && cipher_list) {
+ for (n = 0;
+ !renegotiate && (n < sk_SSL_CIPHER_num(cipher_list));
+ n++)
+ {
+ SSL_CIPHER *value = sk_SSL_CIPHER_value(cipher_list, n);
+
+ if (sk_SSL_CIPHER_find(cipher_list_old, value) < 0) {
+ renegotiate = TRUE;
+ }
+ }
+
+ for (n = 0;
+ !renegotiate && (n < sk_SSL_CIPHER_num(cipher_list_old));
+ n++)
+ {
+ SSL_CIPHER *value = sk_SSL_CIPHER_value(cipher_list_old, n);
+
+ if (sk_SSL_CIPHER_find(cipher_list, value) < 0) {
+ renegotiate = TRUE;
+ }
+ }
+ }
+ }
+
+ /* cleanup */
+ if (cipher_list_old) {
+ sk_SSL_CIPHER_free(cipher_list_old);
+ }
+
+ /* tracing */
+ if (renegotiate) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "Reconfigured cipher suite will force renegotiation");
+ }
+ }
+
+ /*
+ * override of SSLVerifyDepth
+ *
+ * The depth checks are handled by us manually inside the verify callback
+ * function and not by OpenSSL internally (and our function is aware of
+ * both the per-server and per-directory contexts). So we cannot ask
+ * OpenSSL about the current verify depth. Instead we remember it in our
+ * ap_ctx attached to the SSL* of OpenSSL. We have to force the
+ * renegotiation if the reconfigured/new verify depth is less than the
+ * currently active/remembered verify depth (because this means more
+ * restriction on the certificate chain).
+ */
+ if (dc->nVerifyDepth != UNSET) {
+ /* XXX: doesn't look like sslconn->verify_depth is actually used */
+ if (!(n = sslconn->verify_depth)) {
+ sslconn->verify_depth = n = sc->server->auth.verify_depth;
+ }
+
+ /* determine whether a renegotiation has to be forced */
+ if (dc->nVerifyDepth < n) {
+ renegotiate = TRUE;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "Reduced client verification depth will force "
+ "renegotiation");
+ }
+ }
+
+ /*
+ * override of SSLVerifyClient
+ *
+ * We force a renegotiation if the reconfigured/new verify type is
+ * stronger than the currently active verify type.
+ *
+ * The order is: none << optional_no_ca << optional << require
+ *
+ * Additionally the following optimization is possible here: When the
+ * currently active verify type is "none" but a client certificate is
+ * already known/present, it's enough to manually force a client
+ * verification but at least skip the I/O-intensive renegotiation
+ * handshake.
+ */
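+ /* Illustrative (hypothetical) configuration exercising this override:
+ *
+ * <Location /secure/area>
+ * SSLVerifyClient require
+ * SSLVerifyDepth 2
+ * </Location>
+ *
+ * Given the ordering above (none << optional_no_ca << optional <<
+ * require), stepping up from a server-wide "none" to a per-directory
+ * "require" forces the renegotiation handled below.
+ */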
+ if (dc->nVerifyClient != SSL_CVERIFY_UNSET) {
+ /* remember old state */
+ verify_old = SSL_get_verify_mode(ssl);
+ /* configure new state */
+ verify = SSL_VERIFY_NONE;
+
+ if (dc->nVerifyClient == SSL_CVERIFY_REQUIRE) {
+ verify |= SSL_VERIFY_PEER_STRICT;
+ }
+
+ if ((dc->nVerifyClient == SSL_CVERIFY_OPTIONAL) ||
+ (dc->nVerifyClient == SSL_CVERIFY_OPTIONAL_NO_CA))
+ {
+ verify |= SSL_VERIFY_PEER;
+ }
+
+ modssl_set_verify(ssl, verify, ssl_callback_SSLVerify);
+ SSL_set_verify_result(ssl, X509_V_OK);
+
+ /* determine whether we've to force a renegotiation */
+ if (!renegotiate && verify != verify_old) {
+ if (((verify_old == SSL_VERIFY_NONE) &&
+ (verify != SSL_VERIFY_NONE)) ||
+
+ (!(verify_old & SSL_VERIFY_PEER) &&
+ (verify & SSL_VERIFY_PEER)) ||
+
+ (!(verify_old & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) &&
+ (verify & SSL_VERIFY_FAIL_IF_NO_PEER_CERT)))
+ {
+ renegotiate = TRUE;
+ /* optimization */
+
+ if ((dc->nOptions & SSL_OPT_OPTRENEGOTIATE) &&
+ (verify_old == SSL_VERIFY_NONE) &&
+ ((peercert = SSL_get_peer_certificate(ssl)) != NULL))
+ {
+ renegotiate_quick = TRUE;
+ X509_free(peercert);
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
+ r->server,
+ "Changed client verification type will force "
+ "%srenegotiation",
+ renegotiate_quick ? "quick " : "");
+ }
+ }
+ }
+
+ /*
+ * override SSLCACertificateFile & SSLCACertificatePath
+ * This is only enabled if the SSL_set_cert_store() function
+ * is available in the ssl library. the 1.x based mod_ssl
+ * used SSL_CTX_set_cert_store which is not thread safe.
+ */
+
+#ifdef HAVE_SSL_SET_CERT_STORE
+ /*
+ * check if per-dir and per-server config field are not the same.
+ * if f is defined in per-dir and not defined in per-server
+ * or f is defined in both but not the equal ...
+ */
+#define MODSSL_CFG_NE(f) \
+ (dc->f && (!sc->f || (sc->f && strNE(dc->f, sc->f))))
+
+#define MODSSL_CFG_CA(f) \
+ (dc->f ? dc->f : sc->f)
+
+ if (MODSSL_CFG_NE(szCACertificateFile) ||
+ MODSSL_CFG_NE(szCACertificatePath))
+ {
+ STACK_OF(X509_NAME) *ca_list;
+ const char *ca_file = MODSSL_CFG_CA(szCACertificateFile);
+ const char *ca_path = MODSSL_CFG_CA(szCACertificatePath);
+
+ cert_store = X509_STORE_new();
+
+ if (!X509_STORE_load_locations(cert_store, ca_file, ca_path)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "Unable to reconfigure verify locations "
+ "for client authentication");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, r->server);
+
+ X509_STORE_free(cert_store);
+
+ return HTTP_FORBIDDEN;
+ }
+
+ /* SSL_free will free cert_store */
+ SSL_set_cert_store(ssl, cert_store);
+
+ if (!(ca_list = ssl_init_FindCAList(r->server, r->pool,
+ ca_file, ca_path)))
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "Unable to determine list of available "
+ "CA certificates for client authentication");
+
+ return HTTP_FORBIDDEN;
+ }
+
+ SSL_set_client_CA_list(ssl, ca_list);
+ renegotiate = TRUE;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "Changed client verification locations will force "
+ "renegotiation");
+ }
+#endif /* HAVE_SSL_SET_CERT_STORE */
+
+ /* If a renegotiation is now required for this location, and the
+ * request includes a message body (and the client has not
+ * requested a "100 Continue" response), then the client will be
+ * streaming the request body over the wire already. In that
+ * case, it is not possible to stop and perform a new SSL
+ * handshake immediately; once the SSL library moves to the
+ * "accept" state, it will reject the SSL packets which the client
+ * is sending for the request body.
+ *
+ * To allow authentication to complete in this auth hook, the
+ * solution used here is to fill a (bounded) buffer with the
+ * request body, and then to reinject that request body later.
+ */
+ if (renegotiate && !renegotiate_quick
+ && (apr_table_get(r->headers_in, "transfer-encoding")
+ || (apr_table_get(r->headers_in, "content-length")
+ && strcmp(apr_table_get(r->headers_in, "content-length"), "0")))
+ && !r->expecting_100) {
+ int rv;
+
+ /* Fill the I/O buffer with the request body if possible. */
+ rv = ssl_io_buffer_fill(r);
+
+ if (rv) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "could not buffer message body to allow "
+ "SSL renegotiation to proceed");
+ return rv;
+ }
+ }
+
+ /*
+ * now do the renegotiation if anything was actually reconfigured
+ */
+ if (renegotiate) {
+ /*
+ * Now we force the SSL renegotiation by sending the Hello Request
+ * message to the client. Here we have to do a workaround: OpenSSL
+ * returns immediately after sending the Hello Request (presumably
+ * because the SSL/TLS protocol does not require the client to reply
+ * to a Hello Request). But because we insist
+ * on a reply (anything else is an error for us) we have to go to the
+ * ACCEPT state manually. Using SSL_set_accept_state() doesn't work
+ * here because it resets too much of the connection. So we set the
+ * state explicitly and continue the handshake manually.
+ */
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ "Requesting connection re-negotiation");
+
+ if (renegotiate_quick) {
+ STACK_OF(X509) *cert_stack;
+
+ /* perform just a manual re-verification of the peer */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "Performing quick renegotiation: "
+ "just re-verifying the peer");
+
+ cert_stack = (STACK_OF(X509) *)SSL_get_peer_cert_chain(ssl);
+
+ cert = SSL_get_peer_certificate(ssl);
+
+ if (!cert_stack && cert) {
+ /* client cert is in the session cache, but there is
+ * no chain, since ssl3_get_client_certificate()
+ * sk_X509_shift-ed the peer cert out of the chain.
+ * we put it back here for the purpose of quick_renegotiation.
+ */
+ cert_stack = sk_new_null();
+ sk_X509_push(cert_stack, MODSSL_PCHAR_CAST cert);
+ }
+
+ if (!cert_stack || (sk_X509_num(cert_stack) == 0)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "Cannot find peer certificate chain");
+
+ return HTTP_FORBIDDEN;
+ }
+
+ if (!(cert_store ||
+ (cert_store = SSL_CTX_get_cert_store(ctx))))
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "Cannot find certificate storage");
+
+ return HTTP_FORBIDDEN;
+ }
+
+ if (!cert) {
+ cert = sk_X509_value(cert_stack, 0);
+ }
+
+ X509_STORE_CTX_init(&cert_store_ctx, cert_store, cert, cert_stack);
+ depth = SSL_get_verify_depth(ssl);
+
+ if (depth >= 0) {
+ X509_STORE_CTX_set_depth(&cert_store_ctx, depth);
+ }
+
+ X509_STORE_CTX_set_ex_data(&cert_store_ctx,
+ SSL_get_ex_data_X509_STORE_CTX_idx(),
+ (char *)ssl);
+
+ if (!modssl_X509_verify_cert(&cert_store_ctx)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "Re-negotiation verification step failed");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, r->server);
+ }
+
+ SSL_set_verify_result(ssl, cert_store_ctx.error);
+ X509_STORE_CTX_cleanup(&cert_store_ctx);
+
+ if (cert_stack != SSL_get_peer_cert_chain(ssl)) {
+ /* we created this ourselves, so free it */
+ sk_X509_pop_free(cert_stack, X509_free);
+ }
+ }
+ else {
+ request_rec *id = r->main ? r->main : r;
+
+ /* Additional mitigation for CVE-2009-3555: At this point,
+ * before renegotiating, an (entire) request has been read
+ * from the connection. An attacker may have sent further
+ * data to "prefix" any subsequent request by the victim's
+ * client after the renegotiation; this data may already
+ * have been read and buffered. Forcing a connection
+ * closure after the response ensures such data will be
+ * discarded. Legitimately pipelined HTTP requests will be
+ * retried anyway with this approach. */
+ if (has_buffered_data(r)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "insecure SSL re-negotiation required, but "
+ "a pipelined request is present; keepalive "
+ "disabled");
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+
+ /* Perform a full renegotiation. */
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "Performing full renegotiation: complete handshake "
+ "protocol (%s support secure renegotiation)",
+#if defined(SSL_get_secure_renegotiation_support)
+ SSL_get_secure_renegotiation_support(ssl) ?
+ "client does" : "client does not"
+#else
+ "server does not"
+#endif
+ );
+
+ SSL_set_session_id_context(ssl,
+ (unsigned char *)&id,
+ sizeof(id));
+
+ /* Toggle the renegotiation state to allow the new
+ * handshake to proceed. */
+ sslconn->reneg_state = RENEG_ALLOW;
+
+ SSL_renegotiate(ssl);
+ SSL_do_handshake(ssl);
+
+ if (SSL_get_state(ssl) != SSL_ST_OK) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "Re-negotiation request failed");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, r->server);
+
+ r->connection->aborted = 1;
+ return HTTP_FORBIDDEN;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ "Awaiting re-negotiation handshake");
+
+ SSL_set_state(ssl, SSL_ST_ACCEPT);
+ SSL_do_handshake(ssl);
+
+ sslconn->reneg_state = RENEG_REJECT;
+
+ if (SSL_get_state(ssl) != SSL_ST_OK) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "Re-negotiation handshake failed: "
+ "Not accepted by client!?");
+
+ r->connection->aborted = 1;
+ return HTTP_FORBIDDEN;
+ }
+ }
+
+ /*
+ * Remember the peer certificate's DN
+ */
+ if ((cert = SSL_get_peer_certificate(ssl))) {
+ if (sslconn->client_cert) {
+ X509_free(sslconn->client_cert);
+ }
+ sslconn->client_cert = cert;
+ sslconn->client_dn = NULL;
+ }
+
+ /*
+ * Finally check for acceptable renegotiation results
+ */
+ if (dc->nVerifyClient != SSL_CVERIFY_NONE) {
+ BOOL do_verify = (dc->nVerifyClient == SSL_CVERIFY_REQUIRE);
+
+ if (do_verify && (SSL_get_verify_result(ssl) != X509_V_OK)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "Re-negotiation handshake failed: "
+ "Client verification failed");
+
+ return HTTP_FORBIDDEN;
+ }
+
+ if (do_verify) {
+ if ((peercert = SSL_get_peer_certificate(ssl)) == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "Re-negotiation handshake failed: "
+ "Client certificate missing");
+
+ return HTTP_FORBIDDEN;
+ }
+
+ X509_free(peercert);
+ }
+ }
+
+ /*
+ * Also check that SSLCipherSuite has been enforced as expected.
+ */
+ if (cipher_list) {
+ cipher = SSL_get_current_cipher(ssl);
+ if (sk_SSL_CIPHER_find(cipher_list, cipher) < 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "SSL cipher suite not renegotiated: "
+ "access to %s denied using cipher %s",
+ r->filename,
+ SSL_CIPHER_get_name(cipher));
+ return HTTP_FORBIDDEN;
+ }
+ }
+ }
+
+ /* If we're trying to have the user name set from a client
+ * certificate then we need to set it here. This should be safe as
+ * the user name probably isn't important from an auth checking point
+ * of view as the certificate supplied acts in that capacity.
+ * However, if FakeAuth is being used then this isn't the case so
+ * we need to postpone setting the username until later.
+ */
+ if ((dc->nOptions & SSL_OPT_FAKEBASICAUTH) == 0 && dc->szUserName) {
+ char *val = ssl_var_lookup(r->pool, r->server, r->connection,
+ r, (char *)dc->szUserName);
+ if (val && val[0])
+ r->user = val;
+ }
+
+ /*
+ * Check SSLRequire boolean expressions
+ */
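+ /* A typical SSLRequire expression evaluated by this loop might look
+ * like (illustrative; the organization name is hypothetical):
+ *
+ * SSLRequire %{SSL_CIPHER_USEKEYSIZE} >= 128 and \
+ * %{SSL_CLIENT_S_DN_O} eq "Example Corp"
+ *
+ * ssl_expr_exec() returns 1 on match, 0 on mismatch and a negative
+ * value on evaluation errors; both failure cases are mapped to
+ * HTTP_FORBIDDEN below.
+ */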
+ requires = dc->aRequirement;
+ ssl_requires = (ssl_require_t *)requires->elts;
+
+ for (i = 0; i < requires->nelts; i++) {
+ ssl_require_t *req = &ssl_requires[i];
+ ok = ssl_expr_exec(r, req->mpExpr);
+
+ if (ok < 0) {
+ cp = apr_psprintf(r->pool,
+ "Failed to execute "
+ "SSL requirement expression: %s",
+ ssl_expr_get_error());
+
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "access to %s failed, reason: %s",
+ r->filename, cp);
+
+ /* remember forbidden access for strict require option */
+ apr_table_setn(r->notes, "ssl-access-forbidden", "1");
+
+ return HTTP_FORBIDDEN;
+ }
+
+ if (ok != 1) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ "Access to %s denied for %s "
+ "(requirement expression not fulfilled)",
+ r->filename, r->connection->remote_ip);
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ "Failed expression: %s", req->cpExpr);
+
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "access to %s failed, reason: %s",
+ r->filename,
+ "SSL requirement expression not fulfilled "
+ "(see SSL logfile for more details)");
+
+ /* remember forbidden access for strict require option */
+ apr_table_setn(r->notes, "ssl-access-forbidden", "1");
+
+ return HTTP_FORBIDDEN;
+ }
+ }
+
+ /*
+ * Else access is granted from our point of view (except vendor
+ * handlers override). But we have to return DECLINED here instead
+ * of OK, because mod_auth and other modules still might want to
+ * deny access.
+ */
+
+ return DECLINED;
+}
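+
+/*
+ * Illustrative httpd.conf fragment (the Location path, organization name
+ * and key size below are example values, not taken from this source)
+ * showing the kind of per-directory settings whose effects the access
+ * hook above enforces:
+ *
+ *   <Location /secure/area>
+ *       SSLVerifyClient require
+ *       SSLVerifyDepth  2
+ *       SSLRequire %{SSL_CLIENT_S_DN_O}      eq "Snake Oil, Ltd." and \
+ *                  %{SSL_CIPHER_USEKEYSIZE} >= 128
+ *   </Location>
+ */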
+
+/*
+ * Authentication Handler:
+ * Fake a Basic authentication from the X509 client certificate.
+ *
+ * This must be run fairly early on to prevent a real authentication from
+ * occurring; in particular, it must be run before anything else that
+ * authenticates a user. This means that the Module statement for this
+ * module should be LAST in the Configuration file.
+ */
+int ssl_hook_UserCheck(request_rec *r)
+{
+ SSLConnRec *sslconn = myConnConfig(r->connection);
+ SSLSrvConfigRec *sc = mySrvConfig(r->server);
+ SSLDirConfigRec *dc = myDirConfig(r);
+ char *clientdn;
+ const char *auth_line, *username, *password;
+
+ /*
+ * Additionally forbid access (again)
+ * when strict require option is used.
+ */
+ if ((dc->nOptions & SSL_OPT_STRICTREQUIRE) &&
+ (apr_table_get(r->notes, "ssl-access-forbidden")))
+ {
+ return HTTP_FORBIDDEN;
+ }
+
+ /*
+ * We decline when we are in a subrequest. The Authorization header
+ * would already be present if it was added in the main request.
+ */
+ if (!ap_is_initial_req(r)) {
+ return DECLINED;
+ }
+
+ /*
+ * Make sure the user is not able to fake the client certificate
+ * based authentication by just entering an X.509 Subject DN
+ * ("/XX=YYY/XX=YYY/..") as the username and "password" as the
+ * password.
+ */
+ if ((auth_line = apr_table_get(r->headers_in, "Authorization"))) {
+ if (strcEQ(ap_getword(r->pool, &auth_line, ' '), "Basic")) {
+ while ((*auth_line == ' ') || (*auth_line == '\t')) {
+ auth_line++;
+ }
+
+ auth_line = ap_pbase64decode(r->pool, auth_line);
+ username = ap_getword_nulls(r->pool, &auth_line, ':');
+ password = auth_line;
+
+ if ((username[0] == '/') && strEQ(password, "password")) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Encountered FakeBasicAuth spoof: %s", username);
+ return HTTP_FORBIDDEN;
+ }
+ }
+ }
+
+ /*
+ * We decline operation in various situations...
+ * - SSLOptions +FakeBasicAuth not configured
+ * - r->user already authenticated
+ * - ssl not enabled
+ * - client did not present a certificate
+ */
+ if (!(sc->enabled && sslconn && sslconn->ssl && sslconn->client_cert) ||
+ !(dc->nOptions & SSL_OPT_FAKEBASICAUTH) || r->user)
+ {
+ return DECLINED;
+ }
+
+ if (!sslconn->client_dn) {
+ X509_NAME *name = X509_get_subject_name(sslconn->client_cert);
+ char *cp = X509_NAME_oneline(name, NULL, 0);
+ sslconn->client_dn = apr_pstrdup(r->connection->pool, cp);
+ modssl_free(cp);
+ }
+
+ clientdn = (char *)sslconn->client_dn;
+
+ /*
+ * Fake a password - which one would be immaterial, as, it seems, an empty
+ * password in the users file would match ALL incoming passwords, if only
+ * we were using the standard crypt library routine. Unfortunately, OpenSSL
+ * "fixes" a "bug" in crypt and thus prevents blank passwords from
+ * working. (IMHO what they really fix is a bug in the users of the code
+ * - failing to program correctly for shadow passwords). We need,
+ * therefore, to provide a password. This password can be matched by
+ * adding the string "xxj31ZMTZzkVA" as the password in the user file.
+ * This is just the crypted variant of the word "password" ;-)
+ */
+ auth_line = apr_pstrcat(r->pool, "Basic ",
+ ap_pbase64encode(r->pool,
+ apr_pstrcat(r->pool, clientdn,
+ ":password", NULL)),
+ NULL);
+ apr_table_set(r->headers_in, "Authorization", auth_line);
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+ "Faking HTTP Basic Auth header: \"Authorization: %s\"",
+ auth_line);
+
+ return DECLINED;
+}
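+
+/*
+ * Illustration of the scheme above (the DN is an example value only):
+ * with "SSLOptions +FakeBasicAuth" the faked Authorization header is
+ * checked by mod_auth against an ordinary AuthUserFile whose entries
+ * pair the client certificate's subject DN with the fixed crypt()ed
+ * password mentioned above, e.g.
+ *
+ *   /C=XY/O=Example Inc/CN=Jane Doe:xxj31ZMTZzkVA
+ */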
+
+/* authorization phase */
+int ssl_hook_Auth(request_rec *r)
+{
+ SSLDirConfigRec *dc = myDirConfig(r);
+
+ /*
+ * Additionally forbid access (again)
+ * when strict require option is used.
+ */
+ if ((dc->nOptions & SSL_OPT_STRICTREQUIRE) &&
+ (apr_table_get(r->notes, "ssl-access-forbidden")))
+ {
+ return HTTP_FORBIDDEN;
+ }
+
+ return DECLINED;
+}
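+
+/*
+ * The "ssl-access-forbidden" note consulted above only has an effect when
+ * "SSLOptions +StrictRequire" is set; a minimal illustrative fragment
+ * (the directory path is an example value):
+ *
+ *   <Directory /var/www/strict>
+ *       SSLOptions +StrictRequire
+ *       SSLRequire %{SSL_CLIENT_VERIFY} eq "SUCCESS"
+ *       Satisfy any
+ *   </Directory>
+ *
+ * With +StrictRequire a failed SSLRequire stays forbidden even though
+ * "Satisfy any" would otherwise let other auth modules grant access.
+ */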
+
+/*
+ * Fixup Handler
+ */
+
+static const char *ssl_hook_Fixup_vars[] = {
+ "SSL_VERSION_INTERFACE",
+ "SSL_VERSION_LIBRARY",
+ "SSL_PROTOCOL",
+ "SSL_SECURE_RENEG",
+ "SSL_CIPHER",
+ "SSL_CIPHER_EXPORT",
+ "SSL_CIPHER_USEKEYSIZE",
+ "SSL_CIPHER_ALGKEYSIZE",
+ "SSL_CLIENT_VERIFY",
+ "SSL_CLIENT_M_VERSION",
+ "SSL_CLIENT_M_SERIAL",
+ "SSL_CLIENT_V_START",
+ "SSL_CLIENT_V_END",
+ "SSL_CLIENT_S_DN",
+ "SSL_CLIENT_S_DN_C",
+ "SSL_CLIENT_S_DN_ST",
+ "SSL_CLIENT_S_DN_L",
+ "SSL_CLIENT_S_DN_O",
+ "SSL_CLIENT_S_DN_OU",
+ "SSL_CLIENT_S_DN_CN",
+ "SSL_CLIENT_S_DN_T",
+ "SSL_CLIENT_S_DN_I",
+ "SSL_CLIENT_S_DN_G",
+ "SSL_CLIENT_S_DN_S",
+ "SSL_CLIENT_S_DN_D",
+ "SSL_CLIENT_S_DN_UID",
+ "SSL_CLIENT_S_DN_Email",
+ "SSL_CLIENT_I_DN",
+ "SSL_CLIENT_I_DN_C",
+ "SSL_CLIENT_I_DN_ST",
+ "SSL_CLIENT_I_DN_L",
+ "SSL_CLIENT_I_DN_O",
+ "SSL_CLIENT_I_DN_OU",
+ "SSL_CLIENT_I_DN_CN",
+ "SSL_CLIENT_I_DN_T",
+ "SSL_CLIENT_I_DN_I",
+ "SSL_CLIENT_I_DN_G",
+ "SSL_CLIENT_I_DN_S",
+ "SSL_CLIENT_I_DN_D",
+ "SSL_CLIENT_I_DN_UID",
+ "SSL_CLIENT_I_DN_Email",
+ "SSL_CLIENT_A_KEY",
+ "SSL_CLIENT_A_SIG",
+ "SSL_SERVER_M_VERSION",
+ "SSL_SERVER_M_SERIAL",
+ "SSL_SERVER_V_START",
+ "SSL_SERVER_V_END",
+ "SSL_SERVER_S_DN",
+ "SSL_SERVER_S_DN_C",
+ "SSL_SERVER_S_DN_ST",
+ "SSL_SERVER_S_DN_L",
+ "SSL_SERVER_S_DN_O",
+ "SSL_SERVER_S_DN_OU",
+ "SSL_SERVER_S_DN_CN",
+ "SSL_SERVER_S_DN_T",
+ "SSL_SERVER_S_DN_I",
+ "SSL_SERVER_S_DN_G",
+ "SSL_SERVER_S_DN_S",
+ "SSL_SERVER_S_DN_D",
+ "SSL_SERVER_S_DN_UID",
+ "SSL_SERVER_S_DN_Email",
+ "SSL_SERVER_I_DN",
+ "SSL_SERVER_I_DN_C",
+ "SSL_SERVER_I_DN_ST",
+ "SSL_SERVER_I_DN_L",
+ "SSL_SERVER_I_DN_O",
+ "SSL_SERVER_I_DN_OU",
+ "SSL_SERVER_I_DN_CN",
+ "SSL_SERVER_I_DN_T",
+ "SSL_SERVER_I_DN_I",
+ "SSL_SERVER_I_DN_G",
+ "SSL_SERVER_I_DN_S",
+ "SSL_SERVER_I_DN_D",
+ "SSL_SERVER_I_DN_UID",
+ "SSL_SERVER_I_DN_Email",
+ "SSL_SERVER_A_KEY",
+ "SSL_SERVER_A_SIG",
+ "SSL_SESSION_ID",
+ NULL
+};
+
+int ssl_hook_Fixup(request_rec *r)
+{
+ SSLConnRec *sslconn = myConnConfig(r->connection);
+ SSLSrvConfigRec *sc = mySrvConfig(r->server);
+ SSLDirConfigRec *dc = myDirConfig(r);
+ apr_table_t *env = r->subprocess_env;
+ char *var, *val = "";
+ STACK_OF(X509) *peer_certs;
+ SSL *ssl;
+ int i;
+
+ /*
+ * Check to see if SSL is on
+ */
+ if (!(sc->enabled && sslconn && (ssl = sslconn->ssl))) {
+ return DECLINED;
+ }
+
+ /*
+ * Annotate the SSI/CGI environment with standard SSL information
+ */
+ /* the always present HTTPS (=HTTP over SSL) flag! */
+ apr_table_setn(env, "HTTPS", "on");
+
+ /* standard SSL environment variables */
+ if (dc->nOptions & SSL_OPT_STDENVVARS) {
+ for (i = 0; ssl_hook_Fixup_vars[i]; i++) {
+ var = (char *)ssl_hook_Fixup_vars[i];
+ val = ssl_var_lookup(r->pool, r->server, r->connection, r, var);
+ if (!strIsEmpty(val)) {
+ apr_table_setn(env, var, val);
+ }
+ }
+ }
+
+ /*
+ * On-demand bloat up the SSI/CGI environment with certificate data
+ */
+ if (dc->nOptions & SSL_OPT_EXPORTCERTDATA) {
+ val = ssl_var_lookup(r->pool, r->server, r->connection,
+ r, "SSL_SERVER_CERT");
+
+ apr_table_setn(env, "SSL_SERVER_CERT", val);
+
+ val = ssl_var_lookup(r->pool, r->server, r->connection,
+ r, "SSL_CLIENT_CERT");
+
+ apr_table_setn(env, "SSL_CLIENT_CERT", val);
+
+ if ((peer_certs = (STACK_OF(X509) *)SSL_get_peer_cert_chain(ssl))) {
+ for (i = 0; i < sk_X509_num(peer_certs); i++) {
+ var = apr_psprintf(r->pool, "SSL_CLIENT_CERT_CHAIN_%d", i);
+ val = ssl_var_lookup(r->pool, r->server, r->connection,
+ r, var);
+ if (val) {
+ apr_table_setn(env, var, val);
+ }
+ }
+ }
+ }
+
+
+#ifdef SSL_get_secure_renegotiation_support
+ apr_table_setn(r->notes, "ssl-secure-reneg",
+ SSL_get_secure_renegotiation_support(ssl) ? "1" : "0");
+#endif
+
+ return DECLINED;
+}
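+
+/*
+ * Example use of the fixup above (the directory path and the SSI line are
+ * illustrative): with "SSLOptions +StdEnvVars" the variables listed in
+ * ssl_hook_Fixup_vars appear in the CGI/SSI environment, and
+ * "+ExportCertData" additionally exports the PEM-encoded certificates:
+ *
+ *   <Directory /var/www/cgi-bin>
+ *       SSLOptions +StdEnvVars +ExportCertData
+ *   </Directory>
+ *
+ * An SSI page could then emit the negotiated cipher via
+ *
+ *   <!--#echo var="SSL_CIPHER" -->
+ */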
+
+/* _________________________________________________________________
+**
+** OpenSSL Callback Functions
+** _________________________________________________________________
+*/
+
+/*
+ * Hand out temporary RSA private keys on demand
+ *
+ * The background of this as the TLSv1 standard explains it:
+ *
+ * | D.1. Temporary RSA keys
+ * |
+ * | US Export restrictions limit RSA keys used for encryption to 512
+ * | bits, but do not place any limit on lengths of RSA keys used for
+ * | signing operations. Certificates often need to be larger than 512
+ * | bits, since 512-bit RSA keys are not secure enough for high-value
+ * | transactions or for applications requiring long-term security. Some
+ * | certificates are also designated signing-only, in which case they
+ * | cannot be used for key exchange.
+ * |
+ * | When the public key in the certificate cannot be used for encryption,
+ * | the server signs a temporary RSA key, which is then exchanged. In
+ * | exportable applications, the temporary RSA key should be the maximum
+ * | allowable length (i.e., 512 bits). Because 512-bit RSA keys are
+ * | relatively insecure, they should be changed often. For typical
+ * | electronic commerce applications, it is suggested that keys be
+ * | changed daily or every 500 transactions, and more often if possible.
+ * | Note that while it is acceptable to use the same temporary key for
+ * | multiple transactions, it must be signed each time it is used.
+ * |
+ * | RSA key generation is a time-consuming process. In many cases, a
+ * | low-priority process can be assigned the task of key generation.
+ * | Whenever a new key is completed, the existing temporary key can be
+ * | replaced with the new one.
+ *
+ * XXX: based on the comment above, if thread support is enabled,
+ * we should spawn a low-priority thread to generate new keys
+ * on the fly.
+ *
+ * So we generated 512- and 1024-bit temporary keys on startup,
+ * which we now just hand out on demand...
+ */
+
+RSA *ssl_callback_TmpRSA(SSL *ssl, int export, int keylen)
+{
+ conn_rec *c = (conn_rec *)SSL_get_app_data(ssl);
+ SSLModConfigRec *mc = myModConfig(c->base_server);
+ int idx;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
+ "handing out temporary %d bit RSA key", keylen);
+
+ /* doesn't matter if export flag is on,
+ * we won't be asked for keylen > 512 in that case.
+ * if we are asked for a keylen > 1024, it is too expensive
+ * to generate on the fly.
+ * XXX: any reason not to generate 2048 bit keys at startup?
+ */
+
+ switch (keylen) {
+ case 512:
+ idx = SSL_TMP_KEY_RSA_512;
+ break;
+
+ case 1024:
+ default:
+ idx = SSL_TMP_KEY_RSA_1024;
+ }
+
+ return (RSA *)mc->pTmpKeys[idx];
+}
+
+/*
+ * Hand out the already generated DH parameters...
+ */
+DH *ssl_callback_TmpDH(SSL *ssl, int export, int keylen)
+{
+ conn_rec *c = (conn_rec *)SSL_get_app_data(ssl);
+ SSLModConfigRec *mc = myModConfig(c->base_server);
+ int idx;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
+ "handing out temporary %d bit DH key", keylen);
+
+ switch (keylen) {
+ case 512:
+ idx = SSL_TMP_KEY_DH_512;
+ break;
+
+ case 1024:
+ default:
+ idx = SSL_TMP_KEY_DH_1024;
+ }
+
+ return (DH *)mc->pTmpKeys[idx];
+}
+
+/*
+ * This OpenSSL callback function is called when OpenSSL
+ * does client authentication and verifies the certificate chain.
+ */
+int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx)
+{
+ /* Get Apache context back through OpenSSL context */
+ SSL *ssl = X509_STORE_CTX_get_ex_data(ctx,
+ SSL_get_ex_data_X509_STORE_CTX_idx());
+ conn_rec *conn = (conn_rec *)SSL_get_app_data(ssl);
+ server_rec *s = conn->base_server;
+ request_rec *r = (request_rec *)SSL_get_app_data2(ssl);
+
+ SSLSrvConfigRec *sc = mySrvConfig(s);
+ SSLDirConfigRec *dc = r ? myDirConfig(r) : NULL;
+ SSLConnRec *sslconn = myConnConfig(conn);
+ modssl_ctx_t *mctx = myCtxConfig(sslconn, sc);
+
+ /* Get verify ingredients */
+ int errnum = X509_STORE_CTX_get_error(ctx);
+ int errdepth = X509_STORE_CTX_get_error_depth(ctx);
+ int depth, verify;
+
+ /*
+ * Log verification information
+ */
+ if (s->loglevel >= APLOG_DEBUG) {
+ X509 *cert = X509_STORE_CTX_get_current_cert(ctx);
+ char *sname = X509_NAME_oneline(X509_get_subject_name(cert), NULL, 0);
+ char *iname = X509_NAME_oneline(X509_get_issuer_name(cert), NULL, 0);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "Certificate Verification: "
+ "depth: %d, subject: %s, issuer: %s",
+ errdepth,
+ sname ? sname : "-unknown-",
+ iname ? iname : "-unknown-");
+
+ if (sname) {
+ modssl_free(sname);
+ }
+
+ if (iname) {
+ modssl_free(iname);
+ }
+ }
+
+ /*
+ * Check for optionally acceptable non-verifiable issuer situation
+ */
+ if (dc && (dc->nVerifyClient != SSL_CVERIFY_UNSET)) {
+ verify = dc->nVerifyClient;
+ }
+ else {
+ verify = mctx->auth.verify_mode;
+ }
+
+ if (verify == SSL_CVERIFY_NONE) {
+ /*
+ * SSLProxyVerify is either not configured or set to "none".
+ * (this callback doesn't happen in the server context if SSLVerify
+ * is not configured or set to "none")
+ */
+ return TRUE;
+ }
+
+ if (ssl_verify_error_is_optional(errnum) &&
+ (verify == SSL_CVERIFY_OPTIONAL_NO_CA))
+ {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "Certificate Verification: Verifiable Issuer is "
+ "configured as optional, therefore we're accepting "
+ "the certificate");
+
+ sslconn->verify_info = "GENEROUS";
+ ok = TRUE;
+ }
+
+ /*
+ * Additionally perform CRL-based revocation checks
+ */
+ if (ok) {
+ if (!(ok = ssl_callback_SSLVerify_CRL(ok, ctx, conn))) {
+ errnum = X509_STORE_CTX_get_error(ctx);
+ }
+ }
+
+ /*
+ * If we already know it's not ok, log the real reason
+ */
+ if (!ok) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Certificate Verification: Error (%d): %s",
+ errnum, X509_verify_cert_error_string(errnum));
+
+ if (sslconn->client_cert) {
+ X509_free(sslconn->client_cert);
+ sslconn->client_cert = NULL;
+ }
+ sslconn->client_dn = NULL;
+ sslconn->verify_error = X509_verify_cert_error_string(errnum);
+ }
+
+ /*
+ * Finally check the depth of the certificate verification
+ */
+ if (dc && (dc->nVerifyDepth != UNSET)) {
+ depth = dc->nVerifyDepth;
+ }
+ else {
+ depth = mctx->auth.verify_depth;
+ }
+
+ if (errdepth > depth) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Certificate Verification: Certificate Chain too long "
+ "(chain has %d certificates, but maximum allowed are "
+ "only %d)",
+ errdepth, depth);
+
+ errnum = X509_V_ERR_CERT_CHAIN_TOO_LONG;
+ sslconn->verify_error = X509_verify_cert_error_string(errnum);
+
+ ok = FALSE;
+ }
+
+ /*
+ * And finally signal OpenSSL the (perhaps changed) state
+ */
+ return ok;
+}
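+
+/*
+ * The verify mode and depth consulted above come from the usual client
+ * verification directives; an illustrative server-level fragment (the
+ * file path and depth are example values):
+ *
+ *   SSLCACertificateFile /path/to/ca-bundle.crt
+ *   SSLVerifyClient      optional_no_ca
+ *   SSLVerifyDepth       3
+ */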
+
+int ssl_callback_SSLVerify_CRL(int ok, X509_STORE_CTX *ctx, conn_rec *c)
+{
+ server_rec *s = c->base_server;
+ SSLSrvConfigRec *sc = mySrvConfig(s);
+ SSLConnRec *sslconn = myConnConfig(c);
+ modssl_ctx_t *mctx = myCtxConfig(sslconn, sc);
+ X509_OBJECT obj;
+ X509_NAME *subject, *issuer;
+ X509 *cert;
+ X509_CRL *crl;
+ EVP_PKEY *pubkey;
+ int i, n, rc;
+
+ /*
+ * Unless a revocation store for CRLs was created we
+ * cannot do any CRL-based verification, of course.
+ */
+ if (!mctx->crl) {
+ return ok;
+ }
+
+ /*
+ * Determine certificate ingredients in advance
+ */
+ cert = X509_STORE_CTX_get_current_cert(ctx);
+ subject = X509_get_subject_name(cert);
+ issuer = X509_get_issuer_name(cert);
+
+ /*
+ * OpenSSL provides the general mechanism to deal with CRLs but does not
+ * use them automatically when verifying certificates, so we do it
+ * explicitly here. We will check the CRL for the currently checked
+ * certificate, if there is such a CRL in the store.
+ *
+     * We go through this procedure for each certificate in the certificate
+     * chain, starting with the root CA's certificate. At each step we have
+     * to verify both the signature on the CRL (to make sure it's a valid
+     * CRL) and its revocation list (to make sure the current certificate
+     * isn't revoked). But because checking the signature on the CRL requires
+     * the public key of the issuing CA certificate (which was already
+     * processed one round before), we have a little problem. We can solve it
+     * and at the same time optimize the processing by using the following
+     * verification scheme (idea and code snippets borrowed from the GLOBUS
+     * project):
+ *
+ * 1. We'll check the signature of a CRL in each step when we find a CRL
+ * through the _subject_ name of the current certificate. This CRL
+ * itself will be needed the first time in the next round, of course.
+ * But we do the signature processing one round before this where the
+ * public key of the CA is available.
+ *
+ * 2. We'll check the revocation list of a CRL in each step when
+ * we find a CRL through the _issuer_ name of the current certificate.
+     * This CRL's signature was then already verified one round before.
+ *
+ * This verification scheme allows a CA to revoke its own certificate as
+ * well, of course.
+ */
+
+ /*
+ * Try to retrieve a CRL corresponding to the _subject_ of
+     * the current certificate in order to verify its integrity.
+ */
+ memset((char *)&obj, 0, sizeof(obj));
+ rc = SSL_X509_STORE_lookup(mctx->crl,
+ X509_LU_CRL, subject, &obj);
+ crl = obj.data.crl;
+
+ if ((rc > 0) && crl) {
+ /*
+ * Log information about CRL
+ * (A little bit complicated because of ASN.1 and BIOs...)
+ */
+ if (s->loglevel >= APLOG_DEBUG) {
+ char buff[512]; /* should be plenty */
+ BIO *bio = BIO_new(BIO_s_mem());
+
+ BIO_printf(bio, "CA CRL: Issuer: ");
+ X509_NAME_print(bio, issuer, 0);
+
+ BIO_printf(bio, ", lastUpdate: ");
+ ASN1_UTCTIME_print(bio, X509_CRL_get_lastUpdate(crl));
+
+ BIO_printf(bio, ", nextUpdate: ");
+ ASN1_UTCTIME_print(bio, X509_CRL_get_nextUpdate(crl));
+
+ n = BIO_read(bio, buff, sizeof(buff) - 1);
+ buff[n] = '\0';
+
+ BIO_free(bio);
+
+            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "%s", buff);
+ }
+
+ /*
+ * Verify the signature on this CRL
+ */
+ pubkey = X509_get_pubkey(cert);
+ rc = X509_CRL_verify(crl, pubkey);
+#ifdef OPENSSL_VERSION_NUMBER
+ /* Only refcounted in OpenSSL */
+ if (pubkey)
+ EVP_PKEY_free(pubkey);
+#endif
+ if (rc <= 0) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "Invalid signature on CRL");
+
+ X509_STORE_CTX_set_error(ctx, X509_V_ERR_CRL_SIGNATURE_FAILURE);
+ X509_OBJECT_free_contents(&obj);
+ return FALSE;
+ }
+
+ /*
+ * Check date of CRL to make sure it's not expired
+ */
+ i = X509_cmp_current_time(X509_CRL_get_nextUpdate(crl));
+
+ if (i == 0) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "Found CRL has invalid nextUpdate field");
+
+ X509_STORE_CTX_set_error(ctx,
+ X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD);
+ X509_OBJECT_free_contents(&obj);
+
+ return FALSE;
+ }
+
+ if (i < 0) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "Found CRL is expired - "
+ "revoking all certificates until you get updated CRL");
+
+ X509_STORE_CTX_set_error(ctx, X509_V_ERR_CRL_HAS_EXPIRED);
+ X509_OBJECT_free_contents(&obj);
+
+ return FALSE;
+ }
+
+ X509_OBJECT_free_contents(&obj);
+ }
+
+ /*
+ * Try to retrieve a CRL corresponding to the _issuer_ of
+ * the current certificate in order to check for revocation.
+ */
+ memset((char *)&obj, 0, sizeof(obj));
+ rc = SSL_X509_STORE_lookup(mctx->crl,
+ X509_LU_CRL, issuer, &obj);
+
+ crl = obj.data.crl;
+ if ((rc > 0) && crl) {
+ /*
+ * Check if the current certificate is revoked by this CRL
+ */
+ n = sk_X509_REVOKED_num(X509_CRL_get_REVOKED(crl));
+
+ for (i = 0; i < n; i++) {
+ X509_REVOKED *revoked =
+ sk_X509_REVOKED_value(X509_CRL_get_REVOKED(crl), i);
+
+ ASN1_INTEGER *sn = X509_REVOKED_get_serialNumber(revoked);
+
+ if (!ASN1_INTEGER_cmp(sn, X509_get_serialNumber(cert))) {
+ if (s->loglevel >= APLOG_DEBUG) {
+ char *cp = X509_NAME_oneline(issuer, NULL, 0);
+ long serial = ASN1_INTEGER_get(sn);
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Certificate with serial %ld (0x%lX) "
+ "revoked per CRL from issuer %s",
+ serial, serial, cp);
+ modssl_free(cp);
+ }
+
+ X509_STORE_CTX_set_error(ctx, X509_V_ERR_CERT_REVOKED);
+ X509_OBJECT_free_contents(&obj);
+
+ return FALSE;
+ }
+ }
+
+ X509_OBJECT_free_contents(&obj);
+ }
+
+ return ok;
+}
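+
+/*
+ * The CRL store (mctx->crl) consulted above is populated via the
+ * revocation directives; an illustrative fragment (the paths are example
+ * values):
+ *
+ *   SSLCARevocationFile /path/to/ca-bundle.crl
+ *   SSLCARevocationPath /path/to/crl-directory/
+ */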
+
+#define SSLPROXY_CERT_CB_LOG_FMT \
+ "Proxy client certificate callback: (%s) "
+
+static void modssl_proxy_info_log(server_rec *s,
+ X509_INFO *info,
+ const char *msg)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(s);
+ char name_buf[256];
+ X509_NAME *name;
+ char *dn;
+
+ if (s->loglevel < APLOG_DEBUG) {
+ return;
+ }
+
+ name = X509_get_subject_name(info->x509);
+ dn = X509_NAME_oneline(name, name_buf, sizeof(name_buf));
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ SSLPROXY_CERT_CB_LOG_FMT "%s, sending %s",
+                 sc->vhost_id, msg, dn ? dn : "-unknown-");
+}
+
+/*
+ * caller will decrement the cert and key reference
+ * so we need to increment here to prevent them from
+ * being freed.
+ */
+#define modssl_set_cert_info(info, cert, pkey) \
+ *cert = info->x509; \
+ X509_reference_inc(*cert); \
+ *pkey = info->x_pkey->dec_pkey; \
+ EVP_PKEY_reference_inc(*pkey)
+
+int ssl_callback_proxy_cert(SSL *ssl, MODSSL_CLIENT_CERT_CB_ARG_TYPE **x509, EVP_PKEY **pkey)
+{
+ conn_rec *c = (conn_rec *)SSL_get_app_data(ssl);
+ server_rec *s = c->base_server;
+ SSLSrvConfigRec *sc = mySrvConfig(s);
+ X509_NAME *ca_name, *issuer;
+ X509_INFO *info;
+ STACK_OF(X509_NAME) *ca_list;
+ STACK_OF(X509_INFO) *certs = sc->proxy->pkp->certs;
+ int i, j;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ SSLPROXY_CERT_CB_LOG_FMT "entered",
+ sc->vhost_id);
+
+ if (!certs || (sk_X509_INFO_num(certs) <= 0)) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ SSLPROXY_CERT_CB_LOG_FMT
+ "downstream server wanted client certificate "
+ "but none are configured", sc->vhost_id);
+ return FALSE;
+ }
+
+ ca_list = SSL_get_client_CA_list(ssl);
+
+ if (!ca_list || (sk_X509_NAME_num(ca_list) <= 0)) {
+ /*
+ * downstream server didn't send us a list of acceptable CA certs,
+ * so we send the first client cert in the list.
+ */
+ info = sk_X509_INFO_value(certs, 0);
+
+ modssl_proxy_info_log(s, info, "no acceptable CA list");
+
+ modssl_set_cert_info(info, x509, pkey);
+
+ return TRUE;
+ }
+
+ for (i = 0; i < sk_X509_NAME_num(ca_list); i++) {
+ ca_name = sk_X509_NAME_value(ca_list, i);
+
+ for (j = 0; j < sk_X509_INFO_num(certs); j++) {
+ info = sk_X509_INFO_value(certs, j);
+ issuer = X509_get_issuer_name(info->x509);
+
+ if (X509_NAME_cmp(issuer, ca_name) == 0) {
+ modssl_proxy_info_log(s, info, "found acceptable cert");
+
+ modssl_set_cert_info(info, x509, pkey);
+
+ return TRUE;
+ }
+ }
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ SSLPROXY_CERT_CB_LOG_FMT
+ "no client certificate found!?", sc->vhost_id);
+
+ return FALSE;
+}
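+
+/*
+ * The certs stack searched above is filled from the proxy client
+ * certificate directives; an illustrative fragment (the path is an
+ * example value):
+ *
+ *   SSLProxyEngine on
+ *   SSLProxyMachineCertificateFile /path/to/proxy-client-certs.pem
+ */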
+
+static void ssl_session_log(server_rec *s,
+ const char *request,
+ unsigned char *id,
+ unsigned int idlen,
+ const char *status,
+ const char *result,
+ long timeout)
+{
+ char buf[SSL_SESSION_ID_STRING_LEN];
+ char timeout_str[56] = {'\0'};
+
+ if (s->loglevel < APLOG_DEBUG) {
+ return;
+ }
+
+ if (timeout) {
+ apr_snprintf(timeout_str, sizeof(timeout_str),
+ "timeout=%lds ", (timeout - time(NULL)));
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "Inter-Process Session Cache: "
+ "request=%s status=%s id=%s %s(session %s)",
+ request, status,
+ SSL_SESSION_id2sz(id, idlen, buf, sizeof(buf)),
+ timeout_str, result);
+}
+
+/*
+ * This callback function is executed by OpenSSL whenever a new SSL_SESSION is
+ * added to the internal OpenSSL session cache. We use this hook to spread the
+ * SSL_SESSION also to the inter-process disk cache, so that it is shared with
+ * our other Apache pre-forked server processes.
+ */
+int ssl_callback_NewSessionCacheEntry(SSL *ssl, SSL_SESSION *session)
+{
+ /* Get Apache context back through OpenSSL context */
+ conn_rec *conn = (conn_rec *)SSL_get_app_data(ssl);
+ server_rec *s = conn->base_server;
+ SSLSrvConfigRec *sc = mySrvConfig(s);
+ long timeout = sc->session_cache_timeout;
+ BOOL rc;
+ unsigned char *id;
+ unsigned int idlen;
+
+ /*
+ * Set the timeout also for the internal OpenSSL cache, because this way
+ * our inter-process cache is consulted only when it's really necessary.
+ */
+ SSL_set_timeout(session, timeout);
+
+ /*
+ * Store the SSL_SESSION in the inter-process cache with the
+ * same expire time, so it expires automatically there, too.
+ */
+ id = SSL_SESSION_get_session_id(session);
+ idlen = SSL_SESSION_get_session_id_length(session);
+
+ timeout += modssl_session_get_time(session);
+
+ rc = ssl_scache_store(s, id, idlen, timeout, session);
+
+ ssl_session_log(s, "SET", id, idlen,
+ rc == TRUE ? "OK" : "BAD",
+ "caching", timeout);
+
+ /*
+ * return 0 which means to OpenSSL that the session is still
+ * valid and was not freed by us with SSL_SESSION_free().
+ */
+ return 0;
+}
+
+/*
+ * This callback function is executed by OpenSSL whenever an
+ * SSL_SESSION is looked up in the internal OpenSSL cache and is
+ * not found. We use this to look up the SSL_SESSION in the
+ * inter-process disk cache, where it was perhaps stored by one
+ * of our other Apache pre-forked server processes.
+ */
+SSL_SESSION *ssl_callback_GetSessionCacheEntry(SSL *ssl,
+ unsigned char *id,
+ int idlen, int *do_copy)
+{
+ /* Get Apache context back through OpenSSL context */
+ conn_rec *conn = (conn_rec *)SSL_get_app_data(ssl);
+ server_rec *s = conn->base_server;
+ SSL_SESSION *session;
+
+ /*
+ * Try to retrieve the SSL_SESSION from the inter-process cache
+ */
+ session = ssl_scache_retrieve(s, id, idlen);
+
+ ssl_session_log(s, "GET", id, idlen,
+ session ? "FOUND" : "MISSED",
+ session ? "reuse" : "renewal", 0);
+
+ /*
+ * Return NULL or the retrieved SSL_SESSION. But indicate (by
+ * setting do_copy to 0) that the reference count on the
+ * SSL_SESSION should not be incremented by the SSL library,
+ * because we will no longer hold a reference to it ourselves.
+ */
+ *do_copy = 0;
+
+ return session;
+}
+
+/*
+ * This callback function is executed by OpenSSL whenever an
+ * SSL_SESSION is removed from the internal OpenSSL cache.
+ * We use this to remove the SSL_SESSION from the inter-process
+ * disk cache, too.
+ */
+void ssl_callback_DelSessionCacheEntry(SSL_CTX *ctx,
+ SSL_SESSION *session)
+{
+ server_rec *s;
+ SSLSrvConfigRec *sc;
+ unsigned char *id;
+ unsigned int idlen;
+
+ /*
+ * Get Apache context back through OpenSSL context
+ */
+ if (!(s = (server_rec *)SSL_CTX_get_app_data(ctx))) {
+ return; /* on server shutdown Apache is already gone */
+ }
+
+ sc = mySrvConfig(s);
+
+ /*
+ * Remove the SSL_SESSION from the inter-process cache
+ */
+ id = SSL_SESSION_get_session_id(session);
+ idlen = SSL_SESSION_get_session_id_length(session);
+
+ ssl_scache_remove(s, id, idlen);
+
+ ssl_session_log(s, "REM", id, idlen,
+ "OK", "dead", 0);
+
+ return;
+}
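+
+/*
+ * The inter-process cache used by the three session cache callbacks above
+ * is configured with the session cache directives; an illustrative
+ * fragment (the path and sizes are example values):
+ *
+ *   SSLSessionCache        shmcb:/var/run/ssl_scache(512000)
+ *   SSLSessionCacheTimeout 300
+ */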
+
+/* Dump a debugging trace to the log file. */
+static void log_tracing_state(MODSSL_INFO_CB_ARG_TYPE ssl, conn_rec *c,
+ server_rec *s, int where, int rc)
+{
+ /*
+ * create the various trace messages
+ */
+ if (where & SSL_CB_HANDSHAKE_START) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "%s: Handshake: start", SSL_LIBRARY_NAME);
+ }
+ else if (where & SSL_CB_HANDSHAKE_DONE) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "%s: Handshake: done", SSL_LIBRARY_NAME);
+ }
+ else if (where & SSL_CB_LOOP) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "%s: Loop: %s",
+ SSL_LIBRARY_NAME, SSL_state_string_long(ssl));
+ }
+ else if (where & SSL_CB_READ) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "%s: Read: %s",
+ SSL_LIBRARY_NAME, SSL_state_string_long(ssl));
+ }
+ else if (where & SSL_CB_WRITE) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "%s: Write: %s",
+ SSL_LIBRARY_NAME, SSL_state_string_long(ssl));
+ }
+ else if (where & SSL_CB_ALERT) {
+ char *str = (where & SSL_CB_READ) ? "read" : "write";
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "%s: Alert: %s:%s:%s",
+ SSL_LIBRARY_NAME, str,
+ SSL_alert_type_string_long(rc),
+ SSL_alert_desc_string_long(rc));
+ }
+ else if (where & SSL_CB_EXIT) {
+ if (rc == 0) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "%s: Exit: failed in %s",
+ SSL_LIBRARY_NAME, SSL_state_string_long(ssl));
+ }
+ else if (rc < 0) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "%s: Exit: error in %s",
+ SSL_LIBRARY_NAME, SSL_state_string_long(ssl));
+ }
+ }
+
+ /*
+     * Because SSL renegotiations can happen at any time (not only after
+ * SSL_accept()), the best way to log the current connection details is
+ * right after a finished handshake.
+ */
+ if (where & SSL_CB_HANDSHAKE_DONE) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Connection: Client IP: %s, Protocol: %s, "
+ "Cipher: %s (%s/%s bits)",
+ ssl_var_lookup(NULL, s, c, NULL, "REMOTE_ADDR"),
+ ssl_var_lookup(NULL, s, c, NULL, "SSL_PROTOCOL"),
+ ssl_var_lookup(NULL, s, c, NULL, "SSL_CIPHER"),
+ ssl_var_lookup(NULL, s, c, NULL, "SSL_CIPHER_USEKEYSIZE"),
+ ssl_var_lookup(NULL, s, c, NULL, "SSL_CIPHER_ALGKEYSIZE"));
+ }
+}
+
+/*
+ * This callback function is executed while OpenSSL processes the SSL
+ * handshake and does SSL record layer stuff. It's used to trap
+ * client-initiated renegotiations, and for dumping everything to the
+ * log.
+ */
+void ssl_callback_Info(MODSSL_INFO_CB_ARG_TYPE ssl, int where, int rc)
+{
+ conn_rec *c;
+ server_rec *s;
+ SSLConnRec *scr;
+
+ /* Retrieve the conn_rec and the associated SSLConnRec. */
+ if ((c = (conn_rec *)SSL_get_app_data((SSL *)ssl)) == NULL) {
+ return;
+ }
+
+ if ((scr = myConnConfig(c)) == NULL) {
+ return;
+ }
+
+ /* If the reneg state is to reject renegotiations, check the SSL
+ * state machine and move to ABORT if a Client Hello is being
+ * read. */
+ if ((where & SSL_CB_ACCEPT_LOOP) && scr->reneg_state == RENEG_REJECT) {
+ int state = SSL_get_state((SSL *)ssl);
+
+ if (state == SSL3_ST_SR_CLNT_HELLO_A
+ || state == SSL23_ST_SR_CLNT_HELLO_A) {
+ scr->reneg_state = RENEG_ABORT;
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c,
+ "rejecting client initiated renegotiation");
+ }
+ }
+ /* If the first handshake is complete, change state to reject any
+     * subsequent client-initiated renegotiation. */
+ else if ((where & SSL_CB_HANDSHAKE_DONE) && scr->reneg_state == RENEG_INIT) {
+ scr->reneg_state = RENEG_REJECT;
+ }
+
+ s = c->base_server;
+ if (s && s->loglevel >= APLOG_DEBUG) {
+ log_tracing_state(ssl, c, s, where, rc);
+ }
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_log.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_log.c
new file mode 100644
index 00000000..5ca1b6c0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_log.c
@@ -0,0 +1,101 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_engine_log.c
+ * Logging Facility
+ */
+ /* ``The difference between a computer
+ industry job and open-source software
+ hacking is about 30 hours a week.''
+ -- Ralf S. Engelschall */
+#include "mod_ssl.h"
+
+/* _________________________________________________________________
+**
+** Logfile Support
+** _________________________________________________________________
+*/
+
+static const struct {
+ const char *cpPattern;
+ const char *cpAnnotation;
+} ssl_log_annotate[] = {
+ { "*envelope*bad*decrypt*", "wrong pass phrase!?" },
+ { "*CLIENT_HELLO*unknown*protocol*", "speaking not SSL to HTTPS port!?" },
+ { "*CLIENT_HELLO*http*request*", "speaking HTTP to HTTPS port!?" },
+ { "*SSL3_READ_BYTES:sslv3*alert*bad*certificate*", "Subject CN in certificate not server name or identical to CA!?" },
+ { "*self signed certificate in certificate chain*", "Client certificate signed by CA not known to server?" },
+ { "*peer did not return a certificate*", "No CAs known to server for verification?" },
+ { "*no shared cipher*", "Too restrictive SSLCipherSuite or using DSA server certificate?" },
+ { "*no start line*", "Bad file contents or format - or even just a forgotten SSLCertificateKeyFile?" },
+ { "*bad password read*", "You entered an incorrect pass phrase!?" },
+ { "*bad mac decode*", "Browser still remembered details of a re-created server certificate?" },
+ { NULL, NULL }
+};
+
+static const char *ssl_log_annotation(const char *error)
+{
+ int i = 0;
+
+ while (ssl_log_annotate[i].cpPattern != NULL
+ && ap_strcmp_match(error, ssl_log_annotate[i].cpPattern) != 0)
+ i++;
+
+ return ssl_log_annotate[i].cpAnnotation;
+}
+
+void ssl_die(void)
+{
+ /*
+ * This is used for fatal errors and here
+ * it is common module practice to really
+ * exit from the complete program.
+ */
+ exit(1);
+}
+
+/*
+ * Prints the SSL library error information.
+ */
+void ssl_log_ssl_error(const char *file, int line, int level, server_rec *s)
+{
+ unsigned long e;
+
+ while ((e = ERR_get_error())) {
+ const char *annotation;
+ char err[256];
+
+ ERR_error_string_n(e, err, sizeof err);
+ annotation = ssl_log_annotation(err);
+
+ if (annotation) {
+ ap_log_error(file, line, level, 0, s,
+ "SSL Library Error: %lu %s %s",
+ e, err, annotation);
+ }
+ else {
+ ap_log_error(file, line, level, 0, s,
+ "SSL Library Error: %lu %s",
+ e, err);
+ }
+ }
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_mutex.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_mutex.c
new file mode 100644
index 00000000..1e65f4fe
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_mutex.c
@@ -0,0 +1,120 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_engine_mutex.c
+ * Semaphore for Mutual Exclusion
+ */
+ /* ``Real programmers confuse
+ Christmas and Halloween
+ because DEC 25 = OCT 31.''
+ -- Unknown */
+
+#include "mod_ssl.h"
+#if !defined(OS2) && !defined(WIN32) && !defined(BEOS) && !defined(NETWARE)
+#include "unixd.h"
+#define MOD_SSL_SET_MUTEX_PERMS /* XXX Apache should define something */
+#endif
+
+int ssl_mutex_init(server_rec *s, apr_pool_t *p)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ apr_status_t rv;
+
+ if (mc->nMutexMode == SSL_MUTEXMODE_NONE)
+ return TRUE;
+
+ if ((rv = apr_global_mutex_create(&mc->pMutex, mc->szMutexFile,
+ mc->nMutexMech, p)) != APR_SUCCESS) {
+ if (mc->szMutexFile)
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Cannot create SSLMutex with file `%s'",
+ mc->szMutexFile);
+ else
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Cannot create SSLMutex");
+ return FALSE;
+ }
+
+#ifdef MOD_SSL_SET_MUTEX_PERMS
+ rv = unixd_set_global_mutex_perms(mc->pMutex);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Could not set permissions on ssl_mutex; check User "
+ "and Group directives");
+ return FALSE;
+ }
+#endif
+ return TRUE;
+}
+
+int ssl_mutex_reinit(server_rec *s, apr_pool_t *p)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ apr_status_t rv;
+
+ if (mc->nMutexMode == SSL_MUTEXMODE_NONE)
+ return TRUE;
+
+ if ((rv = apr_global_mutex_child_init(&mc->pMutex,
+ mc->szMutexFile, p)) != APR_SUCCESS) {
+ if (mc->szMutexFile)
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Cannot reinit SSLMutex with file `%s'",
+ mc->szMutexFile);
+ else
+ ap_log_error(APLOG_MARK, APLOG_WARNING, rv, s,
+ "Cannot reinit SSLMutex");
+ return FALSE;
+ }
+ return TRUE;
+}
+
+int ssl_mutex_on(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ apr_status_t rv;
+
+ if (mc->nMutexMode == SSL_MUTEXMODE_NONE)
+ return TRUE;
+ if ((rv = apr_global_mutex_lock(mc->pMutex)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, rv, s,
+ "Failed to acquire global mutex lock");
+ return FALSE;
+ }
+ return TRUE;
+}
+
+int ssl_mutex_off(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ apr_status_t rv;
+
+ if (mc->nMutexMode == SSL_MUTEXMODE_NONE)
+ return TRUE;
+ if ((rv = apr_global_mutex_unlock(mc->pMutex)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, rv, s,
+ "Failed to release global mutex lock");
+ return FALSE;
+ }
+ return TRUE;
+}
+
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_pphrase.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_pphrase.c
new file mode 100644
index 00000000..1ca3f32f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_pphrase.c
@@ -0,0 +1,789 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_engine_pphrase.c
+ * Pass Phrase Dialog
+ */
+ /* ``Treat your password like your
+ toothbrush. Don't let anybody
+ else use it, and get a new one
+ every six months.''
+ -- Clifford Stoll */
+#include "mod_ssl.h"
+
+/*
+ * Return true if the named file exists and is readable
+ */
+
+static apr_status_t exists_and_readable(char *fname, apr_pool_t *pool, apr_time_t *mtime)
+{
+ apr_status_t stat;
+ apr_finfo_t sbuf;
+ apr_file_t *fd;
+
+ if ((stat = apr_stat(&sbuf, fname, APR_FINFO_MIN, pool)) != APR_SUCCESS)
+ return stat;
+
+ if (sbuf.filetype != APR_REG)
+ return APR_EGENERAL;
+
+ if ((stat = apr_file_open(&fd, fname, APR_READ, 0, pool)) != APR_SUCCESS)
+ return stat;
+
+ if (mtime) {
+ *mtime = sbuf.mtime;
+ }
+
+ apr_file_close(fd);
+ return APR_SUCCESS;
+}
+
+/*
+ * reuse vhost keys for asn1 tables where keys are allocated out
+ * of s->process->pool to prevent "leaking" each time we format
+ * a vhost key. since the key is stored in a table with lifetime
+ * of s->process->pool, the key needs to have the same lifetime.
+ *
+ * XXX: probably seems silly to use a hash table with keys and values
+ * being the same, but it is easier than doing a linear search
+ * and will make it easier to remove keys if needed in the future.
+ * also have the problem with apr_array_header_t that if we
+ * underestimate the number of vhost keys when we apr_array_make(),
+ * the array will get resized when we push past the initial number
+ * of elts. this resizing in the s->process->pool means "leaking"
+ * since apr_array_push() will apr_alloc arr->nalloc * 2 elts,
+ * leaving the original arr->elts to waste.
+ */
+static char *asn1_table_vhost_key(SSLModConfigRec *mc, apr_pool_t *p,
+ char *id, char *an)
+{
+ /* 'p' pool used here is cleared on restarts (or sooner) */
+ char *key = apr_psprintf(p, "%s:%s", id, an);
+ void *keyptr = apr_hash_get(mc->tVHostKeys, key,
+ APR_HASH_KEY_STRING);
+
+ if (!keyptr) {
+ /* make a copy out of s->process->pool */
+ keyptr = apr_pstrdup(mc->pPool, key);
+ apr_hash_set(mc->tVHostKeys, keyptr,
+ APR_HASH_KEY_STRING, keyptr);
+ }
+
+ return (char *)keyptr;
+}
+
+/* _________________________________________________________________
+**
+** Pass Phrase and Private Key Handling
+** _________________________________________________________________
+*/
+
+#define BUILTIN_DIALOG_BACKOFF 2
+#define BUILTIN_DIALOG_RETRIES 5
+
+static apr_file_t *writetty = NULL;
+static apr_file_t *readtty = NULL;
+
+/*
+ * sslc has a nasty flaw where its
+ * PEM_read_bio_PrivateKey does not take a callback arg.
+ */
+static server_rec *ssl_pphrase_server_rec = NULL;
+
+#ifdef SSLC_VERSION_NUMBER
+int ssl_pphrase_Handle_CB(char *, int, int);
+#else
+int ssl_pphrase_Handle_CB(char *, int, int, void *);
+#endif
+
+static char *pphrase_array_get(apr_array_header_t *arr, int idx)
+{
+ if ((idx < 0) || (idx >= arr->nelts)) {
+ return NULL;
+ }
+
+ return ((char **)arr->elts)[idx];
+}
+
+static void pphrase_array_clear(apr_array_header_t *arr)
+{
+ if (arr->nelts > 0) {
+ memset(arr->elts, 0, arr->elt_size * arr->nelts);
+ }
+ arr->nelts = 0;
+}
+
+void ssl_pphrase_Handle(server_rec *s, apr_pool_t *p)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ SSLSrvConfigRec *sc;
+ server_rec *pServ;
+ char *cpVHostID;
+ char szPath[MAX_STRING_LEN];
+ EVP_PKEY *pPrivateKey;
+ ssl_asn1_t *asn1;
+ unsigned char *ucp;
+ long int length;
+ X509 *pX509Cert;
+ BOOL bReadable;
+ apr_array_header_t *aPassPhrase;
+ int nPassPhrase;
+ int nPassPhraseCur;
+ char *cpPassPhraseCur;
+ int nPassPhraseRetry;
+ int nPassPhraseDialog;
+ int nPassPhraseDialogCur;
+ BOOL bPassPhraseDialogOnce;
+ char **cpp;
+ int i, j;
+ ssl_algo_t algoCert, algoKey, at;
+ char *an;
+ char *cp;
+ apr_time_t pkey_mtime = 0;
+ int isterm = 1;
+ apr_status_t rv;
+ /*
+ * Start with a fresh pass phrase array
+ */
+ aPassPhrase = apr_array_make(p, 2, sizeof(char *));
+ nPassPhrase = 0;
+ nPassPhraseDialog = 0;
+
+ /*
+ * Walk through all configured servers
+ */
+ for (pServ = s; pServ != NULL; pServ = pServ->next) {
+ sc = mySrvConfig(pServ);
+
+ if (!sc->enabled)
+ continue;
+
+ cpVHostID = ssl_util_vhostid(p, pServ);
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, pServ,
+ "Loading certificate & private key of SSL-aware server");
+
+ /*
+ * Read in server certificate(s): This is the easy part
+ * because this file isn't encrypted in any way.
+ */
+ if (sc->server->pks->cert_files[0] == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, pServ,
+ "Server should be SSL-aware but has no certificate "
+ "configured [Hint: SSLCertificateFile]");
+ ssl_die();
+ }
+ algoCert = SSL_ALGO_UNKNOWN;
+ algoKey = SSL_ALGO_UNKNOWN;
+ for (i = 0, j = 0; i < SSL_AIDX_MAX && sc->server->pks->cert_files[i] != NULL; i++) {
+
+ apr_cpystrn(szPath, sc->server->pks->cert_files[i], sizeof(szPath));
+ if ((rv = exists_and_readable(szPath, p, NULL)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Init: Can't open server certificate file %s",
+ szPath);
+ ssl_die();
+ }
+ if ((pX509Cert = SSL_read_X509(szPath, NULL, NULL)) == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Init: Unable to read server certificate from file %s", szPath);
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ ssl_die();
+ }
+
+ /*
+ * check algorithm type of certificate and make
+ * sure only one certificate per type is used.
+ */
+ at = ssl_util_algotypeof(pX509Cert, NULL);
+ an = ssl_util_algotypestr(at);
+ if (algoCert & at) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Init: Multiple %s server certificates not "
+ "allowed", an);
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ ssl_die();
+ }
+ algoCert |= at;
+
+ /*
+ * Insert the certificate into global module configuration to let it
+ * survive the processing between the 1st Apache API init round (where
+ * we operate here) and the 2nd Apache init round (where the
+ * certificate is actually used to configure mod_ssl's per-server
+ * configuration structures).
+ */
+ cp = asn1_table_vhost_key(mc, p, cpVHostID, an);
+ length = i2d_X509(pX509Cert, NULL);
+ ucp = ssl_asn1_table_set(mc->tPublicCert, cp, length);
+ (void)i2d_X509(pX509Cert, &ucp); /* 2nd arg increments */
+
+ /*
+ * Free the X509 structure
+ */
+ X509_free(pX509Cert);
+
+ /*
+ * Read in the private key: This is the non-trivial part, because the
+ * key is typically encrypted, so a pass phrase dialog has to be used
+ * to request it from the user (or it has to be alternatively gathered
+ * from a dialog program). The important point here is that ISPs
+             * usually have hundreds of virtual servers configured and a lot of
+ * them use SSL, so really we have to minimize the pass phrase
+ * dialogs.
+ *
+ * The idea is this: When N virtual hosts are configured and all of
+ * them use encrypted private keys with different pass phrases, we
+ * have no chance and have to pop up N pass phrase dialogs. But
+ * usually the admin is clever enough and uses the same pass phrase
+             * for several private key files (typically he even uses one single pass
+ * phrase for all). When this is the case we can minimize the dialogs
+ * by trying to re-use already known/entered pass phrases.
+ */
+ if (sc->server->pks->key_files[j] != NULL)
+ apr_cpystrn(szPath, sc->server->pks->key_files[j++], sizeof(szPath));
+
+ /*
+ * Try to read the private key file with the help of
+ * the callback function which serves the pass
+ * phrases to OpenSSL
+ */
+ myCtxVarSet(mc, 1, pServ);
+ myCtxVarSet(mc, 2, p);
+ myCtxVarSet(mc, 3, aPassPhrase);
+ myCtxVarSet(mc, 4, &nPassPhraseCur);
+ myCtxVarSet(mc, 5, &cpPassPhraseCur);
+ myCtxVarSet(mc, 6, cpVHostID);
+ myCtxVarSet(mc, 7, an);
+ myCtxVarSet(mc, 8, &nPassPhraseDialog);
+ myCtxVarSet(mc, 9, &nPassPhraseDialogCur);
+ myCtxVarSet(mc, 10, &bPassPhraseDialogOnce);
+
+ nPassPhraseCur = 0;
+ nPassPhraseRetry = 0;
+ nPassPhraseDialogCur = 0;
+ bPassPhraseDialogOnce = TRUE;
+
+ pPrivateKey = NULL;
+
+ for (;;) {
+ /*
+ * Try to read the private key file with the help of
+ * the callback function which serves the pass
+ * phrases to OpenSSL
+ */
+ if ((rv = exists_and_readable(szPath, p,
+ &pkey_mtime)) != APR_SUCCESS ) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Init: Can't open server private key file "
+ "%s",szPath);
+ ssl_die();
+ }
+
+ /*
+ * if the private key is encrypted and SSLPassPhraseDialog
+ * is configured to "builtin" it isn't possible to prompt for
+ * a password after httpd has detached from the tty.
+ * in this case if we already have a private key and the
+ * file name/mtime hasn't changed, then reuse the existing key.
+ * we also reuse existing private keys that were encrypted for
+ * exec: and pipe: dialogs to minimize chances to snoop the
+ * password. that and pipe: dialogs might prompt the user
+ * for password, which on win32 for example could happen 4
+ * times at startup. twice for each child and twice within
+ * each since apache "restarts itself" on startup.
+ * of course this will not work for the builtin dialog if
+ * the server was started without LoadModule ssl_module
+ * configured, then restarted with it configured.
+ * but we fall through with a chance of success if the key
+ * is not encrypted or can be handled via exec or pipe dialog.
+ * and in the case of fallthrough, pkey_mtime and isatty()
+ * are used to give a better idea as to what failed.
+ */
+ if (pkey_mtime) {
+ int i;
+
+ for (i=0; i < SSL_AIDX_MAX; i++) {
+ const char *key_id =
+ ssl_asn1_table_keyfmt(p, cpVHostID, i);
+ ssl_asn1_t *asn1 =
+ ssl_asn1_table_get(mc->tPrivateKey, key_id);
+
+ if (asn1 && (asn1->source_mtime == pkey_mtime)) {
+ ap_log_error(APLOG_MARK, APLOG_INFO,
+ 0, pServ,
+ "%s reusing existing "
+ "%s private key on restart",
+ cpVHostID, ssl_asn1_keystr(i));
+ return;
+ }
+ }
+ }
+
+ cpPassPhraseCur = NULL;
+ ssl_pphrase_server_rec = s; /* to make up for sslc flaw */
+
+ /* Ensure that the error stack is empty; some SSL
+ * functions will fail spuriously if the error stack
+ * is not empty. */
+ ERR_clear_error();
+
+ bReadable = ((pPrivateKey = SSL_read_PrivateKey(szPath, NULL,
+ ssl_pphrase_Handle_CB, s)) != NULL ? TRUE : FALSE);
+
+ /*
+                 * if the private key file was now readable,
+                 * we're fine and can leave the loop
+ */
+ if (bReadable)
+ break;
+
+ /*
+ * when we have more remembered pass phrases
+ * try to reuse these first.
+ */
+ if (nPassPhraseCur < nPassPhrase) {
+ nPassPhraseCur++;
+ continue;
+ }
+
+ /*
+ * else it's not readable and we have no more
+ * remembered pass phrases. Then this has to mean
+ * that the callback function popped up the dialog
+ * but a wrong pass phrase was entered. We give the
+ * user (but not the dialog program) a few more
+ * chances...
+ */
+#ifndef WIN32
+ if ((sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN
+ || sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE)
+#else
+ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE
+#endif
+ && cpPassPhraseCur != NULL
+ && nPassPhraseRetry < BUILTIN_DIALOG_RETRIES ) {
+ apr_file_printf(writetty, "Apache:mod_ssl:Error: Pass phrase incorrect "
+ "(%d more retr%s permitted).\n",
+ (BUILTIN_DIALOG_RETRIES-nPassPhraseRetry),
+ (BUILTIN_DIALOG_RETRIES-nPassPhraseRetry) == 1 ? "y" : "ies");
+ nPassPhraseRetry++;
+ if (nPassPhraseRetry > BUILTIN_DIALOG_BACKOFF)
+ apr_sleep((nPassPhraseRetry-BUILTIN_DIALOG_BACKOFF)
+ * 5 * APR_USEC_PER_SEC);
+ continue;
+ }
+#ifdef WIN32
+ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Init: SSLPassPhraseDialog builtin is not "
+ "supported on Win32 (key file "
+ "%s)", szPath);
+ ssl_die();
+ }
+#endif /* WIN32 */
+
+ /*
+ * Ok, anything else now means a fatal error.
+ */
+ if (cpPassPhraseCur == NULL) {
+ if (nPassPhraseDialogCur && pkey_mtime &&
+ !(isterm = isatty(fileno(stdout)))) /* XXX: apr_isatty() */
+ {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0,
+ pServ,
+ "Init: Unable to read pass phrase "
+ "[Hint: key introduced or changed "
+ "before restart?]");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, pServ);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0,
+ pServ, "Init: Private key not found");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, pServ);
+ }
+ if (writetty) {
+ apr_file_printf(writetty, "Apache:mod_ssl:Error: Private key not found.\n");
+ apr_file_printf(writetty, "**Stopped\n");
+ }
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0,
+ pServ, "Init: Pass phrase incorrect");
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, pServ);
+
+ if (writetty) {
+ apr_file_printf(writetty, "Apache:mod_ssl:Error: Pass phrase incorrect.\n");
+ apr_file_printf(writetty, "**Stopped\n");
+ }
+ }
+ ssl_die();
+ }
+
+ if (pPrivateKey == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Init: Unable to read server private key from "
+ "file %s [Hint: Perhaps it is in a separate file? "
+ " See SSLCertificateKeyFile]", szPath);
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ ssl_die();
+ }
+
+ /*
+ * check algorithm type of private key and make
+ * sure only one private key per type is used.
+ */
+ at = ssl_util_algotypeof(NULL, pPrivateKey);
+ an = ssl_util_algotypestr(at);
+ if (algoKey & at) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Init: Multiple %s server private keys not "
+ "allowed", an);
+ ssl_log_ssl_error(APLOG_MARK, APLOG_ERR, s);
+ ssl_die();
+ }
+ algoKey |= at;
+
+ /*
+ * Log the type of reading
+ */
+ if (nPassPhraseDialogCur == 0) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, pServ,
+ "unencrypted %s private key - pass phrase not "
+ "required", an);
+ }
+ else {
+ if (cpPassPhraseCur != NULL) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
+ pServ,
+ "encrypted %s private key - pass phrase "
+ "requested", an);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
+ pServ,
+ "encrypted %s private key - pass phrase"
+ " reused", an);
+ }
+ }
+
+ /*
+ * Ok, when we have one more pass phrase store it
+ */
+ if (cpPassPhraseCur != NULL) {
+ cpp = (char **)apr_array_push(aPassPhrase);
+ *cpp = cpPassPhraseCur;
+ nPassPhrase++;
+ }
+
+ /*
+ * Insert private key into the global module configuration
+ * (we convert it to a stand-alone DER byte sequence
+ * because the SSL library uses static variables inside a
+ * RSA structure which do not survive DSO reloads!)
+ */
+ cp = asn1_table_vhost_key(mc, p, cpVHostID, an);
+ length = i2d_PrivateKey(pPrivateKey, NULL);
+ ucp = ssl_asn1_table_set(mc->tPrivateKey, cp, length);
+ (void)i2d_PrivateKey(pPrivateKey, &ucp); /* 2nd arg increments */
+
+ if (nPassPhraseDialogCur != 0) {
+ /* remember mtime of encrypted keys */
+ asn1 = ssl_asn1_table_get(mc->tPrivateKey, cp);
+ asn1->source_mtime = pkey_mtime;
+ }
+
+ /*
+ * Free the private key structure
+ */
+ EVP_PKEY_free(pPrivateKey);
+ }
+ }
+
+ /*
+ * Let the user know when we're successful.
+ */
+ if (nPassPhraseDialog > 0) {
+ sc = mySrvConfig(s);
+ if (writetty) {
+ apr_file_printf(writetty, "\n");
+ apr_file_printf(writetty, "Ok: Pass Phrase Dialog successful.\n");
+ }
+ }
+
+ /*
+ * Wipe out the used memory from the
+ * pass phrase array and then deallocate it
+ */
+ if (aPassPhrase->nelts) {
+ pphrase_array_clear(aPassPhrase);
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Init: Wiped out the queried pass phrases from memory");
+ }
+
+ /* Close the pipes if they were opened
+ */
+ if (readtty) {
+ apr_file_close(readtty);
+ apr_file_close(writetty);
+ readtty = writetty = NULL;
+ }
+ return;
+}
+
+static apr_status_t ssl_pipe_child_create(apr_pool_t *p, const char *progname)
+{
+ /* Child process code for 'ErrorLog "|..."';
+ * may want a common framework for this, since I expect it will
+ * be common for other foo-loggers to want this sort of thing...
+ */
+ apr_status_t rc;
+ apr_procattr_t *procattr;
+ apr_proc_t *procnew;
+
+ if (((rc = apr_procattr_create(&procattr, p)) == APR_SUCCESS) &&
+ ((rc = apr_procattr_io_set(procattr,
+ APR_FULL_BLOCK,
+ APR_FULL_BLOCK,
+ APR_NO_PIPE)) == APR_SUCCESS)) {
+ char **args;
+ const char *pname;
+
+ apr_tokenize_to_argv(progname, &args, p);
+ pname = apr_pstrdup(p, args[0]);
+ procnew = (apr_proc_t *)apr_pcalloc(p, sizeof(*procnew));
+ rc = apr_proc_create(procnew, pname, (const char * const *)args,
+ NULL, procattr, p);
+ if (rc == APR_SUCCESS) {
+            /* XXX: not sure if we ought to...
+ * apr_pool_note_subprocess(p, procnew, APR_KILL_AFTER_TIMEOUT);
+ */
+ writetty = procnew->in;
+ readtty = procnew->out;
+ }
+ }
+
+ return rc;
+}
+
+static int pipe_get_passwd_cb(char *buf, int length, char *prompt, int verify)
+{
+ apr_status_t rc;
+ char *p;
+
+ apr_file_puts(prompt, writetty);
+
+ buf[0]='\0';
+ rc = apr_file_gets(buf, length, readtty);
+ apr_file_puts(APR_EOL_STR, writetty);
+
+ if (rc != APR_SUCCESS || apr_file_eof(readtty)) {
+ memset(buf, 0, length);
+ return 1; /* failure */
+ }
+ if ((p = strchr(buf, '\n')) != NULL) {
+ *p = '\0';
+ }
+#ifdef WIN32
+ /* XXX: apr_sometest */
+ if ((p = strchr(buf, '\r')) != NULL) {
+ *p = '\0';
+ }
+#endif
+ return 0;
+}
+
+#ifdef SSLC_VERSION_NUMBER
+int ssl_pphrase_Handle_CB(char *buf, int bufsize, int verify)
+{
+ void *srv = ssl_pphrase_server_rec;
+#else
+int ssl_pphrase_Handle_CB(char *buf, int bufsize, int verify, void *srv)
+{
+#endif
+ SSLModConfigRec *mc;
+ server_rec *s;
+ apr_pool_t *p;
+ apr_array_header_t *aPassPhrase;
+ SSLSrvConfigRec *sc;
+ int *pnPassPhraseCur;
+ char **cppPassPhraseCur;
+ char *cpVHostID;
+ char *cpAlgoType;
+ int *pnPassPhraseDialog;
+ int *pnPassPhraseDialogCur;
+ BOOL *pbPassPhraseDialogOnce;
+ char *cpp;
+ int len = -1;
+
+ mc = myModConfig((server_rec *)srv);
+
+ /*
+ * Reconnect to the context of ssl_phrase_Handle()
+ */
+ s = myCtxVarGet(mc, 1, server_rec *);
+ p = myCtxVarGet(mc, 2, apr_pool_t *);
+ aPassPhrase = myCtxVarGet(mc, 3, apr_array_header_t *);
+ pnPassPhraseCur = myCtxVarGet(mc, 4, int *);
+ cppPassPhraseCur = myCtxVarGet(mc, 5, char **);
+ cpVHostID = myCtxVarGet(mc, 6, char *);
+ cpAlgoType = myCtxVarGet(mc, 7, char *);
+ pnPassPhraseDialog = myCtxVarGet(mc, 8, int *);
+ pnPassPhraseDialogCur = myCtxVarGet(mc, 9, int *);
+ pbPassPhraseDialogOnce = myCtxVarGet(mc, 10, BOOL *);
+ sc = mySrvConfig(s);
+
+ (*pnPassPhraseDialog)++;
+ (*pnPassPhraseDialogCur)++;
+
+ /*
+ * When remembered pass phrases are available use them...
+ */
+ if ((cpp = pphrase_array_get(aPassPhrase, *pnPassPhraseCur)) != NULL) {
+ apr_cpystrn(buf, cpp, bufsize);
+ len = strlen(buf);
+ return len;
+ }
+
+ /*
+ * Builtin or Pipe dialog
+ */
+ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN
+ || sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) {
+ char *prompt;
+ int i;
+
+ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) {
+ if (!readtty) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Init: Creating pass phrase dialog pipe child "
+ "'%s'", sc->server->pphrase_dialog_path);
+ if (ssl_pipe_child_create(p, sc->server->pphrase_dialog_path)
+ != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Init: Failed to create pass phrase pipe '%s'",
+ sc->server->pphrase_dialog_path);
+ PEMerr(PEM_F_DEF_CALLBACK,PEM_R_PROBLEMS_GETTING_PASSWORD);
+ memset(buf, 0, (unsigned int)bufsize);
+ return (-1);
+ }
+ }
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Init: Requesting pass phrase via piped dialog");
+ }
+ else { /* sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN */
+#ifdef WIN32
+ PEMerr(PEM_F_DEF_CALLBACK,PEM_R_PROBLEMS_GETTING_PASSWORD);
+ memset(buf, 0, (unsigned int)bufsize);
+ return (-1);
+#else
+ /*
+ * stderr has already been redirected to the error_log.
+ * rather than attempting to temporarily rehook it to the terminal,
+ * we print the prompt to stdout before EVP_read_pw_string turns
+ * off tty echo
+ */
+ apr_file_open_stdout(&writetty, p);
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Init: Requesting pass phrase via builtin terminal "
+ "dialog");
+#endif
+ }
+
+ /*
+         * The first time, display a header to inform the user which
+         * program they are actually talking to, which module is
+         * responsible for this terminal dialog, and why they have to
+         * enter something at all...
+ */
+ if (*pnPassPhraseDialog == 1) {
+ apr_file_printf(writetty, "%s mod_ssl/%s (Pass Phrase Dialog)\n",
+ AP_SERVER_BASEVERSION, MOD_SSL_VERSION);
+ apr_file_printf(writetty, "Some of your private key files are encrypted for security reasons.\n");
+ apr_file_printf(writetty, "In order to read them you have to provide us with the pass phrases.\n");
+ }
+ if (*pbPassPhraseDialogOnce) {
+ *pbPassPhraseDialogOnce = FALSE;
+ apr_file_printf(writetty, "\n");
+ apr_file_printf(writetty, "Server %s (%s)\n", cpVHostID, cpAlgoType);
+ }
+
+ /*
+ * Emulate the OpenSSL internal pass phrase dialog
+ * (see crypto/pem/pem_lib.c:def_callback() for details)
+ */
+ prompt = "Enter pass phrase:";
+
+ for (;;) {
+ apr_file_puts(prompt, writetty);
+ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) {
+ i = pipe_get_passwd_cb(buf, bufsize, "", FALSE);
+ }
+ else { /* sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN */
+ i = EVP_read_pw_string(buf, bufsize, "", FALSE);
+ }
+ if (i != 0) {
+ PEMerr(PEM_F_DEF_CALLBACK,PEM_R_PROBLEMS_GETTING_PASSWORD);
+ memset(buf, 0, (unsigned int)bufsize);
+ return (-1);
+ }
+ len = strlen(buf);
+ if (len < 1)
+ apr_file_printf(writetty, "Apache:mod_ssl:Error: Pass phrase empty (needs to be at least 1 character).\n");
+ else
+ break;
+ }
+ }
+
+ /*
+ * Filter program
+ */
+ else if (sc->server->pphrase_dialog_type == SSL_PPTYPE_FILTER) {
+ const char *cmd = sc->server->pphrase_dialog_path;
+ const char **argv = apr_palloc(p, sizeof(char *) * 4);
+ char *result;
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Init: Requesting pass phrase from dialog filter "
+ "program (%s)", cmd);
+
+ argv[0] = cmd;
+ argv[1] = cpVHostID;
+ argv[2] = cpAlgoType;
+ argv[3] = NULL;
+
+ result = ssl_util_readfilter(s, p, cmd, argv);
+ apr_cpystrn(buf, result, bufsize);
+ len = strlen(buf);
+ }
+
+ /*
+ * Ok, we now have the pass phrase, so give it back
+ */
+ *cppPassPhraseCur = apr_pstrdup(p, buf);
+
+ /*
+     * And return its length to OpenSSL...
+ */
+ return (len);
+}
+
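
As a rough standalone illustration of the SSL_PPTYPE_FILTER branch above, the sketch below asks an external helper program for a pass phrase, reads one line from its stdout, and strips the trailing newline much like pipe_get_passwd_cb() does. It uses POSIX popen(3) instead of the APR process and pipe machinery behind ssl_util_readfilter(), and the helper path and its arguments are invented for the example.

#include <stdio.h>
#include <string.h>

/* Minimal sketch: obtain a pass phrase by reading one line from an
 * external helper's stdout, roughly what the SSL_PPTYPE_FILTER branch
 * above does via ssl_util_readfilter().  The helper path and its
 * arguments are hypothetical. */
static int get_passphrase_from_filter(char *buf, size_t bufsize)
{
    FILE *fp = popen("/usr/local/bin/ssl-askpass www.example.com:443 RSA", "r");

    if (fp == NULL)
        return -1;
    if (fgets(buf, (int)bufsize, fp) == NULL) {
        pclose(fp);
        return -1;
    }
    pclose(fp);

    /* strip the trailing newline (and a CR, if any), mirroring the
     * trimming done in pipe_get_passwd_cb() above */
    buf[strcspn(buf, "\r\n")] = '\0';
    return (int)strlen(buf);
}

int main(void)
{
    char buf[256];
    int len = get_passphrase_from_filter(buf, sizeof(buf));

    if (len > 0)
        printf("got a %d-character pass phrase\n", len);
    return 0;
}
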
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_rand.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_rand.c
new file mode 100644
index 00000000..b640e3f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_rand.c
@@ -0,0 +1,179 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_engine_rand.c
+ * Random Number Generator Seeding
+ */
+ /* ``The generation of random
+ numbers is too important
+ to be left to chance.'' */
+
+#include "mod_ssl.h"
+
+/* _________________________________________________________________
+**
+** Support for better seeding of SSL library's RNG
+** _________________________________________________________________
+*/
+
+static int ssl_rand_choosenum(int, int);
+static int ssl_rand_feedfp(apr_pool_t *, apr_file_t *, int);
+
+int ssl_rand_seed(server_rec *s, apr_pool_t *p, ssl_rsctx_t nCtx, char *prefix)
+{
+ SSLModConfigRec *mc;
+ apr_array_header_t *apRandSeed;
+ ssl_randseed_t *pRandSeeds;
+ ssl_randseed_t *pRandSeed;
+ unsigned char stackdata[256];
+ int nReq, nDone;
+ apr_file_t *fp;
+ int i, n, l;
+
+ mc = myModConfig(s);
+ nReq = 0;
+ nDone = 0;
+ apRandSeed = mc->aRandSeed;
+ pRandSeeds = (ssl_randseed_t *)apRandSeed->elts;
+ for (i = 0; i < apRandSeed->nelts; i++) {
+ pRandSeed = &pRandSeeds[i];
+ if (pRandSeed->nCtx == nCtx) {
+ nReq += pRandSeed->nBytes;
+ if (pRandSeed->nSrc == SSL_RSSRC_FILE) {
+ /*
+ * seed in contents of an external file
+ */
+ if (apr_file_open(&fp, pRandSeed->cpPath,
+ APR_READ, APR_OS_DEFAULT, p) != APR_SUCCESS)
+ continue;
+ nDone += ssl_rand_feedfp(p, fp, pRandSeed->nBytes);
+ apr_file_close(fp);
+ }
+ else if (pRandSeed->nSrc == SSL_RSSRC_EXEC) {
+ const char *cmd = pRandSeed->cpPath;
+ const char **argv = apr_palloc(p, sizeof(char *) * 3);
+ /*
+ * seed in contents generated by an external program
+ */
+ argv[0] = cmd;
+ argv[1] = apr_itoa(p, pRandSeed->nBytes);
+ argv[2] = NULL;
+
+ if ((fp = ssl_util_ppopen(s, p, cmd, argv)) == NULL)
+ continue;
+ nDone += ssl_rand_feedfp(p, fp, pRandSeed->nBytes);
+ ssl_util_ppclose(s, p, fp);
+ }
+#ifdef HAVE_SSL_RAND_EGD
+ else if (pRandSeed->nSrc == SSL_RSSRC_EGD) {
+ /*
+ * seed in contents provided by the external
+ * Entropy Gathering Daemon (EGD)
+ */
+ if ((n = RAND_egd(pRandSeed->cpPath)) == -1)
+ continue;
+ nDone += n;
+ }
+#endif
+ else if (pRandSeed->nSrc == SSL_RSSRC_BUILTIN) {
+ struct {
+ time_t t;
+ pid_t pid;
+ } my_seed;
+
+ /*
+ * seed in the current time (usually just 4 bytes)
+ */
+ my_seed.t = time(NULL);
+
+ /*
+ * seed in the current process id (usually just 4 bytes)
+ */
+ my_seed.pid = mc->pid;
+
+ l = sizeof(my_seed);
+ RAND_seed((unsigned char *)&my_seed, l);
+ nDone += l;
+
+ /*
+ * seed in some current state of the run-time stack (128 bytes)
+ */
+ n = ssl_rand_choosenum(0, sizeof(stackdata)-128-1);
+ RAND_seed(stackdata+n, 128);
+ nDone += 128;
+
+ }
+ }
+ }
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "%sSeeding PRNG with %d bytes of entropy", prefix, nDone);
+
+ if (RAND_status() == 0)
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "%sPRNG still contains insufficient entropy!", prefix);
+
+ return nDone;
+}
+
+#define BUFSIZE 8192
+
+static int ssl_rand_feedfp(apr_pool_t *p, apr_file_t *fp, int nReq)
+{
+ apr_size_t nDone;
+ unsigned char caBuf[BUFSIZE];
+ apr_size_t nBuf;
+ apr_size_t nRead;
+ apr_size_t nTodo;
+
+ nDone = 0;
+ nRead = BUFSIZE;
+ nTodo = nReq;
+ while (1) {
+ if (nReq > 0)
+ nRead = (nTodo < BUFSIZE ? nTodo : BUFSIZE);
+ nBuf = nRead;
+ if (apr_file_read(fp, caBuf, &nBuf) != APR_SUCCESS)
+ break;
+ RAND_seed(caBuf, nBuf);
+ nDone += nBuf;
+ if (nReq > 0) {
+ nTodo -= nBuf;
+ if (nTodo <= 0)
+ break;
+ }
+ }
+ return nDone;
+}
+
+static int ssl_rand_choosenum(int l, int h)
+{
+ int i;
+ char buf[50];
+
+ apr_snprintf(buf, sizeof(buf), "%.0f",
+ (((double)(rand()%RAND_MAX)/RAND_MAX)*(h-l)));
+ i = atoi(buf)+1;
+ if (i < l) i = l;
+ if (i > h) i = h;
+ return i;
+}
+
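
A minimal standalone version of the file-seeding path in ssl_rand_seed()/ssl_rand_feedfp() above, using plain stdio instead of APR file I/O; the seed file path is made up and error handling is reduced to the bare minimum. It links against libcrypto (-lcrypto).

#include <stdio.h>
#include <openssl/rand.h>

/* Rough standalone equivalent of ssl_rand_feedfp() above: read a file in
 * chunks and mix each chunk into OpenSSL's PRNG state.  The seed file
 * name is hypothetical. */
static int feed_prng_from_file(const char *path)
{
    unsigned char buf[8192];
    size_t n;
    int total = 0;
    FILE *fp = fopen(path, "rb");

    if (fp == NULL)
        return -1;
    while ((n = fread(buf, 1, sizeof(buf), fp)) > 0) {
        RAND_seed(buf, (int)n);   /* hand the bytes to the PRNG */
        total += (int)n;
    }
    fclose(fp);
    return total;
}

int main(void)
{
    int n = feed_prng_from_file("/etc/ssl/random-seed");

    printf("seeded PRNG with %d bytes, RAND_status()=%d\n", n, RAND_status());
    return 0;
}
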
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_vars.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_vars.c
new file mode 100644
index 00000000..661e99d8
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_engine_vars.c
@@ -0,0 +1,687 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_engine_vars.c
+ * Variable Lookup Facility
+ */
+ /* ``Those of you who think they
+ know everything are very annoying
+ to those of us who do.''
+ -- Unknown */
+#include "mod_ssl.h"
+
+/* _________________________________________________________________
+**
+** Variable Lookup
+** _________________________________________________________________
+*/
+
+static char *ssl_var_lookup_header(apr_pool_t *p, request_rec *r, const char *name);
+static char *ssl_var_lookup_ssl(apr_pool_t *p, conn_rec *c, char *var);
+static char *ssl_var_lookup_ssl_cert(apr_pool_t *p, X509 *xs, char *var);
+static char *ssl_var_lookup_ssl_cert_dn(apr_pool_t *p, X509_NAME *xsname, char *var);
+static char *ssl_var_lookup_ssl_cert_valid(apr_pool_t *p, ASN1_UTCTIME *tm);
+static char *ssl_var_lookup_ssl_cert_serial(apr_pool_t *p, X509 *xs);
+static char *ssl_var_lookup_ssl_cert_chain(apr_pool_t *p, STACK_OF(X509) *sk, char *var);
+static char *ssl_var_lookup_ssl_cert_PEM(apr_pool_t *p, X509 *xs);
+static char *ssl_var_lookup_ssl_cert_verify(apr_pool_t *p, conn_rec *c);
+static char *ssl_var_lookup_ssl_cipher(apr_pool_t *p, conn_rec *c, char *var);
+static void ssl_var_lookup_ssl_cipher_bits(SSL *ssl, int *usekeysize, int *algkeysize);
+static char *ssl_var_lookup_ssl_version(apr_pool_t *pp, apr_pool_t *p, char *var);
+
+static int ssl_is_https(conn_rec *c)
+{
+ SSLConnRec *sslconn = myConnConfig(c);
+ return sslconn && sslconn->ssl;
+}
+
+void ssl_var_register(void)
+{
+ APR_REGISTER_OPTIONAL_FN(ssl_is_https);
+ APR_REGISTER_OPTIONAL_FN(ssl_var_lookup);
+ return;
+}
+
+/* This function must remain safe to use for a non-SSL connection. */
+char *ssl_var_lookup(apr_pool_t *p, server_rec *s, conn_rec *c, request_rec *r, char *var)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ char *result;
+ BOOL resdup;
+ apr_time_exp_t tm;
+
+ result = NULL;
+ resdup = TRUE;
+
+ /*
+ * When no pool is given try to find one
+ */
+ if (p == NULL) {
+ if (r != NULL)
+ p = r->pool;
+ else if (c != NULL)
+ p = c->pool;
+ else
+ p = mc->pPool;
+ }
+
+ /*
+ * Request dependent stuff
+ */
+ if (r != NULL) {
+ if (strcEQ(var, "HTTP_USER_AGENT"))
+ result = ssl_var_lookup_header(p, r, "User-Agent");
+ else if (strcEQ(var, "HTTP_REFERER"))
+ result = ssl_var_lookup_header(p, r, "Referer");
+ else if (strcEQ(var, "HTTP_COOKIE"))
+ result = ssl_var_lookup_header(p, r, "Cookie");
+ else if (strcEQ(var, "HTTP_FORWARDED"))
+ result = ssl_var_lookup_header(p, r, "Forwarded");
+ else if (strcEQ(var, "HTTP_HOST"))
+ result = ssl_var_lookup_header(p, r, "Host");
+ else if (strcEQ(var, "HTTP_PROXY_CONNECTION"))
+ result = ssl_var_lookup_header(p, r, "Proxy-Connection");
+ else if (strcEQ(var, "HTTP_ACCEPT"))
+ result = ssl_var_lookup_header(p, r, "Accept");
+ else if (strlen(var) > 5 && strcEQn(var, "HTTP:", 5))
+            /* all other headers we do not know about explicitly */
+ result = ssl_var_lookup_header(p, r, var+5);
+ else if (strcEQ(var, "THE_REQUEST"))
+ result = r->the_request;
+ else if (strcEQ(var, "REQUEST_METHOD"))
+ result = (char *)(r->method);
+ else if (strcEQ(var, "REQUEST_SCHEME"))
+ result = (char *)ap_http_method(r);
+ else if (strcEQ(var, "REQUEST_URI"))
+ result = r->uri;
+ else if (strcEQ(var, "SCRIPT_FILENAME") ||
+ strcEQ(var, "REQUEST_FILENAME"))
+ result = r->filename;
+ else if (strcEQ(var, "PATH_INFO"))
+ result = r->path_info;
+ else if (strcEQ(var, "QUERY_STRING"))
+ result = r->args;
+ else if (strcEQ(var, "REMOTE_HOST"))
+ result = (char *)ap_get_remote_host(r->connection,
+ r->per_dir_config, REMOTE_NAME, NULL);
+ else if (strcEQ(var, "REMOTE_IDENT"))
+ result = (char *)ap_get_remote_logname(r);
+ else if (strcEQ(var, "IS_SUBREQ"))
+ result = (r->main != NULL ? "true" : "false");
+ else if (strcEQ(var, "DOCUMENT_ROOT"))
+ result = (char *)ap_document_root(r);
+ else if (strcEQ(var, "SERVER_ADMIN"))
+ result = r->server->server_admin;
+ else if (strcEQ(var, "SERVER_NAME"))
+ result = (char *)ap_get_server_name(r);
+ else if (strcEQ(var, "SERVER_PORT"))
+ result = apr_psprintf(p, "%u", ap_get_server_port(r));
+ else if (strcEQ(var, "SERVER_PROTOCOL"))
+ result = r->protocol;
+ }
+
+ /*
+ * Connection stuff
+ */
+ if (result == NULL && c != NULL) {
+ SSLConnRec *sslconn = myConnConfig(c);
+ if (strcEQ(var, "REMOTE_ADDR"))
+ result = c->remote_ip;
+ else if (strcEQ(var, "REMOTE_USER"))
+ result = r->user;
+ else if (strcEQ(var, "AUTH_TYPE"))
+ result = r->ap_auth_type;
+ else if (strlen(var) > 4 && strcEQn(var, "SSL_", 4)
+ && sslconn && sslconn->ssl)
+ result = ssl_var_lookup_ssl(p, c, var+4);
+ else if (strcEQ(var, "HTTPS")) {
+ if (sslconn && sslconn->ssl)
+ result = "on";
+ else
+ result = "off";
+ }
+ }
+
+ /*
+ * Totally independent stuff
+ */
+ if (result == NULL) {
+ if (strlen(var) > 12 && strcEQn(var, "SSL_VERSION_", 12))
+ result = ssl_var_lookup_ssl_version(s->process->pool, p, var+12);
+ else if (strcEQ(var, "SERVER_SOFTWARE"))
+ result = (char *)ap_get_server_version();
+ else if (strcEQ(var, "API_VERSION")) {
+ result = apr_psprintf(p, "%d", MODULE_MAGIC_NUMBER);
+ resdup = FALSE;
+ }
+ else if (strcEQ(var, "TIME_YEAR")) {
+ apr_time_exp_lt(&tm, apr_time_now());
+ result = apr_psprintf(p, "%02d%02d",
+ (tm.tm_year / 100) + 19, tm.tm_year % 100);
+ resdup = FALSE;
+ }
+#define MKTIMESTR(format, tmfield) \
+ apr_time_exp_lt(&tm, apr_time_now()); \
+ result = apr_psprintf(p, format, tm.tmfield); \
+ resdup = FALSE;
+ else if (strcEQ(var, "TIME_MON")) {
+ MKTIMESTR("%02d", tm_mon+1)
+ }
+ else if (strcEQ(var, "TIME_DAY")) {
+ MKTIMESTR("%02d", tm_mday)
+ }
+ else if (strcEQ(var, "TIME_HOUR")) {
+ MKTIMESTR("%02d", tm_hour)
+ }
+ else if (strcEQ(var, "TIME_MIN")) {
+ MKTIMESTR("%02d", tm_min)
+ }
+ else if (strcEQ(var, "TIME_SEC")) {
+ MKTIMESTR("%02d", tm_sec)
+ }
+ else if (strcEQ(var, "TIME_WDAY")) {
+ MKTIMESTR("%d", tm_wday)
+ }
+ else if (strcEQ(var, "TIME")) {
+ apr_time_exp_lt(&tm, apr_time_now());
+ result = apr_psprintf(p,
+ "%02d%02d%02d%02d%02d%02d%02d", (tm.tm_year / 100) + 19,
+ (tm.tm_year % 100), tm.tm_mon+1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+ resdup = FALSE;
+ }
+ /* all other env-variables from the parent Apache process */
+ else if (strlen(var) > 4 && strcEQn(var, "ENV:", 4)) {
+ result = (char *)apr_table_get(r->notes, var+4);
+ if (result == NULL)
+ result = (char *)apr_table_get(r->subprocess_env, var+4);
+ if (result == NULL)
+ result = getenv(var+4);
+ }
+ }
+
+ if (result != NULL && resdup)
+ result = apr_pstrdup(p, result);
+ if (result == NULL)
+ result = "";
+ return result;
+}
+
+static char *ssl_var_lookup_header(apr_pool_t *p, request_rec *r, const char *name)
+{
+ char *hdr = NULL;
+
+ if ((hdr = (char *)apr_table_get(r->headers_in, name)) != NULL)
+ hdr = apr_pstrdup(p, hdr);
+ return hdr;
+}
+
+static char *ssl_var_lookup_ssl(apr_pool_t *p, conn_rec *c, char *var)
+{
+ SSLConnRec *sslconn = myConnConfig(c);
+ char *result;
+ X509 *xs;
+ STACK_OF(X509) *sk;
+ SSL *ssl;
+
+ result = NULL;
+
+ ssl = sslconn->ssl;
+ if (strlen(var) > 8 && strcEQn(var, "VERSION_", 8)) {
+ result = ssl_var_lookup_ssl_version(c->base_server->process->pool,
+ p, var+8);
+ }
+ else if (ssl != NULL && strcEQ(var, "PROTOCOL")) {
+ result = (char *)SSL_get_version(ssl);
+ }
+ else if (ssl != NULL && strcEQ(var, "SESSION_ID")) {
+ char buf[SSL_SESSION_ID_STRING_LEN];
+ SSL_SESSION *pSession = SSL_get_session(ssl);
+ if (pSession) {
+ result = apr_pstrdup(p, SSL_SESSION_id2sz(
+ SSL_SESSION_get_session_id(pSession),
+ SSL_SESSION_get_session_id_length(pSession),
+ buf, sizeof(buf)));
+ }
+ }
+ else if (ssl != NULL && strlen(var) >= 6 && strcEQn(var, "CIPHER", 6)) {
+ result = ssl_var_lookup_ssl_cipher(p, c, var+6);
+ }
+ else if (ssl != NULL && strlen(var) > 18 && strcEQn(var, "CLIENT_CERT_CHAIN_", 18)) {
+ sk = SSL_get_peer_cert_chain(ssl);
+ result = ssl_var_lookup_ssl_cert_chain(p, sk, var+18);
+ }
+ else if (ssl != NULL && strcEQ(var, "CLIENT_VERIFY")) {
+ result = ssl_var_lookup_ssl_cert_verify(p, c);
+ }
+ else if (ssl != NULL && strlen(var) > 7 && strcEQn(var, "CLIENT_", 7)) {
+ if ((xs = SSL_get_peer_certificate(ssl)) != NULL) {
+ result = ssl_var_lookup_ssl_cert(p, xs, var+7);
+ X509_free(xs);
+ }
+ }
+ else if (ssl != NULL && strlen(var) > 7 && strcEQn(var, "SERVER_", 7)) {
+ if ((xs = SSL_get_certificate(ssl)) != NULL)
+ result = ssl_var_lookup_ssl_cert(p, xs, var+7);
+ }
+ else if (ssl != NULL && strcEQ(var, "SECURE_RENEG")) {
+ int flag = 0;
+#ifdef SSL_get_secure_renegotiation_support
+ flag = SSL_get_secure_renegotiation_support(ssl);
+#endif
+ result = apr_pstrdup(p, flag ? "true" : "false");
+ }
+
+ return result;
+}
+
+static char *ssl_var_lookup_ssl_cert(apr_pool_t *p, X509 *xs, char *var)
+{
+ char *result;
+ BOOL resdup;
+ X509_NAME *xsname;
+ int nid;
+ char *cp;
+
+ result = NULL;
+ resdup = TRUE;
+
+ if (strcEQ(var, "M_VERSION")) {
+ result = apr_psprintf(p, "%lu", X509_get_version(xs)+1);
+ resdup = FALSE;
+ }
+ else if (strcEQ(var, "M_SERIAL")) {
+ result = ssl_var_lookup_ssl_cert_serial(p, xs);
+ }
+ else if (strcEQ(var, "V_START")) {
+ result = ssl_var_lookup_ssl_cert_valid(p, X509_get_notBefore(xs));
+ }
+ else if (strcEQ(var, "V_END")) {
+ result = ssl_var_lookup_ssl_cert_valid(p, X509_get_notAfter(xs));
+ }
+ else if (strcEQ(var, "S_DN")) {
+ xsname = X509_get_subject_name(xs);
+ cp = X509_NAME_oneline(xsname, NULL, 0);
+ result = apr_pstrdup(p, cp);
+ modssl_free(cp);
+ resdup = FALSE;
+ }
+ else if (strlen(var) > 5 && strcEQn(var, "S_DN_", 5)) {
+ xsname = X509_get_subject_name(xs);
+ result = ssl_var_lookup_ssl_cert_dn(p, xsname, var+5);
+ resdup = FALSE;
+ }
+ else if (strcEQ(var, "I_DN")) {
+ xsname = X509_get_issuer_name(xs);
+ cp = X509_NAME_oneline(xsname, NULL, 0);
+ result = apr_pstrdup(p, cp);
+ modssl_free(cp);
+ resdup = FALSE;
+ }
+ else if (strlen(var) > 5 && strcEQn(var, "I_DN_", 5)) {
+ xsname = X509_get_issuer_name(xs);
+ result = ssl_var_lookup_ssl_cert_dn(p, xsname, var+5);
+ resdup = FALSE;
+ }
+ else if (strcEQ(var, "A_SIG")) {
+ nid = OBJ_obj2nid((ASN1_OBJECT *)X509_get_signature_algorithm(xs));
+ result = apr_pstrdup(p,
+ (nid == NID_undef) ? "UNKNOWN" : OBJ_nid2ln(nid));
+ resdup = FALSE;
+ }
+ else if (strcEQ(var, "A_KEY")) {
+ nid = OBJ_obj2nid((ASN1_OBJECT *)X509_get_key_algorithm(xs));
+ result = apr_pstrdup(p,
+ (nid == NID_undef) ? "UNKNOWN" : OBJ_nid2ln(nid));
+ resdup = FALSE;
+ }
+ else if (strcEQ(var, "CERT")) {
+ result = ssl_var_lookup_ssl_cert_PEM(p, xs);
+ }
+
+ if (result != NULL && resdup)
+ result = apr_pstrdup(p, result);
+ return result;
+}
+
+static const struct {
+ char *name;
+ int nid;
+} ssl_var_lookup_ssl_cert_dn_rec[] = {
+ { "C", NID_countryName },
+ { "ST", NID_stateOrProvinceName }, /* officially (RFC2156) */
+ { "SP", NID_stateOrProvinceName }, /* compatibility (SSLeay) */
+ { "L", NID_localityName },
+ { "O", NID_organizationName },
+ { "OU", NID_organizationalUnitName },
+ { "CN", NID_commonName },
+ { "T", NID_title },
+ { "I", NID_initials },
+ { "G", NID_givenName },
+ { "S", NID_surname },
+ { "D", NID_description },
+/* This has been removed in OpenSSL 0.9.8-dev. */
+#ifdef NID_uniqueIdentifier
+ { "UID", NID_uniqueIdentifier },
+#endif
+ { "Email", NID_pkcs9_emailAddress },
+ { NULL, 0 }
+};
+
+static char *ssl_var_lookup_ssl_cert_dn(apr_pool_t *p, X509_NAME *xsname, char *var)
+{
+ char *result;
+ X509_NAME_ENTRY *xsne;
+ int i, j, n;
+ unsigned char *data_ptr;
+ int data_len;
+
+ result = NULL;
+
+ for (i = 0; ssl_var_lookup_ssl_cert_dn_rec[i].name != NULL; i++) {
+ if (strEQ(var, ssl_var_lookup_ssl_cert_dn_rec[i].name)) {
+ for (j = 0; j < sk_X509_NAME_ENTRY_num((STACK_OF(X509_NAME_ENTRY) *)
+ X509_NAME_get_entries(xsname));
+ j++) {
+ xsne = sk_X509_NAME_ENTRY_value((STACK_OF(X509_NAME_ENTRY) *)
+ X509_NAME_get_entries(xsname), j);
+
+                n = OBJ_obj2nid((ASN1_OBJECT *)X509_NAME_ENTRY_get_object(xsne));
+ data_ptr = X509_NAME_ENTRY_get_data_ptr(xsne);
+ data_len = X509_NAME_ENTRY_get_data_len(xsne);
+
+ if (n == ssl_var_lookup_ssl_cert_dn_rec[i].nid) {
+ result = apr_palloc(p, data_len+1);
+ apr_cpystrn(result, (char *)data_ptr, data_len+1);
+#ifdef CHARSET_EBCDIC
+ ascii2ebcdic(result, result, xsne->value->length);
+#endif /* CHARSET_EBCDIC */
+ result[data_len] = NUL;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ return result;
+}
+
+static char *ssl_var_lookup_ssl_cert_valid(apr_pool_t *p, ASN1_UTCTIME *tm)
+{
+ char *result;
+ BIO* bio;
+ int n;
+
+ if ((bio = BIO_new(BIO_s_mem())) == NULL)
+ return NULL;
+ ASN1_UTCTIME_print(bio, tm);
+ n = BIO_pending(bio);
+ result = apr_pcalloc(p, n+1);
+ n = BIO_read(bio, result, n);
+ result[n] = NUL;
+ BIO_free(bio);
+ return result;
+}
+
+static char *ssl_var_lookup_ssl_cert_serial(apr_pool_t *p, X509 *xs)
+{
+ char *result;
+ BIO *bio;
+ int n;
+
+ if ((bio = BIO_new(BIO_s_mem())) == NULL)
+ return NULL;
+ i2a_ASN1_INTEGER(bio, X509_get_serialNumber(xs));
+ n = BIO_pending(bio);
+ result = apr_pcalloc(p, n+1);
+ n = BIO_read(bio, result, n);
+ result[n] = NUL;
+ BIO_free(bio);
+ return result;
+}
+
+static char *ssl_var_lookup_ssl_cert_chain(apr_pool_t *p, STACK_OF(X509) *sk, char *var)
+{
+ char *result;
+ X509 *xs;
+ int n;
+
+ result = NULL;
+
+ if (strspn(var, "0123456789") == strlen(var)) {
+ n = atoi(var);
+ if (n < sk_X509_num(sk)) {
+ xs = sk_X509_value(sk, n);
+ result = ssl_var_lookup_ssl_cert_PEM(p, xs);
+ }
+ }
+
+ return result;
+}
+
+static char *ssl_var_lookup_ssl_cert_PEM(apr_pool_t *p, X509 *xs)
+{
+ char *result;
+ BIO *bio;
+ int n;
+
+ if ((bio = BIO_new(BIO_s_mem())) == NULL)
+ return NULL;
+ PEM_write_bio_X509(bio, xs);
+ n = BIO_pending(bio);
+ result = apr_pcalloc(p, n+1);
+ n = BIO_read(bio, result, n);
+ result[n] = NUL;
+ BIO_free(bio);
+ return result;
+}
+
+static char *ssl_var_lookup_ssl_cert_verify(apr_pool_t *p, conn_rec *c)
+{
+ SSLConnRec *sslconn = myConnConfig(c);
+ char *result;
+ long vrc;
+ const char *verr;
+ const char *vinfo;
+ SSL *ssl;
+ X509 *xs;
+
+ result = NULL;
+ ssl = sslconn->ssl;
+ verr = sslconn->verify_error;
+ vinfo = sslconn->verify_info;
+ vrc = SSL_get_verify_result(ssl);
+ xs = SSL_get_peer_certificate(ssl);
+
+ if (vrc == X509_V_OK && verr == NULL && vinfo == NULL && xs == NULL)
+ /* no client verification done at all */
+ result = "NONE";
+ else if (vrc == X509_V_OK && verr == NULL && vinfo == NULL && xs != NULL)
+        /* client verification done successfully */
+ result = "SUCCESS";
+ else if (vrc == X509_V_OK && vinfo != NULL && strEQ(vinfo, "GENEROUS"))
+        /* client verification done in a generous way */
+ result = "GENEROUS";
+ else
+ /* client verification failed */
+ result = apr_psprintf(p, "FAILED:%s", verr);
+
+ if (xs)
+ X509_free(xs);
+ return result;
+}
+
+static char *ssl_var_lookup_ssl_cipher(apr_pool_t *p, conn_rec *c, char *var)
+{
+ SSLConnRec *sslconn = myConnConfig(c);
+ char *result;
+ BOOL resdup;
+ int usekeysize, algkeysize;
+ SSL *ssl;
+
+ result = NULL;
+ resdup = TRUE;
+
+ ssl = sslconn->ssl;
+ ssl_var_lookup_ssl_cipher_bits(ssl, &usekeysize, &algkeysize);
+
+ if (ssl && strEQ(var, "")) {
+ SSL_CIPHER *cipher = SSL_get_current_cipher(ssl);
+ result = (cipher != NULL ? (char *)SSL_CIPHER_get_name(cipher) : NULL);
+ }
+ else if (strcEQ(var, "_EXPORT"))
+ result = (usekeysize < 56 ? "true" : "false");
+ else if (strcEQ(var, "_USEKEYSIZE")) {
+ result = apr_psprintf(p, "%d", usekeysize);
+ resdup = FALSE;
+ }
+ else if (strcEQ(var, "_ALGKEYSIZE")) {
+ result = apr_psprintf(p, "%d", algkeysize);
+ resdup = FALSE;
+ }
+
+ if (result != NULL && resdup)
+ result = apr_pstrdup(p, result);
+ return result;
+}
+
+static void ssl_var_lookup_ssl_cipher_bits(SSL *ssl, int *usekeysize, int *algkeysize)
+{
+ SSL_CIPHER *cipher;
+
+ *usekeysize = 0;
+ *algkeysize = 0;
+ if (ssl != NULL)
+ if ((cipher = SSL_get_current_cipher(ssl)) != NULL)
+ *usekeysize = SSL_CIPHER_get_bits(cipher, algkeysize);
+ return;
+}
+
+static char *ssl_var_lookup_ssl_version(apr_pool_t *pp, apr_pool_t *p, char *var)
+{
+ static char interface[] = "mod_ssl/" MOD_SSL_VERSION;
+ static char library_interface[] = SSL_LIBRARY_TEXT;
+ static char *library = NULL;
+ char *result;
+
+ if (!library) {
+ char *cp, *cp2;
+ library = apr_pstrdup(pp, SSL_LIBRARY_DYNTEXT);
+ if ((cp = strchr(library, ' ')) != NULL) {
+ *cp = '/';
+ if ((cp2 = strchr(cp, ' ')) != NULL)
+ *cp2 = NUL;
+ }
+ if ((cp = strchr(library_interface, ' ')) != NULL) {
+ *cp = '/';
+ if ((cp2 = strchr(cp, ' ')) != NULL)
+ *cp2 = NUL;
+ }
+ }
+
+ if (strEQ(var, "INTERFACE")) {
+ result = apr_pstrdup(p, interface);
+ }
+ else if (strEQ(var, "LIBRARY_INTERFACE")) {
+ result = apr_pstrdup(p, library_interface);
+ }
+ else if (strEQ(var, "LIBRARY")) {
+ result = apr_pstrdup(p, library);
+ }
+ else {
+ result = NULL;
+ }
+ return result;
+}
+
+
+/* _________________________________________________________________
+**
+** SSL Extension to mod_log_config
+** _________________________________________________________________
+*/
+
+#include "../../modules/loggers/mod_log_config.h"
+
+static const char *ssl_var_log_handler_c(request_rec *r, char *a);
+static const char *ssl_var_log_handler_x(request_rec *r, char *a);
+
+/*
+ * register us for the mod_log_config function registration phase
+ * to establish %{...}c and to be able to expand %{...}x variables.
+ */
+void ssl_var_log_config_register(apr_pool_t *p)
+{
+ static APR_OPTIONAL_FN_TYPE(ap_register_log_handler) *log_pfn_register;
+
+ log_pfn_register = APR_RETRIEVE_OPTIONAL_FN(ap_register_log_handler);
+
+ if (log_pfn_register) {
+ log_pfn_register(p, "c", ssl_var_log_handler_c, 0);
+ log_pfn_register(p, "x", ssl_var_log_handler_x, 0);
+ }
+ return;
+}
+
+/*
+ * implement the %{..}c log function
+ * (we are the only function)
+ */
+static const char *ssl_var_log_handler_c(request_rec *r, char *a)
+{
+ SSLConnRec *sslconn = myConnConfig(r->connection);
+ char *result;
+
+ if (sslconn == NULL || sslconn->ssl == NULL)
+ return NULL;
+ result = NULL;
+ if (strEQ(a, "version"))
+ result = ssl_var_lookup(r->pool, r->server, r->connection, r, "SSL_PROTOCOL");
+ else if (strEQ(a, "cipher"))
+ result = ssl_var_lookup(r->pool, r->server, r->connection, r, "SSL_CIPHER");
+ else if (strEQ(a, "subjectdn") || strEQ(a, "clientcert"))
+ result = ssl_var_lookup(r->pool, r->server, r->connection, r, "SSL_CLIENT_S_DN");
+ else if (strEQ(a, "issuerdn") || strEQ(a, "cacert"))
+ result = ssl_var_lookup(r->pool, r->server, r->connection, r, "SSL_CLIENT_I_DN");
+ else if (strEQ(a, "errcode"))
+ result = "-";
+ else if (strEQ(a, "errstr"))
+ result = (char *)sslconn->verify_error;
+ if (result != NULL && result[0] == NUL)
+ result = NULL;
+ return result;
+}
+
+/*
+ * extend the implementation of the %{..}x log function
+ * (there can be more functions)
+ */
+static const char *ssl_var_log_handler_x(request_rec *r, char *a)
+{
+ char *result;
+
+ result = ssl_var_lookup(r->pool, r->server, r->connection, r, a);
+ if (result != NULL && result[0] == NUL)
+ result = NULL;
+ return result;
+}
+
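
The certificate lookups above (serial, validity, PEM) all follow the same memory-BIO pattern; the sketch below pulls that pattern out into a standalone program. calloc() stands in for the apr_pcalloc() pool allocation used in mod_ssl, and the certificate file path is hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <openssl/pem.h>
#include <openssl/x509.h>

/* Render an X509 into a heap-allocated, NUL-terminated PEM string via a
 * memory BIO, the same pattern as ssl_var_lookup_ssl_cert_PEM() above. */
static char *x509_to_pem(X509 *xs)
{
    BIO *bio;
    char *result;
    int n;

    if ((bio = BIO_new(BIO_s_mem())) == NULL)
        return NULL;
    PEM_write_bio_X509(bio, xs);
    n = BIO_pending(bio);
    result = calloc(1, n + 1);
    if (result != NULL)
        BIO_read(bio, result, n);
    BIO_free(bio);
    return result;
}

int main(void)
{
    /* the certificate path is made up for illustration */
    FILE *fp = fopen("/etc/ssl/certs/server.crt", "r");
    X509 *xs = fp ? PEM_read_X509(fp, NULL, NULL, NULL) : NULL;

    if (xs != NULL) {
        char *pem = x509_to_pem(xs);
        if (pem != NULL)
            fputs(pem, stdout);
        free(pem);
        X509_free(xs);
    }
    if (fp)
        fclose(fp);
    return 0;
}
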
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr.c
new file mode 100644
index 00000000..19e3d757
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr.c
@@ -0,0 +1,82 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_expr.c
+ * Expression Handling
+ */
+ /* ``It is hard to fly with
+ the eagles when you work
+ with the turkeys.''
+ -- Unknown */
+#include "mod_ssl.h"
+
+/* _________________________________________________________________
+**
+** Expression Handling
+** _________________________________________________________________
+*/
+
+ssl_expr_info_type ssl_expr_info;
+char *ssl_expr_error;
+
+ssl_expr *ssl_expr_comp(apr_pool_t *p, char *expr)
+{
+ ssl_expr_info.pool = p;
+ ssl_expr_info.inputbuf = expr;
+ ssl_expr_info.inputlen = strlen(expr);
+ ssl_expr_info.inputptr = ssl_expr_info.inputbuf;
+ ssl_expr_info.expr = FALSE;
+
+ ssl_expr_error = NULL;
+ if (ssl_expr_yyparse())
+ return NULL;
+ return ssl_expr_info.expr;
+}
+
+char *ssl_expr_get_error(void)
+{
+ if (ssl_expr_error == NULL)
+ return "";
+ return ssl_expr_error;
+}
+
+ssl_expr *ssl_expr_make(ssl_expr_node_op op, void *a1, void *a2)
+{
+ ssl_expr *node;
+
+ node = (ssl_expr *)apr_palloc(ssl_expr_info.pool, sizeof(ssl_expr));
+ node->node_op = op;
+ node->node_arg1 = (char *)a1;
+ node->node_arg2 = (char *)a2;
+ return node;
+}
+
+int ssl_expr_exec(request_rec *r, ssl_expr *expr)
+{
+ BOOL rc;
+
+ rc = ssl_expr_eval(r, expr);
+ if (ssl_expr_error != NULL)
+ return (-1);
+ else
+ return (rc ? 1 : 0);
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr.h b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr.h
new file mode 100644
index 00000000..20b9fbdb
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr.h
@@ -0,0 +1,104 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_expr.h
+ * Expression Handling (Header)
+ */
+ /* ``May all your PUSHes be POPed.'' */
+
+#ifndef __SSL_EXPR_H__
+#define __SSL_EXPR_H__
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef TRUE
+#define TRUE !FALSE
+#endif
+
+#ifndef YY_NULL
+#define YY_NULL 0
+#endif
+
+#ifndef MIN
+#define MIN(a,b) (((a)<(b))?(a):(b))
+#endif
+
+#ifndef BOOL
+#define BOOL unsigned int
+#endif
+
+#ifndef NULL
+#define NULL (void *)0
+#endif
+
+#ifndef NUL
+#define NUL '\0'
+#endif
+
+#ifndef YYDEBUG
+#define YYDEBUG 0
+#endif
+
+typedef enum {
+ op_NOP, op_ListElement,
+ op_True, op_False, op_Not, op_Or, op_And, op_Comp,
+ op_EQ, op_NE, op_LT, op_LE, op_GT, op_GE, op_IN, op_REG, op_NRE,
+ op_Digit, op_String, op_Regex, op_Var, op_Func
+} ssl_expr_node_op;
+
+typedef struct {
+ ssl_expr_node_op node_op;
+ void *node_arg1;
+ void *node_arg2;
+ apr_pool_t *p;
+} ssl_expr_node;
+
+typedef ssl_expr_node ssl_expr;
+
+typedef struct {
+ apr_pool_t *pool;
+ char *inputbuf;
+ int inputlen;
+ char *inputptr;
+ ssl_expr *expr;
+} ssl_expr_info_type;
+
+extern ssl_expr_info_type ssl_expr_info;
+extern char *ssl_expr_error;
+
+#define yylval ssl_expr_yylval
+#define yyerror ssl_expr_yyerror
+#define yyinput ssl_expr_yyinput
+
+extern int ssl_expr_yyparse(void);
+extern int ssl_expr_yyerror(char *);
+extern int ssl_expr_yylex(void);
+
+extern ssl_expr *ssl_expr_comp(apr_pool_t *, char *);
+extern int ssl_expr_exec(request_rec *, ssl_expr *);
+extern char *ssl_expr_get_error(void);
+extern ssl_expr *ssl_expr_make(ssl_expr_node_op, void *, void *);
+extern BOOL ssl_expr_eval(request_rec *, ssl_expr *);
+
+#endif /* __SSL_EXPR_H__ */
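
The header above only declares the expression tree through its node structure. As a self-contained sketch, with its own cut-down node type rather than the real ssl_expr structures, the program below shows how such tagged nodes compose and how a recursive evaluator in the style of ssl_expr_eval() walks them; only a few boolean operators are shown.

#include <stdio.h>

/* Cut-down tagged expression node: an operator plus up to two children. */
typedef enum { op_True, op_False, op_Not, op_Or, op_And } expr_op;

typedef struct expr {
    expr_op node_op;
    struct expr *node_arg1;
    struct expr *node_arg2;
} expr;

/* Recursive evaluation keyed off node_op, like ssl_expr_eval() above. */
static int expr_eval(const expr *e)
{
    switch (e->node_op) {
    case op_True:  return 1;
    case op_False: return 0;
    case op_Not:   return !expr_eval(e->node_arg1);
    case op_Or:    return expr_eval(e->node_arg1) || expr_eval(e->node_arg2);
    case op_And:   return expr_eval(e->node_arg1) && expr_eval(e->node_arg2);
    }
    return 0;
}

int main(void)
{
    /* equivalent of "true and not false" */
    expr t = { op_True,  NULL, NULL };
    expr f = { op_False, NULL, NULL };
    expr n = { op_Not,   &f,   NULL };
    expr a = { op_And,   &t,   &n   };

    printf("result = %d\n", expr_eval(&a));   /* prints 1 */
    return 0;
}
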
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_eval.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_eval.c
new file mode 100644
index 00000000..30adeefc
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_eval.c
@@ -0,0 +1,254 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_expr_eval.c
+ * Expression Evaluation
+ */
+ /* ``Make love,
+ not software!''
+ -- Unknown */
+#include "mod_ssl.h"
+
+/* _________________________________________________________________
+**
+** Expression Evaluation
+** _________________________________________________________________
+*/
+
+static BOOL ssl_expr_eval_comp(request_rec *, ssl_expr *);
+static char *ssl_expr_eval_word(request_rec *, ssl_expr *);
+static char *ssl_expr_eval_func_file(request_rec *, char *);
+static int ssl_expr_eval_strcmplex(char *, char *);
+
+BOOL ssl_expr_eval(request_rec *r, ssl_expr *node)
+{
+ switch (node->node_op) {
+ case op_True: {
+ return TRUE;
+ }
+ case op_False: {
+ return FALSE;
+ }
+ case op_Not: {
+ ssl_expr *e = (ssl_expr *)node->node_arg1;
+ return (!ssl_expr_eval(r, e));
+ }
+ case op_Or: {
+ ssl_expr *e1 = (ssl_expr *)node->node_arg1;
+ ssl_expr *e2 = (ssl_expr *)node->node_arg2;
+ return (ssl_expr_eval(r, e1) || ssl_expr_eval(r, e2));
+ }
+ case op_And: {
+ ssl_expr *e1 = (ssl_expr *)node->node_arg1;
+ ssl_expr *e2 = (ssl_expr *)node->node_arg2;
+ return (ssl_expr_eval(r, e1) && ssl_expr_eval(r, e2));
+ }
+ case op_Comp: {
+ ssl_expr *e = (ssl_expr *)node->node_arg1;
+ return ssl_expr_eval_comp(r, e);
+ }
+ default: {
+ ssl_expr_error = "Internal evaluation error: Unknown expression node";
+ return FALSE;
+ }
+ }
+}
+
+static BOOL ssl_expr_eval_comp(request_rec *r, ssl_expr *node)
+{
+ switch (node->node_op) {
+ case op_EQ: {
+ ssl_expr *e1 = (ssl_expr *)node->node_arg1;
+ ssl_expr *e2 = (ssl_expr *)node->node_arg2;
+ return (strcmp(ssl_expr_eval_word(r, e1), ssl_expr_eval_word(r, e2)) == 0);
+ }
+ case op_NE: {
+ ssl_expr *e1 = (ssl_expr *)node->node_arg1;
+ ssl_expr *e2 = (ssl_expr *)node->node_arg2;
+ return (strcmp(ssl_expr_eval_word(r, e1), ssl_expr_eval_word(r, e2)) != 0);
+ }
+ case op_LT: {
+ ssl_expr *e1 = (ssl_expr *)node->node_arg1;
+ ssl_expr *e2 = (ssl_expr *)node->node_arg2;
+ return (ssl_expr_eval_strcmplex(ssl_expr_eval_word(r, e1), ssl_expr_eval_word(r, e2)) < 0);
+ }
+ case op_LE: {
+ ssl_expr *e1 = (ssl_expr *)node->node_arg1;
+ ssl_expr *e2 = (ssl_expr *)node->node_arg2;
+ return (ssl_expr_eval_strcmplex(ssl_expr_eval_word(r, e1), ssl_expr_eval_word(r, e2)) <= 0);
+ }
+ case op_GT: {
+ ssl_expr *e1 = (ssl_expr *)node->node_arg1;
+ ssl_expr *e2 = (ssl_expr *)node->node_arg2;
+ return (ssl_expr_eval_strcmplex(ssl_expr_eval_word(r, e1), ssl_expr_eval_word(r, e2)) > 0);
+ }
+ case op_GE: {
+ ssl_expr *e1 = (ssl_expr *)node->node_arg1;
+ ssl_expr *e2 = (ssl_expr *)node->node_arg2;
+ return (ssl_expr_eval_strcmplex(ssl_expr_eval_word(r, e1), ssl_expr_eval_word(r, e2)) >= 0);
+ }
+ case op_IN: {
+ ssl_expr *e1 = (ssl_expr *)node->node_arg1;
+ ssl_expr *e2 = (ssl_expr *)node->node_arg2;
+ ssl_expr *e3;
+ char *w1 = ssl_expr_eval_word(r, e1);
+ BOOL found = FALSE;
+ do {
+ e3 = (ssl_expr *)e2->node_arg1;
+ e2 = (ssl_expr *)e2->node_arg2;
+ if (strcmp(w1, ssl_expr_eval_word(r, e3)) == 0) {
+ found = TRUE;
+ break;
+ }
+ } while (e2 != NULL);
+ return found;
+ }
+ case op_REG: {
+ ssl_expr *e1;
+ ssl_expr *e2;
+ char *word;
+ regex_t *regex;
+
+ e1 = (ssl_expr *)node->node_arg1;
+ e2 = (ssl_expr *)node->node_arg2;
+ word = ssl_expr_eval_word(r, e1);
+ regex = (regex_t *)(e2->node_arg1);
+ return (ap_regexec(regex, word, 0, NULL, 0) == 0);
+ }
+ case op_NRE: {
+ ssl_expr *e1;
+ ssl_expr *e2;
+ char *word;
+ regex_t *regex;
+
+ e1 = (ssl_expr *)node->node_arg1;
+ e2 = (ssl_expr *)node->node_arg2;
+ word = ssl_expr_eval_word(r, e1);
+ regex = (regex_t *)(e2->node_arg1);
+ return !(ap_regexec(regex, word, 0, NULL, 0) == 0);
+ }
+ default: {
+ ssl_expr_error = "Internal evaluation error: Unknown expression node";
+ return FALSE;
+ }
+ }
+}
+
+static char *ssl_expr_eval_word(request_rec *r, ssl_expr *node)
+{
+ switch (node->node_op) {
+ case op_Digit: {
+ char *string = (char *)node->node_arg1;
+ return string;
+ }
+ case op_String: {
+ char *string = (char *)node->node_arg1;
+ return string;
+ }
+ case op_Var: {
+ char *var = (char *)node->node_arg1;
+ char *val = ssl_var_lookup(r->pool, r->server, r->connection, r, var);
+ return (val == NULL ? "" : val);
+ }
+ case op_Func: {
+ char *name = (char *)node->node_arg1;
+ ssl_expr *args = (ssl_expr *)node->node_arg2;
+ if (strEQ(name, "file"))
+ return ssl_expr_eval_func_file(r, (char *)(args->node_arg1));
+ else {
+ ssl_expr_error = "Internal evaluation error: Unknown function name";
+ return "";
+ }
+ }
+ default: {
+ ssl_expr_error = "Internal evaluation error: Unknown expression node";
+ return FALSE;
+ }
+ }
+}
+
+static char *ssl_expr_eval_func_file(request_rec *r, char *filename)
+{
+ apr_file_t *fp;
+ char *buf;
+ apr_off_t offset;
+ apr_size_t len;
+ apr_finfo_t finfo;
+
+ if (apr_file_open(&fp, filename, APR_READ|APR_BUFFERED,
+ APR_OS_DEFAULT, r->pool) != APR_SUCCESS) {
+ ssl_expr_error = "Cannot open file";
+ return "";
+ }
+ apr_file_info_get(&finfo, APR_FINFO_SIZE, fp);
+ if ((finfo.size + 1) != ((apr_size_t)finfo.size + 1)) {
+ ssl_expr_error = "Huge file cannot be read";
+ apr_file_close(fp);
+ return "";
+ }
+ len = (apr_size_t)finfo.size;
+ if (len == 0) {
+ buf = (char *)apr_palloc(r->pool, sizeof(char) * 1);
+ *buf = NUL;
+ }
+ else {
+ if ((buf = (char *)apr_palloc(r->pool, sizeof(char)*(len+1))) == NULL) {
+ ssl_expr_error = "Cannot allocate memory";
+ apr_file_close(fp);
+ return "";
+ }
+ offset = 0;
+ apr_file_seek(fp, APR_SET, &offset);
+ if (apr_file_read(fp, buf, &len) != APR_SUCCESS) {
+ ssl_expr_error = "Cannot read from file";
+ apr_file_close(fp);
+ return "";
+ }
+ buf[len] = NUL;
+ }
+ apr_file_close(fp);
+ return buf;
+}
+
+/* a variant of strcmp(3) which also works correctly for numeric strings */
+static int ssl_expr_eval_strcmplex(char *cpNum1, char *cpNum2)
+{
+ int i, n1, n2;
+
+ if (cpNum1 == NULL)
+ return -1;
+ if (cpNum2 == NULL)
+ return +1;
+ n1 = strlen(cpNum1);
+ n2 = strlen(cpNum2);
+ if (n1 > n2)
+ return 1;
+ if (n1 < n2)
+ return -1;
+ for (i = 0; i < n1; i++) {
+ if (cpNum1[i] > cpNum2[i])
+ return 1;
+ if (cpNum1[i] < cpNum2[i])
+ return -1;
+ }
+ return 0;
+}
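
ssl_expr_eval_strcmplex() above orders decimal strings by comparing lengths before bytes; the small standalone program below contrasts that behaviour with plain strcmp() on the classic "9" versus "10" case.

#include <stdio.h>
#include <string.h>

/* Same length-first comparison idea as ssl_expr_eval_strcmplex() above:
 * for decimal strings of equal sign and no leading zeros, a longer string
 * is a larger number, so "9" sorts below "10". */
static int strcmplex(const char *a, const char *b)
{
    size_t n1, n2, i;

    if (a == NULL) return -1;
    if (b == NULL) return 1;
    n1 = strlen(a);
    n2 = strlen(b);
    if (n1 > n2) return 1;
    if (n1 < n2) return -1;
    for (i = 0; i < n1; i++) {
        if (a[i] > b[i]) return 1;
        if (a[i] < b[i]) return -1;
    }
    return 0;
}

int main(void)
{
    printf("strcmp(\"9\",\"10\")    = %d\n", strcmp("9", "10"));     /* positive: lexical order */
    printf("strcmplex(\"9\",\"10\") = %d\n", strcmplex("9", "10"));  /* negative: numeric order */
    return 0;
}
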
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_parse.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_parse.c
new file mode 100644
index 00000000..6f3f990e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_parse.c
@@ -0,0 +1,1081 @@
+
+/* A Bison parser, made from ssl_expr_parse.y
+ by GNU Bison version 1.28 */
+
+#define YYBISON 1 /* Identify Bison output. */
+
+#define T_TRUE 257
+#define T_FALSE 258
+#define T_DIGIT 259
+#define T_ID 260
+#define T_STRING 261
+#define T_REGEX 262
+#define T_REGEX_I 263
+#define T_FUNC_FILE 264
+#define T_OP_EQ 265
+#define T_OP_NE 266
+#define T_OP_LT 267
+#define T_OP_LE 268
+#define T_OP_GT 269
+#define T_OP_GE 270
+#define T_OP_REG 271
+#define T_OP_NRE 272
+#define T_OP_IN 273
+#define T_OP_OR 274
+#define T_OP_AND 275
+#define T_OP_NOT 276
+
+#line 68 "ssl_expr_parse.y"
+
+#include "mod_ssl.h"
+
+#line 72 "ssl_expr_parse.y"
+typedef union {
+ char *cpVal;
+ ssl_expr *exVal;
+} YYSTYPE;
+#include <stdio.h>
+
+#ifndef __cplusplus
+#ifndef __STDC__
+#define const
+#endif
+#endif
+
+
+
+#define YYFINAL 53
+#define YYFLAG -32768
+#define YYNTBASE 29
+
+#define YYTRANSLATE(x) ((unsigned)(x) <= 276 ? ssl_expr_yytranslate[x] : 36)
+
+static const char ssl_expr_yytranslate[] = { 0,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 28, 2, 2, 23,
+ 24, 2, 2, 27, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 25, 2, 26, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22
+};
+
+#if YYDEBUG != 0
+static const short ssl_expr_yyprhs[] = { 0,
+ 0, 2, 4, 6, 9, 13, 17, 19, 23, 27,
+ 31, 35, 39, 43, 47, 53, 57, 61, 63, 67,
+ 69, 71, 76, 78, 80, 82
+};
+
+static const short ssl_expr_yyrhs[] = { 30,
+ 0, 3, 0, 4, 0, 22, 30, 0, 30, 20,
+ 30, 0, 30, 21, 30, 0, 31, 0, 23, 30,
+ 24, 0, 33, 11, 33, 0, 33, 12, 33, 0,
+ 33, 13, 33, 0, 33, 14, 33, 0, 33, 15,
+ 33, 0, 33, 16, 33, 0, 33, 19, 25, 32,
+ 26, 0, 33, 17, 34, 0, 33, 18, 34, 0,
+ 33, 0, 32, 27, 33, 0, 5, 0, 7, 0,
+ 28, 25, 6, 26, 0, 35, 0, 8, 0, 9,
+ 0, 10, 23, 7, 24, 0
+};
+
+#endif
+
+#if YYDEBUG != 0
+static const short ssl_expr_yyrline[] = { 0,
+ 115, 118, 119, 120, 121, 122, 123, 124, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 138, 139, 142,
+ 143, 144, 145, 148, 158, 170
+};
+#endif
+
+
+#if YYDEBUG != 0 || defined (YYERROR_VERBOSE)
+
+static const char * const ssl_expr_yytname[] = { "$","error","$undefined.","T_TRUE",
+"T_FALSE","T_DIGIT","T_ID","T_STRING","T_REGEX","T_REGEX_I","T_FUNC_FILE","T_OP_EQ",
+"T_OP_NE","T_OP_LT","T_OP_LE","T_OP_GT","T_OP_GE","T_OP_REG","T_OP_NRE","T_OP_IN",
+"T_OP_OR","T_OP_AND","T_OP_NOT","'('","')'","'{'","'}'","','","'%'","root","expr",
+"comparison","words","word","regex","funccall", NULL
+};
+#endif
+
+static const short ssl_expr_yyr1[] = { 0,
+ 29, 30, 30, 30, 30, 30, 30, 30, 31, 31,
+ 31, 31, 31, 31, 31, 31, 31, 32, 32, 33,
+ 33, 33, 33, 34, 34, 35
+};
+
+static const short ssl_expr_yyr2[] = { 0,
+ 1, 1, 1, 2, 3, 3, 1, 3, 3, 3,
+ 3, 3, 3, 3, 5, 3, 3, 1, 3, 1,
+ 1, 4, 1, 1, 1, 4
+};
+
+static const short ssl_expr_yydefact[] = { 0,
+ 2, 3, 20, 21, 0, 0, 0, 0, 1, 7,
+ 0, 23, 0, 4, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 8, 0,
+ 5, 6, 9, 10, 11, 12, 13, 14, 24, 25,
+ 16, 17, 0, 26, 22, 0, 18, 15, 0, 19,
+ 0, 0, 0
+};
+
+static const short ssl_expr_yydefgoto[] = { 51,
+ 9, 10, 46, 11, 41, 12
+};
+
+static const short ssl_expr_yypact[] = { 3,
+-32768,-32768,-32768,-32768, -11, 3, 3, -10, 0,-32768,
+ 22,-32768, 16,-32768, -2, 23, 3, 3, 4, 4,
+ 4, 4, 4, 4, 34, 34, 21, 24,-32768, 25,
+ 26,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,
+-32768,-32768, 4,-32768,-32768, 18,-32768,-32768, 4,-32768,
+ 49, 50,-32768
+};
+
+static const short ssl_expr_yypgoto[] = {-32768,
+ 10,-32768,-32768, -19, 27,-32768
+};
+
+
+#define YYLAST 53
+
+
+static const short ssl_expr_yytable[] = { 33,
+ 34, 35, 36, 37, 38, 1, 2, 3, 3, 4,
+ 4, 13, 5, 5, 16, 14, 15, 17, 18, 17,
+ 18, 29, 28, 47, 6, 7, 31, 32, 30, 50,
+ 8, 8, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 39, 40, 48, 49, 43, 18, 44, 52, 53,
+ 45, 0, 42
+};
+
+static const short ssl_expr_yycheck[] = { 19,
+ 20, 21, 22, 23, 24, 3, 4, 5, 5, 7,
+ 7, 23, 10, 10, 25, 6, 7, 20, 21, 20,
+ 21, 24, 7, 43, 22, 23, 17, 18, 6, 49,
+ 28, 28, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 8, 9, 26, 27, 25, 21, 24, 0, 0,
+ 26, -1, 26
+};
+/* -*-C-*- Note some compilers choke on comments on `#line' lines. */
+#line 3 "/usr/local/share/bison.simple"
+/* This file comes from bison-1.28. */
+
+/* Skeleton output parser for bison,
+ Copyright (C) 1984, 1989, 1990 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+/* As a special exception, when this file is copied by Bison into a
+ Bison output file, you may use that output file without restriction.
+ This special exception was added by the Free Software Foundation
+ in version 1.24 of Bison. */
+
+/* This is the parser code that is written into each bison parser
+ when the %semantic_parser declaration is not specified in the grammar.
+ It was written by Richard Stallman by simplifying the hairy parser
+ used when %semantic_parser is specified. */
+
+#ifndef YYSTACK_USE_ALLOCA
+#ifdef alloca
+#define YYSTACK_USE_ALLOCA
+#else /* alloca not defined */
+#ifdef __GNUC__
+#define YYSTACK_USE_ALLOCA
+#define alloca __builtin_alloca
+#else /* not GNU C. */
+#if (!defined (__STDC__) && defined (sparc)) || defined (__sparc__) || defined (__sparc) || defined (__sgi) || (defined (__sun) && defined (__i386))
+#define YYSTACK_USE_ALLOCA
+#include <alloca.h>
+#else /* not sparc */
+/* We think this test detects Watcom and Microsoft C. */
+/* This used to test MSDOS, but that is a bad idea
+ since that symbol is in the user namespace. */
+#if (defined (_MSDOS) || defined (_MSDOS_)) && !defined (__TURBOC__)
+#if 0 /* No need for malloc.h, which pollutes the namespace;
+ instead, just don't use alloca. */
+#include <malloc.h>
+#endif
+#else /* not MSDOS, or __TURBOC__ */
+#if defined(_AIX)
+/* I don't know what this was needed for, but it pollutes the namespace.
+ So I turned it off. rms, 2 May 1997. */
+/* #include <malloc.h> */
+#pragma alloca
+#define YYSTACK_USE_ALLOCA
+#else /* not MSDOS, or __TURBOC__, or _AIX */
+#if 0
+#ifdef __hpux /* haible@ilog.fr says this works for HPUX 9.05 and up,
+ and on HPUX 10. Eventually we can turn this on. */
+#define YYSTACK_USE_ALLOCA
+#define alloca __builtin_alloca
+#endif /* __hpux */
+#endif
+#endif /* not _AIX */
+#endif /* not MSDOS, or __TURBOC__ */
+#endif /* not sparc */
+#endif /* not GNU C */
+#endif /* alloca not defined */
+#endif /* YYSTACK_USE_ALLOCA not defined */
+
+#ifdef YYSTACK_USE_ALLOCA
+#define YYSTACK_ALLOC alloca
+#else
+#define YYSTACK_ALLOC malloc
+#endif
+
+/* Note: there must be only one dollar sign in this file.
+ It is replaced by the list of actions, each action
+ as one case of the switch. */
+
+#define ssl_expr_yyerrok (ssl_expr_yyerrstatus = 0)
+#define ssl_expr_yyclearin (ssl_expr_yychar = YYEMPTY)
+#define YYEMPTY -2
+#define YYEOF 0
+#define YYACCEPT goto ssl_expr_yyacceptlab
+#define YYABORT goto ssl_expr_yyabortlab
+#define YYERROR goto ssl_expr_yyerrlab1
+/* Like YYERROR except do call ssl_expr_yyerror.
+ This remains here temporarily to ease the
+ transition to the new meaning of YYERROR, for GCC.
+ Once GCC version 2 has supplanted version 1, this can go. */
+#define YYFAIL goto ssl_expr_yyerrlab
+#define YYRECOVERING() (!!ssl_expr_yyerrstatus)
+#define YYBACKUP(token, value) \
+do \
+ if (ssl_expr_yychar == YYEMPTY && ssl_expr_yylen == 1) \
+ { ssl_expr_yychar = (token), ssl_expr_yylval = (value); \
+ ssl_expr_yychar1 = YYTRANSLATE (ssl_expr_yychar); \
+ YYPOPSTACK; \
+ goto ssl_expr_yybackup; \
+ } \
+ else \
+ { ssl_expr_yyerror ("syntax error: cannot back up"); YYERROR; } \
+while (0)
+
+#define YYTERROR 1
+#define YYERRCODE 256
+
+#ifndef YYPURE
+#define YYLEX ssl_expr_yylex()
+#endif
+
+#ifdef YYPURE
+#ifdef YYLSP_NEEDED
+#ifdef YYLEX_PARAM
+#define YYLEX ssl_expr_yylex(&ssl_expr_yylval, &ssl_expr_yylloc, YYLEX_PARAM)
+#else
+#define YYLEX ssl_expr_yylex(&ssl_expr_yylval, &ssl_expr_yylloc)
+#endif
+#else /* not YYLSP_NEEDED */
+#ifdef YYLEX_PARAM
+#define YYLEX ssl_expr_yylex(&ssl_expr_yylval, YYLEX_PARAM)
+#else
+#define YYLEX ssl_expr_yylex(&ssl_expr_yylval)
+#endif
+#endif /* not YYLSP_NEEDED */
+#endif
+
+/* If nonreentrant, generate the variables here */
+
+#ifndef YYPURE
+
+int ssl_expr_yychar; /* the lookahead symbol */
+YYSTYPE ssl_expr_yylval; /* the semantic value of the */
+ /* lookahead symbol */
+
+#ifdef YYLSP_NEEDED
+YYLTYPE ssl_expr_yylloc; /* location data for the lookahead */
+ /* symbol */
+#endif
+
+int ssl_expr_yynerrs; /* number of parse errors so far */
+#endif /* not YYPURE */
+
+#if YYDEBUG != 0
+int ssl_expr_yydebug; /* nonzero means print parse trace */
+/* Since this is uninitialized, it does not stop multiple parsers
+ from coexisting. */
+#endif
+
+/* YYINITDEPTH indicates the initial size of the parser's stacks */
+
+#ifndef YYINITDEPTH
+#define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH is the maximum size the stacks can grow to
+ (effective only if the built-in stack extension method is used). */
+
+#if YYMAXDEPTH == 0
+#undef YYMAXDEPTH
+#endif
+
+#ifndef YYMAXDEPTH
+#define YYMAXDEPTH 10000
+#endif
+
+/* Define __ssl_expr_yy_memcpy. Note that the size argument
+ should be passed with type unsigned int, because that is what the non-GCC
+ definitions require. With GCC, __builtin_memcpy takes an arg
+ of type size_t, but it can handle unsigned int. */
+
+#if __GNUC__ > 1 /* GNU C and GNU C++ define this. */
+#define __ssl_expr_yy_memcpy(TO,FROM,COUNT) __builtin_memcpy(TO,FROM,COUNT)
+#else /* not GNU C or C++ */
+#ifndef __cplusplus
+
+/* This is the most reliable way to avoid incompatibilities
+ in available built-in functions on various systems. */
+static void
+__ssl_expr_yy_memcpy (to, from, count)
+ char *to;
+ char *from;
+ unsigned int count;
+{
+ register char *f = from;
+ register char *t = to;
+ register int i = count;
+
+ while (i-- > 0)
+ *t++ = *f++;
+}
+
+#else /* __cplusplus */
+
+/* This is the most reliable way to avoid incompatibilities
+ in available built-in functions on various systems. */
+static void
+__ssl_expr_yy_memcpy (char *to, char *from, unsigned int count)
+{
+ register char *t = to;
+ register char *f = from;
+ register int i = count;
+
+ while (i-- > 0)
+ *t++ = *f++;
+}
+
+#endif
+#endif
+
+#line 217 "/usr/local/share/bison.simple"
+
+/* The user can define YYPARSE_PARAM as the name of an argument to be passed
+ into ssl_expr_yyparse. The argument should have type void *.
+ It should actually point to an object.
+ Grammar actions can access the variable by casting it
+ to the proper pointer type. */
+
+#ifdef YYPARSE_PARAM
+#ifdef __cplusplus
+#define YYPARSE_PARAM_ARG void *YYPARSE_PARAM
+#define YYPARSE_PARAM_DECL
+#else /* not __cplusplus */
+#define YYPARSE_PARAM_ARG YYPARSE_PARAM
+#define YYPARSE_PARAM_DECL void *YYPARSE_PARAM;
+#endif /* not __cplusplus */
+#else /* not YYPARSE_PARAM */
+#define YYPARSE_PARAM_ARG
+#define YYPARSE_PARAM_DECL
+#endif /* not YYPARSE_PARAM */
+
+/* Prevent warning if -Wstrict-prototypes. */
+#ifdef __GNUC__
+#ifdef YYPARSE_PARAM
+int ssl_expr_yyparse (void *);
+#else
+int ssl_expr_yyparse (void);
+#endif
+#endif
+
+int
+ssl_expr_yyparse(YYPARSE_PARAM_ARG)
+ YYPARSE_PARAM_DECL
+{
+ register int ssl_expr_yystate;
+ register int ssl_expr_yyn;
+ register short *ssl_expr_yyssp;
+ register YYSTYPE *ssl_expr_yyvsp;
+ int ssl_expr_yyerrstatus; /* number of tokens to shift before error messages enabled */
+ int ssl_expr_yychar1 = 0; /* lookahead token as an internal (translated) token number */
+
+ short ssl_expr_yyssa[YYINITDEPTH]; /* the state stack */
+ YYSTYPE ssl_expr_yyvsa[YYINITDEPTH]; /* the semantic value stack */
+
+ short *ssl_expr_yyss = ssl_expr_yyssa; /* refer to the stacks thru separate pointers */
+ YYSTYPE *ssl_expr_yyvs = ssl_expr_yyvsa; /* to allow ssl_expr_yyoverflow to reallocate them elsewhere */
+
+#ifdef YYLSP_NEEDED
+ YYLTYPE ssl_expr_yylsa[YYINITDEPTH]; /* the location stack */
+ YYLTYPE *ssl_expr_yyls = ssl_expr_yylsa;
+ YYLTYPE *ssl_expr_yylsp;
+
+#define YYPOPSTACK (ssl_expr_yyvsp--, ssl_expr_yyssp--, ssl_expr_yylsp--)
+#else
+#define YYPOPSTACK (ssl_expr_yyvsp--, ssl_expr_yyssp--)
+#endif
+
+ int ssl_expr_yystacksize = YYINITDEPTH;
+ int ssl_expr_yyfree_stacks = 0;
+
+#ifdef YYPURE
+ int ssl_expr_yychar;
+ YYSTYPE ssl_expr_yylval;
+ int ssl_expr_yynerrs;
+#ifdef YYLSP_NEEDED
+ YYLTYPE ssl_expr_yylloc;
+#endif
+#endif
+
+ YYSTYPE ssl_expr_yyval; /* the variable used to return */
+ /* semantic values from the action */
+ /* routines */
+
+ int ssl_expr_yylen;
+
+#if YYDEBUG != 0
+ if (ssl_expr_yydebug)
+ fprintf(stderr, "Starting parse\n");
+#endif
+
+ ssl_expr_yystate = 0;
+ ssl_expr_yyerrstatus = 0;
+ ssl_expr_yynerrs = 0;
+ ssl_expr_yychar = YYEMPTY; /* Cause a token to be read. */
+
+ /* Initialize stack pointers.
+ Waste one element of value and location stack
+ so that they stay on the same level as the state stack.
+ The wasted elements are never initialized. */
+
+ ssl_expr_yyssp = ssl_expr_yyss - 1;
+ ssl_expr_yyvsp = ssl_expr_yyvs;
+#ifdef YYLSP_NEEDED
+ ssl_expr_yylsp = ssl_expr_yyls;
+#endif
+
+/* Push a new state, which is found in ssl_expr_yystate . */
+/* In all cases, when you get here, the value and location stacks
+ have just been pushed. so pushing a state here evens the stacks. */
+ssl_expr_yynewstate:
+
+ *++ssl_expr_yyssp = ssl_expr_yystate;
+
+ if (ssl_expr_yyssp >= ssl_expr_yyss + ssl_expr_yystacksize - 1)
+ {
+ /* Give user a chance to reallocate the stack */
+ /* Use copies of these so that the &'s don't force the real ones into memory. */
+ YYSTYPE *ssl_expr_yyvs1 = ssl_expr_yyvs;
+ short *ssl_expr_yyss1 = ssl_expr_yyss;
+#ifdef YYLSP_NEEDED
+ YYLTYPE *ssl_expr_yyls1 = ssl_expr_yyls;
+#endif
+
+ /* Get the current used size of the three stacks, in elements. */
+ int size = ssl_expr_yyssp - ssl_expr_yyss + 1;
+
+#ifdef ssl_expr_yyoverflow
+ /* Each stack pointer address is followed by the size of
+ the data in use in that stack, in bytes. */
+#ifdef YYLSP_NEEDED
+ /* This used to be a conditional around just the two extra args,
+ but that might be undefined if ssl_expr_yyoverflow is a macro. */
+ ssl_expr_yyoverflow("parser stack overflow",
+ &ssl_expr_yyss1, size * sizeof (*ssl_expr_yyssp),
+ &ssl_expr_yyvs1, size * sizeof (*ssl_expr_yyvsp),
+ &ssl_expr_yyls1, size * sizeof (*ssl_expr_yylsp),
+ &ssl_expr_yystacksize);
+#else
+ ssl_expr_yyoverflow("parser stack overflow",
+ &ssl_expr_yyss1, size * sizeof (*ssl_expr_yyssp),
+ &ssl_expr_yyvs1, size * sizeof (*ssl_expr_yyvsp),
+ &ssl_expr_yystacksize);
+#endif
+
+ ssl_expr_yyss = ssl_expr_yyss1; ssl_expr_yyvs = ssl_expr_yyvs1;
+#ifdef YYLSP_NEEDED
+ ssl_expr_yyls = ssl_expr_yyls1;
+#endif
+#else /* no ssl_expr_yyoverflow */
+ /* Extend the stack our own way. */
+ if (ssl_expr_yystacksize >= YYMAXDEPTH)
+ {
+ ssl_expr_yyerror("parser stack overflow");
+ if (ssl_expr_yyfree_stacks)
+ {
+ free (ssl_expr_yyss);
+ free (ssl_expr_yyvs);
+#ifdef YYLSP_NEEDED
+ free (ssl_expr_yyls);
+#endif
+ }
+ return 2;
+ }
+ ssl_expr_yystacksize *= 2;
+ if (ssl_expr_yystacksize > YYMAXDEPTH)
+ ssl_expr_yystacksize = YYMAXDEPTH;
+#ifndef YYSTACK_USE_ALLOCA
+ ssl_expr_yyfree_stacks = 1;
+#endif
+ ssl_expr_yyss = (short *) YYSTACK_ALLOC (ssl_expr_yystacksize * sizeof (*ssl_expr_yyssp));
+ __ssl_expr_yy_memcpy ((char *)ssl_expr_yyss, (char *)ssl_expr_yyss1,
+ size * (unsigned int) sizeof (*ssl_expr_yyssp));
+ ssl_expr_yyvs = (YYSTYPE *) YYSTACK_ALLOC (ssl_expr_yystacksize * sizeof (*ssl_expr_yyvsp));
+ __ssl_expr_yy_memcpy ((char *)ssl_expr_yyvs, (char *)ssl_expr_yyvs1,
+ size * (unsigned int) sizeof (*ssl_expr_yyvsp));
+#ifdef YYLSP_NEEDED
+ ssl_expr_yyls = (YYLTYPE *) YYSTACK_ALLOC (ssl_expr_yystacksize * sizeof (*ssl_expr_yylsp));
+ __ssl_expr_yy_memcpy ((char *)ssl_expr_yyls, (char *)ssl_expr_yyls1,
+ size * (unsigned int) sizeof (*ssl_expr_yylsp));
+#endif
+#endif /* no ssl_expr_yyoverflow */
+
+ ssl_expr_yyssp = ssl_expr_yyss + size - 1;
+ ssl_expr_yyvsp = ssl_expr_yyvs + size - 1;
+#ifdef YYLSP_NEEDED
+ ssl_expr_yylsp = ssl_expr_yyls + size - 1;
+#endif
+
+#if YYDEBUG != 0
+ if (ssl_expr_yydebug)
+ fprintf(stderr, "Stack size increased to %d\n", ssl_expr_yystacksize);
+#endif
+
+ if (ssl_expr_yyssp >= ssl_expr_yyss + ssl_expr_yystacksize - 1)
+ YYABORT;
+ }
+
+#if YYDEBUG != 0
+ if (ssl_expr_yydebug)
+ fprintf(stderr, "Entering state %d\n", ssl_expr_yystate);
+#endif
+
+ goto ssl_expr_yybackup;
+ ssl_expr_yybackup:
+
+/* Do appropriate processing given the current state. */
+/* Read a lookahead token if we need one and don't already have one. */
+/* ssl_expr_yyresume: */
+
+ /* First try to decide what to do without reference to lookahead token. */
+
+ ssl_expr_yyn = ssl_expr_yypact[ssl_expr_yystate];
+ if (ssl_expr_yyn == YYFLAG)
+ goto ssl_expr_yydefault;
+
+ /* Not known => get a lookahead token if don't already have one. */
+
+ /* ssl_expr_yychar is either YYEMPTY or YYEOF
+ or a valid token in external form. */
+
+ if (ssl_expr_yychar == YYEMPTY)
+ {
+#if YYDEBUG != 0
+ if (ssl_expr_yydebug)
+ fprintf(stderr, "Reading a token: ");
+#endif
+ ssl_expr_yychar = YYLEX;
+ }
+
+  /* Convert token to internal form (in ssl_expr_yychar1) for indexing tables with it. */
+
+ if (ssl_expr_yychar <= 0) /* This means end of input. */
+ {
+ ssl_expr_yychar1 = 0;
+ ssl_expr_yychar = YYEOF; /* Don't call YYLEX any more */
+
+#if YYDEBUG != 0
+ if (ssl_expr_yydebug)
+ fprintf(stderr, "Now at end of input.\n");
+#endif
+ }
+ else
+ {
+ ssl_expr_yychar1 = YYTRANSLATE(ssl_expr_yychar);
+
+#if YYDEBUG != 0
+ if (ssl_expr_yydebug)
+ {
+ fprintf (stderr, "Next token is %d (%s", ssl_expr_yychar, ssl_expr_yytname[ssl_expr_yychar1]);
+ /* Give the individual parser a way to print the precise meaning
+ of a token, for further debugging info. */
+#ifdef YYPRINT
+ YYPRINT (stderr, ssl_expr_yychar, ssl_expr_yylval);
+#endif
+ fprintf (stderr, ")\n");
+ }
+#endif
+ }
+
+ ssl_expr_yyn += ssl_expr_yychar1;
+ if (ssl_expr_yyn < 0 || ssl_expr_yyn > YYLAST || ssl_expr_yycheck[ssl_expr_yyn] != ssl_expr_yychar1)
+ goto ssl_expr_yydefault;
+
+ ssl_expr_yyn = ssl_expr_yytable[ssl_expr_yyn];
+
+ /* ssl_expr_yyn is what to do for this token type in this state.
+ Negative => reduce, -ssl_expr_yyn is rule number.
+ Positive => shift, ssl_expr_yyn is new state.
+ New state is final state => don't bother to shift,
+ just return success.
+ 0, or most negative number => error. */
+
+ if (ssl_expr_yyn < 0)
+ {
+ if (ssl_expr_yyn == YYFLAG)
+ goto ssl_expr_yyerrlab;
+ ssl_expr_yyn = -ssl_expr_yyn;
+ goto ssl_expr_yyreduce;
+ }
+ else if (ssl_expr_yyn == 0)
+ goto ssl_expr_yyerrlab;
+
+ if (ssl_expr_yyn == YYFINAL)
+ YYACCEPT;
+
+ /* Shift the lookahead token. */
+
+#if YYDEBUG != 0
+ if (ssl_expr_yydebug)
+ fprintf(stderr, "Shifting token %d (%s), ", ssl_expr_yychar, ssl_expr_yytname[ssl_expr_yychar1]);
+#endif
+
+ /* Discard the token being shifted unless it is eof. */
+ if (ssl_expr_yychar != YYEOF)
+ ssl_expr_yychar = YYEMPTY;
+
+ *++ssl_expr_yyvsp = ssl_expr_yylval;
+#ifdef YYLSP_NEEDED
+ *++ssl_expr_yylsp = ssl_expr_yylloc;
+#endif
+
+ /* count tokens shifted since error; after three, turn off error status. */
+ if (ssl_expr_yyerrstatus) ssl_expr_yyerrstatus--;
+
+ ssl_expr_yystate = ssl_expr_yyn;
+ goto ssl_expr_yynewstate;
+
+/* Do the default action for the current state. */
+ssl_expr_yydefault:
+
+ ssl_expr_yyn = ssl_expr_yydefact[ssl_expr_yystate];
+ if (ssl_expr_yyn == 0)
+ goto ssl_expr_yyerrlab;
+
+/* Do a reduction. ssl_expr_yyn is the number of a rule to reduce with. */
+ssl_expr_yyreduce:
+ ssl_expr_yylen = ssl_expr_yyr2[ssl_expr_yyn];
+ if (ssl_expr_yylen > 0)
+ ssl_expr_yyval = ssl_expr_yyvsp[1-ssl_expr_yylen]; /* implement default value of the action */
+
+#if YYDEBUG != 0
+ if (ssl_expr_yydebug)
+ {
+ int i;
+
+ fprintf (stderr, "Reducing via rule %d (line %d), ",
+ ssl_expr_yyn, ssl_expr_yyrline[ssl_expr_yyn]);
+
+ /* Print the symbols being reduced, and their result. */
+ for (i = ssl_expr_yyprhs[ssl_expr_yyn]; ssl_expr_yyrhs[i] > 0; i++)
+ fprintf (stderr, "%s ", ssl_expr_yytname[ssl_expr_yyrhs[i]]);
+ fprintf (stderr, " -> %s\n", ssl_expr_yytname[ssl_expr_yyr1[ssl_expr_yyn]]);
+ }
+#endif
+
+
+ switch (ssl_expr_yyn) {
+
+case 1:
+#line 115 "ssl_expr_parse.y"
+{ ssl_expr_info.expr = ssl_expr_yyvsp[0].exVal; ;
+ break;}
+case 2:
+#line 118 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_True, NULL, NULL); ;
+ break;}
+case 3:
+#line 119 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_False, NULL, NULL); ;
+ break;}
+case 4:
+#line 120 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_Not, ssl_expr_yyvsp[0].exVal, NULL); ;
+ break;}
+case 5:
+#line 121 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_Or, ssl_expr_yyvsp[-2].exVal, ssl_expr_yyvsp[0].exVal); ;
+ break;}
+case 6:
+#line 122 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_And, ssl_expr_yyvsp[-2].exVal, ssl_expr_yyvsp[0].exVal); ;
+ break;}
+case 7:
+#line 123 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_Comp, ssl_expr_yyvsp[0].exVal, NULL); ;
+ break;}
+case 8:
+#line 124 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_yyvsp[-1].exVal; ;
+ break;}
+case 9:
+#line 127 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_EQ, ssl_expr_yyvsp[-2].exVal, ssl_expr_yyvsp[0].exVal); ;
+ break;}
+case 10:
+#line 128 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_NE, ssl_expr_yyvsp[-2].exVal, ssl_expr_yyvsp[0].exVal); ;
+ break;}
+case 11:
+#line 129 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_LT, ssl_expr_yyvsp[-2].exVal, ssl_expr_yyvsp[0].exVal); ;
+ break;}
+case 12:
+#line 130 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_LE, ssl_expr_yyvsp[-2].exVal, ssl_expr_yyvsp[0].exVal); ;
+ break;}
+case 13:
+#line 131 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_GT, ssl_expr_yyvsp[-2].exVal, ssl_expr_yyvsp[0].exVal); ;
+ break;}
+case 14:
+#line 132 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_GE, ssl_expr_yyvsp[-2].exVal, ssl_expr_yyvsp[0].exVal); ;
+ break;}
+case 15:
+#line 133 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_IN, ssl_expr_yyvsp[-4].exVal, ssl_expr_yyvsp[-1].exVal); ;
+ break;}
+case 16:
+#line 134 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_REG, ssl_expr_yyvsp[-2].exVal, ssl_expr_yyvsp[0].exVal); ;
+ break;}
+case 17:
+#line 135 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_NRE, ssl_expr_yyvsp[-2].exVal, ssl_expr_yyvsp[0].exVal); ;
+ break;}
+case 18:
+#line 138 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_ListElement, ssl_expr_yyvsp[0].exVal, NULL); ;
+ break;}
+case 19:
+#line 139 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_ListElement, ssl_expr_yyvsp[0].exVal, ssl_expr_yyvsp[-2].exVal); ;
+ break;}
+case 20:
+#line 142 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_Digit, ssl_expr_yyvsp[0].cpVal, NULL); ;
+ break;}
+case 21:
+#line 143 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_String, ssl_expr_yyvsp[0].cpVal, NULL); ;
+ break;}
+case 22:
+#line 144 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_make(op_Var, ssl_expr_yyvsp[-1].cpVal, NULL); ;
+ break;}
+case 23:
+#line 145 "ssl_expr_parse.y"
+{ ssl_expr_yyval.exVal = ssl_expr_yyvsp[0].exVal; ;
+ break;}
+case 24:
+#line 148 "ssl_expr_parse.y"
+{
+ regex_t *regex;
+ if ((regex = ap_pregcomp(ssl_expr_info.pool, ssl_expr_yyvsp[0].cpVal,
+ REG_EXTENDED|REG_NOSUB)) == NULL) {
+ ssl_expr_error = "Failed to compile regular expression";
+ YYERROR;
+ regex = NULL;
+ }
+ ssl_expr_yyval.exVal = ssl_expr_make(op_Regex, regex, NULL);
+ ;
+ break;}
+case 25:
+#line 158 "ssl_expr_parse.y"
+{
+ regex_t *regex;
+ if ((regex = ap_pregcomp(ssl_expr_info.pool, ssl_expr_yyvsp[0].cpVal,
+ REG_EXTENDED|REG_NOSUB|REG_ICASE)) == NULL) {
+ ssl_expr_error = "Failed to compile regular expression";
+ YYERROR;
+ regex = NULL;
+ }
+ ssl_expr_yyval.exVal = ssl_expr_make(op_Regex, regex, NULL);
+ ;
+ break;}
+case 26:
+#line 170 "ssl_expr_parse.y"
+{
+ ssl_expr *args = ssl_expr_make(op_ListElement, ssl_expr_yyvsp[-1].cpVal, NULL);
+ ssl_expr_yyval.exVal = ssl_expr_make(op_Func, "file", args);
+ ;
+ break;}
+}
+  /* the action file gets copied in, in place of this dollarsign */
+#line 543 "/usr/local/share/bison.simple"
+
+ ssl_expr_yyvsp -= ssl_expr_yylen;
+ ssl_expr_yyssp -= ssl_expr_yylen;
+#ifdef YYLSP_NEEDED
+ ssl_expr_yylsp -= ssl_expr_yylen;
+#endif
+
+#if YYDEBUG != 0
+ if (ssl_expr_yydebug)
+ {
+ short *ssp1 = ssl_expr_yyss - 1;
+ fprintf (stderr, "state stack now");
+ while (ssp1 != ssl_expr_yyssp)
+ fprintf (stderr, " %d", *++ssp1);
+ fprintf (stderr, "\n");
+ }
+#endif
+
+ *++ssl_expr_yyvsp = ssl_expr_yyval;
+
+#ifdef YYLSP_NEEDED
+ ssl_expr_yylsp++;
+ if (ssl_expr_yylen == 0)
+ {
+ ssl_expr_yylsp->first_line = ssl_expr_yylloc.first_line;
+ ssl_expr_yylsp->first_column = ssl_expr_yylloc.first_column;
+ ssl_expr_yylsp->last_line = (ssl_expr_yylsp-1)->last_line;
+ ssl_expr_yylsp->last_column = (ssl_expr_yylsp-1)->last_column;
+ ssl_expr_yylsp->text = 0;
+ }
+ else
+ {
+ ssl_expr_yylsp->last_line = (ssl_expr_yylsp+ssl_expr_yylen-1)->last_line;
+ ssl_expr_yylsp->last_column = (ssl_expr_yylsp+ssl_expr_yylen-1)->last_column;
+ }
+#endif
+
+ /* Now "shift" the result of the reduction.
+ Determine what state that goes to,
+ based on the state we popped back to
+ and the rule number reduced by. */
+
+ ssl_expr_yyn = ssl_expr_yyr1[ssl_expr_yyn];
+
+ ssl_expr_yystate = ssl_expr_yypgoto[ssl_expr_yyn - YYNTBASE] + *ssl_expr_yyssp;
+ if (ssl_expr_yystate >= 0 && ssl_expr_yystate <= YYLAST && ssl_expr_yycheck[ssl_expr_yystate] == *ssl_expr_yyssp)
+ ssl_expr_yystate = ssl_expr_yytable[ssl_expr_yystate];
+ else
+ ssl_expr_yystate = ssl_expr_yydefgoto[ssl_expr_yyn - YYNTBASE];
+
+ goto ssl_expr_yynewstate;
+
+ssl_expr_yyerrlab: /* here on detecting error */
+
+ if (! ssl_expr_yyerrstatus)
+ /* If not already recovering from an error, report this error. */
+ {
+ ++ssl_expr_yynerrs;
+
+#ifdef YYERROR_VERBOSE
+ ssl_expr_yyn = ssl_expr_yypact[ssl_expr_yystate];
+
+ if (ssl_expr_yyn > YYFLAG && ssl_expr_yyn < YYLAST)
+ {
+ int size = 0;
+ char *msg;
+ int x, count;
+
+ count = 0;
+ /* Start X at -ssl_expr_yyn if nec to avoid negative indexes in ssl_expr_yycheck. */
+ for (x = (ssl_expr_yyn < 0 ? -ssl_expr_yyn : 0);
+ x < (sizeof(ssl_expr_yytname) / sizeof(char *)); x++)
+ if (ssl_expr_yycheck[x + ssl_expr_yyn] == x)
+ size += strlen(ssl_expr_yytname[x]) + 15, count++;
+ msg = (char *) malloc(size + 15);
+ if (msg != 0)
+ {
+ strcpy(msg, "parse error");
+
+ if (count < 5)
+ {
+ count = 0;
+ for (x = (ssl_expr_yyn < 0 ? -ssl_expr_yyn : 0);
+ x < (sizeof(ssl_expr_yytname) / sizeof(char *)); x++)
+ if (ssl_expr_yycheck[x + ssl_expr_yyn] == x)
+ {
+ strcat(msg, count == 0 ? ", expecting `" : " or `");
+ strcat(msg, ssl_expr_yytname[x]);
+ strcat(msg, "'");
+ count++;
+ }
+ }
+ ssl_expr_yyerror(msg);
+ free(msg);
+ }
+ else
+ ssl_expr_yyerror ("parse error; also virtual memory exceeded");
+ }
+ else
+#endif /* YYERROR_VERBOSE */
+ ssl_expr_yyerror("parse error");
+ }
+
+ goto ssl_expr_yyerrlab1;
+ssl_expr_yyerrlab1: /* here on error raised explicitly by an action */
+
+ if (ssl_expr_yyerrstatus == 3)
+ {
+ /* if just tried and failed to reuse lookahead token after an error, discard it. */
+
+ /* return failure if at end of input */
+ if (ssl_expr_yychar == YYEOF)
+ YYABORT;
+
+#if YYDEBUG != 0
+ if (ssl_expr_yydebug)
+ fprintf(stderr, "Discarding token %d (%s).\n", ssl_expr_yychar, ssl_expr_yytname[ssl_expr_yychar1]);
+#endif
+
+ ssl_expr_yychar = YYEMPTY;
+ }
+
+ /* Else will try to reuse lookahead token
+ after shifting the error token. */
+
+ ssl_expr_yyerrstatus = 3; /* Each real token shifted decrements this */
+
+ goto ssl_expr_yyerrhandle;
+
+ssl_expr_yyerrdefault: /* current state does not do anything special for the error token. */
+
+#if 0
+ /* This is wrong; only states that explicitly want error tokens
+ should shift them. */
+ ssl_expr_yyn = ssl_expr_yydefact[ssl_expr_yystate]; /* If its default is to accept any token, ok. Otherwise pop it.*/
+ if (ssl_expr_yyn) goto ssl_expr_yydefault;
+#endif
+
+ssl_expr_yyerrpop: /* pop the current state because it cannot handle the error token */
+
+ if (ssl_expr_yyssp == ssl_expr_yyss) YYABORT;
+ ssl_expr_yyvsp--;
+ ssl_expr_yystate = *--ssl_expr_yyssp;
+#ifdef YYLSP_NEEDED
+ ssl_expr_yylsp--;
+#endif
+
+#if YYDEBUG != 0
+ if (ssl_expr_yydebug)
+ {
+ short *ssp1 = ssl_expr_yyss - 1;
+ fprintf (stderr, "Error: state stack now");
+ while (ssp1 != ssl_expr_yyssp)
+ fprintf (stderr, " %d", *++ssp1);
+ fprintf (stderr, "\n");
+ }
+#endif
+
+ssl_expr_yyerrhandle:
+
+ ssl_expr_yyn = ssl_expr_yypact[ssl_expr_yystate];
+ if (ssl_expr_yyn == YYFLAG)
+ goto ssl_expr_yyerrdefault;
+
+ ssl_expr_yyn += YYTERROR;
+ if (ssl_expr_yyn < 0 || ssl_expr_yyn > YYLAST || ssl_expr_yycheck[ssl_expr_yyn] != YYTERROR)
+ goto ssl_expr_yyerrdefault;
+
+ ssl_expr_yyn = ssl_expr_yytable[ssl_expr_yyn];
+ if (ssl_expr_yyn < 0)
+ {
+ if (ssl_expr_yyn == YYFLAG)
+ goto ssl_expr_yyerrpop;
+ ssl_expr_yyn = -ssl_expr_yyn;
+ goto ssl_expr_yyreduce;
+ }
+ else if (ssl_expr_yyn == 0)
+ goto ssl_expr_yyerrpop;
+
+ if (ssl_expr_yyn == YYFINAL)
+ YYACCEPT;
+
+#if YYDEBUG != 0
+ if (ssl_expr_yydebug)
+ fprintf(stderr, "Shifting error token, ");
+#endif
+
+ *++ssl_expr_yyvsp = ssl_expr_yylval;
+#ifdef YYLSP_NEEDED
+ *++ssl_expr_yylsp = ssl_expr_yylloc;
+#endif
+
+ ssl_expr_yystate = ssl_expr_yyn;
+ goto ssl_expr_yynewstate;
+
+ ssl_expr_yyacceptlab:
+ /* YYACCEPT comes here. */
+ if (ssl_expr_yyfree_stacks)
+ {
+ free (ssl_expr_yyss);
+ free (ssl_expr_yyvs);
+#ifdef YYLSP_NEEDED
+ free (ssl_expr_yyls);
+#endif
+ }
+ return 0;
+
+ ssl_expr_yyabortlab:
+ /* YYABORT comes here. */
+ if (ssl_expr_yyfree_stacks)
+ {
+ free (ssl_expr_yyss);
+ free (ssl_expr_yyvs);
+#ifdef YYLSP_NEEDED
+ free (ssl_expr_yyls);
+#endif
+ }
+ return 1;
+}
+#line 176 "ssl_expr_parse.y"
+
+
+int ssl_expr_yyerror(char *s)
+{
+ ssl_expr_error = s;
+ return 2;
+}
+
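
The generated ssl_expr_yyparse() above is only the table-driven shift/reduce engine: the grammar actions store the finished AST in ssl_expr_info.expr, report failures through ssl_expr_error / ssl_expr_yyerror(), and rely on the caller to supply the scanner's input (the scanner further below redefines YY_INPUT to call a user-provided yyinput()). The sketch that follows is not part of this diff and only illustrates that calling contract: the cursor variables and ssl_expr_compile_sketch() are hypothetical names, mod_ssl.h is assumed to declare ssl_expr, ssl_expr_info, ssl_expr_error and ssl_expr_yyparse(), and the real driver in mod_ssl may wire the input hook differently.

#include "mod_ssl.h"           /* assumed: ssl_expr, ssl_expr_info, ssl_expr_error, ssl_expr_yyparse() */

static const char *expr_src;   /* expression text being scanned (hypothetical) */
static int expr_pos;           /* read cursor into expr_src     (hypothetical) */

/* Input hook named by the YY_INPUT redefinition in ssl_expr_scan.c below:
 * hand the scanner up to max_size bytes, returning 0 (YY_NULL) at end of input. */
int yyinput(char *buf, int max_size)
{
    int n = 0;
    while (n < max_size && expr_src[expr_pos] != '\0')
        buf[n++] = expr_src[expr_pos++];
    return n;
}

/* Hypothetical driver: seed ssl_expr_info, run the parser, hand back the AST. */
static ssl_expr *ssl_expr_compile_sketch(apr_pool_t *p, const char *source)
{
    expr_src = source;
    expr_pos = 0;
    ssl_expr_info.pool = p;      /* used by ap_pregcomp() in the regex actions  */
    ssl_expr_info.expr = NULL;   /* filled in by the "root : expr" action       */
    ssl_expr_error = NULL;       /* set via ssl_expr_yyerror() on a parse error */
    if (ssl_expr_yyparse() != 0) /* 0 = accept, 1 = abort, 2 = stack overflow   */
        return NULL;             /* ssl_expr_error then holds the message       */
    return ssl_expr_info.expr;
}
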
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_parse.h b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_parse.h
new file mode 100644
index 00000000..5378e287
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_parse.h
@@ -0,0 +1,27 @@
+typedef union {
+ char *cpVal;
+ ssl_expr *exVal;
+} YYSTYPE;
+#define T_TRUE 257
+#define T_FALSE 258
+#define T_DIGIT 259
+#define T_ID 260
+#define T_STRING 261
+#define T_REGEX 262
+#define T_REGEX_I 263
+#define T_FUNC_FILE 264
+#define T_OP_EQ 265
+#define T_OP_NE 266
+#define T_OP_LT 267
+#define T_OP_LE 268
+#define T_OP_GT 269
+#define T_OP_GE 270
+#define T_OP_REG 271
+#define T_OP_NRE 272
+#define T_OP_IN 273
+#define T_OP_OR 274
+#define T_OP_AND 275
+#define T_OP_NOT 276
+
+
+extern YYSTYPE ssl_expr_yylval;
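
This header is the contract shared by the generated parser and the flex scanner: the YYSTYPE union carries token values, the T_* macros are the external token codes, and ssl_expr_yylval is the slot through which the scanner passes a value alongside each token. The small illustration below is not taken from the diff (the helper name is hypothetical, and apr_strings.h plus the mod_ssl.h declarations are assumed); it simply mirrors what the flex rules later in this diff do with apr_pstrdup().

#include "mod_ssl.h"          /* assumed: ssl_expr and the apr/httpd headers */
#include "apr_strings.h"      /* apr_pstrdup()                               */
#include "ssl_expr_parse.h"   /* YYSTYPE, T_ID and friends, ssl_expr_yylval  */

/* Hypothetical helper: copy the matched text into the pool, park it in the
 * value union, and return the token code the grammar expects for T_ID. */
static int emit_identifier(apr_pool_t *p, const char *text)
{
    ssl_expr_yylval.cpVal = apr_pstrdup(p, text);  /* becomes $N in <cpVal> rules */
    return T_ID;
}
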
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_parse.y b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_parse.y
new file mode 100644
index 00000000..649e1541
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_parse.y
@@ -0,0 +1,148 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| |
+ * | '_ ` _ \ / _ \ / _` | / __/ __| |
+ * | | | | | | (_) | (_| | \__ \__ \ | mod_ssl - Apache Interface to OpenSSL
+ * |_| |_| |_|\___/ \__,_|___|___/___/_| http://www.modssl.org/
+ * |_____|
+ * ssl_expr_parse.y
+ * Expression LR(1) Parser
+ */
+ /* ``What you see is all you get.''
+ -- Brian Kernighan */
+
+/* _________________________________________________________________
+**
+** Expression Parser
+** _________________________________________________________________
+*/
+
+%{
+#include "mod_ssl.h"
+%}
+
+%union {
+ char *cpVal;
+ ssl_expr *exVal;
+}
+
+%token T_TRUE
+%token T_FALSE
+
+%token <cpVal> T_DIGIT
+%token <cpVal> T_ID
+%token <cpVal> T_STRING
+%token <cpVal> T_REGEX
+%token <cpVal> T_REGEX_I
+
+%token T_FUNC_FILE
+
+%token T_OP_EQ
+%token T_OP_NE
+%token T_OP_LT
+%token T_OP_LE
+%token T_OP_GT
+%token T_OP_GE
+%token T_OP_REG
+%token T_OP_NRE
+%token T_OP_IN
+
+%token T_OP_OR
+%token T_OP_AND
+%token T_OP_NOT
+
+%left T_OP_OR
+%left T_OP_AND
+%left T_OP_NOT
+
+%type <exVal> expr
+%type <exVal> comparison
+%type <exVal> funccall
+%type <exVal> regex
+%type <exVal> words
+%type <exVal> word
+
+%%
+
+root : expr { ssl_expr_info.expr = $1; }
+ ;
+
+expr : T_TRUE { $$ = ssl_expr_make(op_True, NULL, NULL); }
+ | T_FALSE { $$ = ssl_expr_make(op_False, NULL, NULL); }
+ | T_OP_NOT expr { $$ = ssl_expr_make(op_Not, $2, NULL); }
+ | expr T_OP_OR expr { $$ = ssl_expr_make(op_Or, $1, $3); }
+ | expr T_OP_AND expr { $$ = ssl_expr_make(op_And, $1, $3); }
+ | comparison { $$ = ssl_expr_make(op_Comp, $1, NULL); }
+ | '(' expr ')' { $$ = $2; }
+ ;
+
+comparison: word T_OP_EQ word { $$ = ssl_expr_make(op_EQ, $1, $3); }
+ | word T_OP_NE word { $$ = ssl_expr_make(op_NE, $1, $3); }
+ | word T_OP_LT word { $$ = ssl_expr_make(op_LT, $1, $3); }
+ | word T_OP_LE word { $$ = ssl_expr_make(op_LE, $1, $3); }
+ | word T_OP_GT word { $$ = ssl_expr_make(op_GT, $1, $3); }
+ | word T_OP_GE word { $$ = ssl_expr_make(op_GE, $1, $3); }
+ | word T_OP_IN '{' words '}' { $$ = ssl_expr_make(op_IN, $1, $4); }
+ | word T_OP_REG regex { $$ = ssl_expr_make(op_REG, $1, $3); }
+ | word T_OP_NRE regex { $$ = ssl_expr_make(op_NRE, $1, $3); }
+ ;
+
+words : word { $$ = ssl_expr_make(op_ListElement, $1, NULL); }
+ | words ',' word { $$ = ssl_expr_make(op_ListElement, $3, $1); }
+ ;
+
+word : T_DIGIT { $$ = ssl_expr_make(op_Digit, $1, NULL); }
+ | T_STRING { $$ = ssl_expr_make(op_String, $1, NULL); }
+ | '%' '{' T_ID '}' { $$ = ssl_expr_make(op_Var, $3, NULL); }
+ | funccall { $$ = $1; }
+ ;
+
+regex : T_REGEX {
+ regex_t *regex;
+ if ((regex = ap_pregcomp(ssl_expr_info.pool, $1,
+ REG_EXTENDED|REG_NOSUB)) == NULL) {
+ ssl_expr_error = "Failed to compile regular expression";
+ YYERROR;
+ }
+ $$ = ssl_expr_make(op_Regex, regex, NULL);
+ }
+ | T_REGEX_I {
+ regex_t *regex;
+ if ((regex = ap_pregcomp(ssl_expr_info.pool, $1,
+ REG_EXTENDED|REG_NOSUB|REG_ICASE)) == NULL) {
+ ssl_expr_error = "Failed to compile regular expression";
+ YYERROR;
+ }
+ $$ = ssl_expr_make(op_Regex, regex, NULL);
+ }
+ ;
+
+funccall : T_FUNC_FILE '(' T_STRING ')' {
+ ssl_expr *args = ssl_expr_make(op_ListElement, $3, NULL);
+ $$ = ssl_expr_make(op_Func, "file", args);
+ }
+ ;
+
+%%
+
+int yyerror(char *s)
+{
+ ssl_expr_error = s;
+ return 2;
+}
+
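
Every rule in the grammar above reduces to a call to ssl_expr_make(), and the root action leaves the finished tree in ssl_expr_info.expr. As a worked example (not part of the diff; the literal values are illustrative and the ssl_expr_make() argument types are the ones implied by the actions), an expression such as %{REMOTE_ADDR} in {"127.0.0.1", "::1"} would reduce to the same tree this hand-built sketch constructs:

#include "mod_ssl.h"   /* assumed: ssl_expr, the op_* node codes, ssl_expr_make() */

/* Hypothetical: build by hand the AST the grammar would produce. */
static ssl_expr *build_example_ast(void)
{
    /* word : '%' '{' T_ID '}'            ->  op_Var                          */
    ssl_expr *var = ssl_expr_make(op_Var, "REMOTE_ADDR", NULL);

    /* words : word                       ->  op_ListElement(word, NULL)
     * words : words ',' word             ->  op_ListElement(word, previous)  */
    ssl_expr *w1 = ssl_expr_make(op_ListElement,
                                 ssl_expr_make(op_String, "127.0.0.1", NULL), NULL);
    ssl_expr *list = ssl_expr_make(op_ListElement,
                                   ssl_expr_make(op_String, "::1", NULL), w1);

    /* comparison : word T_OP_IN '{' words '}' ->  op_IN
     * expr       : comparison                 ->  op_Comp                    */
    return ssl_expr_make(op_Comp, ssl_expr_make(op_IN, var, list), NULL);
}

The root rule would then assign this node to ssl_expr_info.expr, which is exactly what case 1 of the generated switch in ssl_expr_parse.c above does.
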
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_scan.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_scan.c
new file mode 100644
index 00000000..12977a3e
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_scan.c
@@ -0,0 +1,1969 @@
+#define yy_create_buffer ssl_expr_yy_create_buffer
+#define yy_delete_buffer ssl_expr_yy_delete_buffer
+#define yy_scan_buffer ssl_expr_yy_scan_buffer
+#define yy_scan_string ssl_expr_yy_scan_string
+#define yy_scan_bytes ssl_expr_yy_scan_bytes
+#define yy_flex_debug ssl_expr_yy_flex_debug
+#define yy_init_buffer ssl_expr_yy_init_buffer
+#define yy_flush_buffer ssl_expr_yy_flush_buffer
+#define yy_load_buffer_state ssl_expr_yy_load_buffer_state
+#define yy_switch_to_buffer ssl_expr_yy_switch_to_buffer
+#define yyin ssl_expr_yyin
+#define yyleng ssl_expr_yyleng
+#define yylex ssl_expr_yylex
+#define yyout ssl_expr_yyout
+#define yyrestart ssl_expr_yyrestart
+#define yytext ssl_expr_yytext
+
+/* A lexical scanner generated by flex */
+
+/* Scanner skeleton version:
+ * $Header: /home/striker/cvs2svn/dumps/httpd-2.0/../../httpd-2.0/modules/ssl/ssl_expr_scan.c,v 1.12.2.4 2004/02/09 20:53:20 nd Exp $
+ */
+
+#define FLEX_SCANNER
+#define YY_FLEX_MAJOR_VERSION 2
+#define YY_FLEX_MINOR_VERSION 5
+
+#include <stdio.h>
+
+
+/* cfront 1.2 defines "c_plusplus" instead of "__cplusplus" */
+#ifdef c_plusplus
+#ifndef __cplusplus
+#define __cplusplus
+#endif
+#endif
+
+
+#ifdef __cplusplus
+
+#include <stdlib.h>
+#include <unistd.h>
+
+/* Use prototypes in function declarations. */
+#define YY_USE_PROTOS
+
+/* The "const" storage-class-modifier is valid. */
+#define YY_USE_CONST
+
+#else /* ! __cplusplus */
+
+#if __STDC__
+
+#define YY_USE_PROTOS
+#define YY_USE_CONST
+
+#endif /* __STDC__ */
+#endif /* ! __cplusplus */
+
+#ifdef __TURBOC__
+#pragma warn -rch
+#pragma warn -use
+#include <io.h>
+#include <stdlib.h>
+#define YY_USE_CONST
+#define YY_USE_PROTOS
+#endif
+
+#ifdef YY_USE_CONST
+#define yyconst const
+#else
+#define yyconst
+#endif
+
+
+#ifdef YY_USE_PROTOS
+#define YY_PROTO(proto) proto
+#else
+#define YY_PROTO(proto) ()
+#endif
+
+/* Returned upon end-of-file. */
+#define YY_NULL 0
+
+/* Promotes a possibly negative, possibly signed char to an unsigned
+ * integer for use as an array index. If the signed char is negative,
+ * we want to instead treat it as an 8-bit unsigned char, hence the
+ * double cast.
+ */
+#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c)
+
+/* Enter a start condition. This macro really ought to take a parameter,
+ * but we do it the disgusting crufty way forced on us by the ()-less
+ * definition of BEGIN.
+ */
+#define BEGIN yy_start = 1 + 2 *
+
+/* Translate the current start state into a value that can be later handed
+ * to BEGIN to return to the state. The YYSTATE alias is for lex
+ * compatibility.
+ */
+#define YY_START ((yy_start - 1) / 2)
+#define YYSTATE YY_START
+
+/* Action number for EOF rule of a given start state. */
+#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
+
+/* Special action meaning "start processing a new file". */
+#define YY_NEW_FILE yyrestart( yyin )
+
+#define YY_END_OF_BUFFER_CHAR 0
+
+/* Size of default input buffer. */
+#define YY_BUF_SIZE 16384
+
+typedef struct yy_buffer_state *YY_BUFFER_STATE;
+
+extern int yyleng;
+extern FILE *yyin, *yyout;
+
+#define EOB_ACT_CONTINUE_SCAN 0
+#define EOB_ACT_END_OF_FILE 1
+#define EOB_ACT_LAST_MATCH 2
+
+/* The funky do-while in the following #define is used to turn the definition
+ * into a single C statement (which needs a semi-colon terminator). This
+ * avoids problems with code like:
+ *
+ * if ( condition_holds )
+ * yyless( 5 );
+ * else
+ * do_something_else();
+ *
+ * Prior to using the do-while the compiler would get upset at the
+ * "else" because it interpreted the "if" statement as being all
+ * done when it reached the ';' after the yyless() call.
+ */
+
+/* Return all but the first 'n' matched characters back to the input stream. */
+
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ *yy_cp = yy_hold_char; \
+ YY_RESTORE_YY_MORE_OFFSET \
+ yy_c_buf_p = yy_cp = yy_bp + n - YY_MORE_ADJ; \
+ YY_DO_BEFORE_ACTION; /* set up yytext again */ \
+ } \
+ while ( 0 )
+
+#define unput(c) yyunput( c, yytext_ptr )
+
+/* The following is because we cannot portably get our hands on size_t
+ * (without autoconf's help, which isn't available because we want
+ * flex-generated scanners to compile on their own).
+ */
+typedef unsigned int yy_size_t;
+
+
+struct yy_buffer_state
+ {
+ FILE *yy_input_file;
+
+ char *yy_ch_buf; /* input buffer */
+ char *yy_buf_pos; /* current position in input buffer */
+
+ /* Size of input buffer in bytes, not including room for EOB
+ * characters.
+ */
+ yy_size_t yy_buf_size;
+
+ /* Number of characters read into yy_ch_buf, not including EOB
+ * characters.
+ */
+ int yy_n_chars;
+
+ /* Whether we "own" the buffer - i.e., we know we created it,
+ * and can realloc() it to grow it, and should free() it to
+ * delete it.
+ */
+ int yy_is_our_buffer;
+
+ /* Whether this is an "interactive" input source; if so, and
+ * if we're using stdio for input, then we want to use getc()
+ * instead of fread(), to make sure we stop fetching input after
+ * each newline.
+ */
+ int yy_is_interactive;
+
+ /* Whether we're considered to be at the beginning of a line.
+ * If so, '^' rules will be active on the next match, otherwise
+ * not.
+ */
+ int yy_at_bol;
+
+ /* Whether to try to fill the input buffer when we reach the
+ * end of it.
+ */
+ int yy_fill_buffer;
+
+ int yy_buffer_status;
+#define YY_BUFFER_NEW 0
+#define YY_BUFFER_NORMAL 1
+ /* When an EOF's been seen but there's still some text to process
+ * then we mark the buffer as YY_EOF_PENDING, to indicate that we
+ * shouldn't try reading from the input source any more. We might
+ * still have a bunch of tokens to match, though, because of
+ * possible backing-up.
+ *
+ * When we actually see the EOF, we change the status to "new"
+ * (via yyrestart()), so that the user can continue scanning by
+ * just pointing yyin at a new input file.
+ */
+#define YY_BUFFER_EOF_PENDING 2
+ };
+
+static YY_BUFFER_STATE yy_current_buffer = 0;
+
+/* We provide macros for accessing buffer states in case in the
+ * future we want to put the buffer states in a more general
+ * "scanner state".
+ */
+#define YY_CURRENT_BUFFER yy_current_buffer
+
+
+/* yy_hold_char holds the character lost when yytext is formed. */
+static char yy_hold_char;
+
+static int yy_n_chars; /* number of characters read into yy_ch_buf */
+
+
+int yyleng;
+
+/* Points to current character in buffer. */
+static char *yy_c_buf_p = (char *) 0;
+static int yy_init = 1; /* whether we need to initialize */
+static int yy_start = 0; /* start state number */
+
+/* Flag which is used to allow yywrap()'s to do buffer switches
+ * instead of setting up a fresh yyin. A bit of a hack ...
+ */
+static int yy_did_buffer_switch_on_eof;
+
+void yyrestart YY_PROTO(( FILE *input_file ));
+
+void yy_switch_to_buffer YY_PROTO(( YY_BUFFER_STATE new_buffer ));
+void yy_load_buffer_state YY_PROTO(( void ));
+YY_BUFFER_STATE yy_create_buffer YY_PROTO(( FILE *file, int size ));
+void yy_delete_buffer YY_PROTO(( YY_BUFFER_STATE b ));
+void yy_init_buffer YY_PROTO(( YY_BUFFER_STATE b, FILE *file ));
+void yy_flush_buffer YY_PROTO(( YY_BUFFER_STATE b ));
+#define YY_FLUSH_BUFFER yy_flush_buffer( yy_current_buffer )
+
+YY_BUFFER_STATE yy_scan_buffer YY_PROTO(( char *base, yy_size_t size ));
+YY_BUFFER_STATE yy_scan_string YY_PROTO(( yyconst char *yy_str ));
+YY_BUFFER_STATE yy_scan_bytes YY_PROTO(( yyconst char *bytes, int len ));
+
+static void *yy_flex_alloc YY_PROTO(( yy_size_t ));
+static void *yy_flex_realloc YY_PROTO(( void *, yy_size_t ));
+static void yy_flex_free YY_PROTO(( void * ));
+
+#define yy_new_buffer yy_create_buffer
+
+#define yy_set_interactive(is_interactive) \
+ { \
+ if ( ! yy_current_buffer ) \
+ yy_current_buffer = yy_create_buffer( yyin, YY_BUF_SIZE ); \
+ yy_current_buffer->yy_is_interactive = is_interactive; \
+ }
+
+#define yy_set_bol(at_bol) \
+ { \
+ if ( ! yy_current_buffer ) \
+ yy_current_buffer = yy_create_buffer( yyin, YY_BUF_SIZE ); \
+ yy_current_buffer->yy_at_bol = at_bol; \
+ }
+
+#define YY_AT_BOL() (yy_current_buffer->yy_at_bol)
+
+
+#define yywrap() 1
+#define YY_SKIP_YYWRAP
+typedef unsigned char YY_CHAR;
+FILE *yyin = (FILE *) 0, *yyout = (FILE *) 0;
+typedef int yy_state_type;
+extern char *yytext;
+#define yytext_ptr yytext
+
+static yy_state_type yy_get_previous_state YY_PROTO(( void ));
+static yy_state_type yy_try_NUL_trans YY_PROTO(( yy_state_type current_state ));
+static int yy_get_next_buffer YY_PROTO(( void ));
+static void yy_fatal_error YY_PROTO(( yyconst char msg[] ));
+
+/* Done after the current pattern has been matched and before the
+ * corresponding action - sets up yytext.
+ */
+#define YY_DO_BEFORE_ACTION \
+ yytext_ptr = yy_bp; \
+ yyleng = (int) (yy_cp - yy_bp); \
+ yy_hold_char = *yy_cp; \
+ *yy_cp = '\0'; \
+ yy_c_buf_p = yy_cp;
+
+#define YY_NUM_RULES 46
+#define YY_END_OF_BUFFER 47
+static yyconst short int yy_accept[86] =
+ { 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 47, 45,
+ 1, 38, 2, 45, 43, 24, 45, 28, 44, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 45,
+ 13, 4, 3, 14, 16, 18, 17, 1, 22, 32,
+ 34, 43, 26, 20, 31, 30, 44, 44, 19, 44,
+ 44, 29, 27, 39, 25, 23, 15, 15, 21, 44,
+ 35, 44, 36, 13, 12, 5, 6, 10, 11, 7,
+ 8, 9, 33, 44, 44, 37, 44, 5, 6, 44,
+ 40, 41, 5, 42, 0
+ } ;
+
+static yyconst int yy_ec[256] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 4, 5, 1, 1, 1, 6, 1, 1,
+ 1, 1, 1, 1, 7, 1, 1, 8, 8, 8,
+ 8, 8, 8, 8, 8, 9, 9, 7, 1, 10,
+ 11, 12, 1, 1, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 1, 14, 1, 1, 7, 1, 15, 16, 13, 17,
+
+ 18, 19, 20, 13, 21, 13, 13, 22, 23, 24,
+ 25, 13, 26, 27, 28, 29, 30, 13, 13, 13,
+ 13, 13, 1, 31, 1, 32, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1
+ } ;
+
+static yyconst int yy_meta[33] =
+ { 0,
+ 1, 1, 2, 1, 3, 1, 4, 4, 4, 1,
+ 1, 1, 4, 3, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 1, 1
+ } ;
+
+static yyconst short int yy_base[93] =
+ { 0,
+ 0, 0, 30, 31, 0, 0, 82, 81, 101, 142,
+ 35, 28, 142, 94, 32, 88, 31, 87, 0, 69,
+ 66, 28, 28, 67, 29, 63, 30, 63, 62, 57,
+ 0, 142, 142, 88, 142, 142, 142, 48, 142, 142,
+ 142, 44, 142, 142, 142, 142, 0, 70, 0, 64,
+ 63, 0, 0, 0, 0, 0, 142, 0, 0, 55,
+ 0, 46, 142, 0, 142, 53, 62, 142, 142, 142,
+ 142, 142, 0, 44, 48, 0, 41, 70, 72, 38,
+ 0, 0, 74, 0, 142, 117, 121, 125, 50, 129,
+ 133, 137
+
+ } ;
+
+static yyconst short int yy_def[93] =
+ { 0,
+ 85, 1, 86, 86, 87, 87, 88, 88, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 89, 89,
+ 89, 89, 89, 89, 89, 90, 89, 89, 89, 85,
+ 91, 85, 85, 92, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 85, 89, 89, 89,
+ 89, 89, 85, 91, 85, 85, 85, 85, 85, 85,
+ 85, 85, 89, 89, 89, 89, 89, 85, 85, 89,
+ 89, 89, 85, 89, 0, 85, 85, 85, 85, 85,
+ 85, 85
+
+ } ;
+
+static yyconst short int yy_nxt[175] =
+ { 0,
+ 10, 11, 11, 12, 13, 14, 10, 15, 15, 16,
+ 17, 18, 19, 10, 20, 19, 19, 21, 22, 23,
+ 24, 25, 26, 27, 28, 19, 19, 19, 29, 19,
+ 30, 10, 32, 32, 33, 33, 38, 38, 39, 42,
+ 42, 44, 50, 34, 34, 52, 55, 59, 51, 38,
+ 38, 42, 42, 47, 60, 84, 53, 56, 82, 40,
+ 78, 79, 45, 57, 57, 81, 57, 57, 57, 79,
+ 79, 80, 57, 57, 57, 77, 57, 83, 79, 79,
+ 79, 79, 79, 76, 75, 74, 73, 63, 62, 61,
+ 54, 49, 48, 57, 57, 66, 67, 46, 43, 41,
+
+ 85, 37, 37, 68, 85, 85, 69, 85, 85, 85,
+ 85, 70, 85, 85, 71, 85, 72, 31, 31, 31,
+ 31, 35, 35, 35, 35, 36, 36, 36, 36, 58,
+ 85, 58, 58, 64, 85, 85, 64, 65, 65, 65,
+ 65, 9, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85
+ } ;
+
+static yyconst short int yy_chk[175] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 3, 4, 3, 4, 11, 11, 12, 15,
+ 15, 17, 22, 3, 4, 23, 25, 27, 22, 38,
+ 38, 42, 42, 89, 27, 80, 23, 25, 77, 12,
+ 66, 66, 17, 26, 26, 75, 26, 26, 26, 67,
+ 67, 74, 26, 26, 26, 62, 26, 78, 78, 79,
+ 79, 83, 83, 60, 51, 50, 48, 30, 29, 28,
+ 24, 21, 20, 26, 26, 34, 34, 18, 16, 14,
+
+ 9, 8, 7, 34, 0, 0, 34, 0, 0, 0,
+ 0, 34, 0, 0, 34, 0, 34, 86, 86, 86,
+ 86, 87, 87, 87, 87, 88, 88, 88, 88, 90,
+ 0, 90, 90, 91, 0, 0, 91, 92, 92, 92,
+ 92, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85
+ } ;
+
+static yy_state_type yy_last_accepting_state;
+static char *yy_last_accepting_cpos;
+
+/* The intent behind this definition is that it'll catch
+ * any uses of REJECT which flex missed.
+ */
+#define REJECT reject_used_but_not_detected
+#define yymore() yymore_used_but_not_detected
+#define YY_MORE_ADJ 0
+#define YY_RESTORE_YY_MORE_OFFSET
+char *yytext;
+#line 1 "ssl_expr_scan.l"
+#define INITIAL 0
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| |
+ * | '_ ` _ \ / _ \ / _` | / __/ __| |
+ * | | | | | | (_) | (_| | \__ \__ \ | mod_ssl - Apache Interface to OpenSSL
+ * |_| |_| |_|\___/ \__,_|___|___/___/_| http://www.modssl.org/
+ * |_____|
+ * ssl_expr_scan.l
+ * Expression Scanner
+ */
+/* ``Killing for peace is
+like fucking for virginity.''
+-- Unknown */
+/* _________________________________________________________________
+**
+** Expression Scanner
+** _________________________________________________________________
+*/
+#line 38 "ssl_expr_scan.l"
+#include "mod_ssl.h"
+
+#include "ssl_expr_parse.h"
+
+#define YY_NO_UNPUT 1
+int yyinput(char *buf, int max_size);
+
+#undef YY_INPUT
+#define YY_INPUT(buf,result,max_size) \
+ (result = yyinput(buf, max_size))
+
+#define MAX_STR_LEN 2048
+/* %option stack */
+#define YY_NEVER_INTERACTIVE 1
+#define str 1
+
+#define regex 2
+#define regex_flags 3
+
+#line 535 "lex.ssl_expr_yy.c"
+
+/* Macros after this point can all be overridden by user definitions in
+ * section 1.
+ */
+
+#ifndef YY_SKIP_YYWRAP
+#ifdef __cplusplus
+extern "C" int yywrap YY_PROTO(( void ));
+#else
+extern int yywrap YY_PROTO(( void ));
+#endif
+#endif
+
+#ifndef YY_NO_UNPUT
+static void yyunput YY_PROTO(( int c, char *buf_ptr ));
+#endif
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy YY_PROTO(( char *, yyconst char *, int ));
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen YY_PROTO(( yyconst char * ));
+#endif
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+static int yyinput YY_PROTO(( void ));
+#else
+static int input YY_PROTO(( void ));
+#endif
+#endif
+
+#if YY_STACK_USED
+static int yy_start_stack_ptr = 0;
+static int yy_start_stack_depth = 0;
+static int *yy_start_stack = 0;
+#ifndef YY_NO_PUSH_STATE
+static void yy_push_state YY_PROTO(( int new_state ));
+#endif
+#ifndef YY_NO_POP_STATE
+static void yy_pop_state YY_PROTO(( void ));
+#endif
+#ifndef YY_NO_TOP_STATE
+static int yy_top_state YY_PROTO(( void ));
+#endif
+
+#else
+#define YY_NO_PUSH_STATE 1
+#define YY_NO_POP_STATE 1
+#define YY_NO_TOP_STATE 1
+#endif
+
+#ifdef YY_MALLOC_DECL
+YY_MALLOC_DECL
+#else
+#if __STDC__
+#ifndef __cplusplus
+#include <stdlib.h>
+#endif
+#else
+/* Just try to get by without declaring the routines. This will fail
+ * miserably on non-ANSI systems for which sizeof(size_t) != sizeof(int)
+ * or sizeof(void*) != sizeof(int).
+ */
+#endif
+#endif
+
+/* Amount of stuff to slurp up with each read. */
+#ifndef YY_READ_BUF_SIZE
+#define YY_READ_BUF_SIZE 8192
+#endif
+
+/* Copy whatever the last rule matched to the standard output. */
+
+#ifndef ECHO
+/* This used to be an fputs(), but since the string might contain NUL's,
+ * we now use fwrite().
+ */
+#define ECHO (void) fwrite( yytext, yyleng, 1, yyout )
+#endif
+
+/* Gets input and stuffs it into "buf". The number of characters read, or YY_NULL,
+ * is returned in "result".
+ */
+#ifndef YY_INPUT
+#define YY_INPUT(buf,result,max_size) \
+ if ( yy_current_buffer->yy_is_interactive ) \
+ { \
+ int c = '*', n; \
+ for ( n = 0; n < max_size && \
+ (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
+ buf[n] = (char) c; \
+ if ( c == '\n' ) \
+ buf[n++] = (char) c; \
+ if ( c == EOF && ferror( yyin ) ) \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ result = n; \
+ } \
+ else if ( ((result = fread( buf, 1, max_size, yyin )) == 0) \
+ && ferror( yyin ) ) \
+ YY_FATAL_ERROR( "input in flex scanner failed" );
+#endif
+
+/* No semi-colon after return; correct usage is to write "yyterminate();" -
+ * we don't want an extra ';' after the "return" because that will cause
+ * some compilers to complain about unreachable statements.
+ */
+#ifndef yyterminate
+#define yyterminate() return YY_NULL
+#endif
+
+/* Number of entries by which start-condition stack grows. */
+#ifndef YY_START_STACK_INCR
+#define YY_START_STACK_INCR 25
+#endif
+
+/* Report a fatal error. */
+#ifndef YY_FATAL_ERROR
+#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )
+#endif
+
+/* Default declaration of generated scanner - a define so the user can
+ * easily add parameters.
+ */
+#ifndef YY_DECL
+#define YY_DECL int yylex YY_PROTO(( void ))
+#endif
+
+/* Code executed at the beginning of each rule, after yytext and yyleng
+ * have been set up.
+ */
+#ifndef YY_USER_ACTION
+#define YY_USER_ACTION
+#endif
+
+/* Code executed at the end of each rule. */
+#ifndef YY_BREAK
+#define YY_BREAK break;
+#endif
+
+#define YY_RULE_SETUP \
+ YY_USER_ACTION
+
+YY_DECL
+ {
+ register yy_state_type yy_current_state;
+ register char *yy_cp, *yy_bp;
+ register int yy_act;
+
+#line 91 "ssl_expr_scan.l"
+
+
+ char caStr[MAX_STR_LEN];
+ char *cpStr = NULL;
+ char caRegex[MAX_STR_LEN];
+ char *cpRegex = NULL;
+ char cRegexDel = NUL;
+
+ /*
+ * Whitespaces
+ */
+#line 698 "lex.ssl_expr_yy.c"
+
+ if ( yy_init )
+ {
+ yy_init = 0;
+
+#ifdef YY_USER_INIT
+ YY_USER_INIT;
+#endif
+
+ if ( ! yy_start )
+ yy_start = 1; /* first start state */
+
+ if ( ! yyin )
+ yyin = stdin;
+
+ if ( ! yyout )
+ yyout = stdout;
+
+ if ( ! yy_current_buffer )
+ yy_current_buffer =
+ yy_create_buffer( yyin, YY_BUF_SIZE );
+
+ yy_load_buffer_state();
+ }
+
+ while ( 1 ) /* loops until end-of-file is reached */
+ {
+ yy_cp = yy_c_buf_p;
+
+ /* Support of yytext. */
+ *yy_cp = yy_hold_char;
+
+ /* yy_bp points to the position in yy_ch_buf of the start of
+ * the current run.
+ */
+ yy_bp = yy_cp;
+
+ yy_current_state = yy_start;
+yy_match:
+ do
+ {
+ register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)];
+ if ( yy_accept[yy_current_state] )
+ {
+ yy_last_accepting_state = yy_current_state;
+ yy_last_accepting_cpos = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 86 )
+ yy_c = yy_meta[(unsigned int) yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
+ ++yy_cp;
+ }
+ while ( yy_current_state != 85 );
+ yy_cp = yy_last_accepting_cpos;
+ yy_current_state = yy_last_accepting_state;
+
+yy_find_action:
+ yy_act = yy_accept[yy_current_state];
+
+ YY_DO_BEFORE_ACTION;
+
+
+do_action: /* This label is used only to access EOF actions. */
+
+
+ switch ( yy_act )
+ { /* beginning of action switch */
+ case 0: /* must back up */
+ /* undo the effects of YY_DO_BEFORE_ACTION */
+ *yy_cp = yy_hold_char;
+ yy_cp = yy_last_accepting_cpos;
+ yy_current_state = yy_last_accepting_state;
+ goto yy_find_action;
+
+case 1:
+YY_RULE_SETUP
+#line 102 "ssl_expr_scan.l"
+{
+ /* NOP */
+}
+ YY_BREAK
+/*
+ * C-style strings ("...")
+ */
+case 2:
+YY_RULE_SETUP
+#line 109 "ssl_expr_scan.l"
+{
+ cpStr = caStr;
+ BEGIN(str);
+}
+ YY_BREAK
+case 3:
+YY_RULE_SETUP
+#line 113 "ssl_expr_scan.l"
+{
+ BEGIN(INITIAL);
+ *cpStr = NUL;
+ yylval.cpVal = apr_pstrdup(ssl_expr_info.pool, caStr);
+ return T_STRING;
+}
+ YY_BREAK
+case 4:
+YY_RULE_SETUP
+#line 119 "ssl_expr_scan.l"
+{
+ yyerror("Unterminated string");
+}
+ YY_BREAK
+case 5:
+YY_RULE_SETUP
+#line 122 "ssl_expr_scan.l"
+{
+ int result;
+
+ (void)sscanf(yytext+1, "%o", &result);
+ if (result > 0xff)
+ yyerror("Escape sequence out of bound");
+ else
+ *cpStr++ = result;
+}
+ YY_BREAK
+case 6:
+YY_RULE_SETUP
+#line 131 "ssl_expr_scan.l"
+{
+ yyerror("Bad escape sequence");
+}
+ YY_BREAK
+case 7:
+YY_RULE_SETUP
+#line 134 "ssl_expr_scan.l"
+{ *cpStr++ = '\n'; }
+ YY_BREAK
+case 8:
+YY_RULE_SETUP
+#line 135 "ssl_expr_scan.l"
+{ *cpStr++ = '\r'; }
+ YY_BREAK
+case 9:
+YY_RULE_SETUP
+#line 136 "ssl_expr_scan.l"
+{ *cpStr++ = '\t'; }
+ YY_BREAK
+case 10:
+YY_RULE_SETUP
+#line 137 "ssl_expr_scan.l"
+{ *cpStr++ = '\b'; }
+ YY_BREAK
+case 11:
+YY_RULE_SETUP
+#line 138 "ssl_expr_scan.l"
+{ *cpStr++ = '\f'; }
+ YY_BREAK
+case 12:
+YY_RULE_SETUP
+#line 139 "ssl_expr_scan.l"
+{
+ *cpStr++ = yytext[1];
+}
+ YY_BREAK
+case 13:
+YY_RULE_SETUP
+#line 142 "ssl_expr_scan.l"
+{
+ char *cp = yytext;
+ while (*cp != NUL)
+ *cpStr++ = *cp++;
+}
+ YY_BREAK
+case 14:
+YY_RULE_SETUP
+#line 147 "ssl_expr_scan.l"
+{
+ *cpStr++ = yytext[1];
+}
+ YY_BREAK
+/*
+ * Regular Expression
+ */
+case 15:
+YY_RULE_SETUP
+#line 154 "ssl_expr_scan.l"
+{
+ cRegexDel = yytext[1];
+ cpRegex = caRegex;
+ BEGIN(regex);
+}
+ YY_BREAK
+case 16:
+YY_RULE_SETUP
+#line 159 "ssl_expr_scan.l"
+{
+ if (yytext[0] == cRegexDel) {
+ *cpRegex = NUL;
+ BEGIN(regex_flags);
+ }
+ else {
+ *cpRegex++ = yytext[0];
+ }
+}
+ YY_BREAK
+case 17:
+YY_RULE_SETUP
+#line 168 "ssl_expr_scan.l"
+{
+ yylval.cpVal = apr_pstrdup(ssl_expr_info.pool, caRegex);
+ BEGIN(INITIAL);
+ return T_REGEX_I;
+}
+ YY_BREAK
+case 18:
+YY_RULE_SETUP
+#line 173 "ssl_expr_scan.l"
+{
+ yylval.cpVal = apr_pstrdup(ssl_expr_info.pool, caRegex);
+ yyless(0);
+ BEGIN(INITIAL);
+ return T_REGEX;
+}
+ YY_BREAK
+case YY_STATE_EOF(regex_flags):
+#line 179 "ssl_expr_scan.l"
+{
+ yylval.cpVal = apr_pstrdup(ssl_expr_info.pool, caRegex);
+ BEGIN(INITIAL);
+ return T_REGEX;
+}
+ YY_BREAK
+/*
+ * Operators
+ */
+case 19:
+YY_RULE_SETUP
+#line 188 "ssl_expr_scan.l"
+{ return T_OP_EQ; }
+ YY_BREAK
+case 20:
+YY_RULE_SETUP
+#line 189 "ssl_expr_scan.l"
+{ return T_OP_EQ; }
+ YY_BREAK
+case 21:
+YY_RULE_SETUP
+#line 190 "ssl_expr_scan.l"
+{ return T_OP_NE; }
+ YY_BREAK
+case 22:
+YY_RULE_SETUP
+#line 191 "ssl_expr_scan.l"
+{ return T_OP_NE; }
+ YY_BREAK
+case 23:
+YY_RULE_SETUP
+#line 192 "ssl_expr_scan.l"
+{ return T_OP_LT; }
+ YY_BREAK
+case 24:
+YY_RULE_SETUP
+#line 193 "ssl_expr_scan.l"
+{ return T_OP_LT; }
+ YY_BREAK
+case 25:
+YY_RULE_SETUP
+#line 194 "ssl_expr_scan.l"
+{ return T_OP_LE; }
+ YY_BREAK
+case 26:
+YY_RULE_SETUP
+#line 195 "ssl_expr_scan.l"
+{ return T_OP_LE; }
+ YY_BREAK
+case 27:
+YY_RULE_SETUP
+#line 196 "ssl_expr_scan.l"
+{ return T_OP_GT; }
+ YY_BREAK
+case 28:
+YY_RULE_SETUP
+#line 197 "ssl_expr_scan.l"
+{ return T_OP_GT; }
+ YY_BREAK
+case 29:
+YY_RULE_SETUP
+#line 198 "ssl_expr_scan.l"
+{ return T_OP_GE; }
+ YY_BREAK
+case 30:
+YY_RULE_SETUP
+#line 199 "ssl_expr_scan.l"
+{ return T_OP_GE; }
+ YY_BREAK
+case 31:
+YY_RULE_SETUP
+#line 200 "ssl_expr_scan.l"
+{ return T_OP_REG; }
+ YY_BREAK
+case 32:
+YY_RULE_SETUP
+#line 201 "ssl_expr_scan.l"
+{ return T_OP_NRE; }
+ YY_BREAK
+case 33:
+YY_RULE_SETUP
+#line 202 "ssl_expr_scan.l"
+{ return T_OP_AND; }
+ YY_BREAK
+case 34:
+YY_RULE_SETUP
+#line 203 "ssl_expr_scan.l"
+{ return T_OP_AND; }
+ YY_BREAK
+case 35:
+YY_RULE_SETUP
+#line 204 "ssl_expr_scan.l"
+{ return T_OP_OR; }
+ YY_BREAK
+case 36:
+YY_RULE_SETUP
+#line 205 "ssl_expr_scan.l"
+{ return T_OP_OR; }
+ YY_BREAK
+case 37:
+YY_RULE_SETUP
+#line 206 "ssl_expr_scan.l"
+{ return T_OP_NOT; }
+ YY_BREAK
+case 38:
+YY_RULE_SETUP
+#line 207 "ssl_expr_scan.l"
+{ return T_OP_NOT; }
+ YY_BREAK
+case 39:
+YY_RULE_SETUP
+#line 208 "ssl_expr_scan.l"
+{ return T_OP_IN; }
+ YY_BREAK
+/*
+ * Functions
+ */
+case 40:
+YY_RULE_SETUP
+#line 213 "ssl_expr_scan.l"
+{ return T_FUNC_FILE; }
+ YY_BREAK
+/*
+ * Specials
+ */
+case 41:
+YY_RULE_SETUP
+#line 218 "ssl_expr_scan.l"
+{ return T_TRUE; }
+ YY_BREAK
+case 42:
+YY_RULE_SETUP
+#line 219 "ssl_expr_scan.l"
+{ return T_FALSE; }
+ YY_BREAK
+/*
+ * Digits
+ */
+case 43:
+YY_RULE_SETUP
+#line 224 "ssl_expr_scan.l"
+{
+ yylval.cpVal = apr_pstrdup(ssl_expr_info.pool, yytext);
+ return T_DIGIT;
+}
+ YY_BREAK
+/*
+ * Identifiers
+ */
+case 44:
+YY_RULE_SETUP
+#line 232 "ssl_expr_scan.l"
+{
+ yylval.cpVal = apr_pstrdup(ssl_expr_info.pool, yytext);
+ return T_ID;
+}
+ YY_BREAK
+/*
+ * Anything else is returned as is...
+ */
+case 45:
+YY_RULE_SETUP
+#line 240 "ssl_expr_scan.l"
+{
+ return yytext[0];
+}
+ YY_BREAK
+case 46:
+YY_RULE_SETUP
+#line 244 "ssl_expr_scan.l"
+YY_FATAL_ERROR( "flex scanner jammed" );
+ YY_BREAK
+#line 1098 "lex.ssl_expr_yy.c"
+case YY_STATE_EOF(INITIAL):
+case YY_STATE_EOF(str):
+case YY_STATE_EOF(regex):
+ yyterminate();
+
+ case YY_END_OF_BUFFER:
+ {
+ /* Amount of text matched not including the EOB char. */
+ int yy_amount_of_matched_text = (int) (yy_cp - yytext_ptr) - 1;
+
+ /* Undo the effects of YY_DO_BEFORE_ACTION. */
+ *yy_cp = yy_hold_char;
+ YY_RESTORE_YY_MORE_OFFSET
+
+ if ( yy_current_buffer->yy_buffer_status == YY_BUFFER_NEW )
+ {
+ /* We're scanning a new file or input source. It's
+ * possible that this happened because the user
+ * just pointed yyin at a new source and called
+ * yylex(). If so, then we have to assure
+ * consistency between yy_current_buffer and our
+ * globals. Here is the right place to do so, because
+ * this is the first action (other than possibly a
+ * back-up) that will match for the new input source.
+ */
+ yy_n_chars = yy_current_buffer->yy_n_chars;
+ yy_current_buffer->yy_input_file = yyin;
+ yy_current_buffer->yy_buffer_status = YY_BUFFER_NORMAL;
+ }
+
+ /* Note that here we test for yy_c_buf_p "<=" to the position
+ * of the first EOB in the buffer, since yy_c_buf_p will
+ * already have been incremented past the NUL character
+ * (since all states make transitions on EOB to the
+ * end-of-buffer state). Contrast this with the test
+ * in input().
+ */
+ if ( yy_c_buf_p <= &yy_current_buffer->yy_ch_buf[yy_n_chars] )
+ { /* This was really a NUL. */
+ yy_state_type yy_next_state;
+
+ yy_c_buf_p = yytext_ptr + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state();
+
+ /* Okay, we're now positioned to make the NUL
+ * transition. We couldn't have
+ * yy_get_previous_state() go ahead and do it
+ * for us because it doesn't know how to deal
+ * with the possibility of jamming (and we don't
+ * want to build jamming into it because then it
+ * will run more slowly).
+ */
+
+ yy_next_state = yy_try_NUL_trans( yy_current_state );
+
+ yy_bp = yytext_ptr + YY_MORE_ADJ;
+
+ if ( yy_next_state )
+ {
+ /* Consume the NUL. */
+ yy_cp = ++yy_c_buf_p;
+ yy_current_state = yy_next_state;
+ goto yy_match;
+ }
+
+ else
+ {
+ yy_cp = yy_last_accepting_cpos;
+ yy_current_state = yy_last_accepting_state;
+ goto yy_find_action;
+ }
+ }
+
+ else switch ( yy_get_next_buffer() )
+ {
+ case EOB_ACT_END_OF_FILE:
+ {
+ yy_did_buffer_switch_on_eof = 0;
+
+ if ( yywrap() )
+ {
+ /* Note: because we've taken care in
+ * yy_get_next_buffer() to have set up
+ * yytext, we can now set up
+ * yy_c_buf_p so that if some total
+ * hoser (like flex itself) wants to
+ * call the scanner after we return the
+ * YY_NULL, it'll still work - another
+ * YY_NULL will get returned.
+ */
+ yy_c_buf_p = yytext_ptr + YY_MORE_ADJ;
+
+ yy_act = YY_STATE_EOF(YY_START);
+ goto do_action;
+ }
+
+ else
+ {
+ if ( ! yy_did_buffer_switch_on_eof )
+ YY_NEW_FILE;
+ }
+ break;
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ yy_c_buf_p =
+ yytext_ptr + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state();
+
+ yy_cp = yy_c_buf_p;
+ yy_bp = yytext_ptr + YY_MORE_ADJ;
+ goto yy_match;
+
+ case EOB_ACT_LAST_MATCH:
+ yy_c_buf_p =
+ &yy_current_buffer->yy_ch_buf[yy_n_chars];
+
+ yy_current_state = yy_get_previous_state();
+
+ yy_cp = yy_c_buf_p;
+ yy_bp = yytext_ptr + YY_MORE_ADJ;
+ goto yy_find_action;
+ }
+ break;
+ }
+
+ default:
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--no action found" );
+ } /* end of action switch */
+ } /* end of scanning one token */
+ } /* end of yylex */
+
+
+/* yy_get_next_buffer - try to read in a new buffer
+ *
+ * Returns a code representing an action:
+ * EOB_ACT_LAST_MATCH -
+ * EOB_ACT_CONTINUE_SCAN - continue scanning from current position
+ * EOB_ACT_END_OF_FILE - end of file
+ */
+
+static int yy_get_next_buffer()
+ {
+ register char *dest = yy_current_buffer->yy_ch_buf;
+ register char *source = yytext_ptr;
+ register int number_to_move, i;
+ int ret_val;
+
+ if ( yy_c_buf_p > &yy_current_buffer->yy_ch_buf[yy_n_chars + 1] )
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--end of buffer missed" );
+
+ if ( yy_current_buffer->yy_fill_buffer == 0 )
+ { /* Don't try to fill the buffer, so this is an EOF. */
+ if ( yy_c_buf_p - yytext_ptr - YY_MORE_ADJ == 1 )
+ {
+ /* We matched a single character, the EOB, so
+ * treat this as a final EOF.
+ */
+ return EOB_ACT_END_OF_FILE;
+ }
+
+ else
+ {
+ /* We matched some text prior to the EOB, first
+ * process it.
+ */
+ return EOB_ACT_LAST_MATCH;
+ }
+ }
+
+ /* Try to read more data. */
+
+ /* First move last chars to start of buffer. */
+ number_to_move = (int) (yy_c_buf_p - yytext_ptr) - 1;
+
+ for ( i = 0; i < number_to_move; ++i )
+ *(dest++) = *(source++);
+
+ if ( yy_current_buffer->yy_buffer_status == YY_BUFFER_EOF_PENDING )
+ /* don't do the read, it's not guaranteed to return an EOF,
+ * just force an EOF
+ */
+ yy_current_buffer->yy_n_chars = yy_n_chars = 0;
+
+ else
+ {
+ int num_to_read =
+ yy_current_buffer->yy_buf_size - number_to_move - 1;
+
+ while ( num_to_read <= 0 )
+ { /* Not enough room in the buffer - grow it. */
+#ifdef YY_USES_REJECT
+ YY_FATAL_ERROR(
+"input buffer overflow, can't enlarge buffer because scanner uses REJECT" );
+#else
+
+ /* just a shorter name for the current buffer */
+ YY_BUFFER_STATE b = yy_current_buffer;
+
+ int yy_c_buf_p_offset =
+ (int) (yy_c_buf_p - b->yy_ch_buf);
+
+ if ( b->yy_is_our_buffer )
+ {
+ int new_size = b->yy_buf_size * 2;
+
+ if ( new_size <= 0 )
+ b->yy_buf_size += b->yy_buf_size / 8;
+ else
+ b->yy_buf_size *= 2;
+
+ b->yy_ch_buf = (char *)
+ /* Include room in for 2 EOB chars. */
+ yy_flex_realloc( (void *) b->yy_ch_buf,
+ b->yy_buf_size + 2 );
+ }
+ else
+ /* Can't grow it, we don't own it. */
+ b->yy_ch_buf = 0;
+
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR(
+ "fatal error - scanner input buffer overflow" );
+
+ yy_c_buf_p = &b->yy_ch_buf[yy_c_buf_p_offset];
+
+ num_to_read = yy_current_buffer->yy_buf_size -
+ number_to_move - 1;
+#endif
+ }
+
+ if ( num_to_read > YY_READ_BUF_SIZE )
+ num_to_read = YY_READ_BUF_SIZE;
+
+ /* Read in more data. */
+ YY_INPUT( (&yy_current_buffer->yy_ch_buf[number_to_move]),
+ yy_n_chars, num_to_read );
+
+ yy_current_buffer->yy_n_chars = yy_n_chars;
+ }
+
+ if ( yy_n_chars == 0 )
+ {
+ if ( number_to_move == YY_MORE_ADJ )
+ {
+ ret_val = EOB_ACT_END_OF_FILE;
+ yyrestart( yyin );
+ }
+
+ else
+ {
+ ret_val = EOB_ACT_LAST_MATCH;
+ yy_current_buffer->yy_buffer_status =
+ YY_BUFFER_EOF_PENDING;
+ }
+ }
+
+ else
+ ret_val = EOB_ACT_CONTINUE_SCAN;
+
+ yy_n_chars += number_to_move;
+ yy_current_buffer->yy_ch_buf[yy_n_chars] = YY_END_OF_BUFFER_CHAR;
+ yy_current_buffer->yy_ch_buf[yy_n_chars + 1] = YY_END_OF_BUFFER_CHAR;
+
+ yytext_ptr = &yy_current_buffer->yy_ch_buf[0];
+
+ return ret_val;
+ }
+
+
+/* yy_get_previous_state - get the state just before the EOB char was reached */
+
+static yy_state_type yy_get_previous_state()
+ {
+ register yy_state_type yy_current_state;
+ register char *yy_cp;
+
+ yy_current_state = yy_start;
+
+ for ( yy_cp = yytext_ptr + YY_MORE_ADJ; yy_cp < yy_c_buf_p; ++yy_cp )
+ {
+ register YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1);
+ if ( yy_accept[yy_current_state] )
+ {
+ yy_last_accepting_state = yy_current_state;
+ yy_last_accepting_cpos = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 86 )
+ yy_c = yy_meta[(unsigned int) yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
+ }
+
+ return yy_current_state;
+ }
+
+
+/* yy_try_NUL_trans - try to make a transition on the NUL character
+ *
+ * synopsis
+ * next_state = yy_try_NUL_trans( current_state );
+ */
+
+#ifdef YY_USE_PROTOS
+static yy_state_type yy_try_NUL_trans( yy_state_type yy_current_state )
+#else
+static yy_state_type yy_try_NUL_trans( yy_current_state )
+yy_state_type yy_current_state;
+#endif
+ {
+ register int yy_is_jam;
+ register char *yy_cp = yy_c_buf_p;
+
+ register YY_CHAR yy_c = 1;
+ if ( yy_accept[yy_current_state] )
+ {
+ yy_last_accepting_state = yy_current_state;
+ yy_last_accepting_cpos = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 86 )
+ yy_c = yy_meta[(unsigned int) yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
+ yy_is_jam = (yy_current_state == 85);
+
+ return yy_is_jam ? 0 : yy_current_state;
+ }
+
+
+#ifndef YY_NO_UNPUT
+#ifdef YY_USE_PROTOS
+static void yyunput( int c, register char *yy_bp )
+#else
+static void yyunput( c, yy_bp )
+int c;
+register char *yy_bp;
+#endif
+ {
+ register char *yy_cp = yy_c_buf_p;
+
+ /* undo effects of setting up yytext */
+ *yy_cp = yy_hold_char;
+
+ if ( yy_cp < yy_current_buffer->yy_ch_buf + 2 )
+ { /* need to shift things up to make room */
+ /* +2 for EOB chars. */
+ register int number_to_move = yy_n_chars + 2;
+ register char *dest = &yy_current_buffer->yy_ch_buf[
+ yy_current_buffer->yy_buf_size + 2];
+ register char *source =
+ &yy_current_buffer->yy_ch_buf[number_to_move];
+
+ while ( source > yy_current_buffer->yy_ch_buf )
+ *--dest = *--source;
+
+ yy_cp += (int) (dest - source);
+ yy_bp += (int) (dest - source);
+ yy_current_buffer->yy_n_chars =
+ yy_n_chars = yy_current_buffer->yy_buf_size;
+
+ if ( yy_cp < yy_current_buffer->yy_ch_buf + 2 )
+ YY_FATAL_ERROR( "flex scanner push-back overflow" );
+ }
+
+ *--yy_cp = (char) c;
+
+
+ yytext_ptr = yy_bp;
+ yy_hold_char = *yy_cp;
+ yy_c_buf_p = yy_cp;
+ }
+#endif /* ifndef YY_NO_UNPUT */
+
+
+#ifdef __cplusplus
+static int yyinput()
+#else
+static int input()
+#endif
+ {
+ int c;
+
+ *yy_c_buf_p = yy_hold_char;
+
+ if ( *yy_c_buf_p == YY_END_OF_BUFFER_CHAR )
+ {
+ /* yy_c_buf_p now points to the character we want to return.
+ * If this occurs *before* the EOB characters, then it's a
+ * valid NUL; if not, then we've hit the end of the buffer.
+ */
+ if ( yy_c_buf_p < &yy_current_buffer->yy_ch_buf[yy_n_chars] )
+ /* This was really a NUL. */
+ *yy_c_buf_p = '\0';
+
+ else
+ { /* need more input */
+ int offset = yy_c_buf_p - yytext_ptr;
+ ++yy_c_buf_p;
+
+ switch ( yy_get_next_buffer() )
+ {
+ case EOB_ACT_LAST_MATCH:
+ /* This happens because yy_g_n_b()
+ * sees that we've accumulated a
+ * token and flags that we need to
+ * try matching the token before
+ * proceeding. But for input(),
+ * there's no matching to consider.
+ * So convert the EOB_ACT_LAST_MATCH
+ * to EOB_ACT_END_OF_FILE.
+ */
+
+ /* Reset buffer status. */
+ yyrestart( yyin );
+
+ /* fall through */
+
+ case EOB_ACT_END_OF_FILE:
+ {
+ if ( yywrap() )
+ return EOF;
+
+ if ( ! yy_did_buffer_switch_on_eof )
+ YY_NEW_FILE;
+#ifdef __cplusplus
+ return yyinput();
+#else
+ return input();
+#endif
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ yy_c_buf_p = yytext_ptr + offset;
+ break;
+ }
+ }
+ }
+
+ c = *(unsigned char *) yy_c_buf_p; /* cast for 8-bit char's */
+ *yy_c_buf_p = '\0'; /* preserve yytext */
+ yy_hold_char = *++yy_c_buf_p;
+
+
+ return c;
+ }
+
+
+#ifdef YY_USE_PROTOS
+void yyrestart( FILE *input_file )
+#else
+void yyrestart( input_file )
+FILE *input_file;
+#endif
+ {
+ if ( ! yy_current_buffer )
+ yy_current_buffer = yy_create_buffer( yyin, YY_BUF_SIZE );
+
+ yy_init_buffer( yy_current_buffer, input_file );
+ yy_load_buffer_state();
+ }
+
+
+#ifdef YY_USE_PROTOS
+void yy_switch_to_buffer( YY_BUFFER_STATE new_buffer )
+#else
+void yy_switch_to_buffer( new_buffer )
+YY_BUFFER_STATE new_buffer;
+#endif
+ {
+ if ( yy_current_buffer == new_buffer )
+ return;
+
+ if ( yy_current_buffer )
+ {
+ /* Flush out information for old buffer. */
+ *yy_c_buf_p = yy_hold_char;
+ yy_current_buffer->yy_buf_pos = yy_c_buf_p;
+ yy_current_buffer->yy_n_chars = yy_n_chars;
+ }
+
+ yy_current_buffer = new_buffer;
+ yy_load_buffer_state();
+
+ /* We don't actually know whether we did this switch during
+ * EOF (yywrap()) processing, but the only time this flag
+ * is looked at is after yywrap() is called, so it's safe
+ * to go ahead and always set it.
+ */
+ yy_did_buffer_switch_on_eof = 1;
+ }
+
+
+#ifdef YY_USE_PROTOS
+void yy_load_buffer_state( void )
+#else
+void yy_load_buffer_state()
+#endif
+ {
+ yy_n_chars = yy_current_buffer->yy_n_chars;
+ yytext_ptr = yy_c_buf_p = yy_current_buffer->yy_buf_pos;
+ yyin = yy_current_buffer->yy_input_file;
+ yy_hold_char = *yy_c_buf_p;
+ }
+
+
+#ifdef YY_USE_PROTOS
+YY_BUFFER_STATE yy_create_buffer( FILE *file, int size )
+#else
+YY_BUFFER_STATE yy_create_buffer( file, size )
+FILE *file;
+int size;
+#endif
+ {
+ YY_BUFFER_STATE b;
+
+ b = (YY_BUFFER_STATE) yy_flex_alloc( sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
+
+ b->yy_buf_size = size;
+
+ /* yy_ch_buf has to be 2 characters longer than the size given because
+ * we need to put in 2 end-of-buffer characters.
+ */
+ b->yy_ch_buf = (char *) yy_flex_alloc( b->yy_buf_size + 2 );
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
+
+ b->yy_is_our_buffer = 1;
+
+ yy_init_buffer( b, file );
+
+ return b;
+ }
+
+
+#ifdef YY_USE_PROTOS
+void yy_delete_buffer( YY_BUFFER_STATE b )
+#else
+void yy_delete_buffer( b )
+YY_BUFFER_STATE b;
+#endif
+ {
+ if ( ! b )
+ return;
+
+ if ( b == yy_current_buffer )
+ yy_current_buffer = (YY_BUFFER_STATE) 0;
+
+ if ( b->yy_is_our_buffer )
+ yy_flex_free( (void *) b->yy_ch_buf );
+
+ yy_flex_free( (void *) b );
+ }
+
+
+#ifndef YY_ALWAYS_INTERACTIVE
+#ifndef YY_NEVER_INTERACTIVE
+extern int isatty YY_PROTO(( int ));
+#endif
+#endif
+
+#ifdef YY_USE_PROTOS
+void yy_init_buffer( YY_BUFFER_STATE b, FILE *file )
+#else
+void yy_init_buffer( b, file )
+YY_BUFFER_STATE b;
+FILE *file;
+#endif
+
+
+ {
+ yy_flush_buffer( b );
+
+ b->yy_input_file = file;
+ b->yy_fill_buffer = 1;
+
+#if YY_ALWAYS_INTERACTIVE
+ b->yy_is_interactive = 1;
+#else
+#if YY_NEVER_INTERACTIVE
+ b->yy_is_interactive = 0;
+#else
+ b->yy_is_interactive = file ? (isatty( fileno(file) ) > 0) : 0;
+#endif
+#endif
+ }
+
+
+#ifdef YY_USE_PROTOS
+void yy_flush_buffer( YY_BUFFER_STATE b )
+#else
+void yy_flush_buffer( b )
+YY_BUFFER_STATE b;
+#endif
+
+ {
+ if ( ! b )
+ return;
+
+ b->yy_n_chars = 0;
+
+ /* We always need two end-of-buffer characters. The first causes
+ * a transition to the end-of-buffer state. The second causes
+ * a jam in that state.
+ */
+ b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
+ b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
+
+ b->yy_buf_pos = &b->yy_ch_buf[0];
+
+ b->yy_at_bol = 1;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ if ( b == yy_current_buffer )
+ yy_load_buffer_state();
+ }
+
+
+#ifndef YY_NO_SCAN_BUFFER
+#ifdef YY_USE_PROTOS
+YY_BUFFER_STATE yy_scan_buffer( char *base, yy_size_t size )
+#else
+YY_BUFFER_STATE yy_scan_buffer( base, size )
+char *base;
+yy_size_t size;
+#endif
+ {
+ YY_BUFFER_STATE b;
+
+ if ( size < 2 ||
+ base[size-2] != YY_END_OF_BUFFER_CHAR ||
+ base[size-1] != YY_END_OF_BUFFER_CHAR )
+ /* They forgot to leave room for the EOB's. */
+ return 0;
+
+ b = (YY_BUFFER_STATE) yy_flex_alloc( sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_scan_buffer()" );
+
+ b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */
+ b->yy_buf_pos = b->yy_ch_buf = base;
+ b->yy_is_our_buffer = 0;
+ b->yy_input_file = 0;
+ b->yy_n_chars = b->yy_buf_size;
+ b->yy_is_interactive = 0;
+ b->yy_at_bol = 1;
+ b->yy_fill_buffer = 0;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ yy_switch_to_buffer( b );
+
+ return b;
+ }
+#endif
+
+
+#ifndef YY_NO_SCAN_STRING
+#ifdef YY_USE_PROTOS
+YY_BUFFER_STATE yy_scan_string( yyconst char *yy_str )
+#else
+YY_BUFFER_STATE yy_scan_string( yy_str )
+yyconst char *yy_str;
+#endif
+ {
+ int len;
+ for ( len = 0; yy_str[len]; ++len )
+ ;
+
+ return yy_scan_bytes( yy_str, len );
+ }
+#endif
+
+
+#ifndef YY_NO_SCAN_BYTES
+#ifdef YY_USE_PROTOS
+YY_BUFFER_STATE yy_scan_bytes( yyconst char *bytes, int len )
+#else
+YY_BUFFER_STATE yy_scan_bytes( bytes, len )
+yyconst char *bytes;
+int len;
+#endif
+ {
+ YY_BUFFER_STATE b;
+ char *buf;
+ yy_size_t n;
+ int i;
+
+ /* Get memory for full buffer, including space for trailing EOB's. */
+ n = len + 2;
+ buf = (char *) yy_flex_alloc( n );
+ if ( ! buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" );
+
+ for ( i = 0; i < len; ++i )
+ buf[i] = bytes[i];
+
+ buf[len] = buf[len+1] = YY_END_OF_BUFFER_CHAR;
+
+ b = yy_scan_buffer( buf, n );
+ if ( ! b )
+ YY_FATAL_ERROR( "bad buffer in yy_scan_bytes()" );
+
+ /* It's okay to grow etc. this buffer, and we should throw it
+ * away when we're done.
+ */
+ b->yy_is_our_buffer = 1;
+
+ return b;
+ }
+#endif
+
+
+#ifndef YY_NO_PUSH_STATE
+#ifdef YY_USE_PROTOS
+static void yy_push_state( int new_state )
+#else
+static void yy_push_state( new_state )
+int new_state;
+#endif
+ {
+ if ( yy_start_stack_ptr >= yy_start_stack_depth )
+ {
+ yy_size_t new_size;
+
+ yy_start_stack_depth += YY_START_STACK_INCR;
+ new_size = yy_start_stack_depth * sizeof( int );
+
+ if ( ! yy_start_stack )
+ yy_start_stack = (int *) yy_flex_alloc( new_size );
+
+ else
+ yy_start_stack = (int *) yy_flex_realloc(
+ (void *) yy_start_stack, new_size );
+
+ if ( ! yy_start_stack )
+ YY_FATAL_ERROR(
+ "out of memory expanding start-condition stack" );
+ }
+
+ yy_start_stack[yy_start_stack_ptr++] = YY_START;
+
+ BEGIN(new_state);
+ }
+#endif
+
+
+#ifndef YY_NO_POP_STATE
+static void yy_pop_state()
+ {
+ if ( --yy_start_stack_ptr < 0 )
+ YY_FATAL_ERROR( "start-condition stack underflow" );
+
+ BEGIN(yy_start_stack[yy_start_stack_ptr]);
+ }
+#endif
+
+
+#ifndef YY_NO_TOP_STATE
+static int yy_top_state()
+ {
+ return yy_start_stack[yy_start_stack_ptr - 1];
+ }
+#endif
+
+#ifndef YY_EXIT_FAILURE
+#define YY_EXIT_FAILURE 2
+#endif
+
+#ifdef YY_USE_PROTOS
+static void yy_fatal_error( yyconst char msg[] )
+#else
+static void yy_fatal_error( msg )
+char msg[];
+#endif
+ {
+ (void) fprintf( stderr, "%s\n", msg );
+ exit( YY_EXIT_FAILURE );
+ }
+
+
+
+/* Redefine yyless() so it works in section 3 code. */
+
+#undef yyless
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ yytext[yyleng] = yy_hold_char; \
+ yy_c_buf_p = yytext + n; \
+ yy_hold_char = *yy_c_buf_p; \
+ *yy_c_buf_p = '\0'; \
+ yyleng = n; \
+ } \
+ while ( 0 )
+
+
+/* Internal utility routines. */
+
+#ifndef yytext_ptr
+#ifdef YY_USE_PROTOS
+static void yy_flex_strncpy( char *s1, yyconst char *s2, int n )
+#else
+static void yy_flex_strncpy( s1, s2, n )
+char *s1;
+yyconst char *s2;
+int n;
+#endif
+ {
+ register int i;
+ for ( i = 0; i < n; ++i )
+ s1[i] = s2[i];
+ }
+#endif
+
+#ifdef YY_NEED_STRLEN
+#ifdef YY_USE_PROTOS
+static int yy_flex_strlen( yyconst char *s )
+#else
+static int yy_flex_strlen( s )
+yyconst char *s;
+#endif
+ {
+ register int n;
+ for ( n = 0; s[n]; ++n )
+ ;
+
+ return n;
+ }
+#endif
+
+
+#ifdef YY_USE_PROTOS
+static void *yy_flex_alloc( yy_size_t size )
+#else
+static void *yy_flex_alloc( size )
+yy_size_t size;
+#endif
+ {
+ return (void *) malloc( size );
+ }
+
+#ifdef YY_USE_PROTOS
+static void *yy_flex_realloc( void *ptr, yy_size_t size )
+#else
+static void *yy_flex_realloc( ptr, size )
+void *ptr;
+yy_size_t size;
+#endif
+ {
+ /* The cast to (char *) in the following accommodates both
+ * implementations that use char* generic pointers, and those
+ * that use void* generic pointers. It works with the latter
+ * because both ANSI C and C++ allow castless assignment from
+ * any pointer type to void*, and deal with argument conversions
+ * as though doing an assignment.
+ */
+ return (void *) realloc( (char *) ptr, size );
+ }
+
+#ifdef YY_USE_PROTOS
+static void yy_flex_free( void *ptr )
+#else
+static void yy_flex_free( ptr )
+void *ptr;
+#endif
+ {
+ free( ptr );
+ }
+
+#if YY_MAIN
+int main()
+ {
+ yylex();
+ return 0;
+ }
+#endif
+#line 244 "ssl_expr_scan.l"
+
+
+int yyinput(char *buf, int max_size)
+{
+ int n;
+
+ if ((n = MIN(max_size, ssl_expr_info.inputbuf
+ + ssl_expr_info.inputlen
+ - ssl_expr_info.inputptr)) <= 0)
+ return YY_NULL;
+ memcpy(buf, ssl_expr_info.inputptr, n);
+ ssl_expr_info.inputptr += n;
+ return n;
+}
+
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_scan.l b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_scan.l
new file mode 100644
index 00000000..86ba3b49
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_expr_scan.l
@@ -0,0 +1,225 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| |
+ * | '_ ` _ \ / _ \ / _` | / __/ __| |
+ * | | | | | | (_) | (_| | \__ \__ \ | mod_ssl - Apache Interface to OpenSSL
+ * |_| |_| |_|\___/ \__,_|___|___/___/_| http://www.modssl.org/
+ * |_____|
+ * ssl_expr_scan.l
+ * Expression Scanner
+ */
+ /* ``Killing for peace is
+ like fucking for virginity.''
+ -- Unknown */
+
+/* _________________________________________________________________
+**
+** Expression Scanner
+** _________________________________________________________________
+*/
+
+%{
+#include "mod_ssl.h"
+
+#include "ssl_expr_parse.h"
+
+#define YY_NO_UNPUT 1
+int yyinput(char *buf, int max_size);
+
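+/*
+ * YY_INPUT is redirected below so that the scanner reads from the
+ * in-memory expression buffer held in ssl_expr_info (see yyinput()
+ * at the bottom of this file) rather than from a FILE stream.
+ */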
+#undef YY_INPUT
+#define YY_INPUT(buf,result,max_size) \
+ (result = yyinput(buf, max_size))
+
+#define MAX_STR_LEN 2048
+%}
+
+%pointer
+/* %option stack */
+%option never-interactive
+%option noyywrap
+%x str
+%x regex regex_flags
+
+%%
+
+ char caStr[MAX_STR_LEN];
+ char *cpStr = NULL;
+ char caRegex[MAX_STR_LEN];
+ char *cpRegex = NULL;
+ char cRegexDel = NUL;
+
+ /*
+ * Whitespaces
+ */
+[ \t\n]+ {
+ /* NOP */
+}
+
+ /*
+ * C-style strings ("...")
+ */
+\" {
+ cpStr = caStr;
+ BEGIN(str);
+}
+<str>\" {
+ BEGIN(INITIAL);
+ *cpStr = NUL;
+ yylval.cpVal = apr_pstrdup(ssl_expr_info.pool, caStr);
+ return T_STRING;
+}
+<str>\n {
+ yyerror("Unterminated string");
+}
+<str>\\[0-7]{1,3} {
+ int result;
+
+ (void)sscanf(yytext+1, "%o", &result);
+ if (result > 0xff)
+ yyerror("Escape sequence out of bound");
+ else
+ *cpStr++ = result;
+}
+<str>\\[0-9]+ {
+ yyerror("Bad escape sequence");
+}
+<str>\\n { *cpStr++ = '\n'; }
+<str>\\r { *cpStr++ = '\r'; }
+<str>\\t { *cpStr++ = '\t'; }
+<str>\\b { *cpStr++ = '\b'; }
+<str>\\f { *cpStr++ = '\f'; }
+<str>\\(.|\n) {
+ *cpStr++ = yytext[1];
+}
+<str>[^\\\n\"]+ {
+ char *cp = yytext;
+ while (*cp != NUL)
+ *cpStr++ = *cp++;
+}
+<str>. {
+ *cpStr++ = yytext[1];
+}
+
+ /*
+ * Regular Expression
+ */
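+ /*
+  * "m" followed by an arbitrary delimiter character starts a regular
+  * expression (e.g. m/.../ or m|...|); a trailing "i" flag selects the
+  * case-insensitive token T_REGEX_I instead of T_REGEX.
+  */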
+"m". {
+ cRegexDel = yytext[1];
+ cpRegex = caRegex;
+ BEGIN(regex);
+}
+<regex>.|\n {
+ if (yytext[0] == cRegexDel) {
+ *cpRegex = NUL;
+ BEGIN(regex_flags);
+ }
+ else {
+ *cpRegex++ = yytext[0];
+ }
+}
+<regex_flags>i {
+ yylval.cpVal = apr_pstrdup(ssl_expr_info.pool, caRegex);
+ BEGIN(INITIAL);
+ return T_REGEX_I;
+}
+<regex_flags>.|\n {
+ yylval.cpVal = apr_pstrdup(ssl_expr_info.pool, caRegex);
+ yyless(0);
+ BEGIN(INITIAL);
+ return T_REGEX;
+}
+<regex_flags><<EOF>> {
+ yylval.cpVal = apr_pstrdup(ssl_expr_info.pool, caRegex);
+ BEGIN(INITIAL);
+ return T_REGEX;
+}
+
+ /*
+ * Operators
+ */
+"eq" { return T_OP_EQ; }
+"==" { return T_OP_EQ; }
+"ne" { return T_OP_NE; }
+"!=" { return T_OP_NE; }
+"lt" { return T_OP_LT; }
+"<" { return T_OP_LT; }
+"le" { return T_OP_LE; }
+"<=" { return T_OP_LE; }
+"gt" { return T_OP_GT; }
+">" { return T_OP_GT; }
+"ge" { return T_OP_GE; }
+">=" { return T_OP_GE; }
+"=~" { return T_OP_REG; }
+"!~" { return T_OP_NRE; }
+"and" { return T_OP_AND; }
+"&&" { return T_OP_AND; }
+"or" { return T_OP_OR; }
+"||" { return T_OP_OR; }
+"not" { return T_OP_NOT; }
+"!" { return T_OP_NOT; }
+"in" { return T_OP_IN; }
+
+ /*
+ * Functions
+ */
+"file" { return T_FUNC_FILE; }
+
+ /*
+ * Specials
+ */
+"true" { return T_TRUE; }
+"false" { return T_FALSE; }
+
+ /*
+ * Digits
+ */
+[0-9]+ {
+ yylval.cpVal = apr_pstrdup(ssl_expr_info.pool, yytext);
+ return T_DIGIT;
+}
+
+ /*
+ * Identifiers
+ */
+[a-zA-Z][a-zA-Z0-9_:-]* {
+ yylval.cpVal = apr_pstrdup(ssl_expr_info.pool, yytext);
+ return T_ID;
+}
+
+ /*
+ * Anything else is returned as is...
+ */
+.|\n {
+ return yytext[0];
+}
+
+%%
+
+int yyinput(char *buf, int max_size)
+{
+ int n;
+
+ if ((n = MIN(max_size, ssl_expr_info.inputbuf
+ + ssl_expr_info.inputlen
+ - ssl_expr_info.inputptr)) <= 0)
+ return YY_NULL;
+ memcpy(buf, ssl_expr_info.inputptr, n);
+ ssl_expr_info.inputptr += n;
+ return n;
+}
+
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache.c
new file mode 100644
index 00000000..d74b853b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache.c
@@ -0,0 +1,199 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_scache.c
+ * Session Cache Abstraction
+ */
+ /* ``Open-Source Software: generous
+ programmers from around the world all
+ join forces to help you shoot
+ yourself in the foot for free.''
+ -- Unknown */
+#include "mod_ssl.h"
+
+/* _________________________________________________________________
+**
+** Session Cache: Common Abstraction Layer
+** _________________________________________________________________
+*/
+
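+/*
+ * Each function below simply dispatches on the configured
+ * mc->nSessionCacheMode to the corresponding backend implementation
+ * (ssl_scache_dbm_*, ssl_scache_shmht_* or ssl_scache_shmcb_*).
+ */
+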
+void ssl_scache_init(server_rec *s, apr_pool_t *p)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+
+ /*
+ * Warn the user that he should use the session cache.
+ * But we can operate without it, of course.
+ */
+ if (mc->nSessionCacheMode == SSL_SCMODE_UNSET) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ "Init: Session Cache is not configured "
+ "[hint: SSLSessionCache]");
+ mc->nSessionCacheMode = SSL_SCMODE_NONE;
+ return;
+ }
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ ssl_scache_dbm_init(s, p);
+ else if ((mc->nSessionCacheMode == SSL_SCMODE_SHMHT) ||
+ (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)) {
+ void *data;
+ const char *userdata_key = "ssl_scache_init";
+
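+        /*
+         * A userdata marker in the process pool makes the first call a
+         * no-op: the shared-memory cache is only actually created on a
+         * later call, i.e. not during the initial configuration pass.
+         */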
+ apr_pool_userdata_get(&data, userdata_key, s->process->pool);
+ if (!data) {
+ apr_pool_userdata_set((const void *)1, userdata_key,
+ apr_pool_cleanup_null, s->process->pool);
+ return;
+ }
+ if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ ssl_scache_shmht_init(s, p);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ ssl_scache_shmcb_init(s, p);
+ }
+}
+
+void ssl_scache_kill(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ ssl_scache_dbm_kill(s);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ ssl_scache_shmht_kill(s);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ ssl_scache_shmcb_kill(s);
+ return;
+}
+
+BOOL ssl_scache_store(server_rec *s, UCHAR *id, int idlen, time_t expiry, SSL_SESSION *sess)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ BOOL rv = FALSE;
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ rv = ssl_scache_dbm_store(s, id, idlen, expiry, sess);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ rv = ssl_scache_shmht_store(s, id, idlen, expiry, sess);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ rv = ssl_scache_shmcb_store(s, id, idlen, expiry, sess);
+ return rv;
+}
+
+SSL_SESSION *ssl_scache_retrieve(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ SSL_SESSION *sess = NULL;
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ sess = ssl_scache_dbm_retrieve(s, id, idlen);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ sess = ssl_scache_shmht_retrieve(s, id, idlen);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ sess = ssl_scache_shmcb_retrieve(s, id, idlen);
+ return sess;
+}
+
+void ssl_scache_remove(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ ssl_scache_dbm_remove(s, id, idlen);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ ssl_scache_shmht_remove(s, id, idlen);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ ssl_scache_shmcb_remove(s, id, idlen);
+ return;
+}
+
+void ssl_scache_status(server_rec *s, apr_pool_t *p, void (*func)(char *, void *), void *arg)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ ssl_scache_dbm_status(s, p, func, arg);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ ssl_scache_shmht_status(s, p, func, arg);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ ssl_scache_shmcb_status(s, p, func, arg);
+ return;
+}
+
+void ssl_scache_expire(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ ssl_scache_dbm_expire(s);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ ssl_scache_shmht_expire(s);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ ssl_scache_shmcb_expire(s);
+ return;
+}
+
+/* _________________________________________________________________
+**
+** SSL Extension to mod_status
+** _________________________________________________________________
+*/
+#if 0 /* NOT YET */
+static void ssl_ext_ms_display(request_rec *, int, int);
+
+void ssl_scache_status_register(apr_pool_t *p)
+{
+ /* XXX point mod_status to this update, when it grows the opt fn */
+#if 0
+ ap_hook_register("ap::mod_status::display", ssl_ext_ms_display, AP_HOOK_NOCTX);
+#endif
+ return;
+}
+
+static void ssl_ext_ms_display_cb(char *str, void *_r)
+{
+ request_rec *r = (request_rec *)_r;
+ if (str != NULL)
+ ap_rputs(str, r);
+ return;
+}
+
+static void ssl_ext_ms_display(request_rec *r, int no_table_report, int short_report)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(r->server);
+
+ if (sc == NULL)
+ return;
+ if (short_report)
+ return;
+ ap_rputs("<hr>\n", r);
+ ap_rputs("<table cellspacing=0 cellpadding=0>\n", r);
+ ap_rputs("<tr><td bgcolor=\"#000000\">\n", r);
+ ap_rputs("<b><font color=\"#ffffff\" face=\"Arial,Helvetica\">SSL/TLS Session Cache Status:</font></b>\r", r);
+ ap_rputs("</td></tr>\n", r);
+ ap_rputs("<tr><td bgcolor=\"#ffffff\">\n", r);
+ ssl_scache_status(r->server, r->pool, ssl_ext_ms_display_cb, r);
+ ap_rputs("</td></tr>\n", r);
+ ap_rputs("</table>\n", r);
+ return;
+}
+#endif
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache_dbm.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache_dbm.c
new file mode 100644
index 00000000..10ab9c89
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache_dbm.c
@@ -0,0 +1,462 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_scache_dbm.c
+ * Session Cache via DBM
+ */
+
+#include "mod_ssl.h"
+
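+/*
+ * Record layout used in the DBM file (see ssl_scache_dbm_store() and
+ * ssl_scache_dbm_retrieve() below):
+ *   key   = the raw SSL session id bytes
+ *   value = a time_t expiry timestamp followed by the DER-encoded session
+ */
+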
+void ssl_scache_dbm_init(server_rec *s, apr_pool_t *p)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ apr_dbm_t *dbm;
+ apr_status_t rv;
+
+ /* for the DBM we need the data file */
+ if (mc->szSessionCacheDataFile == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "SSLSessionCache required");
+ ssl_die();
+ }
+
+ /* open it once to create it and to make sure it _can_ be created */
+ ssl_mutex_on(s);
+ if ((rv = apr_dbm_open(&dbm, mc->szSessionCacheDataFile,
+ APR_DBM_RWCREATE, SSL_DBM_FILE_MODE, mc->pPool)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Cannot create SSLSessionCache DBM file `%s'",
+ mc->szSessionCacheDataFile);
+ ssl_mutex_off(s);
+ return;
+ }
+ apr_dbm_close(dbm);
+
+#if !defined(OS2) && !defined(WIN32) && !defined(BEOS) && !defined(NETWARE)
+ /*
+ * We have to make sure the Apache child processes have access to
+ * the DBM file. But because there are brain-dead platforms where we
+     * cannot exactly determine the suffixes, we try all possibilities.
+ */
+ if (geteuid() == 0 /* is superuser */) {
+ chown(mc->szSessionCacheDataFile, unixd_config.user_id, -1 /* no gid change */);
+ if (chown(apr_pstrcat(p, mc->szSessionCacheDataFile, SSL_DBM_FILE_SUFFIX_DIR, NULL),
+ unixd_config.user_id, -1) == -1) {
+ if (chown(apr_pstrcat(p, mc->szSessionCacheDataFile, ".db", NULL),
+ unixd_config.user_id, -1) == -1)
+ chown(apr_pstrcat(p, mc->szSessionCacheDataFile, ".dir", NULL),
+ unixd_config.user_id, -1);
+ }
+ if (chown(apr_pstrcat(p, mc->szSessionCacheDataFile, SSL_DBM_FILE_SUFFIX_PAG, NULL),
+ unixd_config.user_id, -1) == -1) {
+ if (chown(apr_pstrcat(p, mc->szSessionCacheDataFile, ".db", NULL),
+ unixd_config.user_id, -1) == -1)
+ chown(apr_pstrcat(p, mc->szSessionCacheDataFile, ".pag", NULL),
+ unixd_config.user_id, -1);
+ }
+ }
+#endif
+ ssl_mutex_off(s);
+ ssl_scache_dbm_expire(s);
+ return;
+}
+
+void ssl_scache_dbm_kill(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ apr_pool_t *p;
+
+ apr_pool_sub_make(&p, mc->pPool, NULL);
+ if (p != NULL) {
+ /* the correct way */
+ unlink(apr_pstrcat(p, mc->szSessionCacheDataFile, SSL_DBM_FILE_SUFFIX_DIR, NULL));
+ unlink(apr_pstrcat(p, mc->szSessionCacheDataFile, SSL_DBM_FILE_SUFFIX_PAG, NULL));
+ /* the additional ways to be sure */
+ unlink(apr_pstrcat(p, mc->szSessionCacheDataFile, ".dir", NULL));
+ unlink(apr_pstrcat(p, mc->szSessionCacheDataFile, ".pag", NULL));
+ unlink(apr_pstrcat(p, mc->szSessionCacheDataFile, ".db", NULL));
+ unlink(mc->szSessionCacheDataFile);
+ apr_pool_destroy(p);
+ }
+ return;
+}
+
+BOOL ssl_scache_dbm_store(server_rec *s, UCHAR *id, int idlen, time_t expiry, SSL_SESSION *sess)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ apr_dbm_t *dbm;
+ apr_datum_t dbmkey;
+ apr_datum_t dbmval;
+ UCHAR ucaData[SSL_SESSION_MAX_DER];
+ int nData;
+ UCHAR *ucp;
+ apr_status_t rv;
+
+ /* streamline session data */
+ if ((nData = i2d_SSL_SESSION(sess, NULL)) > sizeof(ucaData)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "streamline session data size too large: %d > %d",
+ nData, sizeof(ucaData));
+ return FALSE;
+ }
+ ucp = ucaData;
+ i2d_SSL_SESSION(sess, &ucp);
+
+    /* be careful: do not try to store too many bytes in a DBM file! */
+#ifdef PAIRMAX
+ if ((idlen + nData) >= PAIRMAX) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "data size too large for DBM session cache: %d >= %d",
+ (idlen + nData), PAIRMAX);
+ return FALSE;
+ }
+#else
+    if ((idlen + nData) >= 950 /* stay well below approx. 1KB */) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "data size too large for DBM session cache: %d >= %d",
+ (idlen + nData), 950);
+ return FALSE;
+ }
+#endif
+
+ /* create DBM key */
+ dbmkey.dptr = (char *)id;
+ dbmkey.dsize = idlen;
+
+ /* create DBM value */
+ dbmval.dsize = sizeof(time_t) + nData;
+ dbmval.dptr = (char *)malloc(dbmval.dsize);
+ if (dbmval.dptr == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "malloc error creating DBM value");
+ return FALSE;
+ }
+ memcpy((char *)dbmval.dptr, &expiry, sizeof(time_t));
+ memcpy((char *)dbmval.dptr+sizeof(time_t), ucaData, nData);
+
+ /* and store it to the DBM file */
+ ssl_mutex_on(s);
+ if ((rv = apr_dbm_open(&dbm, mc->szSessionCacheDataFile,
+ APR_DBM_RWCREATE, SSL_DBM_FILE_MODE, mc->pPool)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Cannot open SSLSessionCache DBM file `%s' for writing "
+ "(store)",
+ mc->szSessionCacheDataFile);
+ ssl_mutex_off(s);
+ free(dbmval.dptr);
+ return FALSE;
+ }
+ if ((rv = apr_dbm_store(dbm, dbmkey, dbmval)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Cannot store SSL session to DBM file `%s'",
+ mc->szSessionCacheDataFile);
+ apr_dbm_close(dbm);
+ ssl_mutex_off(s);
+ free(dbmval.dptr);
+ return FALSE;
+ }
+ apr_dbm_close(dbm);
+ ssl_mutex_off(s);
+
+ /* free temporary buffers */
+ free(dbmval.dptr);
+
+ /* allow the regular expiring to occur */
+ ssl_scache_dbm_expire(s);
+
+ return TRUE;
+}
+
+SSL_SESSION *ssl_scache_dbm_retrieve(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ apr_dbm_t *dbm;
+ apr_datum_t dbmkey;
+ apr_datum_t dbmval;
+ SSL_SESSION *sess = NULL;
+ MODSSL_D2I_SSL_SESSION_CONST unsigned char *ucpData;
+ int nData;
+ time_t expiry;
+ time_t now;
+ apr_status_t rc;
+
+ /* allow the regular expiring to occur */
+ ssl_scache_dbm_expire(s);
+
+ /* create DBM key and values */
+ dbmkey.dptr = (char *)id;
+ dbmkey.dsize = idlen;
+
+ /* and fetch it from the DBM file
+ * XXX: Should we open the dbm against r->pool so the cleanup will
+ * do the apr_dbm_close? This would make the code a bit cleaner.
+ */
+ ssl_mutex_on(s);
+ if ((rc = apr_dbm_open(&dbm, mc->szSessionCacheDataFile,
+ APR_DBM_RWCREATE, SSL_DBM_FILE_MODE, mc->pPool)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rc, s,
+ "Cannot open SSLSessionCache DBM file `%s' for reading "
+ "(fetch)",
+ mc->szSessionCacheDataFile);
+ ssl_mutex_off(s);
+ return NULL;
+ }
+ rc = apr_dbm_fetch(dbm, dbmkey, &dbmval);
+ if (rc != APR_SUCCESS) {
+ apr_dbm_close(dbm);
+ ssl_mutex_off(s);
+ return NULL;
+ }
+ if (dbmval.dptr == NULL || dbmval.dsize <= sizeof(time_t)) {
+ apr_dbm_close(dbm);
+ ssl_mutex_off(s);
+ return NULL;
+ }
+
+ /* parse resulting data */
+ nData = dbmval.dsize-sizeof(time_t);
+ ucpData = malloc(nData);
+ if (ucpData == NULL) {
+ apr_dbm_close(dbm);
+ ssl_mutex_off(s);
+ return NULL;
+ }
+ /* Cast needed, ucpData may be const */
+ memcpy((unsigned char *)ucpData,
+ (char *)dbmval.dptr + sizeof(time_t), nData);
+ memcpy(&expiry, dbmval.dptr, sizeof(time_t));
+
+ apr_dbm_close(dbm);
+ ssl_mutex_off(s);
+
+ /* make sure the stuff is still not expired */
+ now = time(NULL);
+ if (expiry <= now) {
+ ssl_scache_dbm_remove(s, id, idlen);
+ return NULL;
+ }
+
+    /* unstream (deserialize) the SSL_SESSION */
+ sess = d2i_SSL_SESSION(NULL, &ucpData, nData);
+
+ return sess;
+}
+
+void ssl_scache_dbm_remove(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ apr_dbm_t *dbm;
+ apr_datum_t dbmkey;
+ apr_status_t rv;
+
+ /* create DBM key and values */
+ dbmkey.dptr = (char *)id;
+ dbmkey.dsize = idlen;
+
+ /* and delete it from the DBM file */
+ ssl_mutex_on(s);
+ if ((rv = apr_dbm_open(&dbm, mc->szSessionCacheDataFile,
+ APR_DBM_RWCREATE, SSL_DBM_FILE_MODE, mc->pPool)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Cannot open SSLSessionCache DBM file `%s' for writing "
+ "(delete)",
+ mc->szSessionCacheDataFile);
+ ssl_mutex_off(s);
+ return;
+ }
+ apr_dbm_delete(dbm, dbmkey);
+ apr_dbm_close(dbm);
+ ssl_mutex_off(s);
+
+ return;
+}
+
+void ssl_scache_dbm_expire(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ SSLSrvConfigRec *sc = mySrvConfig(s);
+ static time_t tLast = 0;
+ apr_dbm_t *dbm;
+ apr_datum_t dbmkey;
+ apr_datum_t dbmval;
+ apr_pool_t *p;
+ time_t tExpiresAt;
+ int nElements = 0;
+ int nDeleted = 0;
+ int bDelete;
+ apr_datum_t *keylist;
+ int keyidx;
+ int i;
+ time_t tNow;
+ apr_status_t rv;
+
+ /*
+     * make sure the expiration of not-yet-accessed session
+     * cache entries is done only from time to time
+ */
+ tNow = time(NULL);
+ if (tNow < tLast+sc->session_cache_timeout)
+ return;
+ tLast = tNow;
+
+ /*
+     * Here we have to be very careful: not all DBM libraries are
+     * smart enough to allow one to iterate over the elements and at the
+     * same time delete expired ones. Some of them get totally crazy
+     * while others have no problems. So we have to do it the slower but
+     * safer way: we first iterate over all elements and remember
+     * those which have to be expired. Then in a second pass we delete
+     * all those expired elements. Additionally we reopen the DBM file
+     * between the passes, to be really sure of its state.
+ */
+
+#define KEYMAX 1024
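+/* KEYMAX bounds how many expired keys are remembered per scanning pass;
+ * if a pass fills the list, the scan-and-delete cycle below is repeated. */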
+
+ ssl_mutex_on(s);
+ for (;;) {
+ /* allocate the key array in a memory sub pool */
+ apr_pool_sub_make(&p, mc->pPool, NULL);
+ if (p == NULL)
+ break;
+ if ((keylist = apr_palloc(p, sizeof(dbmkey)*KEYMAX)) == NULL) {
+ apr_pool_destroy(p);
+ break;
+ }
+
+ /* pass 1: scan DBM database */
+ keyidx = 0;
+ if ((rv = apr_dbm_open(&dbm, mc->szSessionCacheDataFile,
+ APR_DBM_RWCREATE,SSL_DBM_FILE_MODE,
+ p)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Cannot open SSLSessionCache DBM file `%s' for "
+ "scanning",
+ mc->szSessionCacheDataFile);
+ apr_pool_destroy(p);
+ break;
+ }
+ apr_dbm_firstkey(dbm, &dbmkey);
+ while (dbmkey.dptr != NULL) {
+ nElements++;
+ bDelete = FALSE;
+ apr_dbm_fetch(dbm, dbmkey, &dbmval);
+ if (dbmval.dsize <= sizeof(time_t) || dbmval.dptr == NULL)
+ bDelete = TRUE;
+ else {
+ memcpy(&tExpiresAt, dbmval.dptr, sizeof(time_t));
+ if (tExpiresAt <= tNow)
+ bDelete = TRUE;
+ }
+ if (bDelete) {
+ if ((keylist[keyidx].dptr = apr_palloc(p, dbmkey.dsize)) != NULL) {
+ memcpy(keylist[keyidx].dptr, dbmkey.dptr, dbmkey.dsize);
+ keylist[keyidx].dsize = dbmkey.dsize;
+ keyidx++;
+ if (keyidx == KEYMAX)
+ break;
+ }
+ }
+ apr_dbm_nextkey(dbm, &dbmkey);
+ }
+ apr_dbm_close(dbm);
+
+ /* pass 2: delete expired elements */
+ if (apr_dbm_open(&dbm, mc->szSessionCacheDataFile,
+ APR_DBM_RWCREATE,SSL_DBM_FILE_MODE, p) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Cannot re-open SSLSessionCache DBM file `%s' for "
+ "expiring",
+ mc->szSessionCacheDataFile);
+ apr_pool_destroy(p);
+ break;
+ }
+ for (i = 0; i < keyidx; i++) {
+ apr_dbm_delete(dbm, keylist[i]);
+ nDeleted++;
+ }
+ apr_dbm_close(dbm);
+
+ /* destroy temporary pool */
+ apr_pool_destroy(p);
+
+ if (keyidx < KEYMAX)
+ break;
+ }
+ ssl_mutex_off(s);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "Inter-Process Session Cache (DBM) Expiry: "
+ "old: %d, new: %d, removed: %d",
+ nElements, nElements-nDeleted, nDeleted);
+ return;
+}
+
+void ssl_scache_dbm_status(server_rec *s, apr_pool_t *p, void (*func)(char *, void *), void *arg)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ apr_dbm_t *dbm;
+ apr_datum_t dbmkey;
+ apr_datum_t dbmval;
+ int nElem;
+ int nSize;
+ int nAverage;
+ apr_status_t rv;
+
+ nElem = 0;
+ nSize = 0;
+ ssl_mutex_on(s);
+ /*
+ * XXX - Check what pool is to be used - TBD
+ */
+ if ((rv = apr_dbm_open(&dbm, mc->szSessionCacheDataFile,
+ APR_DBM_RWCREATE, SSL_DBM_FILE_MODE,
+ mc->pPool)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Cannot open SSLSessionCache DBM file `%s' for status "
+                     "retrieval",
+ mc->szSessionCacheDataFile);
+ ssl_mutex_off(s);
+ return;
+ }
+ /*
+ * XXX - Check the return value of apr_dbm_firstkey, apr_dbm_fetch - TBD
+ */
+ apr_dbm_firstkey(dbm, &dbmkey);
+ for ( ; dbmkey.dptr != NULL; apr_dbm_nextkey(dbm, &dbmkey)) {
+ apr_dbm_fetch(dbm, dbmkey, &dbmval);
+ if (dbmval.dptr == NULL)
+ continue;
+ nElem += 1;
+ nSize += dbmval.dsize;
+ }
+ apr_dbm_close(dbm);
+ ssl_mutex_off(s);
+ if (nSize > 0 && nElem > 0)
+ nAverage = nSize / nElem;
+ else
+ nAverage = 0;
+ func(apr_psprintf(p, "cache type: <b>DBM</b>, maximum size: <b>unlimited</b><br>"), arg);
+ func(apr_psprintf(p, "current sessions: <b>%d</b>, current size: <b>%d</b> bytes<br>", nElem, nSize), arg);
+ func(apr_psprintf(p, "average session size: <b>%d</b> bytes<br>", nAverage), arg);
+ return;
+}
+
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache_shmcb.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache_shmcb.c
new file mode 100644
index 00000000..cee66bf5
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache_shmcb.c
@@ -0,0 +1,1362 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_scache_shmcb.c
+ * Session Cache via Shared Memory (Cyclic Buffer Variant)
+ */
+
+#include "mod_ssl.h"
+
+/*
+ * This shared memory based SSL session cache implementation was
+ * originally written by Geoff Thorpe <geoff@geoffthorpe.net> for C2Net
+ * Europe as a contribution to Ralf Engelschall's mod_ssl project.
+ */
+
+/*
+ * The shared-memory segment header can be cast to and from the
+ * SHMCBHeader type, all other structures need to be initialised by
+ * utility functions.
+ *
+ * The "header" looks like this;
+ *
+ * data applying to the overall structure:
+ * - division_offset (unsigned int):
+ * how far into the shared memory segment the first division is.
+ * - division_size (unsigned int):
+ * how many bytes each division occupies.
+ * (NB: This includes the queue and the cache)
+ * - division_mask (unsigned char):
+ * the "mask" in the next line. Add one to this,
+ * and that's the number of divisions.
+ *
+ * data applying to within each division:
+ * - queue_size (unsigned int):
+ * how big each "queue" is. NB: The queue is the first block in each
+ *   division and is followed immediately by the cache itself, so
+ * there's no cache_offset value.
+ *
+ * data applying to within each queue:
+ * - index_num (unsigned char):
+ * how many indexes in each cache's queue
+ * - index_offset (unsigned char):
+ * how far into the queue the first index is.
+ * - index_size:
+ * how big each index is.
+ *
+ * data applying to within each cache:
+ * - cache_data_offset (unsigned int):
+ * how far into the cache the session-data array is stored.
+ * - cache_data_size (unsigned int):
+ * how big each cache's data block is.
+ *
+ * statistics data (this will eventually be per-division but right now
+ * there's only one mutex):
+ * - stores (unsigned long):
+ * how many stores have been performed in the cache.
+ * - expiries (unsigned long):
+ *   how many sessions have been expired from the cache.
+ * - scrolled (unsigned long):
+ * how many sessions have been scrolled out of full cache during a
+ *   "store" operation. This is different from the "removes" stats: those
+ *   are requested by mod_ssl/Apache, whereas these happen because of
+ * cache logistics. (NB: Also, this value should be deducible from
+ * the others if my code has no bugs, but I count it anyway - plus
+ * it helps debugging :-).
+ * - retrieves_hit (unsigned long):
+ * how many session-retrieves have succeeded.
+ * - retrieves_miss (unsigned long):
+ * how many session-retrieves have failed.
+ * - removes_hit (unsigned long):
+ * - removes_miss (unsigned long):
+ *
+ * Following immediately after the header is an array of "divisions".
+ * Each division is simply a "queue" immediately followed by its
+ * corresponding "cache". Each division handles some pre-defined band
+ * of sessions by using the "division_mask" in the header. Eg. if
+ * division_mask=0x1f then there are 32 divisions, the first of which
+ * will store sessions whose least-significant 5 bits are 0, the second
+ * stores sessions whose LS 5 bits equal 1, etc. A queue is an indexing
+ * structure referring to its corresponding cache.
+ *
+ * A "queue" looks like this;
+ *
+ * - first_pos (unsigned int):
+ * the location within the array of indexes where the virtual
+ * "left-hand-edge" of the cyclic buffer is.
+ * - pos_count (unsigned int):
+ * the number of indexes occupied from first_pos onwards.
+ *
+ * ...followed by an array of indexes, each of which can be
+ * memcpy'd to and from an SHMCBIndex, and look like this;
+ *
+ * - expires (time_t):
+ * the time() value at which this session expires.
+ * - offset (unsigned int):
+ * the offset within the cache data block where the corresponding
+ * session is stored.
+ * - s_id2 (unsigned char):
+ * the second byte of the session_id, stored as an optimisation to
+ * reduce the number of d2i_SSL_SESSION calls that are made when doing
+ * a lookup.
+ * - removed (unsigned char):
+ * a byte used to indicate whether a session has been "passively"
+ * removed. Ie. it is still in the cache but is to be disregarded by
+ * any "retrieve" operation.
+ *
+ * A "cache" looks like this;
+ *
+ * - first_pos (unsigned int):
+ * the location within the data block where the virtual
+ * "left-hand-edge" of the cyclic buffer is.
+ * - pos_count (unsigned int):
+ * the number of bytes used in the data block from first_pos onwards.
+ *
+ * ...followed by the data block in which actual DER-encoded SSL
+ * sessions are stored.
+ */
+
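+/*
+ * Illustrative sketch of the layout described above (not normative; all
+ * offsets and sizes are computed at runtime in shmcb_init_memory()):
+ *
+ *   [SHMCBHeader][division 0][division 1] ... [division division_mask]
+ *   division = [queue][cache]
+ *   queue    = [first_pos][pos_count][index 0] ... [index index_num - 1]
+ *   cache    = [first_pos][pos_count][cache_data_size bytes of session data]
+ */
+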
+/*
+ * Header - can be memcpy'd to and from the front of the shared
+ * memory segment. NB: The first copy (commented out) has the
+ * elements in a meaningful order, but due to data-alignment
+ * braindeadness, the second (uncommented) copy has the types grouped
+ * so as to decrease "struct-bloat". sigh.
+ */
+typedef struct {
+ unsigned long num_stores;
+ unsigned long num_expiries;
+ unsigned long num_scrolled;
+ unsigned long num_retrieves_hit;
+ unsigned long num_retrieves_miss;
+ unsigned long num_removes_hit;
+ unsigned long num_removes_miss;
+ unsigned int division_offset;
+ unsigned int division_size;
+ unsigned int queue_size;
+ unsigned int cache_data_offset;
+ unsigned int cache_data_size;
+ unsigned char division_mask;
+ unsigned int index_num;
+ unsigned int index_offset;
+ unsigned int index_size;
+} SHMCBHeader;
+
+/*
+ * Index - can be memcpy'd to and from an index inside each
+ * queue's index array.
+ */
+typedef struct {
+ time_t expires;
+ unsigned int offset;
+ unsigned char s_id2;
+ unsigned char removed;
+} SHMCBIndex;
+
+/*
+ * Queue - must be populated by a call to shmcb_get_division
+ * and the structure's pointers are used for updating (ie.
+ * the structure doesn't need any "set" to update values).
+ */
+typedef struct {
+ SHMCBHeader *header;
+ unsigned int *first_pos;
+ unsigned int *pos_count;
+ SHMCBIndex *indexes;
+} SHMCBQueue;
+
+/*
+ * Cache - same comment as for Queue. 'Queue's are in a 1-1
+ * correspondence with 'Cache's and are usually carried around
+ * in a pair; they are only separated for clarity.
+ */
+typedef struct {
+ SHMCBHeader *header;
+ unsigned int *first_pos;
+ unsigned int *pos_count;
+ unsigned char *data;
+} SHMCBCache;
+
+/*
+ * Forward function prototypes.
+ */
+
+/* Functions for working around data-alignment-picky systems (sparcs,
+ Irix, etc). These use "memcpy" as a way of foxing these systems into
+ treating the composite types as byte-arrays rather than higher-level
+   primitives that they prefer to have 4-(or 8-)byte aligned. I don't
+   envisage this being a performance issue as a couple of 2 or 4 byte
+   memcpys can hardly make a dent in the massive memmove operations this
+ cache technique avoids, nor the overheads of ASN en/decoding. */
+static unsigned int shmcb_get_safe_uint(unsigned int *);
+static void shmcb_set_safe_uint_ex(unsigned char *, const unsigned char *);
+#define shmcb_set_safe_uint(pdest, src) \
+ do { \
+ unsigned int tmp_uint = src; \
+ shmcb_set_safe_uint_ex((unsigned char *)pdest, \
+ (const unsigned char *)(&tmp_uint)); \
+ } while(0)
+#if 0 /* Unused so far */
+static unsigned long shmcb_get_safe_ulong(unsigned long *);
+static void shmcb_set_safe_ulong_ex(unsigned char *, const unsigned char *);
+#define shmcb_set_safe_ulong(pdest, src) \
+ do { \
+ unsigned long tmp_ulong = src; \
+ shmcb_set_safe_ulong_ex((unsigned char *)pdest, \
+ (const unsigned char *)(&tmp_ulong)); \
+ } while(0)
+#endif
+static time_t shmcb_get_safe_time(time_t *);
+static void shmcb_set_safe_time_ex(unsigned char *, const unsigned char *);
+#define shmcb_set_safe_time(pdest, src) \
+ do { \
+ time_t tmp_time = src; \
+ shmcb_set_safe_time_ex((unsigned char *)pdest, \
+ (const unsigned char *)(&tmp_time)); \
+ } while(0)
+
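+/* The _ex() helpers work on plain byte pointers so the copy stays a real
+ * memcpy(); the wrapper macros above first copy the source value into a
+ * local temporary so that callers may pass arbitrary expressions as "src". */
+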
+/* This is necessary simply so that the size passed to memset() is not a
+ * compile-time constant, preventing the compiler from optimising it. */
+static void shmcb_safe_clear(void *ptr, size_t size)
+{
+ memset(ptr, 0, size);
+}
+
+/* Underlying functions for session-caching */
+static BOOL shmcb_init_memory(server_rec *, void *, unsigned int);
+static BOOL shmcb_store_session(server_rec *, void *, UCHAR *, int, SSL_SESSION *, time_t);
+static SSL_SESSION *shmcb_retrieve_session(server_rec *, void *, UCHAR *, int);
+static BOOL shmcb_remove_session(server_rec *, void *, UCHAR *, int);
+
+/* Utility functions for manipulating the structures */
+static void shmcb_get_header(void *, SHMCBHeader **);
+static BOOL shmcb_get_division(SHMCBHeader *, SHMCBQueue *, SHMCBCache *, unsigned int);
+static SHMCBIndex *shmcb_get_index(const SHMCBQueue *, unsigned int);
+static unsigned int shmcb_expire_division(server_rec *, SHMCBQueue *, SHMCBCache *);
+static BOOL shmcb_insert_encoded_session(server_rec *, SHMCBQueue *, SHMCBCache *, unsigned char *, unsigned int, unsigned char *, time_t);
+static SSL_SESSION *shmcb_lookup_session_id(server_rec *, SHMCBQueue *, SHMCBCache *, UCHAR *, unsigned int);
+static BOOL shmcb_remove_session_id(server_rec *, SHMCBQueue *, SHMCBCache *, UCHAR *, unsigned int);
+
+/*
+ * Data-alignment functions (a.k.a. avoidance tactics)
+ *
+ * NB: On HPUX (and possibly others) there is a *very* mischievous little
+ * "optimisation" in the compilers where it will convert the following;
+ * memcpy(dest_ptr, &source, sizeof(unsigned int));
+ * (where dest_ptr is of type (unsigned int *) and source is (unsigned int))
+ * into;
+ * *dest_ptr = source; (or *dest_ptr = *(&source), not sure).
+ * Either way, it completely destroys the whole point of these _safe_
+ * functions, because the assignment operation will fall victim to the
+ * architecture's byte-alignment dictations, whereas the memcpy (as a
+ * byte-by-byte copy) should not. sigh. So, if you're wondering about the
+ * apparently unnecessary conversions to (unsigned char *) in these
+ * functions, you now have an explanation. Don't just revert them back and
+ * say "ooh look, it still works" - if you try it on HPUX (well, 32-bit
+ * HPUX 11.00 at least) you may find it fails with a SIGBUS. :-(
+ */
+
+static unsigned int shmcb_get_safe_uint(unsigned int *ptr)
+{
+ unsigned int ret;
+ shmcb_set_safe_uint_ex((unsigned char *)(&ret),
+ (const unsigned char *)ptr);
+ return ret;
+}
+
+static void shmcb_set_safe_uint_ex(unsigned char *dest,
+ const unsigned char *src)
+{
+ memcpy(dest, src, sizeof(unsigned int));
+}
+
+#if 0 /* Unused so far */
+static unsigned long shmcb_get_safe_ulong(unsigned long *ptr)
+{
+ unsigned long ret;
+ shmcb_set_safe_ulong_ex((unsigned char *)(&ret),
+ (const unsigned char *)ptr);
+ return ret;
+}
+
+static void shmcb_set_safe_ulong_ex(unsigned char *dest,
+ const unsigned char *src)
+{
+ memcpy(dest, src, sizeof(unsigned long));
+}
+#endif
+
+static time_t shmcb_get_safe_time(time_t * ptr)
+{
+ time_t ret;
+ shmcb_set_safe_time_ex((unsigned char *)(&ret),
+ (const unsigned char *)ptr);
+ return ret;
+}
+
+static void shmcb_set_safe_time_ex(unsigned char *dest,
+ const unsigned char *src)
+{
+ memcpy(dest, src, sizeof(time_t));
+}
+/*
+**
+** High-Level "handlers" as per ssl_scache.c
+**
+*/
+
+void ssl_scache_shmcb_init(server_rec *s, apr_pool_t *p)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ void *shm_segment;
+ apr_size_t shm_segsize;
+ apr_status_t rv;
+
+ /*
+ * Create shared memory segment
+ */
+ if (mc->szSessionCacheDataFile == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "SSLSessionCache required");
+ ssl_die();
+ }
+
+ /* Use anonymous shm by default, fall back on name-based. */
+ rv = apr_shm_create(&(mc->pSessionCacheDataMM),
+ mc->nSessionCacheDataSize,
+ NULL, mc->pPool);
+
+ if (APR_STATUS_IS_ENOTIMPL(rv)) {
+ rv = apr_shm_create(&(mc->pSessionCacheDataMM),
+ mc->nSessionCacheDataSize,
+ mc->szSessionCacheDataFile,
+ mc->pPool);
+ }
+
+ if (rv != APR_SUCCESS) {
+ char buf[100];
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Cannot allocate shared memory: (%d)%s", rv,
+ apr_strerror(rv, buf, sizeof(buf)));
+ ssl_die();
+ }
+ shm_segment = apr_shm_baseaddr_get(mc->pSessionCacheDataMM);
+ shm_segsize = apr_shm_size_get(mc->pSessionCacheDataMM);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "shmcb_init allocated %" APR_SIZE_T_FMT
+ " bytes of shared memory",
+ shm_segsize);
+ if (!shmcb_init_memory(s, shm_segment, shm_segsize)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Failure initialising 'shmcb' shared memory");
+ ssl_die();
+ }
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Shared memory session cache initialised");
+
+ /*
+ * Success ... we hack the memory block into place by cheating for
+ * now and stealing a member variable the original shared memory
+ * cache was using. :-)
+ */
+ mc->tSessionCacheDataTable = (table_t *) shm_segment;
+ return;
+}
+
+void ssl_scache_shmcb_kill(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+
+ if (mc->pSessionCacheDataMM != NULL) {
+ apr_shm_destroy(mc->pSessionCacheDataMM);
+ mc->pSessionCacheDataMM = NULL;
+ }
+ return;
+}
+
+BOOL ssl_scache_shmcb_store(server_rec *s, UCHAR *id, int idlen,
+ time_t timeout, SSL_SESSION * pSession)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ void *shm_segment;
+ BOOL to_return = FALSE;
+
+ /* We've kludged our pointer into the other cache's member variable. */
+ shm_segment = (void *) mc->tSessionCacheDataTable;
+ ssl_mutex_on(s);
+ if (!shmcb_store_session(s, shm_segment, id, idlen, pSession, timeout))
+ /* in this cache engine, "stores" should never fail. */
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "'shmcb' code was unable to store a "
+ "session in the cache.");
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "shmcb_store successful");
+ to_return = TRUE;
+ }
+ ssl_mutex_off(s);
+ return to_return;
+}
+
+SSL_SESSION *ssl_scache_shmcb_retrieve(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ void *shm_segment;
+ SSL_SESSION *pSession;
+
+ /* We've kludged our pointer into the other cache's member variable. */
+ shm_segment = (void *) mc->tSessionCacheDataTable;
+ ssl_mutex_on(s);
+ pSession = shmcb_retrieve_session(s, shm_segment, id, idlen);
+ ssl_mutex_off(s);
+ if (pSession)
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "shmcb_retrieve had a hit");
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "shmcb_retrieve had a miss");
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Client requested a 'session-resume' but "
+ "we have no such session.");
+ }
+ return pSession;
+}
+
+void ssl_scache_shmcb_remove(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ void *shm_segment;
+
+ /* We've kludged our pointer into the other cache's member variable. */
+ shm_segment = (void *) mc->tSessionCacheDataTable;
+ ssl_mutex_on(s);
+ shmcb_remove_session(s, shm_segment, id, idlen);
+ ssl_mutex_off(s);
+}
+
+void ssl_scache_shmcb_expire(server_rec *s)
+{
+ /* NOP */
+ return;
+}
+
+void ssl_scache_shmcb_status(server_rec *s, apr_pool_t *p,
+ void (*func) (char *, void *), void *arg)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ SHMCBHeader *header;
+ SHMCBQueue queue;
+ SHMCBCache cache;
+ SHMCBIndex *idx;
+ void *shm_segment;
+ unsigned int loop, total, cache_total, non_empty_divisions;
+ int index_pct, cache_pct;
+ double expiry_total;
+ time_t average_expiry, now, max_expiry, min_expiry, idxexpiry;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "inside ssl_scache_shmcb_status");
+
+ /* We've kludged our pointer into the other cache's member variable. */
+ shm_segment = (void *) mc->tSessionCacheDataTable;
+
+ /* Get the header structure. */
+ shmcb_get_header(shm_segment, &header);
+ total = cache_total = non_empty_divisions = 0;
+ average_expiry = max_expiry = min_expiry = 0;
+ expiry_total = 0;
+
+    /* It may seem strange to grab "now" at this point, but in theory
+     * we should never have a negative threshold, and grabbing "now" after
+     * the loop (which performs expiries) could allow that chance. */
+ now = time(NULL);
+ for (loop = 0; loop <= header->division_mask; loop++) {
+ if (shmcb_get_division(header, &queue, &cache, loop)) {
+ shmcb_expire_division(s, &queue, &cache);
+ total += shmcb_get_safe_uint(queue.pos_count);
+ cache_total += shmcb_get_safe_uint(cache.pos_count);
+ if (shmcb_get_safe_uint(queue.pos_count) > 0) {
+ idx = shmcb_get_index(&queue,
+ shmcb_get_safe_uint(queue.first_pos));
+ non_empty_divisions++;
+ idxexpiry = shmcb_get_safe_time(&(idx->expires));
+ expiry_total += (double) idxexpiry;
+ max_expiry = (idxexpiry > max_expiry ? idxexpiry :
+ max_expiry);
+ if (min_expiry == 0)
+ min_expiry = idxexpiry;
+ else
+ min_expiry = (idxexpiry < min_expiry ? idxexpiry :
+ min_expiry);
+ }
+ }
+ }
+ index_pct = (100 * total) / (header->index_num * (header->division_mask + 1));
+ cache_pct = (100 * cache_total) / (header->cache_data_size * (header->division_mask + 1));
+ func(apr_psprintf(p, "cache type: <b>SHMCB</b>, shared memory: <b>%d</b> "
+ "bytes, current sessions: <b>%d</b><br>",
+ mc->nSessionCacheDataSize, total), arg);
+ func(apr_psprintf(p, "sub-caches: <b>%d</b>, indexes per sub-cache: "
+ "<b>%d</b><br>", (int) header->division_mask + 1,
+ (int) header->index_num), arg);
+ if (non_empty_divisions != 0) {
+ average_expiry = (time_t)(expiry_total / (double)non_empty_divisions);
+ func(apr_psprintf(p, "time left on oldest entries' SSL sessions: "), arg);
+ if (now < average_expiry)
+ func(apr_psprintf(p, "avg: <b>%d</b> seconds, (range: %d...%d)<br>",
+ (int)(average_expiry - now), (int) (min_expiry - now),
+ (int)(max_expiry - now)), arg);
+ else
+ func(apr_psprintf(p, "expiry threshold: <b>Calculation Error!</b>"
+ "<br>"), arg);
+
+ }
+ func(apr_psprintf(p, "index usage: <b>%d%%</b>, cache usage: <b>%d%%</b>"
+ "<br>", index_pct, cache_pct), arg);
+ func(apr_psprintf(p, "total sessions stored since starting: <b>%lu</b><br>",
+ header->num_stores), arg);
+ func(apr_psprintf(p,"total sessions expired since starting: <b>%lu</b><br>",
+ header->num_expiries), arg);
+ func(apr_psprintf(p, "total (pre-expiry) sessions scrolled out of the "
+ "cache: <b>%lu</b><br>", header->num_scrolled), arg);
+ func(apr_psprintf(p, "total retrieves since starting: <b>%lu</b> hit, "
+ "<b>%lu</b> miss<br>", header->num_retrieves_hit,
+ header->num_retrieves_miss), arg);
+ func(apr_psprintf(p, "total removes since starting: <b>%lu</b> hit, "
+ "<b>%lu</b> miss<br>", header->num_removes_hit,
+ header->num_removes_miss), arg);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "leaving shmcb_status");
+ return;
+}
+
+/*
+**
+** Memory manipulation and low-level cache operations
+**
+*/
+
+static BOOL shmcb_init_memory(
+ server_rec *s, void *shm_mem,
+ unsigned int shm_mem_size)
+{
+ SHMCBHeader *header;
+ SHMCBQueue queue;
+ SHMCBCache cache;
+ unsigned int temp, loop, granularity;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "entered shmcb_init_memory()");
+
+ /* Calculate some sizes... */
+ temp = sizeof(SHMCBHeader);
+
+ /* If the segment is ridiculously too small, bail out */
+ if (shm_mem_size < (2*temp)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "shared memory segment too small");
+ return FALSE;
+ }
+
+ /* Make temp the amount of memory without the header */
+ temp = shm_mem_size - temp;
+
+    /* Work on the basis that you need 10 bytes of index for each session
+     * (approx 150 bytes), which is to divide temp by 160 - and then
+     * make sure we err on the side of having index space to burn even when
+     * the cache is full, which is a lot less stupid than not having
+     * enough index space to utilise the whole cache! */
+ temp /= 120;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "for %u bytes, recommending %u indexes",
+ shm_mem_size, temp);
+
+ /* We should divide these indexes evenly amongst the queues. Try
+ * to get it so that there are roughly half the number of divisions
+ * as there are indexes in each division. */
+ granularity = 256;
+ while ((temp / granularity) < (2 * granularity))
+ granularity /= 2;
+
+ /* So we have 'granularity' divisions, set 'temp' equal to the
+ * number of indexes in each division. */
+ temp /= granularity;
+
+ /* Too small? Bail ... */
+ if (temp < 5) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "shared memory segment too small");
+ return FALSE;
+ }
+
+ /* OK, we're sorted - from here on in, the return should be TRUE */
+ header = (SHMCBHeader *)shm_mem;
+ header->division_mask = (unsigned char)(granularity - 1);
+ header->division_offset = sizeof(SHMCBHeader);
+ header->index_num = temp;
+ header->index_offset = (2 * sizeof(unsigned int));
+ header->index_size = sizeof(SHMCBIndex);
+ header->queue_size = header->index_offset +
+ (header->index_num * header->index_size);
+
+ /* Now calculate the space for each division */
+ temp = shm_mem_size - header->division_offset;
+ header->division_size = temp / granularity;
+
+ /* Calculate the space left in each division for the cache */
+ temp -= header->queue_size;
+ header->cache_data_offset = (2 * sizeof(unsigned int));
+ header->cache_data_size = header->division_size -
+ header->queue_size - header->cache_data_offset;
+
+ /* Output trace info */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "shmcb_init_memory choices follow");
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "division_mask = 0x%02X", header->division_mask);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "division_offset = %u", header->division_offset);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "division_size = %u", header->division_size);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "queue_size = %u", header->queue_size);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "index_num = %u", header->index_num);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "index_offset = %u", header->index_offset);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "index_size = %u", header->index_size);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "cache_data_offset = %u", header->cache_data_offset);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "cache_data_size = %u", header->cache_data_size);
+
+ /* The header is done, make the caches empty */
+ for (loop = 0; loop < granularity; loop++) {
+ if (!shmcb_get_division(header, &queue, &cache, loop))
+            ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+                         "shmcb_init_memory, internal error");
+ shmcb_set_safe_uint(cache.first_pos, 0);
+ shmcb_set_safe_uint(cache.pos_count, 0);
+ shmcb_set_safe_uint(queue.first_pos, 0);
+ shmcb_set_safe_uint(queue.pos_count, 0);
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "leaving shmcb_init_memory()");
+ return TRUE;
+}
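+
+/* For reference, an illustrative sketch (not to scale) of the layout that
+ * shmcb_init_memory() establishes inside the shared memory segment, as
+ * implied by the offsets and sizes calculated above:
+ *
+ *   [SHMCBHeader]
+ *   [division 0: [queue: first_pos|pos_count|index_num x SHMCBIndex]
+ *                [cache: first_pos|pos_count|cache_data_size bytes ]]
+ *   [division 1: ...]
+ *   ...
+ *   [division (division_mask): ...]
+ *
+ * Each division starts at division_offset + (idx * division_size); its
+ * queue occupies the first queue_size bytes and its cache the rest. */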
+
+static BOOL shmcb_store_session(
+ server_rec *s, void *shm_segment, UCHAR *id,
+ int idlen, SSL_SESSION * pSession,
+ time_t timeout)
+{
+ SHMCBHeader *header;
+ SHMCBQueue queue;
+ SHMCBCache cache;
+ unsigned char masked_index;
+ unsigned char encoded[SSL_SESSION_MAX_DER];
+ unsigned char *ptr_encoded;
+ unsigned int len_encoded;
+ time_t expiry_time;
+ unsigned char *session_id = SSL_SESSION_get_session_id(pSession);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "inside shmcb_store_session");
+
+ /* Get the header structure, which division this session will fall into etc. */
+ shmcb_get_header(shm_segment, &header);
+ masked_index = session_id[0] & header->division_mask;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "session_id[0]=%u, masked index=%u",
+ session_id[0], masked_index);
+ if (!shmcb_get_division(header, &queue, &cache, (unsigned int)masked_index)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "shmcb_store_session internal error");
+ return FALSE;
+ }
+
+    /* Serialise the session and work out how much we're dealing
+ * with. NB: This check could be removed if we're not paranoid
+ * or we find some assurance that it will never be necessary. */
+ len_encoded = i2d_SSL_SESSION(pSession, NULL);
+ if (len_encoded > SSL_SESSION_MAX_DER) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "session is too big (%u bytes)", len_encoded);
+ return FALSE;
+ }
+ ptr_encoded = encoded;
+ len_encoded = i2d_SSL_SESSION(pSession, &ptr_encoded);
+ expiry_time = timeout;
+ if (!shmcb_insert_encoded_session(s, &queue, &cache, encoded,
+ len_encoded, session_id,
+ expiry_time)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "can't store a session!");
+ return FALSE;
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "leaving shmcb_store successfully");
+ header->num_stores++;
+ return TRUE;
+}
+
+static SSL_SESSION *shmcb_retrieve_session(
+ server_rec *s, void *shm_segment,
+ UCHAR *id, int idlen)
+{
+ SHMCBHeader *header;
+ SHMCBQueue queue;
+ SHMCBCache cache;
+ unsigned char masked_index;
+ SSL_SESSION *pSession;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "inside shmcb_retrieve_session");
+    if (idlen < 2) {
+        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+                     "unusably short session_id provided (%d bytes)", idlen);
+        return NULL;
+    }
+
+ /* Get the header structure, which division this session lookup
+ * will come from etc. */
+ shmcb_get_header(shm_segment, &header);
+ masked_index = id[0] & header->division_mask;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "id[0]=%u, masked index=%u", id[0], masked_index);
+ if (!shmcb_get_division(header, &queue, &cache, (unsigned int) masked_index)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "shmcb_retrieve_session internal error");
+ header->num_retrieves_miss++;
+        return NULL;
+ }
+
+ /* Get the session corresponding to the session_id or NULL if it
+ * doesn't exist (or is flagged as "removed"). */
+ pSession = shmcb_lookup_session_id(s, &queue, &cache, id, idlen);
+ if (pSession)
+ header->num_retrieves_hit++;
+ else
+ header->num_retrieves_miss++;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "leaving shmcb_retrieve_session");
+ return pSession;
+}
+
+static BOOL shmcb_remove_session(
+ server_rec *s, void *shm_segment,
+ UCHAR *id, int idlen)
+{
+ SHMCBHeader *header;
+ SHMCBQueue queue;
+ SHMCBCache cache;
+ unsigned char masked_index;
+ BOOL res;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "inside shmcb_remove_session");
+ if (id == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "remove called with NULL session_id!");
+ return FALSE;
+ }
+
+ /* Get the header structure, which division this session remove
+ * will happen in etc. */
+ shmcb_get_header(shm_segment, &header);
+ masked_index = id[0] & header->division_mask;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "id[0]=%u, masked index=%u", id[0], masked_index);
+ if (!shmcb_get_division(header, &queue, &cache, (unsigned int)masked_index)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "shmcb_remove_session, internal error");
+ header->num_removes_miss++;
+ return FALSE;
+ }
+ res = shmcb_remove_session_id(s, &queue, &cache, id, idlen);
+ if (res)
+ header->num_removes_hit++;
+ else
+ header->num_removes_miss++;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "leaving shmcb_remove_session");
+ return res;
+}
+
+
+/*
+**
+** Weirdo cyclic buffer functions
+**
+*/
+
+/* This gets used in the cyclic "index array" (in the 'Queue's) and
+ * in the cyclic 'Cache's too ... you provide the "width" of the
+ * cyclic store, the starting position and how far to move (with
+ * wrapping if necessary). Basically it's addition modulo buf_size. */
+static unsigned int shmcb_cyclic_increment(
+ unsigned int buf_size,
+ unsigned int start_pos,
+ unsigned int to_add)
+{
+ start_pos += to_add;
+ while (start_pos >= buf_size)
+ start_pos -= buf_size;
+ return start_pos;
+}
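+
+/* For example (illustrative values only): with buf_size = 10,
+ * shmcb_cyclic_increment(10, 8, 5) yields (8 + 5) - 10 = 3, i.e. the
+ * position wraps past the end of the buffer back to the start. */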
+
+/* Given two positions in a cyclic buffer, calculate the "distance".
+ * This is to cover the ("non-trivial") case where the 'next' offset
+ * is to the left of the 'start' offset. NB: This calculates the
+ * space inclusive of one end-point but not the other. There is an
+ * ambiguous case (which is why we use the <start_pos,offset>
+ * coordinate system rather than a <start_pos,end_pos> one) when
+ * 'start' is the same as 'next': it could indicate the buffer is full
+ * or that it is empty. I choose the latter, as it's easier and the
+ * caller usually has to check whether the buffer is full anyway
+ * before doing the incremental logic this is useful for - and we
+ * definitely need the empty case handled, since in fact it's our
+ * starting state! */
+static unsigned int shmcb_cyclic_space(
+ unsigned int buf_size,
+ unsigned int start_offset,
+ unsigned int next_offset)
+{
+ /* Is it the trivial case? */
+ if (start_offset <= next_offset)
+ return (next_offset - start_offset); /* yes */
+ else
+ return ((buf_size - start_offset) + next_offset); /* no */
+}
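+
+/* For example (illustrative values only): with buf_size = 10,
+ * shmcb_cyclic_space(10, 3, 7) = 4 (trivial case), while
+ * shmcb_cyclic_space(10, 7, 3) = (10 - 7) + 3 = 6 (wrapped case), and
+ * shmcb_cyclic_space(10, 5, 5) = 0, i.e. the "empty" interpretation. */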
+
+/* A "normal-to-cyclic" memcpy ... this takes a linear block of
+ * memory and copies it onto a cyclic buffer. The purpose and
+ * function of this is pretty obvious, you need to cover the case
+ * that the destination (cyclic) buffer has to wrap round. */
+static void shmcb_cyclic_ntoc_memcpy(
+ unsigned int buf_size,
+ unsigned char *data,
+ unsigned int dest_offset,
+ unsigned char *src, unsigned int src_len)
+{
+ /* Cover the case that src_len > buf_size */
+ if (src_len > buf_size)
+ src_len = buf_size;
+
+ /* Can it be copied all in one go? */
+ if (dest_offset + src_len < buf_size)
+ /* yes */
+ memcpy(data + dest_offset, src, src_len);
+ else {
+ /* no */
+ memcpy(data + dest_offset, src, buf_size - dest_offset);
+ memcpy(data, src + buf_size - dest_offset,
+ src_len + dest_offset - buf_size);
+ }
+ return;
+}
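+
+/* For example (illustrative values only): with buf_size = 10 and
+ * dest_offset = 8, copying 5 bytes writes data[8..9] from src[0..1]
+ * and data[0..2] from src[2..4], i.e. the copy wraps to the start. */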
+
+/* A "cyclic-to-normal" memcpy ... given the last function, this
+ * one's purpose is clear, it copies out of a cyclic buffer handling
+ * wrapping. */
+static void shmcb_cyclic_cton_memcpy(
+ unsigned int buf_size,
+ unsigned char *dest,
+ unsigned char *data,
+ unsigned int src_offset,
+ unsigned int src_len)
+{
+ /* Cover the case that src_len > buf_size */
+ if (src_len > buf_size)
+ src_len = buf_size;
+
+ /* Can it be copied all in one go? */
+ if (src_offset + src_len < buf_size)
+ /* yes */
+ memcpy(dest, data + src_offset, src_len);
+ else {
+ /* no */
+ memcpy(dest, data + src_offset, buf_size - src_offset);
+ memcpy(dest + buf_size - src_offset, data,
+ src_len + src_offset - buf_size);
+ }
+ return;
+}
+
+/* Here's the cool hack that makes it all work ... by simply
+ * making the first collection of bytes *be* our header structure
+ * (casting it into the C structure), we have the perfect way to
+ * maintain state in a shared-memory session cache from one call
+ * (and process) to the next: use the shared memory itself! The
+ * original mod_ssl shared-memory session cache uses variables
+ * inside the context, but we simply use that for storing the
+ * pointer to the shared memory itself. And don't forget, after
+ * Apache's initialisation, this "header" is constant/read-only
+ * so we can read it outside any locking.
+ * <grin> - sometimes I just *love* coding y'know?! */
+static void shmcb_get_header(void *shm_mem, SHMCBHeader **header)
+{
+ *header = (SHMCBHeader *)shm_mem;
+ return;
+}
+
+/* This is what populates our "interesting" structures. Given a
+ * pointer to the header, and an index into the appropriate
+ * division (this must have already been masked using the
+ * division_mask by the caller!), we can populate the provided
+ * SHMCBQueue and SHMCBCache structures with values and
+ * pointers to the underlying shared memory. Upon returning
+ * (if not FALSE), the caller can meddle with the pointer
+ * values and they will map into the shared memory directly;
+ * as such there's no need to "free" or "set" the Queue or
+ * Cache values, as they are themselves references to the *real*
+ * data. */
+static BOOL shmcb_get_division(
+ SHMCBHeader *header, SHMCBQueue *queue,
+ SHMCBCache *cache, unsigned int idx)
+{
+ unsigned char *pQueue;
+ unsigned char *pCache;
+
+ /* bounds check */
+ if (idx > (unsigned int) header->division_mask)
+ return FALSE;
+
+ /* Locate the blocks of memory storing the corresponding data */
+ pQueue = ((unsigned char *) header) + header->division_offset +
+ (idx * header->division_size);
+ pCache = pQueue + header->queue_size;
+
+ /* Populate the structures with appropriate pointers */
+ queue->first_pos = (unsigned int *) pQueue;
+
+ /* Our structures stay packed, no matter what the system's
+ * data-alignment regime is. */
+ queue->pos_count = (unsigned int *) (pQueue + sizeof(unsigned int));
+ queue->indexes = (SHMCBIndex *) (pQueue + (2 * sizeof(unsigned int)));
+ cache->first_pos = (unsigned int *) pCache;
+ cache->pos_count = (unsigned int *) (pCache + sizeof(unsigned int));
+ cache->data = (unsigned char *) (pCache + (2 * sizeof(unsigned int)));
+ queue->header = cache->header = header;
+
+ return TRUE;
+}
+
+/* This returns a pointer to the piece of shared memory containing
+ * a specified 'Index'. SHMCBIndex, like SHMCBHeader, is a fixed
+ * width non-referencing structure of primitive types that can be
+ * cast onto the corresponding block of shared memory. Thus, by
+ * returning a cast pointer to that section of shared memory, the
+ * caller can read and write values to and from the "structure" and
+ * they are actually reading and writing the underlying shared
+ * memory. */
+static SHMCBIndex *shmcb_get_index(
+ const SHMCBQueue *queue, unsigned int idx)
+{
+ /* bounds check */
+    if (idx >= queue->header->index_num)
+ return NULL;
+
+    /* Return a pointer to the index. NB: I am being horribly pedantic
+ * here so as to avoid any potential data-alignment assumptions being
+ * placed on the pointer arithmetic by the compiler (sigh). */
+ return (SHMCBIndex *)(((unsigned char *) queue->indexes) +
+ (idx * sizeof(SHMCBIndex)));
+}
+
+/* This function rolls expired cache (and index) entries off the front
+ * of the cyclic buffers in a division. The function returns the number
+ * of expired sessions. */
+static unsigned int shmcb_expire_division(
+ server_rec *s, SHMCBQueue *queue, SHMCBCache *cache)
+{
+ SHMCBIndex *idx;
+ time_t now;
+ unsigned int loop, index_num, pos_count, new_pos;
+ SHMCBHeader *header;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "entering shmcb_expire_division");
+
+ /* We must calculate num and space ourselves based on expiry times. */
+ now = time(NULL);
+ loop = 0;
+ new_pos = shmcb_get_safe_uint(queue->first_pos);
+
+ /* Cache useful values */
+ header = queue->header;
+ index_num = header->index_num;
+ pos_count = shmcb_get_safe_uint(queue->pos_count);
+ while (loop < pos_count) {
+ idx = shmcb_get_index(queue, new_pos);
+ if (shmcb_get_safe_time(&(idx->expires)) > now)
+ /* it hasn't expired yet, we're done iterating */
+ break;
+ /* This one should be expired too. Shift to the next entry. */
+ loop++;
+ new_pos = shmcb_cyclic_increment(index_num, new_pos, 1);
+ }
+
+ /* Find the new_offset and make the expiries happen. */
+ if (loop > 0) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "will be expiring %u sessions", loop);
+ /* We calculate the new_offset by "peeking" (or in the
+ * case it's the last entry, "sneaking" ;-). */
+ if (loop == pos_count) {
+ /* We are expiring everything! This is easy to do... */
+ shmcb_set_safe_uint(queue->pos_count, 0);
+ shmcb_set_safe_uint(cache->pos_count, 0);
+ }
+ else {
+ /* The Queue is easy to adjust */
+ shmcb_set_safe_uint(queue->pos_count,
+ shmcb_get_safe_uint(queue->pos_count) - loop);
+ shmcb_set_safe_uint(queue->first_pos, new_pos);
+ /* peek to the start of the next session */
+ idx = shmcb_get_index(queue, new_pos);
+            /* We can use shmcb_cyclic_space because we've guaranteed
+             * we're not in the ambiguous full/empty case. */
+ shmcb_set_safe_uint(cache->pos_count,
+ shmcb_get_safe_uint(cache->pos_count) -
+ shmcb_cyclic_space(header->cache_data_size,
+ shmcb_get_safe_uint(cache->first_pos),
+ shmcb_get_safe_uint(&(idx->offset))));
+ shmcb_set_safe_uint(cache->first_pos, shmcb_get_safe_uint(&(idx->offset)));
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "we now have %u sessions",
+ shmcb_get_safe_uint(queue->pos_count));
+ }
+ header->num_expiries += loop;
+ return loop;
+}
+
+/* Inserts a new encoded session into a queue/cache pair - expiring
+ * (early or otherwise) any leading sessions as necessary to ensure
+ * there is room. An error return (FALSE) should only happen in the
+ * event of surreal values being passed on, or ridiculously small
+ * cache sizes. NB: For tracing purposes, this function is also given
+ * the server_rec to allow logging via "ap_log_error()". */
+static BOOL shmcb_insert_encoded_session(
+ server_rec *s, SHMCBQueue * queue,
+ SHMCBCache * cache,
+ unsigned char *encoded,
+ unsigned int encoded_len,
+ unsigned char *session_id,
+ time_t expiry_time)
+{
+ SHMCBHeader *header;
+ SHMCBIndex *idx = NULL;
+ unsigned int gap, new_pos, loop, new_offset;
+ int need;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "entering shmcb_insert_encoded_session, "
+ "*queue->pos_count = %u",
+ shmcb_get_safe_uint(queue->pos_count));
+
+    /* If there are entries to expire, ditch them first thing. */
+ shmcb_expire_division(s, queue, cache);
+ header = cache->header;
+ gap = header->cache_data_size - shmcb_get_safe_uint(cache->pos_count);
+ if (gap < encoded_len) {
+ new_pos = shmcb_get_safe_uint(queue->first_pos);
+ loop = 0;
+ need = (int) encoded_len - (int) gap;
+ while ((need > 0) && (loop + 1 < shmcb_get_safe_uint(queue->pos_count))) {
+ new_pos = shmcb_cyclic_increment(header->index_num, new_pos, 1);
+ loop += 1;
+ idx = shmcb_get_index(queue, new_pos);
+ need = (int) encoded_len - (int) gap -
+ shmcb_cyclic_space(header->cache_data_size,
+ shmcb_get_safe_uint(cache->first_pos),
+ shmcb_get_safe_uint(&(idx->offset)));
+ }
+ if (loop > 0) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "about to scroll %u sessions from %u",
+ loop, shmcb_get_safe_uint(queue->pos_count));
+ /* We are removing "loop" items from the cache. */
+ shmcb_set_safe_uint(cache->pos_count,
+ shmcb_get_safe_uint(cache->pos_count) -
+ shmcb_cyclic_space(header->cache_data_size,
+ shmcb_get_safe_uint(cache->first_pos),
+ shmcb_get_safe_uint(&(idx->offset))));
+ shmcb_set_safe_uint(cache->first_pos, shmcb_get_safe_uint(&(idx->offset)));
+ shmcb_set_safe_uint(queue->pos_count, shmcb_get_safe_uint(queue->pos_count) - loop);
+ shmcb_set_safe_uint(queue->first_pos, new_pos);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "now only have %u sessions",
+ shmcb_get_safe_uint(queue->pos_count));
+ /* Update the stats!!! */
+ header->num_scrolled += loop;
+ }
+ }
+
+    /* probably unnecessary checks, but I'll leave them until this code
+     * is verified. */
+ if (shmcb_get_safe_uint(cache->pos_count) + encoded_len >
+ header->cache_data_size) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "shmcb_insert_encoded_session internal error");
+ return FALSE;
+ }
+ if (shmcb_get_safe_uint(queue->pos_count) == header->index_num) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "shmcb_insert_encoded_session internal error");
+ return FALSE;
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "we have %u bytes and %u indexes free - enough",
+ header->cache_data_size -
+ shmcb_get_safe_uint(cache->pos_count), header->index_num -
+ shmcb_get_safe_uint(queue->pos_count));
+
+
+ /* HERE WE ASSUME THAT THE NEW SESSION SHOULD GO ON THE END! I'M NOT
+ * CHECKING WHETHER IT SHOULD BE GENUINELY "INSERTED" SOMEWHERE.
+ *
+ * We either fix that, or find out at a "higher" (read "mod_ssl")
+ * level whether it is possible to have distinct session caches for
+ * any attempted tomfoolery to do with different session timeouts.
+ * Knowing in advance that we can have a cache-wide constant timeout
+ * would make this stuff *MUCH* more efficient. Mind you, it's very
+ * efficient right now because I'm ignoring this problem!!!
+ */
+
+ /* Increment to the first unused byte */
+ new_offset = shmcb_cyclic_increment(header->cache_data_size,
+ shmcb_get_safe_uint(cache->first_pos),
+ shmcb_get_safe_uint(cache->pos_count));
+ /* Copy the DER-encoded session into place */
+ shmcb_cyclic_ntoc_memcpy(header->cache_data_size, cache->data,
+ new_offset, encoded, encoded_len);
+ /* Get the new index that this session is stored in. */
+ new_pos = shmcb_cyclic_increment(header->index_num,
+ shmcb_get_safe_uint(queue->first_pos),
+ shmcb_get_safe_uint(queue->pos_count));
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "storing in index %u, at offset %u",
+ new_pos, new_offset);
+ idx = shmcb_get_index(queue, new_pos);
+ if (idx == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "shmcb_insert_encoded_session internal error");
+ return FALSE;
+ }
+ shmcb_safe_clear(idx, sizeof(SHMCBIndex));
+ shmcb_set_safe_time(&(idx->expires), expiry_time);
+ shmcb_set_safe_uint(&(idx->offset), new_offset);
+
+    /* idx->removed = (unsigned char)0; */ /* Not needed given the shmcb_safe_clear above. */
+ idx->s_id2 = session_id[1];
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "session_id[0]=%u, idx->s_id2=%u",
+ session_id[0], session_id[1]);
+
+ /* All that remains is to adjust the cache's and queue's "pos_count"s. */
+ shmcb_set_safe_uint(cache->pos_count,
+ shmcb_get_safe_uint(cache->pos_count) + encoded_len);
+ shmcb_set_safe_uint(queue->pos_count,
+ shmcb_get_safe_uint(queue->pos_count) + 1);
+
+ /* And just for good debugging measure ... */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "leaving now with %u bytes in the cache and %u indexes",
+ shmcb_get_safe_uint(cache->pos_count),
+ shmcb_get_safe_uint(queue->pos_count));
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "leaving shmcb_insert_encoded_session");
+ return TRUE;
+}
+
+/* Performs a lookup into a queue/cache pair for a
+ * session_id. If found, the session is deserialised
+ * and returned, otherwise NULL. */
+static SSL_SESSION *shmcb_lookup_session_id(
+ server_rec *s, SHMCBQueue *queue,
+ SHMCBCache *cache, UCHAR *id,
+ unsigned int idlen)
+{
+ unsigned char tempasn[SSL_SESSION_MAX_DER];
+ SHMCBIndex *idx;
+ SHMCBHeader *header;
+ SSL_SESSION *pSession = NULL;
+ unsigned int curr_pos, loop, count;
+ MODSSL_D2I_SSL_SESSION_CONST unsigned char *ptr;
+ time_t now;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "entering shmcb_lookup_session_id");
+
+ /* If there are entries to expire, ditch them first thing. */
+ shmcb_expire_division(s, queue, cache);
+ now = time(NULL);
+ curr_pos = shmcb_get_safe_uint(queue->first_pos);
+ count = shmcb_get_safe_uint(queue->pos_count);
+ header = queue->header;
+ for (loop = 0; loop < count; loop++) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "loop=%u, count=%u, curr_pos=%u",
+ loop, count, curr_pos);
+ idx = shmcb_get_index(queue, curr_pos);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "idx->s_id2=%u, id[1]=%u, offset=%u",
+ idx->s_id2, id[1], shmcb_get_safe_uint(&(idx->offset)));
+        /* Only look into the session further if:
+         * (a) the second byte of the session_id matches,
+         * (b) the "removed" flag isn't set,
+         * (c) the session hasn't expired yet.
+         * We do (c) like this because it saves us having to
+         * do natural expiries ... naturally expired sessions
+         * scroll off the front anyway when the cache is full and
+         * "rotating"; the only real issue that remains is the
+         * removal or disabling of forcibly killed sessions. */
+ if ((idx->s_id2 == id[1]) && !idx->removed &&
+ (shmcb_get_safe_time(&(idx->expires)) > now)) {
+ unsigned int session_id_length;
+ unsigned char *session_id;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "at index %u, found possible session match",
+ curr_pos);
+ shmcb_cyclic_cton_memcpy(header->cache_data_size,
+ tempasn, cache->data,
+ shmcb_get_safe_uint(&(idx->offset)),
+ SSL_SESSION_MAX_DER);
+            ptr = tempasn;
+            pSession = d2i_SSL_SESSION(NULL, &ptr, SSL_SESSION_MAX_DER);
+            if (pSession == NULL) {
+                ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+                             "shmcb_lookup_session_id internal error");
+                return NULL;
+            }
+            session_id_length = SSL_SESSION_get_session_id_length(pSession);
+            session_id = SSL_SESSION_get_session_id(pSession);
+
+ if ((session_id_length == idlen) &&
+ (memcmp(session_id, id, idlen) == 0)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "a match!");
+ return pSession;
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "not a match");
+ SSL_SESSION_free(pSession);
+ pSession = NULL;
+ }
+ curr_pos = shmcb_cyclic_increment(header->index_num, curr_pos, 1);
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "no matching sessions were found");
+ return NULL;
+}
+
+static BOOL shmcb_remove_session_id(
+ server_rec *s, SHMCBQueue *queue,
+ SHMCBCache *cache, UCHAR *id, unsigned int idlen)
+{
+ unsigned char tempasn[SSL_SESSION_MAX_DER];
+ SSL_SESSION *pSession = NULL;
+ SHMCBIndex *idx;
+ SHMCBHeader *header;
+ unsigned int curr_pos, loop, count;
+ MODSSL_D2I_SSL_SESSION_CONST unsigned char *ptr;
+ BOOL to_return = FALSE;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "entering shmcb_remove_session_id");
+
+    /* If there are entries to expire, ditch them first thing. */
+    /* shmcb_expire_division(s, queue, cache); */
+
+    /* Regarding the above ... hmmm ... I know my expiry code is slightly
+     * "faster" than all this remove stuff ... but if the higher-level
+     * code calls a "remove" operation (and this *only* seems to happen
+     * when it has spotted an expired session before we had a chance to),
+     * then it should get credit for a remove (stats-wise). Also, on the
+     * off-chance that the server *requests* a renegotiate and wants to
+     * wipe the session clean, we should give that priority over our own
+     * routine expiry handling. So I've moved the expiry check to *after*
+     * this general remove stuff. */
+ curr_pos = shmcb_get_safe_uint(queue->first_pos);
+ count = shmcb_get_safe_uint(queue->pos_count);
+ header = cache->header;
+ for (loop = 0; loop < count; loop++) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "loop=%u, count=%u, curr_pos=%u",
+ loop, count, curr_pos);
+ idx = shmcb_get_index(queue, curr_pos);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "idx->s_id2=%u, id[1]=%u", idx->s_id2,
+ id[1]);
+ /* Only look into the session further if the second byte of the
+ * session_id matches. */
+ if (idx->s_id2 == id[1]) {
+ unsigned int session_id_length;
+ unsigned char *session_id;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "at index %u, found possible "
+ "session match", curr_pos);
+ shmcb_cyclic_cton_memcpy(header->cache_data_size,
+ tempasn, cache->data,
+ shmcb_get_safe_uint(&(idx->offset)),
+ SSL_SESSION_MAX_DER);
+ ptr = tempasn;
+ pSession = d2i_SSL_SESSION(NULL, &ptr, SSL_SESSION_MAX_DER);
+ if (pSession == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "shmcb_remove_session_id, internal error");
+ goto end;
+ }
+ session_id_length = SSL_SESSION_get_session_id_length(pSession);
+ session_id = SSL_SESSION_get_session_id(pSession);
+
+ if ((session_id_length == idlen)
+ && (memcmp(id, session_id, idlen) == 0)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "a match!");
+ /* Scrub out this session "quietly" */
+ idx->removed = (unsigned char) 1;
+ SSL_SESSION_free(pSession);
+ to_return = TRUE;
+ goto end;
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "not a match");
+ SSL_SESSION_free(pSession);
+ pSession = NULL;
+ }
+ curr_pos = shmcb_cyclic_increment(header->index_num, curr_pos, 1);
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "no matching sessions were found");
+
+    /* If there are entries to expire, ditch them now. */
+ shmcb_expire_division(s, queue, cache);
+end:
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "leaving shmcb_remove_session_id");
+ return to_return;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache_shmht.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache_shmht.c
new file mode 100644
index 00000000..28def647
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_scache_shmht.c
@@ -0,0 +1,351 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_scache_shmht.c
+ * Session Cache via Shared Memory (Hash Table Variant)
+ */
+
+#include "mod_ssl.h"
+
+/*
+ * Wrapper functions for table library which resemble malloc(3) & Co
+ * but use the variants from the MM shared memory library.
+ */
+
+static void *ssl_scache_shmht_malloc(void *opt_param, size_t size)
+{
+ SSLModConfigRec *mc = myModConfig((server_rec *)opt_param);
+
+ apr_rmm_off_t off = apr_rmm_calloc(mc->pSessionCacheDataRMM, size);
+ return apr_rmm_addr_get(mc->pSessionCacheDataRMM, off);
+}
+
+static void *ssl_scache_shmht_calloc(void *opt_param,
+ size_t number, size_t size)
+{
+ SSLModConfigRec *mc = myModConfig((server_rec *)opt_param);
+
+ apr_rmm_off_t off = apr_rmm_calloc(mc->pSessionCacheDataRMM, (number*size));
+
+ return apr_rmm_addr_get(mc->pSessionCacheDataRMM, off);
+}
+
+static void *ssl_scache_shmht_realloc(void *opt_param, void *ptr, size_t size)
+{
+ SSLModConfigRec *mc = myModConfig((server_rec *)opt_param);
+
+ apr_rmm_off_t off = apr_rmm_realloc(mc->pSessionCacheDataRMM, ptr, size);
+ return apr_rmm_addr_get(mc->pSessionCacheDataRMM, off);
+}
+
+static void ssl_scache_shmht_free(void *opt_param, void *ptr)
+{
+ SSLModConfigRec *mc = myModConfig((server_rec *)opt_param);
+
+ apr_rmm_off_t off = apr_rmm_offset_get(mc->pSessionCacheDataRMM, ptr);
+ apr_rmm_free(mc->pSessionCacheDataRMM, off);
+ return;
+}
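+
+/* Illustrative note: these wrappers are handed to table_alloc() further
+ * below, so every allocation the hash-table library makes ends up inside
+ * the relocatable shared-memory (RMM) segment rather than on the heap,
+ * e.g.:
+ *
+ *   ta = table_alloc(n, &ta_errno,
+ *                    ssl_scache_shmht_malloc, ssl_scache_shmht_calloc,
+ *                    ssl_scache_shmht_realloc, ssl_scache_shmht_free, s);
+ */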
+
+/*
+ * Now the actual session cache implementation
+ * based on a hash table inside a shared memory segment.
+ */
+
+void ssl_scache_shmht_init(server_rec *s, apr_pool_t *p)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ table_t *ta;
+ int ta_errno;
+ apr_size_t avail;
+ int n;
+ apr_status_t rv;
+
+ /*
+ * Create shared memory segment
+ */
+ if (mc->szSessionCacheDataFile == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "SSLSessionCache required");
+ ssl_die();
+ }
+
+ if ((rv = apr_shm_create(&(mc->pSessionCacheDataMM),
+ mc->nSessionCacheDataSize,
+ mc->szSessionCacheDataFile, mc->pPool)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Cannot allocate shared memory");
+ ssl_die();
+ }
+
+ if ((rv = apr_rmm_init(&(mc->pSessionCacheDataRMM), NULL,
+ apr_shm_baseaddr_get(mc->pSessionCacheDataMM),
+ mc->nSessionCacheDataSize, mc->pPool)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ "Cannot initialize rmm");
+ ssl_die();
+ }
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "initialize MM %pp RMM %pp",
+ mc->pSessionCacheDataMM, mc->pSessionCacheDataRMM);
+
+ /*
+ * Create hash table in shared memory segment
+ */
+ avail = mc->nSessionCacheDataSize;
+ n = (avail/2) / 1024;
+ n = n < 10 ? 10 : n;
+
+    /*
+     * Pass server_rec as opt_param to table_alloc so that we can do
+     * logging from ssl_util_table if required. Otherwise, mc would be
+     * sufficient.
+     */
+ if ((ta = table_alloc(n, &ta_errno,
+ ssl_scache_shmht_malloc,
+ ssl_scache_shmht_calloc,
+ ssl_scache_shmht_realloc,
+ ssl_scache_shmht_free, s )) == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
+ "Cannot allocate hash table in shared memory: %s",
+ table_strerror(ta_errno));
+ ssl_die();
+ }
+
+ table_attr(ta, TABLE_FLAG_AUTO_ADJUST|TABLE_FLAG_ADJUST_DOWN);
+ table_set_data_alignment(ta, sizeof(char *));
+ table_clear(ta);
+ mc->tSessionCacheDataTable = ta;
+
+ /*
+     * Log what has been done
+ */
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "Init: Created hash-table (%d buckets) "
+ "in shared memory (%" APR_SIZE_T_FMT
+ " bytes) for SSL session cache",
+ n, avail);
+ return;
+}
+
+void ssl_scache_shmht_kill(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+
+ if (mc->pSessionCacheDataRMM != NULL) {
+ apr_rmm_destroy(mc->pSessionCacheDataRMM);
+ mc->pSessionCacheDataRMM = NULL;
+ }
+
+ if (mc->pSessionCacheDataMM != NULL) {
+ apr_shm_destroy(mc->pSessionCacheDataMM);
+ mc->pSessionCacheDataMM = NULL;
+ }
+ return;
+}
+
+BOOL ssl_scache_shmht_store(server_rec *s, UCHAR *id, int idlen, time_t expiry, SSL_SESSION *sess)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ void *vp;
+ UCHAR ucaData[SSL_SESSION_MAX_DER];
+ int nData;
+ UCHAR *ucp;
+
+ /* streamline session data */
+ if ((nData = i2d_SSL_SESSION(sess, NULL)) > sizeof(ucaData))
+ return FALSE;
+ ucp = ucaData;
+ i2d_SSL_SESSION(sess, &ucp);
+
+ ssl_mutex_on(s);
+ if (table_insert_kd(mc->tSessionCacheDataTable,
+ id, idlen, NULL, sizeof(time_t)+nData,
+ NULL, &vp, 1) != TABLE_ERROR_NONE) {
+ ssl_mutex_off(s);
+ return FALSE;
+ }
+ memcpy(vp, &expiry, sizeof(time_t));
+ memcpy((char *)vp+sizeof(time_t), ucaData, nData);
+ ssl_mutex_off(s);
+
+ /* allow the regular expiring to occur */
+ ssl_scache_shmht_expire(s);
+
+ return TRUE;
+}
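+
+/* Illustrative note: each table entry stored above is laid out as
+ *
+ *   [ time_t expiry ][ nData bytes of DER-encoded SSL_SESSION ]
+ *
+ * and ssl_scache_shmht_retrieve() below unpacks it in the same order. */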
+
+SSL_SESSION *ssl_scache_shmht_retrieve(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ void *vp;
+ SSL_SESSION *sess = NULL;
+ MODSSL_D2I_SSL_SESSION_CONST UCHAR *ucpData;
+ int nData;
+ time_t expiry;
+ time_t now;
+ int n;
+
+ /* allow the regular expiring to occur */
+ ssl_scache_shmht_expire(s);
+
+ /* lookup key in table */
+ ssl_mutex_on(s);
+ if (table_retrieve(mc->tSessionCacheDataTable,
+ id, idlen, &vp, &n) != TABLE_ERROR_NONE) {
+ ssl_mutex_off(s);
+ return NULL;
+ }
+
+ /* copy over the information to the SCI */
+ nData = n-sizeof(time_t);
+ ucpData = (UCHAR *)malloc(nData);
+ if (ucpData == NULL) {
+ ssl_mutex_off(s);
+ return NULL;
+ }
+ memcpy(&expiry, vp, sizeof(time_t));
+ memcpy((void *)ucpData, (char *)vp+sizeof(time_t), nData);
+ ssl_mutex_off(s);
+
+ /* make sure the stuff is still not expired */
+ now = time(NULL);
+ if (expiry <= now) {
+ ssl_scache_shmht_remove(s, id, idlen);
+ return NULL;
+ }
+
+ /* unstreamed SSL_SESSION */
+ sess = d2i_SSL_SESSION(NULL, &ucpData, nData);
+
+ return sess;
+}
+
+void ssl_scache_shmht_remove(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+
+ /* remove value under key in table */
+ ssl_mutex_on(s);
+ table_delete(mc->tSessionCacheDataTable, id, idlen, NULL, NULL);
+ ssl_mutex_off(s);
+ return;
+}
+
+void ssl_scache_shmht_expire(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ SSLSrvConfigRec *sc = mySrvConfig(s);
+ static time_t tLast = 0;
+ table_linear_t iterator;
+ time_t tExpiresAt;
+ void *vpKey;
+ void *vpKeyThis;
+ void *vpData;
+ int nKey;
+ int nKeyThis;
+ int nData;
+ int nElements = 0;
+ int nDeleted = 0;
+ int bDelete;
+ int rc;
+ time_t tNow;
+
+ /*
+     * make sure the expiration of still-not-accessed session
+ * cache entries is done only from time to time
+ */
+ tNow = time(NULL);
+ if (tNow < tLast+sc->session_cache_timeout)
+ return;
+ tLast = tNow;
+
+ ssl_mutex_on(s);
+ if (table_first_r(mc->tSessionCacheDataTable, &iterator,
+ &vpKey, &nKey, &vpData, &nData) == TABLE_ERROR_NONE) {
+ do {
+ bDelete = FALSE;
+ nElements++;
+ if (nData < sizeof(time_t) || vpData == NULL)
+ bDelete = TRUE;
+ else {
+ memcpy(&tExpiresAt, vpData, sizeof(time_t));
+ /*
+ * XXX : Force the record to be cleaned up. TBD (Madhu)
+ * tExpiresAt = tNow;
+ */
+ if (tExpiresAt <= tNow)
+ bDelete = TRUE;
+ }
+ vpKeyThis = vpKey;
+ nKeyThis = nKey;
+ rc = table_next_r(mc->tSessionCacheDataTable, &iterator,
+ &vpKey, &nKey, &vpData, &nData);
+ if (bDelete) {
+ table_delete(mc->tSessionCacheDataTable,
+ vpKeyThis, nKeyThis, NULL, NULL);
+ nDeleted++;
+ }
+ } while (rc == TABLE_ERROR_NONE);
+ /* (vpKeyThis != vpKey) && (nKeyThis != nKey) */
+ }
+ ssl_mutex_off(s);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "Inter-Process Session Cache (SHMHT) Expiry: "
+ "old: %d, new: %d, removed: %d",
+ nElements, nElements-nDeleted, nDeleted);
+ return;
+}
+
+void ssl_scache_shmht_status(server_rec *s, apr_pool_t *p, void (*func)(char *, void *), void *arg)
+{
+ SSLModConfigRec *mc = myModConfig(s);
+ void *vpKey;
+ void *vpData;
+ int nKey;
+ int nData;
+ int nElem;
+ int nSize;
+ int nAverage;
+
+ nElem = 0;
+ nSize = 0;
+ ssl_mutex_on(s);
+ if (table_first(mc->tSessionCacheDataTable,
+ &vpKey, &nKey, &vpData, &nData) == TABLE_ERROR_NONE) {
+ do {
+ if (vpKey == NULL || vpData == NULL)
+ continue;
+ nElem += 1;
+ nSize += nData;
+ } while (table_next(mc->tSessionCacheDataTable,
+ &vpKey, &nKey, &vpData, &nData) == TABLE_ERROR_NONE);
+ }
+ ssl_mutex_off(s);
+ if (nSize > 0 && nElem > 0)
+ nAverage = nSize / nElem;
+ else
+ nAverage = 0;
+ func(apr_psprintf(p, "cache type: <b>SHMHT</b>, maximum size: <b>%d</b> bytes<br>", mc->nSessionCacheDataSize), arg);
+ func(apr_psprintf(p, "current sessions: <b>%d</b>, current size: <b>%d</b> bytes<br>", nElem, nSize), arg);
+ func(apr_psprintf(p, "average session size: <b>%d</b> bytes<br>", nAverage), arg);
+ return;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_toolkit_compat.h b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_toolkit_compat.h
new file mode 100644
index 00000000..72772150
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_toolkit_compat.h
@@ -0,0 +1,239 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SSL_TOOLKIT_COMPAT_H
+#define SSL_TOOLKIT_COMPAT_H
+
+/*
+ * this header file provides a compatibility layer
+ * between OpenSSL and RSA sslc
+ */
+
+#ifdef OPENSSL_VERSION_NUMBER
+
+/*
+ * rsa sslc uses incomplete types for most structures
+ * so we macroize for OpenSSL those which cannot be dereferenced
+ * using the same names as the sslc functions
+ */
+
+#define EVP_PKEY_key_type(k) (EVP_PKEY_type(k->type))
+
+#define X509_NAME_get_entries(xs) (xs->entries)
+#define X509_REVOKED_get_serialNumber(xs) (xs->serialNumber)
+
+#define X509_get_signature_algorithm(xs) (xs->cert_info->signature->algorithm)
+#define X509_get_key_algorithm(xs) (xs->cert_info->key->algor->algorithm)
+
+#define X509_NAME_ENTRY_get_data_ptr(xs) (xs->value->data)
+#define X509_NAME_ENTRY_get_data_len(xs) (xs->value->length)
+
+#define SSL_CTX_get_extra_certs(ctx) (ctx->extra_certs)
+#define SSL_CTX_set_extra_certs(ctx,value) {ctx->extra_certs = value;}
+
+#define SSL_CIPHER_get_name(s) (s->name)
+#define SSL_CIPHER_get_valid(s) (s->valid)
+
+#define SSL_SESSION_get_session_id(s) (s->session_id)
+#define SSL_SESSION_get_session_id_length(s) (s->session_id_length)
+
+/*
+ * Support for retrieving/overriding states
+ */
+#ifndef SSL_get_state
+#define SSL_get_state(ssl) SSL_state(ssl)
+#endif
+
+#define SSL_set_state(ssl,val) (ssl)->state = val
+
+#define MODSSL_BIO_CB_ARG_TYPE const char
+#define MODSSL_CRYPTO_CB_ARG_TYPE const char
+#if (OPENSSL_VERSION_NUMBER < 0x00907000)
+#define MODSSL_INFO_CB_ARG_TYPE SSL*
+#else
+#define MODSSL_INFO_CB_ARG_TYPE const SSL*
+#endif
+#define MODSSL_CLIENT_CERT_CB_ARG_TYPE X509
+#define MODSSL_PCHAR_CAST
+
+/* ...shifting sands of openssl... */
+#if (OPENSSL_VERSION_NUMBER >= 0x0090707f)
+# define MODSSL_D2I_SSL_SESSION_CONST const
+#else
+# define MODSSL_D2I_SSL_SESSION_CONST
+#endif
+
+#if (OPENSSL_VERSION_NUMBER >= 0x00908000)
+# define MODSSL_D2I_PrivateKey_CONST const
+# define MODSSL_D2I_X509_CONST const
+#else
+# define MODSSL_D2I_PrivateKey_CONST
+# define MODSSL_D2I_X509_CONST
+#endif
+
+#define modssl_X509_verify_cert X509_verify_cert
+
+typedef int (modssl_read_bio_cb_fn)(char*,int,int,void*);
+
+#if (OPENSSL_VERSION_NUMBER < 0x00904000)
+#define modssl_PEM_read_bio_X509(b, x, cb, arg) PEM_read_bio_X509(b, x, cb)
+#else
+#define modssl_PEM_read_bio_X509(b, x, cb, arg) PEM_read_bio_X509(b, x, cb, arg)
+#endif
+
+#define modssl_PEM_X509_INFO_read_bio PEM_X509_INFO_read_bio
+
+#define modssl_PEM_read_bio_PrivateKey PEM_read_bio_PrivateKey
+
+#define modssl_set_cipher_list SSL_set_cipher_list
+
+#define modssl_free OPENSSL_free
+
+#define EVP_PKEY_reference_inc(pkey) \
+ CRYPTO_add(&((pkey)->references), +1, CRYPTO_LOCK_X509_PKEY)
+
+#define X509_reference_inc(cert) \
+ CRYPTO_add(&((cert)->references), +1, CRYPTO_LOCK_X509)
+
+#define HAVE_SSL_RAND_EGD /* since 9.5.1 */
+
+#ifdef HAVE_SSL_X509V3_H
+#define HAVE_SSL_X509V3_EXT_d2i
+#endif
+
+#ifndef PEM_F_DEF_CALLBACK
+#ifdef PEM_F_PEM_DEF_CALLBACK
+/* In OpenSSL 0.9.8 PEM_F_DEF_CALLBACK was renamed */
+#define PEM_F_DEF_CALLBACK PEM_F_PEM_DEF_CALLBACK
+#endif
+#endif
+
+#elif defined (SSLC_VERSION_NUMBER) /* RSA */
+
+/* sslc does not support this function, OpenSSL has since 9.5.1 */
+#define RAND_status() 1
+
+/* sslc names this function a bit differently */
+#define CRYPTO_num_locks() CRYPTO_get_num_locks()
+
+#ifndef STACK_OF
+#define STACK_OF(type) STACK
+#endif
+
+#define MODSSL_BIO_CB_ARG_TYPE char
+#define MODSSL_CRYPTO_CB_ARG_TYPE char
+#define MODSSL_INFO_CB_ARG_TYPE SSL*
+#define MODSSL_CLIENT_CERT_CB_ARG_TYPE void
+#define MODSSL_PCHAR_CAST (char *)
+#define MODSSL_D2I_SSL_SESSION_CONST
+#define MODSSL_D2I_PrivateKey_CONST
+#define MODSSL_D2I_X509_CONST
+
+typedef int (modssl_read_bio_cb_fn)(char*,int,int);
+
+#define modssl_X509_verify_cert(c) X509_verify_cert(c, NULL)
+
+#define modssl_PEM_read_bio_X509(b, x, cb, arg) \
+ PEM_read_bio_X509(b, x, cb)
+
+#define modssl_PEM_X509_INFO_read_bio(b, x, cb, arg)\
+ PEM_X509_INFO_read_bio(b, x, cb)
+
+#define modssl_PEM_read_bio_PrivateKey(b, k, cb, arg) \
+ PEM_read_bio_PrivateKey(b, k, cb)
+
+#ifndef HAVE_SSL_SET_STATE
+#define SSL_set_state(ssl, state) /* XXX: should throw an error */
+#endif
+
+#define modssl_set_cipher_list(ssl, l) \
+ SSL_set_cipher_list(ssl, (char *)l)
+
+#define modssl_free free
+
+#ifndef PEM_F_DEF_CALLBACK
+#define PEM_F_DEF_CALLBACK PEM_F_DEF_CB
+#endif
+
+#if SSLC_VERSION_NUMBER < 0x2000
+
+#define X509_STORE_CTX_set_depth(st, d)
+#define X509_CRL_get_lastUpdate(x) ((x)->crl->lastUpdate)
+#define X509_CRL_get_nextUpdate(x) ((x)->crl->nextUpdate)
+#define X509_CRL_get_REVOKED(x) ((x)->crl->revoked)
+#define X509_REVOKED_get_serialNumber(xs) (xs->serialNumber)
+
+#define modssl_set_verify(ssl, verify, cb) \
+ SSL_set_verify(ssl, verify)
+
+#define NO_SSL_X509V3_H
+
+#else /* SSLC_VERSION_NUMBER >= 0x2000 */
+
+#define CRYPTO_malloc_init R_malloc_init
+
+#define EVP_cleanup()
+
+#endif /* SSLC_VERSION_NUMBER >= 0x2000 */
+
+typedef void (*modssl_popfree_fn)(char *data);
+
+#define sk_SSL_CIPHER_dup sk_dup
+#define sk_SSL_CIPHER_find(st, data) sk_find(st, (void *)data)
+#define sk_SSL_CIPHER_free sk_free
+#define sk_SSL_CIPHER_num sk_num
+#define sk_SSL_CIPHER_value (SSL_CIPHER *)sk_value
+#define sk_X509_num sk_num
+#define sk_X509_push sk_push
+#define sk_X509_pop_free(st, free) sk_pop_free((STACK*)(st), (modssl_popfree_fn)(free))
+#define sk_X509_value (X509 *)sk_value
+#define sk_X509_INFO_free sk_free
+#define sk_X509_INFO_pop_free(st, free) sk_pop_free((STACK*)(st), (modssl_popfree_fn)(free))
+#define sk_X509_INFO_num sk_num
+#define sk_X509_INFO_new_null sk_new_null
+#define sk_X509_INFO_value (X509_INFO *)sk_value
+#define sk_X509_NAME_find(st, data) sk_find(st, (void *)data)
+#define sk_X509_NAME_free sk_free
+#define sk_X509_NAME_new sk_new
+#define sk_X509_NAME_num sk_num
+#define sk_X509_NAME_push(st, data) sk_push(st, (void *)data)
+#define sk_X509_NAME_value (X509_NAME *)sk_value
+#define sk_X509_NAME_ENTRY_num sk_num
+#define sk_X509_NAME_ENTRY_value (X509_NAME_ENTRY *)sk_value
+#define sk_X509_NAME_set_cmp_func sk_set_cmp_func
+#define sk_X509_REVOKED_num sk_num
+#define sk_X509_REVOKED_value (X509_REVOKED *)sk_value
+
+#else /* ! OPENSSL_VERSION_NUMBER && ! SSLC_VERSION_NUMBER */
+
+#error "Unrecognized SSL Toolkit!"
+
+#endif /* ! OPENSSL_VERSION_NUMBER && ! SSLC_VERSION_NUMBER */
+
+#ifndef modssl_set_verify
+#define modssl_set_verify(ssl, verify, cb) \
+ SSL_set_verify(ssl, verify, cb)
+#endif
+
+#ifndef NO_SSL_X509V3_H
+#define HAVE_SSL_X509V3_H
+#endif
+
+#ifndef SSL_SESS_CACHE_NO_INTERNAL
+#define SSL_SESS_CACHE_NO_INTERNAL SSL_SESS_CACHE_NO_INTERNAL_LOOKUP
+#endif
+
+#endif /* SSL_TOOLKIT_COMPAT_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util.c
new file mode 100644
index 00000000..f1319547
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util.c
@@ -0,0 +1,449 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_util.c
+ * Utility Functions
+ */
+ /* ``Every day of my life
+ I am forced to add another
+ name to the list of people
+ who piss me off!''
+ -- Calvin */
+
+#include "mod_ssl.h"
+#include "ap_mpm.h"
+#include "apr_thread_mutex.h"
+
+/* _________________________________________________________________
+**
+** Utility Functions
+** _________________________________________________________________
+*/
+
+char *ssl_util_vhostid(apr_pool_t *p, server_rec *s)
+{
+ char *id;
+ SSLSrvConfigRec *sc;
+ char *host;
+ apr_port_t port;
+
+ host = s->server_hostname;
+ if (s->port != 0)
+ port = s->port;
+ else {
+ sc = mySrvConfig(s);
+ if (sc->enabled)
+ port = DEFAULT_HTTPS_PORT;
+ else
+ port = DEFAULT_HTTP_PORT;
+ }
+ id = apr_psprintf(p, "%s:%lu", host, (unsigned long)port);
+ return id;
+}
+
+void ssl_util_strupper(char *s)
+{
+ for (; *s; ++s)
+ *s = apr_toupper(*s);
+ return;
+}
+
+static const char ssl_util_uuencode_six2pr[64+1] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+void ssl_util_uuencode(char *szTo, const char *szFrom, BOOL bPad)
+{
+ ssl_util_uuencode_binary((unsigned char *)szTo,
+ (const unsigned char *)szFrom,
+ strlen(szFrom), bPad);
+}
+
+void ssl_util_uuencode_binary(unsigned char *szTo,
+ const unsigned char *szFrom,
+ int nLength, BOOL bPad)
+{
+ const unsigned char *s;
+ int nPad = 0;
+
+ for (s = szFrom; nLength > 0; s += 3) {
+ *szTo++ = ssl_util_uuencode_six2pr[s[0] >> 2];
+ *szTo++ = ssl_util_uuencode_six2pr[(s[0] << 4 | s[1] >> 4) & 0x3f];
+ if (--nLength == 0) {
+ nPad = 2;
+ break;
+ }
+ *szTo++ = ssl_util_uuencode_six2pr[(s[1] << 2 | s[2] >> 6) & 0x3f];
+ if (--nLength == 0) {
+ nPad = 1;
+ break;
+ }
+ *szTo++ = ssl_util_uuencode_six2pr[s[2] & 0x3f];
+ --nLength;
+ }
+ while(bPad && nPad--) {
+ *szTo++ = NUL;
+ }
+ *szTo = NUL;
+ return;
+}
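+
+/* For example (standard base64 behaviour, illustrative only): encoding the
+ * 3-byte input "Man" with this table produces "TWFu"; inputs whose length
+ * is not a multiple of 3 are padded with NUL rather than '=' characters
+ * when bPad is TRUE. */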
+
+apr_file_t *ssl_util_ppopen(server_rec *s, apr_pool_t *p, const char *cmd,
+ const char * const *argv)
+{
+ apr_procattr_t *procattr;
+ apr_proc_t *proc;
+
+ if (apr_procattr_create(&procattr, p) != APR_SUCCESS)
+ return NULL;
+ if (apr_procattr_io_set(procattr, APR_FULL_BLOCK, APR_FULL_BLOCK,
+ APR_FULL_BLOCK) != APR_SUCCESS)
+ return NULL;
+ if (apr_procattr_dir_set(procattr,
+ ap_make_dirstr_parent(p, cmd)) != APR_SUCCESS)
+ return NULL;
+ if (apr_procattr_cmdtype_set(procattr, APR_PROGRAM) != APR_SUCCESS)
+ return NULL;
+ if ((proc = (apr_proc_t *)apr_pcalloc(p, sizeof(apr_proc_t))) == NULL)
+ return NULL;
+ if (apr_proc_create(proc, cmd, argv, NULL, procattr, p) != APR_SUCCESS)
+ return NULL;
+ return proc->out;
+}
+
+void ssl_util_ppclose(server_rec *s, apr_pool_t *p, apr_file_t *fp)
+{
+ apr_file_close(fp);
+ return;
+}
+
+/*
+ * Run a filter program and read the first line of its stdout output
+ */
+char *ssl_util_readfilter(server_rec *s, apr_pool_t *p, const char *cmd,
+ const char * const *argv)
+{
+ static char buf[MAX_STRING_LEN];
+ apr_file_t *fp;
+ apr_size_t nbytes = 1;
+ char c;
+ int k;
+
+ if ((fp = ssl_util_ppopen(s, p, cmd, argv)) == NULL)
+ return NULL;
+ /* XXX: we are reading 1 byte at a time here */
+ for (k = 0; apr_file_read(fp, &c, &nbytes) == APR_SUCCESS
+ && nbytes == 1 && (k < MAX_STRING_LEN-1) ; ) {
+ if (c == '\n' || c == '\r')
+ break;
+ buf[k++] = c;
+ }
+ buf[k] = NUL;
+ ssl_util_ppclose(s, p, fp);
+
+ return buf;
+}
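+
+/* Illustrative usage sketch (the helper program path is hypothetical):
+ * to read a passphrase from an external helper one might call
+ *
+ *   const char *argv[] = { "/usr/local/bin/getpass", NULL };
+ *   char *line = ssl_util_readfilter(s, p, argv[0], argv);
+ *
+ * Only the first line of the helper's stdout (up to MAX_STRING_LEN-1
+ * bytes) is returned, and the buffer is static, so it is overwritten
+ * by the next call. */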
+
+BOOL ssl_util_path_check(ssl_pathcheck_t pcm, const char *path, apr_pool_t *p)
+{
+ apr_finfo_t finfo;
+
+ if (path == NULL)
+ return FALSE;
+ if (pcm & SSL_PCM_EXISTS && apr_stat(&finfo, path,
+ APR_FINFO_TYPE|APR_FINFO_SIZE, p) != 0)
+ return FALSE;
+ if (pcm & SSL_PCM_ISREG && finfo.filetype != APR_REG)
+ return FALSE;
+ if (pcm & SSL_PCM_ISDIR && finfo.filetype != APR_DIR)
+ return FALSE;
+ if (pcm & SSL_PCM_ISNONZERO && finfo.size <= 0)
+ return FALSE;
+ return TRUE;
+}
+
+ssl_algo_t ssl_util_algotypeof(X509 *pCert, EVP_PKEY *pKey)
+{
+ ssl_algo_t t;
+
+ t = SSL_ALGO_UNKNOWN;
+ if (pCert != NULL)
+ pKey = X509_get_pubkey(pCert);
+ if (pKey != NULL) {
+ switch (EVP_PKEY_key_type(pKey)) {
+ case EVP_PKEY_RSA:
+ t = SSL_ALGO_RSA;
+ break;
+ case EVP_PKEY_DSA:
+ t = SSL_ALGO_DSA;
+ break;
+ default:
+ break;
+ }
+ }
+ return t;
+}
+
+char *ssl_util_algotypestr(ssl_algo_t t)
+{
+ char *cp;
+
+ cp = "UNKNOWN";
+ switch (t) {
+ case SSL_ALGO_RSA:
+ cp = "RSA";
+ break;
+ case SSL_ALGO_DSA:
+ cp = "DSA";
+ break;
+ default:
+ break;
+ }
+ return cp;
+}
+
+char *ssl_util_ptxtsub(apr_pool_t *p, const char *cpLine,
+ const char *cpMatch, char *cpSubst)
+{
+#define MAX_PTXTSUB 100
+ char *cppMatch[MAX_PTXTSUB];
+ char *cpResult;
+ int nResult;
+ int nLine;
+ int nSubst;
+ int nMatch;
+ char *cpI;
+ char *cpO;
+ char *cp;
+ int i;
+
+ /*
+ * Pass 1: find substitution locations and calculate sizes
+ */
+ nLine = strlen(cpLine);
+ nMatch = strlen(cpMatch);
+ nSubst = strlen(cpSubst);
+ for (cpI = (char *)cpLine, i = 0, nResult = 0;
+ cpI < cpLine+nLine && i < MAX_PTXTSUB; ) {
+ if ((cp = strstr(cpI, cpMatch)) != NULL) {
+ cppMatch[i++] = cp;
+ nResult += ((cp-cpI)+nSubst);
+ cpI = (cp+nMatch);
+ }
+ else {
+ nResult += strlen(cpI);
+ break;
+ }
+ }
+ cppMatch[i] = NULL;
+ if (i == 0)
+ return NULL;
+
+ /*
+ * Pass 2: allocate memory and assemble result
+ */
+ cpResult = apr_pcalloc(p, nResult+1);
+ for (cpI = (char *)cpLine, cpO = cpResult, i = 0;
+ cppMatch[i] != NULL;
+ i++) {
+ apr_cpystrn(cpO, cpI, cppMatch[i]-cpI+1);
+ cpO += (cppMatch[i]-cpI);
+ apr_cpystrn(cpO, cpSubst, nSubst+1);
+ cpO += nSubst;
+ cpI = (cppMatch[i]+nMatch);
+ }
+ apr_cpystrn(cpO, cpI, cpResult+nResult-cpO+1);
+
+ return cpResult;
+}
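+
+/* Illustrative example: ssl_util_ptxtsub(p, "Hello $NAME", "$NAME", "World")
+ * returns the pool-allocated string "Hello World", while a line containing
+ * no occurrence of cpMatch yields NULL. */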
+
+/*
+ * certain key and cert data needs to survive restarts,
+ * and is stored in the user data table of s->process->pool.
+ * to prevent "leaking" of this data, we use malloc/free
+ * rather than apr_palloc, and these wrappers help make sure
+ * we do not leak the malloc-ed data.
+ */
+unsigned char *ssl_asn1_table_set(apr_hash_t *table,
+ const char *key,
+ long int length)
+{
+ apr_ssize_t klen = strlen(key);
+ ssl_asn1_t *asn1 = apr_hash_get(table, key, klen);
+
+ /*
+ * if a value for this key already exists,
+ * reuse as much of the already malloc-ed data
+ * as possible.
+ */
+ if (asn1) {
+ if (asn1->nData != length) {
+ free(asn1->cpData); /* XXX: realloc? */
+ asn1->cpData = NULL;
+ }
+ }
+ else {
+ asn1 = malloc(sizeof(*asn1));
+ asn1->source_mtime = 0; /* used as a note for encrypted private keys */
+ asn1->cpData = NULL;
+ }
+
+ asn1->nData = length;
+ if (!asn1->cpData) {
+ asn1->cpData = malloc(length);
+ }
+
+ apr_hash_set(table, key, klen, asn1);
+
+ return asn1->cpData; /* caller will assign a value to this */
+}
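+
+/* Illustrative usage sketch (variable names are hypothetical): the caller
+ * serialises into the returned buffer, e.g.
+ *
+ *   ucp = ssl_asn1_table_set(table, key, i2d_X509(cert, NULL));
+ *   i2d_X509(cert, &ucp);
+ */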
+
+ssl_asn1_t *ssl_asn1_table_get(apr_hash_t *table,
+ const char *key)
+{
+ return (ssl_asn1_t *)apr_hash_get(table, key, APR_HASH_KEY_STRING);
+}
+
+void ssl_asn1_table_unset(apr_hash_t *table,
+ const char *key)
+{
+ apr_ssize_t klen = strlen(key);
+ ssl_asn1_t *asn1 = apr_hash_get(table, key, klen);
+
+ if (!asn1) {
+ return;
+ }
+
+ if (asn1->cpData) {
+ free(asn1->cpData);
+ }
+ free(asn1);
+
+ apr_hash_set(table, key, klen, NULL);
+}
+
+static const char *ssl_asn1_key_types[] = {"RSA", "DSA"};
+
+const char *ssl_asn1_keystr(int keytype)
+{
+ if (keytype >= SSL_AIDX_MAX) {
+ return NULL;
+ }
+
+ return ssl_asn1_key_types[keytype];
+}
+
+const char *ssl_asn1_table_keyfmt(apr_pool_t *p,
+ const char *id,
+ int keytype)
+{
+ const char *keystr = ssl_asn1_keystr(keytype);
+
+ return apr_pstrcat(p, id, ":", keystr, NULL);
+}
+
+
+#if APR_HAS_THREADS
+/*
+ * To ensure thread-safety in OpenSSL - work in progress
+ */
+
+static apr_thread_mutex_t **lock_cs;
+static int lock_num_locks;
+
+#ifdef SSLC_VERSION_NUMBER
+#if SSLC_VERSION_NUMBER >= 0x2000
+static int ssl_util_thr_lock(int mode, int type,
+ const char *file, int line)
+#else
+static void ssl_util_thr_lock(int mode, int type,
+ const char *file, int line)
+#endif
+#else
+static void ssl_util_thr_lock(int mode, int type,
+ const char *file, int line)
+#endif
+{
+ if (type < lock_num_locks) {
+ if (mode & CRYPTO_LOCK) {
+ apr_thread_mutex_lock(lock_cs[type]);
+ }
+ else {
+ apr_thread_mutex_unlock(lock_cs[type]);
+ }
+#ifdef SSLC_VERSION_NUMBER
+#if SSLC_VERSION_NUMBER >= 0x2000
+ return 1;
+ }
+ else {
+ return -1;
+#endif
+#endif
+ }
+}
+
+static unsigned long ssl_util_thr_id(void)
+{
+ /* OpenSSL needs this to return an unsigned long. On OS/390, the pthread
+ * id is a structure twice that big. Use the TCB pointer instead as a
+ * unique unsigned long.
+ */
+#ifdef __MVS__
+ struct PSA {
+ char unmapped[540];
+ unsigned long PSATOLD;
+ } *psaptr = 0;
+
+ return psaptr->PSATOLD;
+#else
+ return (unsigned long) apr_os_thread_current();
+#endif
+}
+
+static apr_status_t ssl_util_thread_cleanup(void *data)
+{
+ CRYPTO_set_locking_callback(NULL);
+ CRYPTO_set_id_callback(NULL);
+
+ /* Let the registered mutex cleanups do their own thing
+ */
+ return APR_SUCCESS;
+}
+
+void ssl_util_thread_setup(apr_pool_t *p)
+{
+ int i;
+
+ lock_num_locks = CRYPTO_num_locks();
+ lock_cs = apr_palloc(p, lock_num_locks * sizeof(*lock_cs));
+
+ for (i = 0; i < lock_num_locks; i++) {
+ apr_thread_mutex_create(&(lock_cs[i]), APR_THREAD_MUTEX_DEFAULT, p);
+ }
+
+ CRYPTO_set_id_callback(ssl_util_thr_id);
+
+ CRYPTO_set_locking_callback(ssl_util_thr_lock);
+
+ apr_pool_cleanup_register(p, NULL, ssl_util_thread_cleanup,
+ apr_pool_cleanup_null);
+}
+#endif
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_ssl.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_ssl.c
new file mode 100644
index 00000000..857bc304
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_ssl.c
@@ -0,0 +1,574 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_util_ssl.c
+ * Additional Utility Functions for OpenSSL
+ */
+
+#include "mod_ssl.h"
+
+/* _________________________________________________________________
+**
+** Additional High-Level Functions for OpenSSL
+** _________________________________________________________________
+*/
+
+/* we initialize this index at startup time
+ * and never write to it at request time,
+ * so this static is thread safe.
+ * also note that OpenSSL increments a static variable when
+ * SSL_get_ex_new_index() is called, so we _must_ do this at startup.
+ */
+static int SSL_app_data2_idx = -1;
+
+void SSL_init_app_data2_idx(void)
+{
+ int i;
+
+ if (SSL_app_data2_idx > -1) {
+ return;
+ }
+
+ /* we _do_ need to call this twice */
+ for (i=0; i<=1; i++) {
+ SSL_app_data2_idx =
+ SSL_get_ex_new_index(0,
+ "Second Application Data for SSL",
+ NULL, NULL, NULL);
+ }
+}
+
+void *SSL_get_app_data2(SSL *ssl)
+{
+ return (void *)SSL_get_ex_data(ssl, SSL_app_data2_idx);
+}
+
+void SSL_set_app_data2(SSL *ssl, void *arg)
+{
+ SSL_set_ex_data(ssl, SSL_app_data2_idx, (char *)arg);
+ return;
+}
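+
+/*
+ * Illustrative usage sketch (the attached pointer is only a placeholder,
+ * not a fixed part of this API): the index is created once at startup and
+ * the slot is then used to hang a second application pointer off each SSL
+ * object:
+ *
+ *     SSL_init_app_data2_idx();                  once, at module init
+ *     SSL_set_app_data2(ssl, some_context_ptr);  per connection
+ *     some_context_ptr = SSL_get_app_data2(ssl);
+ */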
+
+/* _________________________________________________________________
+**
+** High-Level Certificate / Private Key Loading
+** _________________________________________________________________
+*/
+
+X509 *SSL_read_X509(char* filename, X509 **x509, modssl_read_bio_cb_fn *cb)
+{
+ X509 *rc;
+ BIO *bioS;
+ BIO *bioF;
+
+ /* 1. try PEM (= DER+Base64+headers) */
+ if ((bioS=BIO_new_file(filename, "r")) == NULL)
+ return NULL;
+ rc = modssl_PEM_read_bio_X509 (bioS, x509, cb, NULL);
+ BIO_free(bioS);
+
+ if (rc == NULL) {
+ /* 2. try DER+Base64 */
+ if ((bioS=BIO_new_file(filename, "r")) == NULL)
+ return NULL;
+
+ if ((bioF = BIO_new(BIO_f_base64())) == NULL) {
+ BIO_free(bioS);
+ return NULL;
+ }
+ bioS = BIO_push(bioF, bioS);
+ rc = d2i_X509_bio(bioS, NULL);
+ BIO_free_all(bioS);
+
+ if (rc == NULL) {
+ /* 3. try plain DER */
+ if ((bioS=BIO_new_file(filename, "r")) == NULL)
+ return NULL;
+ rc = d2i_X509_bio(bioS, NULL);
+ BIO_free(bioS);
+ }
+ }
+ if (rc != NULL && x509 != NULL) {
+ if (*x509 != NULL)
+ X509_free(*x509);
+ *x509 = rc;
+ }
+ return rc;
+}
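+
+/*
+ * Illustrative usage sketch (the filename is a placeholder and the
+ * password callback is passed as NULL for brevity): the loader above
+ * transparently falls back from PEM to base64 DER to plain DER, so a
+ * caller only checks the final result:
+ *
+ *     X509 *cert = NULL;
+ *     if (SSL_read_X509("conf/ssl.crt/server.crt", &cert, NULL) == NULL)
+ *         ... report that the certificate could not be read ...
+ */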
+
+#if SSL_LIBRARY_VERSION <= 0x00904100
+static EVP_PKEY *d2i_PrivateKey_bio(BIO *bio, EVP_PKEY **key)
+{
+ return ((EVP_PKEY *)ASN1_d2i_bio(
+ (char *(*)())EVP_PKEY_new,
+ (char *(*)())d2i_PrivateKey,
+ (bio), (unsigned char **)(key)));
+}
+#endif
+
+EVP_PKEY *SSL_read_PrivateKey(char* filename, EVP_PKEY **key, modssl_read_bio_cb_fn *cb, void *s)
+{
+ EVP_PKEY *rc;
+ BIO *bioS;
+ BIO *bioF;
+
+ /* 1. try PEM (= DER+Base64+headers) */
+ if ((bioS=BIO_new_file(filename, "r")) == NULL)
+ return NULL;
+ rc = modssl_PEM_read_bio_PrivateKey(bioS, key, cb, s);
+ BIO_free(bioS);
+
+ if (rc == NULL) {
+ /* 2. try DER+Base64 */
+ if ((bioS = BIO_new_file(filename, "r")) == NULL)
+ return NULL;
+
+ if ((bioF = BIO_new(BIO_f_base64())) == NULL) {
+ BIO_free(bioS);
+ return NULL;
+ }
+ bioS = BIO_push(bioF, bioS);
+ rc = d2i_PrivateKey_bio(bioS, NULL);
+ BIO_free_all(bioS);
+
+ if (rc == NULL) {
+ /* 3. try plain DER */
+ if ((bioS = BIO_new_file(filename, "r")) == NULL)
+ return NULL;
+ rc = d2i_PrivateKey_bio(bioS, NULL);
+ BIO_free(bioS);
+ }
+ }
+ if (rc != NULL && key != NULL) {
+ if (*key != NULL)
+ EVP_PKEY_free(*key);
+ *key = rc;
+ }
+ return rc;
+}
+
+/* _________________________________________________________________
+**
+** Smart shutdown
+** _________________________________________________________________
+*/
+
+int SSL_smart_shutdown(SSL *ssl)
+{
+ int i;
+ int rc;
+
+ /*
+ * Repeat the calls, because SSL_shutdown internally dispatches through a
+ * little state machine. Usually only one or two iterations should be
+ * needed, so we restrict the total number of attempts in order to
+ * avoid process hangs in case the client misbehaved with the socket
+ * connection and OpenSSL cannot recognize it.
+ */
+ rc = 0;
+ for (i = 0; i < 4 /* max 2x pending + 2x data = 4 */; i++) {
+ if ((rc = SSL_shutdown(ssl)))
+ break;
+ }
+ return rc;
+}
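+
+/*
+ * Illustrative usage sketch: a connection teardown path would typically
+ * call this in place of a single SSL_shutdown() before releasing the SSL:
+ *
+ *     SSL_smart_shutdown(ssl);
+ *     SSL_free(ssl);
+ */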
+
+/* _________________________________________________________________
+**
+** Certificate Revocation List (CRL) Storage
+** _________________________________________________________________
+*/
+
+X509_STORE *SSL_X509_STORE_create(char *cpFile, char *cpPath)
+{
+ X509_STORE *pStore;
+ X509_LOOKUP *pLookup;
+
+ if (cpFile == NULL && cpPath == NULL)
+ return NULL;
+ if ((pStore = X509_STORE_new()) == NULL)
+ return NULL;
+ if (cpFile != NULL) {
+ pLookup = X509_STORE_add_lookup(pStore, X509_LOOKUP_file());
+ if (pLookup == NULL) {
+ X509_STORE_free(pStore);
+ return NULL;
+ }
+ X509_LOOKUP_load_file(pLookup, cpFile, X509_FILETYPE_PEM);
+ }
+ if (cpPath != NULL) {
+ pLookup = X509_STORE_add_lookup(pStore, X509_LOOKUP_hash_dir());
+ if (pLookup == NULL) {
+ X509_STORE_free(pStore);
+ return NULL;
+ }
+ X509_LOOKUP_add_dir(pLookup, cpPath, X509_FILETYPE_PEM);
+ }
+ return pStore;
+}
+
+int SSL_X509_STORE_lookup(X509_STORE *pStore, int nType,
+ X509_NAME *pName, X509_OBJECT *pObj)
+{
+ X509_STORE_CTX pStoreCtx;
+ int rc;
+
+ X509_STORE_CTX_init(&pStoreCtx, pStore, NULL, NULL);
+ rc = X509_STORE_get_by_subject(&pStoreCtx, nType, pName, pObj);
+ X509_STORE_CTX_cleanup(&pStoreCtx);
+ return rc;
+}
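+
+/*
+ * Illustrative usage sketch (file argument and variable names are
+ * placeholders): build a store from a CRL file and/or hashed directory,
+ * then look up the CRL issued under a particular subject name:
+ *
+ *     X509_STORE *store = SSL_X509_STORE_create("crl.pem", NULL);
+ *     X509_OBJECT obj;
+ *     if (store != NULL
+ *         && SSL_X509_STORE_lookup(store, X509_LU_CRL, subject, &obj) > 0)
+ *         ... obj.data.crl holds the matching CRL ...
+ */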
+
+/* _________________________________________________________________
+**
+** Cipher Suite Spec String Creation
+** _________________________________________________________________
+*/
+
+char *SSL_make_ciphersuite(apr_pool_t *p, SSL *ssl)
+{
+ STACK_OF(SSL_CIPHER) *sk;
+ SSL_CIPHER *c;
+ int i;
+ int l;
+ char *cpCipherSuite;
+ char *cp;
+
+ if (ssl == NULL)
+ return "";
+ if ((sk = (STACK_OF(SSL_CIPHER) *)SSL_get_ciphers(ssl)) == NULL)
+ return "";
+ l = 0;
+ for (i = 0; i < sk_SSL_CIPHER_num(sk); i++) {
+ c = sk_SSL_CIPHER_value(sk, i);
+ l += strlen(SSL_CIPHER_get_name(c))+2+1;
+ }
+ if (l == 0)
+ return "";
+ cpCipherSuite = (char *)apr_palloc(p, l+1);
+ cp = cpCipherSuite;
+ for (i = 0; i < sk_SSL_CIPHER_num(sk); i++) {
+ c = sk_SSL_CIPHER_value(sk, i);
+ l = strlen(SSL_CIPHER_get_name(c));
+ memcpy(cp, SSL_CIPHER_get_name(c), l);
+ cp += l;
+ *cp++ = '/';
+ *cp++ = (SSL_CIPHER_get_valid(c) == 1 ? '1' : '0');
+ *cp++ = ':';
+ }
+ *(cp-1) = NUL;
+ return cpCipherSuite;
+}
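+
+/*
+ * For illustration (cipher names and flags here are made up): the
+ * resulting spec string concatenates each cipher name with its valid
+ * flag, e.g.
+ *
+ *     "RC4-MD5/1:EXP-RC4-MD5/0:DES-CBC3-SHA/1"
+ */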
+
+/* _________________________________________________________________
+**
+** Certificate Checks
+** _________________________________________________________________
+*/
+
+/* check whether cert contains extended key usage with a SGC tag */
+BOOL SSL_X509_isSGC(X509 *cert)
+{
+#ifdef HAVE_SSL_X509V3_EXT_d2i
+ X509_EXTENSION *ext;
+ int ext_nid;
+ STACK *sk;
+ BOOL is_sgc;
+ int idx;
+ int i;
+
+ is_sgc = FALSE;
+ idx = X509_get_ext_by_NID(cert, NID_ext_key_usage, -1);
+ if (idx >= 0) {
+ ext = X509_get_ext(cert, idx);
+ if ((sk = (STACK *)X509V3_EXT_d2i(ext)) != NULL) {
+ for (i = 0; i < sk_num(sk); i++) {
+ ext_nid = OBJ_obj2nid((ASN1_OBJECT *)sk_value(sk, i));
+ if (ext_nid == NID_ms_sgc || ext_nid == NID_ns_sgc) {
+ is_sgc = TRUE;
+ break;
+ }
+ }
+ }
+ }
+ return is_sgc;
+#else
+ return FALSE;
+#endif
+}
+
+/* retrieve basic constraints ingredients */
+BOOL SSL_X509_getBC(X509 *cert, int *ca, int *pathlen)
+{
+#ifdef HAVE_SSL_X509V3_EXT_d2i
+ X509_EXTENSION *ext;
+ BASIC_CONSTRAINTS *bc;
+ int idx;
+ BIGNUM *bn = NULL;
+ char *cp;
+
+ if ((idx = X509_get_ext_by_NID(cert, NID_basic_constraints, -1)) < 0)
+ return FALSE;
+ ext = X509_get_ext(cert, idx);
+ if (ext == NULL)
+ return FALSE;
+ if ((bc = (BASIC_CONSTRAINTS *)X509V3_EXT_d2i(ext)) == NULL)
+ return FALSE;
+ *ca = bc->ca;
+ *pathlen = -1 /* unlimited */;
+ if (bc->pathlen != NULL) {
+ if ((bn = ASN1_INTEGER_to_BN(bc->pathlen, NULL)) == NULL)
+ return FALSE;
+ if ((cp = BN_bn2dec(bn)) == NULL)
+ return FALSE;
+ *pathlen = atoi(cp);
+ free(cp);
+ BN_free(bn);
+ }
+ BASIC_CONSTRAINTS_free(bc);
+ return TRUE;
+#else
+ return FALSE;
+#endif
+}
+
+/* retrieve subject CommonName of certificate */
+BOOL SSL_X509_getCN(apr_pool_t *p, X509 *xs, char **cppCN)
+{
+ X509_NAME *xsn;
+ X509_NAME_ENTRY *xsne;
+ int i, nid;
+ unsigned char *data_ptr;
+ int data_len;
+
+ xsn = X509_get_subject_name(xs);
+ for (i = 0; i < sk_X509_NAME_ENTRY_num((STACK_OF(X509_NAME_ENTRY) *)
+ X509_NAME_get_entries(xsn)); i++) {
+ xsne = sk_X509_NAME_ENTRY_value((STACK_OF(X509_NAME_ENTRY) *)
+ X509_NAME_get_entries(xsn), i);
+ nid = OBJ_obj2nid((ASN1_OBJECT *)X509_NAME_ENTRY_get_object(xsne));
+ if (nid == NID_commonName) {
+ data_ptr = X509_NAME_ENTRY_get_data_ptr(xsne);
+ data_len = X509_NAME_ENTRY_get_data_len(xsne);
+ *cppCN = apr_palloc(p, data_len+1);
+ apr_cpystrn(*cppCN, (char *)data_ptr, data_len+1);
+ (*cppCN)[data_len] = NUL;
+#ifdef CHARSET_EBCDIC
+ ascii2ebcdic(*cppCN, *cppCN, strlen(*cppCN));
+#endif
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
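+
+/*
+ * Illustrative usage sketch (variable names are placeholders): extract
+ * the basic-constraints CA flag and the subject CN of a certificate:
+ *
+ *     int ca, pathlen;
+ *     char *cn;
+ *     if (SSL_X509_getBC(cert, &ca, &pathlen) && ca)
+ *         ... cert is a CA certificate ...
+ *     if (SSL_X509_getCN(p, cert, &cn))
+ *         ... cn now holds the NUL-terminated common name ...
+ */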
+
+/* _________________________________________________________________
+**
+** Low-Level CA Certificate Loading
+** _________________________________________________________________
+*/
+
+BOOL SSL_X509_INFO_load_file(apr_pool_t *ptemp,
+ STACK_OF(X509_INFO) *sk,
+ const char *filename)
+{
+ BIO *in;
+
+ if (!(in = BIO_new(BIO_s_file()))) {
+ return FALSE;
+ }
+
+ if (BIO_read_filename(in, MODSSL_PCHAR_CAST filename) <= 0) {
+ BIO_free(in);
+ return FALSE;
+ }
+
+ ERR_clear_error();
+
+ modssl_PEM_X509_INFO_read_bio(in, sk, NULL, NULL);
+
+ BIO_free(in);
+
+ return TRUE;
+}
+
+BOOL SSL_X509_INFO_load_path(apr_pool_t *ptemp,
+ STACK_OF(X509_INFO) *sk,
+ const char *pathname)
+{
+ /* XXX: this dir read code is exactly the same as that in
+ * ssl_engine_init.c, only the call to handle the fullname is different,
+ * should fold the duplication.
+ */
+ apr_dir_t *dir;
+ apr_finfo_t dirent;
+ apr_int32_t finfo_flags = APR_FINFO_TYPE|APR_FINFO_NAME;
+ const char *fullname;
+ BOOL ok = FALSE;
+
+ if (apr_dir_open(&dir, pathname, ptemp) != APR_SUCCESS) {
+ return FALSE;
+ }
+
+ while ((apr_dir_read(&dirent, finfo_flags, dir)) == APR_SUCCESS) {
+ if (dirent.filetype == APR_DIR) {
+ continue; /* don't try to load directories */
+ }
+
+ fullname = apr_pstrcat(ptemp,
+ pathname, "/", dirent.name,
+ NULL);
+
+ if (SSL_X509_INFO_load_file(ptemp, sk, fullname)) {
+ ok = TRUE;
+ }
+ }
+
+ apr_dir_close(dir);
+
+ return ok;
+}
+
+/* _________________________________________________________________
+**
+** Extra Server Certificate Chain Support
+** _________________________________________________________________
+*/
+
+/*
+ * Read a file that optionally contains the server certificate in PEM
+ * format, possibly followed by a sequence of CA certificates that
+ * should be sent to the peer in the SSL Certificate message.
+ */
+int SSL_CTX_use_certificate_chain(
+ SSL_CTX *ctx, char *file, int skipfirst, modssl_read_bio_cb_fn *cb)
+{
+ BIO *bio;
+ X509 *x509;
+ unsigned long err;
+ int n;
+ STACK *extra_certs;
+
+ if ((bio = BIO_new(BIO_s_file_internal())) == NULL)
+ return -1;
+ if (BIO_read_filename(bio, file) <= 0) {
+ BIO_free(bio);
+ return -1;
+ }
+ /* optionally skip a leading server certificate */
+ if (skipfirst) {
+ if ((x509 = modssl_PEM_read_bio_X509(bio, NULL, cb, NULL)) == NULL) {
+ BIO_free(bio);
+ return -1;
+ }
+ X509_free(x509);
+ }
+ /* free a perhaps already configured extra chain */
+ extra_certs=SSL_CTX_get_extra_certs(ctx);
+ if (extra_certs != NULL) {
+ sk_X509_pop_free((STACK_OF(X509) *)extra_certs, X509_free);
+ SSL_CTX_set_extra_certs(ctx,NULL);
+ }
+ /* create new extra chain by loading the certs */
+ n = 0;
+ while ((x509 = modssl_PEM_read_bio_X509(bio, NULL, cb, NULL)) != NULL) {
+ if (!SSL_CTX_add_extra_chain_cert(ctx, x509)) {
+ X509_free(x509);
+ BIO_free(bio);
+ return -1;
+ }
+ n++;
+ }
+ /* Make sure that the error, if any, is just an EOF (no PEM start line) */
+ if ((err = ERR_peek_error()) > 0) {
+ if (!( ERR_GET_LIB(err) == ERR_LIB_PEM
+ && ERR_GET_REASON(err) == PEM_R_NO_START_LINE)) {
+ BIO_free(bio);
+ return -1;
+ }
+ while (ERR_get_error() > 0) ;
+ }
+ BIO_free(bio);
+ return n;
+}
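+
+/*
+ * Illustrative usage sketch (the filename is a placeholder): load a chain
+ * file whose first entry repeats the server certificate, skipping that
+ * first entry, and check how many extra certs were added:
+ *
+ *     int n = SSL_CTX_use_certificate_chain(ctx, "chain.pem", 1, NULL);
+ *     if (n < 0)
+ *         ... report failure ...
+ */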
+
+/* _________________________________________________________________
+**
+** Session Stuff
+** _________________________________________________________________
+*/
+
+char *SSL_SESSION_id2sz(unsigned char *id, int idlen,
+ char *str, int strsize)
+{
+ char *cp;
+ int n;
+
+ cp = str;
+ for (n = 0; n < idlen && n < SSL_MAX_SSL_SESSION_ID_LENGTH; n++) {
+ apr_snprintf(cp, strsize - (cp-str), "%02X", id[n]);
+ cp += 2;
+ }
+ *cp = NUL;
+ return str;
+}
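+
+/*
+ * Illustrative usage sketch (assuming direct access to the SSL_SESSION
+ * fields as in OpenSSL 0.9.x): format a session id for logging, using the
+ * SSL_SESSION_ID_STRING_LEN bound from ssl_util_ssl.h:
+ *
+ *     char buf[SSL_SESSION_ID_STRING_LEN];
+ *     SSL_SESSION_id2sz(session->session_id, session->session_id_length,
+ *                       buf, sizeof(buf));
+ */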
+
+/* sslc+OpenSSL compat */
+
+int modssl_session_get_time(SSL_SESSION *session)
+{
+#ifdef OPENSSL_VERSION_NUMBER
+ return SSL_SESSION_get_time(session);
+#else /* assume sslc */
+ CRYPTO_TIME_T ct;
+ SSL_SESSION_get_time(session, &ct);
+ return CRYPTO_time_to_int(&ct);
+#endif
+}
+
+#ifndef SSLC_VERSION_NUMBER
+#define SSLC_VERSION_NUMBER 0x0000
+#endif
+
+DH *modssl_dh_configure(unsigned char *p, int plen,
+ unsigned char *g, int glen)
+{
+ DH *dh;
+
+ if (!(dh = DH_new())) {
+ return NULL;
+ }
+
+#if defined(OPENSSL_VERSION_NUMBER) || (SSLC_VERSION_NUMBER < 0x2000)
+ dh->p = BN_bin2bn(p, plen, NULL);
+ dh->g = BN_bin2bn(g, glen, NULL);
+ if (!(dh->p && dh->g)) {
+ DH_free(dh);
+ return NULL;
+ }
+#else
+ R_EITEMS_add(dh->data, PK_TYPE_DH, PK_DH_P, 0, p, plen, R_EITEMS_PF_COPY);
+ R_EITEMS_add(dh->data, PK_TYPE_DH, PK_DH_G, 0, g, glen, R_EITEMS_PF_COPY);
+#endif
+
+ return dh;
+}
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_ssl.h b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_ssl.h
new file mode 100644
index 00000000..d5c48f16
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_ssl.h
@@ -0,0 +1,93 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_util_ssl.h
+ * Additional Utility Functions for OpenSSL
+ */
+
+#ifndef __SSL_UTIL_SSL_H__
+#define __SSL_UTIL_SSL_H__
+
+/*
+ * Determine SSL library version number
+ */
+#define SSL_NIBBLE(x,n) ((x >> (n * 4)) & 0xF)
+
+#ifdef OPENSSL_VERSION_NUMBER
+#define SSL_LIBRARY_VERSION OPENSSL_VERSION_NUMBER
+#define SSL_LIBRARY_NAME "OpenSSL"
+#define SSL_LIBRARY_TEXT OPENSSL_VERSION_TEXT
+#define SSL_LIBRARY_DYNTEXT SSLeay_version(SSLEAY_VERSION)
+#elif defined(SSLC_VERSION_NUMBER)
+#define SSL_LIBRARY_VERSION SSLC_VERSION_NUMBER
+#define SSL_LIBRARY_NAME "SSL-C"
+#define SSL_LIBRARY_TEXT { 'S', 'S', 'L', '-', 'C', ' ', \
+ '0' + SSL_NIBBLE(SSLC_VERSION_NUMBER,3), '.', \
+ '0' + SSL_NIBBLE(SSLC_VERSION_NUMBER,2), '.', \
+ '0' + SSL_NIBBLE(SSLC_VERSION_NUMBER,1), '.', \
+ '0' + SSL_NIBBLE(SSLC_VERSION_NUMBER,0), 0 }
+#define SSL_LIBRARY_DYNTEXT SSLC_library_info(SSLC_INFO_VERSION)
+#elif !defined(SSL_LIBRARY_VERSION)
+#define SSL_LIBRARY_VERSION 0x0000
+#define SSL_LIBRARY_NAME "OtherSSL"
+#define SSL_LIBRARY_TEXT "OtherSSL 0.0.0 00 XXX 0000"
+#define SSL_LIBRARY_DYNTEXT "OtherSSL 0.0.0 00 XXX 0000"
+#endif
+
+/*
+ * Maximum length of a DER encoded session.
+ * FIXME: There is no define in OpenSSL, but OpenSSL uses 1024*10,
+ * so this value should be ok, although it gives us no warm feeling.
+ */
+#define SSL_SESSION_MAX_DER 1024*10
+
+/* max length for SSL_SESSION_id2sz */
+#define SSL_SESSION_ID_STRING_LEN \
+ ((SSL_MAX_SSL_SESSION_ID_LENGTH + 1) * 2)
+
+/*
+ * Additional Functions
+ */
+void SSL_init_app_data2_idx(void);
+void *SSL_get_app_data2(SSL *);
+void SSL_set_app_data2(SSL *, void *);
+X509 *SSL_read_X509(char *, X509 **, modssl_read_bio_cb_fn *);
+EVP_PKEY *SSL_read_PrivateKey(char *, EVP_PKEY **, modssl_read_bio_cb_fn *, void *);
+int SSL_smart_shutdown(SSL *ssl);
+X509_STORE *SSL_X509_STORE_create(char *, char *);
+int SSL_X509_STORE_lookup(X509_STORE *, int, X509_NAME *, X509_OBJECT *);
+char *SSL_make_ciphersuite(apr_pool_t *, SSL *);
+BOOL SSL_X509_isSGC(X509 *);
+BOOL SSL_X509_getBC(X509 *, int *, int *);
+BOOL SSL_X509_getCN(apr_pool_t *, X509 *, char **);
+BOOL SSL_X509_INFO_load_file(apr_pool_t *, STACK_OF(X509_INFO) *, const char *);
+BOOL SSL_X509_INFO_load_path(apr_pool_t *, STACK_OF(X509_INFO) *, const char *);
+int SSL_CTX_use_certificate_chain(SSL_CTX *, char *, int, modssl_read_bio_cb_fn *);
+char *SSL_SESSION_id2sz(unsigned char *, int, char *, int);
+
+/* util functions for OpenSSL+sslc compat */
+int modssl_session_get_time(SSL_SESSION *session);
+
+DH *modssl_dh_configure(unsigned char *p, int plen,
+ unsigned char *g, int glen);
+
+#endif /* __SSL_UTIL_SSL_H__ */
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_table.c b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_table.c
new file mode 100644
index 00000000..5eb98ec8
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_table.c
@@ -0,0 +1,2518 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_util_table.c
+ * High Performance Hash Table Functions
+ */
+
+/*
+ * Generic hash table handler
+ * Table 4.1.0 July-28-1998
+ *
+ * This library is a generic open hash table with buckets and
+ * linked lists. It is pretty high performance. Each element
+ * has a key and a data. The user indexes on the key to find the
+ * data.
+ *
+ * Copyright 1998 by Gray Watson <gray@letters.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose and without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies,
+ * and that the name of Gray Watson not be used in advertising or
+ * publicity pertaining to distribution of the document or software
+ * without specific, written prior permission.
+ *
+ * Gray Watson makes no representations about the suitability of the
+ * software described herein for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ *
+ * Modified in March 1999 by Ralf S. Engelschall <rse@engelschall.com>
+ * for use in the mod_ssl project:
+ * o merged table_loc.h header into table.c
+ * o removed fillproto-comments from table.h
+ * o removed mmap() support because it's too unportable
+ * o added support for MM library via ta_{malloc,calloc,realloc,free}
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+/* forward definitions for table.h */
+typedef struct table_st table_t;
+typedef struct table_entry_st table_entry_t;
+
+#define TABLE_PRIVATE
+#include "ssl_util_table.h"
+#include "mod_ssl.h"
+
+/****************************** local defines ******************************/
+
+#ifndef BITSPERBYTE
+#define BITSPERBYTE 8
+#endif
+#ifndef BITS
+#define BITS(type) (BITSPERBYTE * (int)sizeof(type))
+#endif
+
+#define TABLE_MAGIC 0xBADF00D /* very magic magicness */
+#define LINEAR_MAGIC 0xAD00D00 /* magic value for linear struct */
+#define DEFAULT_SIZE 1024 /* default table size */
+#define MAX_ALIGNMENT 128 /* max alignment value */
+#define MAX_SORT_SPLITS 128 /* qsort can handle 2^128 entries */
+
+/* returns 1 when we should grow or shrink the table */
+#define SHOULD_TABLE_GROW(tab) ((tab)->ta_entry_n > (tab)->ta_bucket_n * 2)
+#define SHOULD_TABLE_SHRINK(tab) ((tab)->ta_entry_n < (tab)->ta_bucket_n / 2)
+
+/*
+ * void HASH_MIX
+ *
+ * DESCRIPTION:
+ *
+ * Mix 3 32-bit values reversibly. For every delta with one or two bits
+ * set, and the deltas of all three high bits or all three low bits,
+ * whether the original value of a,b,c is almost all zero or is
+ * uniformly distributed:
+ *
+ * If HASH_MIX() is run forward or backward, at least 32 bits in a,b,c
+ * have at least 1/4 probability of changing. If mix() is run
+ * forward, every bit of c will change between 1/3 and 2/3 of the
+ * time. (Well, 22/100 and 78/100 for some 2-bit deltas.)
+ *
+ * HASH_MIX() takes 36 machine instructions, but only 18 cycles on a
+ * superscalar machine (like a Pentium or a Sparc). No faster mixer
+ * seems to work, that's the result of my brute-force search. There
+ * were about 2^68 hashes to choose from. I only tested about a
+ * billion of those.
+ */
+#define HASH_MIX(a, b, c) \
+ do { \
+ a -= b; a -= c; a ^= (c >> 13); \
+ b -= c; b -= a; b ^= (a << 8); \
+ c -= a; c -= b; c ^= (b >> 13); \
+ a -= b; a -= c; a ^= (c >> 12); \
+ b -= c; b -= a; b ^= (a << 16); \
+ c -= a; c -= b; c ^= (b >> 5); \
+ a -= b; a -= c; a ^= (c >> 3); \
+ b -= c; b -= a; b ^= (a << 10); \
+ c -= a; c -= b; c ^= (b >> 15); \
+ } while(0)
+
+#define TABLE_POINTER(table, type, pnt) (pnt)
+
+/*
+ * Macros to get at the key and the data pointers
+ */
+#define ENTRY_KEY_BUF(entry_p) ((entry_p)->te_key_buf)
+#define ENTRY_DATA_BUF(tab_p, entry_p) \
+ (ENTRY_KEY_BUF(entry_p) + (entry_p)->te_key_size)
+
+/*
+ * Table structures...
+ */
+
+/*
+ * HACK: this should be equivalent to table_entry_t without the key_buf
+ * char. We use it with the entry_size() function below, which works
+ * around the lack of the [0] GNU zero-length-array hack. We use the
+ * table_entry_t structure to better map the memory and make things
+ * faster.
+ */
+typedef struct table_shell_st {
+ unsigned int te_key_size; /* size of data */
+ unsigned int te_data_size; /* size of data */
+ struct table_shell_st *te_next_p; /* pointer to next in the list */
+ /* NOTE: this does not have the te_key_buf field here */
+} table_shell_t;
+
+/*
+ * Elements in the bucket linked-lists. The key[1] is the start of
+ * the key with the rest of the key and all of the data information
+ * packed in memory directly after the end of this structure.
+ *
+ * NOTE: if this structure is changed, the table_shell_t must be changed
+ * to match.
+ */
+struct table_entry_st {
+ unsigned int te_key_size; /* size of data */
+ unsigned int te_data_size; /* size of data */
+ struct table_entry_st *te_next_p; /* pointer to next in the list */
+ unsigned char te_key_buf[1]; /* 1st byte of key buf */
+};
+
+/* external structure so debuggers are able to see behind the void pointers */
+typedef table_entry_t table_entry_ext_t;
+
+/* main table structure */
+struct table_st {
+ unsigned int ta_magic; /* magic number */
+ unsigned int ta_flags; /* table's flags defined in table.h */
+ unsigned int ta_bucket_n; /* num of buckets, should be 2^X */
+ unsigned int ta_entry_n; /* num of entries in all buckets */
+ unsigned int ta_data_align; /* data alignment value */
+ table_entry_t **ta_buckets; /* array of linked lists */
+ table_linear_t ta_linear; /* linear tracking */
+ unsigned long ta_file_size; /* size of on-disk space */
+ void *(*ta_malloc)(void *opt_param, size_t size);
+ void *(*ta_calloc)(void *opt_param, size_t number, size_t size);
+ void *(*ta_realloc)(void *opt_param, void *ptr, size_t size);
+ void (*ta_free)(void *opt_param, void *ptr);
+ void *opt_param;
+};
+
+/* external table structure for debuggers */
+typedef table_t table_ext_t;
+
+/* local comparison functions */
+typedef int (*compare_t) (const void *element1_p, const void *element2_p,
+ table_compare_t user_compare,
+ const table_t * table_p);
+
+/*
+ * to map error to string
+ */
+typedef struct {
+ int es_error; /* error number */
+ char *es_string; /* associated string */
+} error_str_t;
+
+static error_str_t errors[] =
+{
+ {TABLE_ERROR_NONE, "no error"},
+ {TABLE_ERROR_PNT, "invalid table pointer"},
+ {TABLE_ERROR_ARG_NULL, "buffer argument is null"},
+ {TABLE_ERROR_SIZE, "incorrect size argument"},
+ {TABLE_ERROR_OVERWRITE, "key exists and no overwrite"},
+ {TABLE_ERROR_NOT_FOUND, "key does not exist"},
+ {TABLE_ERROR_ALLOC, "error allocating memory"},
+ {TABLE_ERROR_LINEAR, "linear access not in progress"},
+ {TABLE_ERROR_OPEN, "could not open file"},
+ {TABLE_ERROR_SEEK, "could not seek to position in file"},
+ {TABLE_ERROR_READ, "could not read from file"},
+ {TABLE_ERROR_WRITE, "could not write to file"},
+ {TABLE_ERROR_EMPTY, "table is empty"},
+ {TABLE_ERROR_NOT_EMPTY, "table contains data"},
+ {TABLE_ERROR_ALIGNMENT, "invalid alignment value"},
+ {0}
+};
+
+#define INVALID_ERROR "invalid error code"
+
+
+/********************** wrappers for system functions ************************/
+static void *sys_malloc(void *param, size_t size)
+{
+ return malloc(size);
+}
+
+static void *sys_calloc(void *param, size_t size1, size_t size2)
+{
+ return calloc(size1, size2);
+}
+
+static void *sys_realloc(void *param, void *ptr, size_t size)
+{
+ return realloc(ptr, size);
+}
+
+static void sys_free(void *param, void *ptr)
+{
+ free(ptr);
+}
+
+/****************************** local functions ******************************/
+
+/*
+ * static table_entry_t *first_entry
+ *
+ * DESCRIPTION:
+ *
+ * Return the first entry in the table. It will set the linear
+ * structure counter to the position of the first entry.
+ *
+ * RETURNS:
+ *
+ * Success: A pointer to the first entry in the table.
+ *
+ * Failure: NULL if there is no first entry.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table whose first entry we are finding.
+ *
+ * linear_p - Pointer to a linear structure which we will set to the
+ * position of the first entry.
+ */
+static table_entry_t *first_entry(table_t * table_p,
+ table_linear_t * linear_p)
+{
+ table_entry_t *entry_p;
+ unsigned int bucket_c = 0;
+
+ /* look for the first non-empty bucket */
+ for (bucket_c = 0; bucket_c < table_p->ta_bucket_n; bucket_c++) {
+ entry_p = table_p->ta_buckets[bucket_c];
+ if (entry_p != NULL) {
+ if (linear_p != NULL) {
+ linear_p->tl_bucket_c = bucket_c;
+ linear_p->tl_entry_c = 0;
+ }
+ return TABLE_POINTER(table_p, table_entry_t *, entry_p);
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * static table_entry_t *next_entry
+ *
+ * DESCRIPTION:
+ *
+ * Return the next entry in the table which is past the position in
+ * our linear pointer. It will advance the linear structure counters.
+ *
+ * RETURNS:
+ *
+ * Success: A pointer to the next entry in the table.
+ *
+ * Failure: NULL.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table whose next entry we are finding.
+ *
+ * linear_p - Pointer to a linear structure which we will advance and
+ * then find the corresponding entry.
+ *
+ * error_p - Pointer to an integer which when the routine returns will
+ * contain a table error code.
+ */
+static table_entry_t *next_entry(table_t * table_p, table_linear_t * linear_p,
+ int *error_p)
+{
+ table_entry_t *entry_p;
+ int entry_c;
+
+ /* can't next if we haven't first-ed */
+ if (linear_p == NULL) {
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_LINEAR;
+ return NULL;
+ }
+
+ if (linear_p->tl_bucket_c >= table_p->ta_bucket_n) {
+ /*
+ * NOTE: this might happen if we delete an item and the number of
+ * table buckets shrinks as a result.
+ */
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_NOT_FOUND;
+ return NULL;
+ }
+
+ linear_p->tl_entry_c++;
+
+ /* find the entry which is the nth in the list */
+ entry_p = table_p->ta_buckets[linear_p->tl_bucket_c];
+ /* NOTE: we swap the order here to be more efficient */
+ for (entry_c = linear_p->tl_entry_c; entry_c > 0; entry_c--) {
+ /* did we reach the end of the list? */
+ if (entry_p == NULL)
+ break;
+ entry_p = TABLE_POINTER(table_p, table_entry_t *, entry_p)->te_next_p;
+ }
+
+ /* did we find an entry in the current bucket? */
+ if (entry_p != NULL) {
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_NONE;
+ return TABLE_POINTER(table_p, table_entry_t *, entry_p);
+ }
+
+ /* find the first entry in the next non-empty bucket */
+
+ linear_p->tl_entry_c = 0;
+ for (linear_p->tl_bucket_c++; linear_p->tl_bucket_c < table_p->ta_bucket_n;
+ linear_p->tl_bucket_c++) {
+ entry_p = table_p->ta_buckets[linear_p->tl_bucket_c];
+ if (entry_p != NULL) {
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_NONE;
+ return TABLE_POINTER(table_p, table_entry_t *, entry_p);
+ }
+ }
+
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_NOT_FOUND;
+ return NULL;
+}
+
+/*
+ * static unsigned int hash
+ *
+ * DESCRIPTION:
+ *
+ * Hash a variable-length key into a 32-bit value. Every bit of the
+ * key affects every bit of the return value. Every 1-bit and 2-bit
+ * delta achieves avalanche. About (6 * len + 35) instructions. The
+ * best hash table sizes are powers of 2. There is no need to use mod
+ * (sooo slow!). If you need less than 32 bits, use a bitmask. For
+ * example, if you need only 10 bits, do h = (h & hashmask(10)); In
+ * which case, the hash table should have hashsize(10) elements.
+ *
+ * By Bob Jenkins, 1996. bob_jenkins@compuserve.com. You may use
+ * this code any way you wish, private, educational, or commercial.
+ * It's free. See
+ * http://ourworld.compuserve.com/homepages/bob_jenkins/evahash.htm
+ * Use for hash table lookup, or anything where one collision in 2^^32
+ * is acceptable. Do NOT use for cryptographic purposes.
+ *
+ * RETURNS:
+ *
+ * Returns a 32-bit hash value.
+ *
+ * ARGUMENTS:
+ *
+ * key - Key (the unaligned variable-length array of bytes) that we
+ * are hashing.
+ *
+ * length - Length of the key in bytes.
+ *
+ * init_val - Initialization value of the hash if you need to hash a
+ * number of strings together. For instance, if you are hashing N
+ * strings (unsigned char **)keys, do it like this:
+ *
+ * for (i=0, h=0; i<N; ++i) h = hash( keys[i], len[i], h);
+ */
+static unsigned int hash(const unsigned char *key,
+ const unsigned int length,
+ const unsigned int init_val)
+{
+ const unsigned char *key_p = key;
+ unsigned int a, b, c, len;
+
+ /* set up the internal state */
+ a = 0x9e3779b9; /* the golden ratio; an arbitrary value */
+ b = 0x9e3779b9;
+ c = init_val; /* the previous hash value */
+
+ /* handle most of the key */
+ for (len = length; len >= 12; len -= 12) {
+ a += (key_p[0]
+ + ((unsigned long) key_p[1] << 8)
+ + ((unsigned long) key_p[2] << 16)
+ + ((unsigned long) key_p[3] << 24));
+ b += (key_p[4]
+ + ((unsigned long) key_p[5] << 8)
+ + ((unsigned long) key_p[6] << 16)
+ + ((unsigned long) key_p[7] << 24));
+ c += (key_p[8]
+ + ((unsigned long) key_p[9] << 8)
+ + ((unsigned long) key_p[10] << 16)
+ + ((unsigned long) key_p[11] << 24));
+ HASH_MIX(a, b, c);
+ key_p += 12;
+ }
+
+ c += length;
+
+ /* all the case statements fall through to the next */
+ switch (len) {
+ case 11:
+ c += ((unsigned long) key_p[10] << 24);
+ case 10:
+ c += ((unsigned long) key_p[9] << 16);
+ case 9:
+ c += ((unsigned long) key_p[8] << 8);
+ /* the first byte of c is reserved for the length */
+ case 8:
+ b += ((unsigned long) key_p[7] << 24);
+ case 7:
+ b += ((unsigned long) key_p[6] << 16);
+ case 6:
+ b += ((unsigned long) key_p[5] << 8);
+ case 5:
+ b += key_p[4];
+ case 4:
+ a += ((unsigned long) key_p[3] << 24);
+ case 3:
+ a += ((unsigned long) key_p[2] << 16);
+ case 2:
+ a += ((unsigned long) key_p[1] << 8);
+ case 1:
+ a += key_p[0];
+ /* case 0: nothing left to add */
+ }
+ HASH_MIX(a, b, c);
+
+ return c;
+}
+
+/*
+ * static int entry_size
+ *
+ * DESCRIPTION:
+ *
+ * Calculates the appropriate size of an entry to include the key and
+ * data sizes as well as any associated alignment to the data.
+ *
+ * RETURNS:
+ *
+ * The associated size of the entry.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table associated with the entries whose size we are
+ * determining.
+ *
+ * key_size - Size of the entry key.
+ *
+ * data - Size of the entry data.
+ */
+static int entry_size(const table_t * table_p, const unsigned int key_size,
+ const unsigned int data_size)
+{
+ int size, left;
+
+ /* initial size -- key is already aligned if right after struct */
+ size = sizeof(struct table_shell_st) + key_size;
+
+ /* if there is no alignment then it is easy */
+ if (table_p->ta_data_align == 0)
+ return size + data_size;
+ /* add in our alignment */
+ left = size & (table_p->ta_data_align - 1);
+ if (left > 0)
+ size += table_p->ta_data_align - left;
+ /* we add the data size here after the alignment */
+ size += data_size;
+
+ return size;
+}
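+
+/*
+ * Worked example (assuming sizeof(struct table_shell_st) == 12 on this
+ * platform): with ta_data_align == 8 and key_size == 5, the unpadded size
+ * is 12 + 5 = 17, 17 & 7 == 1, so 8 - 1 = 7 bytes of padding are added
+ * before the data, giving 24 + data_size in total.
+ */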
+
+/*
+ * static unsigned char *entry_data_buf
+ *
+ * DESCRIPTION:
+ *
+ * Companion to the ENTRY_DATA_BUF macro but this handles any
+ * associated alignment to the data in the entry.
+ *
+ * RETURNS:
+ *
+ * Pointer to the data segment of the entry.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table associated with the entry.
+ *
+ * entry_p - Entry whose data pointer we are determining.
+ */
+static unsigned char *entry_data_buf(const table_t * table_p,
+ const table_entry_t * entry_p)
+{
+ const unsigned char *buf_p;
+ int size, pad;
+
+ buf_p = entry_p->te_key_buf + entry_p->te_key_size;
+
+ /* if there is no alignment then it is easy */
+ if (table_p->ta_data_align == 0)
+ return (unsigned char *) buf_p;
+ /* we need the size of the space before the data */
+ size = sizeof(struct table_shell_st) + entry_p->te_key_size;
+
+ /* add in our alignment */
+ pad = size & (table_p->ta_data_align - 1);
+ if (pad > 0)
+ pad = table_p->ta_data_align - pad;
+ return (unsigned char *) buf_p + pad;
+}
+
+/******************************* sort routines *******************************/
+
+/*
+ * static int local_compare
+ *
+ * DESCRIPTION:
+ *
+ * Compare two entries by their keys using memcmp (the user comparison
+ * function is ignored here).
+ *
+ * RETURNS:
+ *
+ * < 0, == 0, or > 0 depending on whether p1 is < p2, == p2, or > p2.
+ *
+ * ARGUMENTS:
+ *
+ * p1 - First entry pointer to compare.
+ *
+ * p2 - Second entry pointer to compare.
+ *
+ * compare - User comparison function. Ignored.
+ *
+ * table_p - Associated table being ordered. Ignored.
+ */
+static int local_compare(const void *p1, const void *p2,
+ table_compare_t compare, const table_t * table_p)
+{
+ const table_entry_t *const *ent1_p = p1, *const *ent2_p = p2;
+ int cmp;
+ unsigned int size;
+
+ /* compare as many bytes as we can */
+ size = (*ent1_p)->te_key_size;
+ if ((*ent2_p)->te_key_size < size)
+ size = (*ent2_p)->te_key_size;
+ cmp = memcmp(ENTRY_KEY_BUF(*ent1_p), ENTRY_KEY_BUF(*ent2_p), size);
+ /* if equal over the common size, the entry with the longer key is larger */
+ if (cmp == 0)
+ cmp = (*ent1_p)->te_key_size - (*ent2_p)->te_key_size;
+ return cmp;
+}
+
+/*
+ * static int external_compare
+ *
+ * DESCRIPTION:
+ *
+ * Compare two entries by calling the user's comparison function.
+ *
+ * RETURNS:
+ *
+ * < 0, == 0, or > 0 depending on whether p1 is < p2, == p2, or > p2.
+ *
+ * ARGUMENTS:
+ *
+ * p1 - First entry pointer to compare.
+ *
+ * p2 - Second entry pointer to compare.
+ *
+ * user_compare - User comparison function.
+ *
+ * table_p - Associated table being ordered.
+ */
+static int external_compare(const void *p1, const void *p2,
+ table_compare_t user_compare,
+ const table_t * table_p)
+{
+ const table_entry_t *const *ent1_p = p1, *const *ent2_p = p2;
+ /* since we know we are not aligned we can use the ENTRY_DATA_BUF macro */
+ return user_compare(ENTRY_KEY_BUF(*ent1_p), (*ent1_p)->te_key_size,
+ ENTRY_DATA_BUF(table_p, *ent1_p),
+ (*ent1_p)->te_data_size,
+ ENTRY_KEY_BUF(*ent2_p), (*ent2_p)->te_key_size,
+ ENTRY_DATA_BUF(table_p, *ent2_p),
+ (*ent2_p)->te_data_size);
+}
+
+/*
+ * static int external_compare_align
+ *
+ * DESCRIPTION:
+ *
+ * Compare two entries by calling the user's comparison function, taking
+ * the configured data alignment into account.
+ *
+ * RETURNS:
+ *
+ * < 0, == 0, or > 0 depending on whether p1 is < p2, == p2, or > p2.
+ *
+ * ARGUMENTS:
+ *
+ * p1 - First entry pointer to compare.
+ *
+ * p2 - Second entry pointer to compare.
+ *
+ * user_compare - User comparison function.
+ *
+ * table_p - Associated table being ordered.
+ */
+static int external_compare_align(const void *p1, const void *p2,
+ table_compare_t user_compare,
+ const table_t * table_p)
+{
+ const table_entry_t *const *ent1_p = p1, *const *ent2_p = p2;
+ /* since we are aligned we have to use the entry_data_buf function */
+ return user_compare(ENTRY_KEY_BUF(*ent1_p), (*ent1_p)->te_key_size,
+ entry_data_buf(table_p, *ent1_p),
+ (*ent1_p)->te_data_size,
+ ENTRY_KEY_BUF(*ent2_p), (*ent2_p)->te_key_size,
+ entry_data_buf(table_p, *ent2_p),
+ (*ent2_p)->te_data_size);
+}
+
+/*
+ * static void split
+ *
+ * DESCRIPTION:
+ *
+ * This sorts an array of table entry pointers via the quick sort
+ * algorithm (it's pretty quick).
+ *
+ * RETURNS:
+ *
+ * None.
+ *
+ * ARGUMENTS:
+ *
+ * first_p - Start of the list that we are splitting.
+ *
+ * last_p - Last entry in the list that we are splitting.
+ *
+ * compare - Comparison function which is handling the actual
+ * elements. This is either a local function or a function to setup
+ * the problem element key and data pointers which then hands off to
+ * the user function.
+ *
+ * user_compare - User comparison function. Could be NULL if we are
+ * just using a local comparison function.
+ *
+ * table_p - Associated table being sorted.
+ */
+static void split(void *first_p, void *last_p, compare_t compare,
+ table_compare_t user_compare, table_t * table_p)
+{
+ void *pivot_p, *left_p, *right_p, *left_last_p, *right_first_p;
+ void *firsts[MAX_SORT_SPLITS], *lasts[MAX_SORT_SPLITS];
+ int split_c = 0;
+
+ for (;;) {
+
+ /* no need to split the list if it is < 2 elements */
+ while (first_p >= last_p) {
+ if (split_c == 0) {
+ /* we are done */
+ return;
+ }
+ split_c--;
+ first_p = firsts[split_c];
+ last_p = lasts[split_c];
+ }
+
+ left_p = first_p;
+ right_p = last_p;
+ pivot_p = first_p;
+
+ do {
+ /* scan from right hand side */
+ while (right_p > left_p
+ && compare(right_p, pivot_p, user_compare, table_p) > 0)
+ right_p = (char *) right_p - sizeof(table_entry_t *);
+ /* scan from left hand side */
+ while (right_p > left_p
+ && compare(pivot_p, left_p, user_compare, table_p) >= 0)
+ left_p = (char *) left_p + sizeof(table_entry_t *);
+ /* if the pointers haven't met then swap values */
+ if (right_p > left_p) {
+ /* swap_bytes(left_p, right_p) */
+ table_entry_t *temp;
+
+ temp = *(table_entry_t **) left_p;
+ *(table_entry_t **) left_p = *(table_entry_t **) right_p;
+ *(table_entry_t **) right_p = temp;
+ }
+ } while (right_p > left_p);
+
+ /* now we swap the pivot with the right-hand side */
+ {
+ /* swap_bytes(pivot_p, right_p); */
+ table_entry_t *temp;
+
+ temp = *(table_entry_t **) pivot_p;
+ *(table_entry_t **) pivot_p = *(table_entry_t **) right_p;
+ *(table_entry_t **) right_p = temp;
+ }
+ pivot_p = right_p;
+
+ /* save the section to the right of the pivot in our stack */
+ right_first_p = (char *) pivot_p + sizeof(table_entry_t *);
+ left_last_p = (char *) pivot_p - sizeof(table_entry_t *);
+
+ /* do we need to save the righthand side? */
+ if (right_first_p < last_p) {
+ if (split_c >= MAX_SORT_SPLITS) {
+ /* sanity check here -- we should never get here */
+ abort();
+ }
+ firsts[split_c] = right_first_p;
+ lasts[split_c] = last_p;
+ split_c++;
+ }
+
+ /* do the left hand side of the pivot */
+ /* first_p = first_p */
+ last_p = left_last_p;
+ }
+}
+
+/*************************** exported routines *******************************/
+
+/*
+ * table_t *table_alloc
+ *
+ * DESCRIPTION:
+ *
+ * Allocate a new table structure.
+ *
+ * RETURNS:
+ *
+ * A pointer to the new table structure which must be passed to
+ * table_free to be deallocated. On error a NULL is returned.
+ *
+ * ARGUMENTS:
+ *
+ * bucket_n - Number of buckets for the hash table. Our current hash
+ * function works best with power-of-two bucket counts. Set to 0 to take
+ * the library default of 1024.
+ *
+ * error_p - Pointer to an integer which, if not NULL, will contain a
+ * table error code.
+ *
+ * malloc_f, realloc_f, free_f - Pointers to malloc(3)-, realloc(3)-
+ * and free(3)-style functions.
+ */
+table_t *table_alloc(const unsigned int bucket_n, int *error_p,
+ void *(*malloc_f)(void *opt_param, size_t size),
+ void *(*calloc_f)(void *opt_param, size_t number, size_t size),
+ void *(*realloc_f)(void *opt_param, void *ptr, size_t size),
+ void (*free_f)(void *opt_param, void *ptr), void *opt_param)
+{
+ table_t *table_p = NULL;
+ unsigned int buck_n;
+
+ /* allocate a table structure */
+ if (malloc_f != NULL)
+ table_p = malloc_f(opt_param, sizeof(table_t));
+ else
+ table_p = malloc(sizeof(table_t));
+ if (table_p == NULL) {
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_ALLOC;
+ return NULL;
+ }
+
+ if (bucket_n > 0)
+ buck_n = bucket_n;
+ else
+ buck_n = DEFAULT_SIZE;
+ /* allocate the buckets which are NULLed */
+ if (calloc_f != NULL)
+ table_p->ta_buckets = (table_entry_t **)calloc_f(opt_param, buck_n,
+ sizeof(table_entry_t *));
+ else
+ table_p->ta_buckets = (table_entry_t **)calloc(buck_n, sizeof(table_entry_t *));
+ if (table_p->ta_buckets == NULL) {
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_ALLOC;
+ if (free_f != NULL)
+ free_f(opt_param, table_p);
+ else
+ free(table_p);
+ return NULL;
+ }
+
+ /* initialize structure */
+ table_p->ta_magic = TABLE_MAGIC;
+ table_p->ta_flags = 0;
+ table_p->ta_bucket_n = buck_n;
+ table_p->ta_entry_n = 0;
+ table_p->ta_data_align = 0;
+ table_p->ta_linear.tl_magic = 0;
+ table_p->ta_linear.tl_bucket_c = 0;
+ table_p->ta_linear.tl_entry_c = 0;
+ table_p->ta_file_size = 0;
+ table_p->ta_malloc = malloc_f != NULL ? malloc_f : sys_malloc;
+ table_p->ta_calloc = calloc_f != NULL ? calloc_f : sys_calloc;
+ table_p->ta_realloc = realloc_f != NULL ? realloc_f : sys_realloc;
+ table_p->ta_free = free_f != NULL ? free_f : sys_free;
+ table_p->opt_param = opt_param;
+
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_NONE;
+ return table_p;
+}
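+
+/*
+ * Illustrative usage sketch: create a table with the default bucket count
+ * and the system allocators (all allocator arguments NULL):
+ *
+ *     int err;
+ *     table_t *tab = table_alloc(0, &err, NULL, NULL, NULL, NULL, NULL);
+ *     if (tab == NULL)
+ *         ... err holds a TABLE_ERROR_* code ...
+ */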
+
+/*
+ * int table_attr
+ *
+ * DESCRIPTION:
+ *
+ * Set the attributes for the table. The available attributes are
+ * specified at the top of table.h.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Pointer to a table structure which we will be altering.
+ *
+ * attr - Attribute(s) that we will be applying to the table.
+ */
+int table_attr(table_t * table_p, const int attr)
+{
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ table_p->ta_flags = attr;
+
+ return TABLE_ERROR_NONE;
+}
+
+/*
+ * int table_set_data_alignment
+ *
+ * DESCRIPTION:
+ *
+ * Set the alignment for the data in the table. For data elements
+ * sizeof(long) is recommended unless you use smaller data types
+ * exclusively.
+ *
+ * WARNING: This must be done before any data gets put into the table.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Pointer to a table structure which we will be altering.
+ *
+ * alignment - Alignment requested for the data. Must be a power of
+ * 2. Set to 0 for none.
+ */
+int table_set_data_alignment(table_t * table_p, const int alignment)
+{
+ int val;
+
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ if (table_p->ta_entry_n > 0)
+ return TABLE_ERROR_NOT_EMPTY;
+ /* defaults */
+ if (alignment < 2)
+ table_p->ta_data_align = 0;
+ else {
+ /* verify we have a base 2 number */
+ for (val = 2; val < MAX_ALIGNMENT; val *= 2) {
+ if (val == alignment)
+ break;
+ }
+ if (val >= MAX_ALIGNMENT)
+ return TABLE_ERROR_ALIGNMENT;
+ table_p->ta_data_align = alignment;
+ }
+
+ return TABLE_ERROR_NONE;
+}
+
+/*
+ * int table_clear
+ *
+ * DESCRIPTION:
+ *
+ * Clear out and free all elements in a table structure.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer that we will be clearing.
+ */
+int table_clear(table_t * table_p)
+{
+ table_entry_t *entry_p, *next_p;
+ table_entry_t **bucket_p, **bounds_p;
+
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ /* free the table allocation and table structure */
+ bounds_p = table_p->ta_buckets + table_p->ta_bucket_n;
+ for (bucket_p = table_p->ta_buckets; bucket_p < bounds_p; bucket_p++) {
+ for (entry_p = *bucket_p; entry_p != NULL; entry_p = next_p) {
+ /* record the next pointer before we free */
+ next_p = entry_p->te_next_p;
+ table_p->ta_free(table_p->opt_param, entry_p);
+ }
+ /* clear the bucket entry after we free its entries */
+ *bucket_p = NULL;
+ }
+
+ /* reset table state info */
+ table_p->ta_entry_n = 0;
+ table_p->ta_linear.tl_magic = 0;
+ table_p->ta_linear.tl_bucket_c = 0;
+ table_p->ta_linear.tl_entry_c = 0;
+
+ return TABLE_ERROR_NONE;
+}
+
+/*
+ * int table_free
+ *
+ * DESCRIPTION:
+ *
+ * Deallocates a table structure.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer that we will be freeing.
+ */
+int table_free(table_t * table_p)
+{
+ int ret;
+
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ ret = table_clear(table_p);
+
+ if (table_p->ta_buckets != NULL)
+ table_p->ta_free(table_p->opt_param, table_p->ta_buckets);
+ table_p->ta_magic = 0;
+ table_p->ta_free(table_p->opt_param, table_p);
+
+ return ret;
+}
+
+/*
+ * int table_insert_kd
+ *
+ * DESCRIPTION:
+ *
+ * Like table_insert except it passes back a pointer to the key and
+ * the data buffers after they have been inserted into the table
+ * structure.
+ *
+ * This routine adds a key/data pair both of which are made up of a
+ * buffer of bytes and an associated size. Both the key and the data
+ * will be copied into buffers allocated inside the table. If the key
+ * exists already, the associated data will be replaced if the
+ * overwrite flag is set, otherwise an error is returned.
+ *
+ * NOTE: be very careful changing the values since the table library
+ * provides the pointers to its memory. The key can _never_ be
+ * changed otherwise you will not find it again. The data can be
+ * changed but its length can never be altered unless you delete and
+ * re-insert it into the table.
+ *
+ * WARNING: The pointers to the key and data are not in any specific
+ * alignment. Accessing the key and/or data as an short, integer, or
+ * long pointer directly can cause problems.
+ *
+ * WARNING: Replacing a data cell (not inserting) will cause the table
+ * linked list to be temporarily invalid. Care must be taken with
+ * multiple threaded programs which are relying on the first/next
+ * linked list to be always valid.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer into which we will be inserting a
+ * new key/data pair.
+ *
+ * key_buf - Buffer of bytes of the key that we are inserting. If you
+ * are storing an (int) as the key (for example) then key_buf should
+ * be a (int *).
+ *
+ * key_size - Size of the key_buf buffer. If set to < 0 then the
+ * library will do a strlen of key_buf and add 1 for the '\0'. If you
+ * are storing an (int) as the key (for example) then key_size should
+ * be sizeof(int).
+ *
+ * data_buf - Buffer of bytes of the data that we are inserting. If
+ * it is NULL then the library will allocate space for the data in the
+ * table without copying in any information. If data_buf is NULL and
+ * data_size is 0 then the library will associate a NULL data pointer
+ * with the key. If you are storing a (long) as the data (for
+ * example) then data_buf should be a (long *).
+ *
+ * data_size - Size of the data_buf buffer. If set to < 0 then the
+ * library will do a strlen of data_buf and add 1 for the '\0'. If
+ * you are storing a (long) as the data (for example) then data_size
+ * should be sizeof(long).
+ *
+ * key_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the key storage that was allocated in the table. If you are
+ * storing an (int) as the key (for example) then key_buf_p should be
+ * (int **) i.e. the address of a (int *).
+ *
+ * data_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the data storage that was allocated in the table. If you are
+ * storing an (long) as the data (for example) then data_buf_p should
+ * be (long **) i.e. the address of a (long *).
+ *
+ * overwrite - Flag which, if set to 1, will allow the overwriting of
+ * the data in the table with the new data if the key already exists
+ * in the table.
+ */
+int table_insert_kd(table_t * table_p,
+ const void *key_buf, const int key_size,
+ const void *data_buf, const int data_size,
+ void **key_buf_p, void **data_buf_p,
+ const char overwrite_b)
+{
+ int bucket;
+ unsigned int ksize, dsize;
+ table_entry_t *entry_p, *last_p;
+ void *key_copy_p, *data_copy_p;
+
+ /* check the arguments */
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ if (key_buf == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ /* if data_buf is NULL its size must be >= 0; if non-NULL, size must not be 0 */
+ if ((data_buf == NULL && data_size < 0)
+ || (data_buf != NULL && data_size == 0))
+ return TABLE_ERROR_SIZE;
+ /* determine sizes of key and data */
+ if (key_size < 0)
+ ksize = strlen((char *) key_buf) + sizeof(char);
+ else
+ ksize = key_size;
+ if (data_size < 0)
+ dsize = strlen((char *) data_buf) + sizeof(char);
+ else
+ dsize = data_size;
+ /* get the bucket number via a hash function */
+ bucket = hash(key_buf, ksize, 0) % table_p->ta_bucket_n;
+
+ /* look for the entry in this bucket, only check keys of the same size */
+ last_p = NULL;
+ for (entry_p = table_p->ta_buckets[bucket];
+ (entry_p != NULL) && (entry_p->te_next_p != last_p);
+ last_p = entry_p, entry_p = entry_p->te_next_p) {
+ if (entry_p->te_key_size == ksize
+ && memcmp(ENTRY_KEY_BUF(entry_p), key_buf, ksize) == 0)
+ break;
+ }
+
+ /* did we find it? then we are in replace mode. */
+ if (entry_p != NULL) {
+
+ /* can we not overwrite existing data? */
+ if (!overwrite_b) {
+ if (key_buf_p != NULL)
+ *key_buf_p = ENTRY_KEY_BUF(entry_p);
+ if (data_buf_p != NULL) {
+ if (entry_p->te_data_size == 0)
+ *data_buf_p = NULL;
+ else {
+ if (table_p->ta_data_align == 0)
+ *data_buf_p = ENTRY_DATA_BUF(table_p, entry_p);
+ else
+ *data_buf_p = entry_data_buf(table_p, entry_p);
+ }
+ }
+ return TABLE_ERROR_OVERWRITE;
+ }
+
+ /* re-alloc entry's data if the new size != the old */
+ if (dsize != entry_p->te_data_size) {
+
+ /*
+ * First we delete it from the list to keep the list whole.
+ * This properly preserves the linked list in case we have a
+ * thread marching through the linked list while we are
+ * inserting. Maybe this is an unnecessary precaution, but it
+ * should not hurt much.
+ */
+ if (last_p == NULL)
+ table_p->ta_buckets[bucket] = entry_p->te_next_p;
+ else
+ last_p->te_next_p = entry_p->te_next_p;
+ /*
+ * Realloc the structure which may change its pointer. NOTE:
+ * this may change any previous data_key_p and data_copy_p
+ * pointers.
+ */
+ entry_p = (table_entry_t *)
+ table_p->ta_realloc(table_p->opt_param, entry_p,
+ entry_size(table_p, entry_p->te_key_size, dsize));
+ if (entry_p == NULL)
+ return TABLE_ERROR_ALLOC;
+ /* add it back to the front of the list */
+ entry_p->te_data_size = dsize;
+ entry_p->te_next_p = table_p->ta_buckets[bucket];
+ table_p->ta_buckets[bucket] = entry_p;
+ }
+
+ /* copy or replace data in storage */
+ if (dsize > 0) {
+ if (table_p->ta_data_align == 0)
+ data_copy_p = ENTRY_DATA_BUF(table_p, entry_p);
+ else
+ data_copy_p = entry_data_buf(table_p, entry_p);
+ if (data_buf != NULL)
+ memcpy(data_copy_p, data_buf, dsize);
+ }
+ else
+ data_copy_p = NULL;
+ if (key_buf_p != NULL)
+ *key_buf_p = ENTRY_KEY_BUF(entry_p);
+ if (data_buf_p != NULL)
+ *data_buf_p = data_copy_p;
+ /* returning from the section where we were overwriting table data */
+ return TABLE_ERROR_NONE;
+ }
+
+ /*
+ * It is a new entry.
+ */
+
+ /* allocate a new entry */
+ entry_p = (table_entry_t *)
+ table_p->ta_malloc(table_p->opt_param,
+ entry_size(table_p, ksize, dsize));
+ if (entry_p == NULL)
+ return TABLE_ERROR_ALLOC;
+ /* copy key into storage */
+ entry_p->te_key_size = ksize;
+ key_copy_p = ENTRY_KEY_BUF(entry_p);
+ memcpy(key_copy_p, key_buf, ksize);
+
+ /* copy data in */
+ entry_p->te_data_size = dsize;
+ if (dsize > 0) {
+ if (table_p->ta_data_align == 0)
+ data_copy_p = ENTRY_DATA_BUF(table_p, entry_p);
+ else
+ data_copy_p = entry_data_buf(table_p, entry_p);
+ if (data_buf != NULL)
+ memcpy(data_copy_p, data_buf, dsize);
+ }
+ else
+ data_copy_p = NULL;
+ if (key_buf_p != NULL)
+ *key_buf_p = key_copy_p;
+ if (data_buf_p != NULL)
+ *data_buf_p = data_copy_p;
+ /* insert into list, no need to append */
+ entry_p->te_next_p = table_p->ta_buckets[bucket];
+ table_p->ta_buckets[bucket] = entry_p;
+
+ table_p->ta_entry_n++;
+
+ /* do we need auto-adjust? */
+ if (table_p->ta_flags & TABLE_FLAG_AUTO_ADJUST
+ && SHOULD_TABLE_GROW(table_p))
+ return table_adjust(table_p, table_p->ta_entry_n);
+ return TABLE_ERROR_NONE;
+}
+
+/*
+ * int table_insert
+ *
+ * DESCRIPTION:
+ *
+ * Exactly the same as table_insert_kd except it does not pass back a
+ * pointer to the key after they have been inserted into the table
+ * structure. This is still here for backwards compatibility.
+ *
+ * See table_insert_kd for more information.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer into which we will be inserting a
+ * new key/data pair.
+ *
+ * key_buf - Buffer of bytes of the key that we are inserting. If you
+ * are storing an (int) as the key (for example) then key_buf should
+ * be a (int *).
+ *
+ * key_size - Size of the key_buf buffer. If set to < 0 then the
+ * library will do a strlen of key_buf and add 1 for the '\0'. If you
+ * are storing an (int) as the key (for example) then key_size should
+ * be sizeof(int).
+ *
+ * data_buf - Buffer of bytes of the data that we are inserting. If
+ * it is NULL then the library will allocate space for the data in the
+ * table without copying in any information. If data_buf is NULL and
+ * data_size is 0 then the library will associate a NULL data pointer
+ * with the key. If you are storing a (long) as the data (for
+ * example) then data_buf should be a (long *).
+ *
+ * data_size - Size of the data_buf buffer. If set to < 0 then the
+ * library will do a strlen of data_buf and add 1 for the '\0'. If
+ * you are storing a (long) as the data (for example) then data_size
+ * should be sizeof(long).
+ *
+ * data_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the data storage that was allocated in the table. If you are
+ * storing an (long) as the data (for example) then data_buf_p should
+ * be (long **) i.e. the address of a (long *).
+ *
+ * overwrite - Flag which, if set to 1, will allow the overwriting of
+ * the data in the table with the new data if the key already exists
+ * in the table.
+ */
+int table_insert(table_t * table_p,
+ const void *key_buf, const int key_size,
+ const void *data_buf, const int data_size,
+ void **data_buf_p, const char overwrite_b)
+{
+ return table_insert_kd(table_p, key_buf, key_size, data_buf, data_size,
+ NULL, data_buf_p, overwrite_b);
+}
+
+/*
+ * int table_retrieve
+ *
+ * DESCRIPTION:
+ *
+ * This routine looks up a key made up of a buffer of bytes and an
+ * associated size in the table. If found then it returns the
+ * associated data information.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer into which we will be searching
+ * for the key.
+ *
+ * key_buf - Buffer of bytes of the key that we are searching for. If
+ * you are looking for an (int) as the key (for example) then key_buf
+ * should be a (int *).
+ *
+ * key_size - Size of the key_buf buffer. If set to < 0 then the
+ * library will do a strlen of key_buf and add 1 for the '\0'. If you
+ * are looking for an (int) as the key (for example) then key_size
+ * should be sizeof(int).
+ *
+ * data_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the data storage that was allocated in the table and that is
+ * associated with the key. If a (long) was stored as the data (for
+ * example) then data_buf_p should be (long **) i.e. the address of a
+ * (long *).
+ *
+ * data_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the data stored in the table that is associated with
+ * the key.
+ */
+int table_retrieve(table_t * table_p,
+ const void *key_buf, const int key_size,
+ void **data_buf_p, int *data_size_p)
+{
+ int bucket;
+ unsigned int ksize;
+ table_entry_t *entry_p, **buckets;
+
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ if (key_buf == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ /* find key size */
+ if (key_size < 0)
+ ksize = strlen((char *) key_buf) + sizeof(char);
+ else
+ ksize = key_size;
+ /* get the bucket number via a hash function */
+ bucket = hash(key_buf, ksize, 0) % table_p->ta_bucket_n;
+
+ /* look for the entry in this bucket, only check keys of the same size */
+ buckets = table_p->ta_buckets;
+ for (entry_p = buckets[bucket];
+ entry_p != NULL;
+ entry_p = entry_p->te_next_p) {
+ entry_p = TABLE_POINTER(table_p, table_entry_t *, entry_p);
+ if (entry_p->te_key_size == ksize
+ && memcmp(ENTRY_KEY_BUF(entry_p), key_buf, ksize) == 0)
+ break;
+ }
+
+ /* not found? */
+ if (entry_p == NULL)
+ return TABLE_ERROR_NOT_FOUND;
+ if (data_buf_p != NULL) {
+ if (entry_p->te_data_size == 0)
+ *data_buf_p = NULL;
+ else {
+ if (table_p->ta_data_align == 0)
+ *data_buf_p = ENTRY_DATA_BUF(table_p, entry_p);
+ else
+ *data_buf_p = entry_data_buf(table_p, entry_p);
+ }
+ }
+ if (data_size_p != NULL)
+ *data_size_p = entry_p->te_data_size;
+ return TABLE_ERROR_NONE;
+}
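+
+/*
+ * Illustrative usage sketch (not part of the original source): looking up
+ * the (long) value stored under an (int) key.  The returned pointer refers
+ * to storage inside the table, so the caller does not free it.
+ *
+ *   int key = 42;
+ *   long *data_p = NULL;
+ *   int data_size = 0;
+ *   if (table_retrieve(table_p, &key, sizeof(key),
+ *                      (void **) &data_p, &data_size) == TABLE_ERROR_NONE)
+ *     printf("found %ld (%d bytes)\n", *data_p, data_size);
+ */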
+
+/*
+ * int table_delete
+ *
+ * DESCRIPTION:
+ *
+ * This routine looks up a key made up of a buffer of bytes and an
+ * associated size in the table. If found then it will be removed
+ * from the table. The associated data can be passed back to the user
+ * if requested.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * NOTE: this could be an allocation error if the library is to return
+ * the data to the user.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer from which we will be deleting
+ * the key.
+ *
+ * key_buf - Buffer of bytes of the key that we are searching for to
+ * delete. If you are deleting an (int) key (for example) then
+ * key_buf should be a (int *).
+ *
+ * key_size - Size of the key_buf buffer. If set to < 0 then the
+ * library will do a strlen of key_buf and add 1 for the '\0'. If you
+ * are deleting an (int) key (for example) then key_size should be
+ * sizeof(int).
+ *
+ * data_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the data storage that was allocated in the table and that was
+ * associated with the key. If a (long) was stored as the data (for
+ * example) then data_buf_p should be (long **) i.e. the address of a
+ * (long *). If a pointer is passed in, the caller is responsible for
+ * freeing it after use. If data_buf_p is NULL then the library will
+ * free up the data allocation itself.
+ *
+ * data_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the data that was stored in the table and that was
+ * associated with the key.
+ */
+int table_delete(table_t * table_p,
+ const void *key_buf, const int key_size,
+ void **data_buf_p, int *data_size_p)
+{
+ int bucket;
+ unsigned int ksize;
+ unsigned char *data_copy_p;
+ table_entry_t *entry_p, *last_p;
+
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ if (key_buf == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ /* get the key size */
+ if (key_size < 0)
+ ksize = strlen((char *) key_buf) + sizeof(char);
+ else
+ ksize = key_size;
+ /* find our bucket */
+ bucket = hash(key_buf, ksize, 0) % table_p->ta_bucket_n;
+
+ /* look for the entry in this bucket, only check keys of the same size */
+ for (last_p = NULL, entry_p = table_p->ta_buckets[bucket]; entry_p != NULL;
+ last_p = entry_p, entry_p = entry_p->te_next_p) {
+ if (entry_p->te_key_size == ksize
+ && memcmp(ENTRY_KEY_BUF(entry_p), key_buf, ksize) == 0)
+ break;
+ }
+
+ /* did we find it? */
+ if (entry_p == NULL)
+ return TABLE_ERROR_NOT_FOUND;
+ /*
+ * NOTE: we may want to adjust the linear counters here if the entry
+ * we are deleting is the one we are pointing on or is ahead of the
+ * one in the bucket list
+ */
+
+ /* remove entry from the linked list */
+ if (last_p == NULL)
+ table_p->ta_buckets[bucket] = entry_p->te_next_p;
+ else
+ last_p->te_next_p = entry_p->te_next_p;
+ /* free entry */
+ if (data_buf_p != NULL) {
+ if (entry_p->te_data_size == 0)
+ *data_buf_p = NULL;
+ else {
+ /*
+ * if we were storing it compacted, we now need to malloc some
+ * space if the user wants the value after the delete.
+ */
+ *data_buf_p = table_p->ta_malloc(table_p->opt_param,
+ entry_p->te_data_size);
+ if (*data_buf_p == NULL)
+ return TABLE_ERROR_ALLOC;
+ if (table_p->ta_data_align == 0)
+ data_copy_p = ENTRY_DATA_BUF(table_p, entry_p);
+ else
+ data_copy_p = entry_data_buf(table_p, entry_p);
+ memcpy(*data_buf_p, data_copy_p, entry_p->te_data_size);
+ }
+ }
+ if (data_size_p != NULL)
+ *data_size_p = entry_p->te_data_size;
+ table_p->ta_free(table_p->opt_param, entry_p);
+ entry_p = NULL;
+
+ table_p->ta_entry_n--;
+
+ /* do we need auto-adjust down? */
+ if ((table_p->ta_flags & TABLE_FLAG_AUTO_ADJUST)
+ && (table_p->ta_flags & TABLE_FLAG_ADJUST_DOWN)
+ && SHOULD_TABLE_SHRINK(table_p))
+ return table_adjust(table_p, table_p->ta_entry_n);
+ return TABLE_ERROR_NONE;
+}
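+
+/*
+ * Illustrative usage sketch (not part of the original source): deleting an
+ * (int) key and taking ownership of the copied-out data.  The copy is made
+ * with the malloc routine handed to table_alloc, so the caller releases it
+ * with the matching free routine when done.
+ *
+ *   int key = 42;
+ *   long *old_data_p = NULL;
+ *   int old_size = 0;
+ *   if (table_delete(table_p, &key, sizeof(key),
+ *                    (void **) &old_data_p, &old_size) == TABLE_ERROR_NONE)
+ *     printf("deleted %d -> %ld (%d bytes)\n", key, *old_data_p, old_size);
+ */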
+
+/*
+ * int table_delete_first
+ *
+ * DESCRIPTION:
+ *
+ * This is like the table_delete routines except it deletes the first
+ * key/data pair in the table instead of an entry corresponding to a
+ * particular key. The associated key and data information can be
+ * passed back to the user if requested.  This routine is handy for
+ * clearing out a table.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * NOTE: this could be an allocation error if the library is to return
+ * the data to the user.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer from which we will be deleting
+ * the first key.
+ *
+ * key_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the storage of the first key that was allocated in the table.
+ * If an (int) was stored as the first key (for example) then
+ * key_buf_p should be (int **) i.e. the address of a (int *). If a
+ * pointer is passed in, the caller is responsible for freeing it
+ * after use. If key_buf_p is NULL then the library will free up the
+ * key allocation itself.
+ *
+ * key_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the first key that was stored in the table.
+ *
+ * data_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the data storage that was allocated in the table and that was
+ * associated with the key. If a (long) was stored as the data (for
+ * example) then data_buf_p should be (long **) i.e. the address of a
+ * (long *). If a pointer is passed in, the caller is responsible for
+ * freeing it after use. If data_buf_p is NULL then the library will
+ * free up the data allocation itself.
+ *
+ * data_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the data that was stored in the table and that was
+ * associated with the key.
+ */
+int table_delete_first(table_t * table_p,
+ void **key_buf_p, int *key_size_p,
+ void **data_buf_p, int *data_size_p)
+{
+ unsigned char *data_copy_p;
+ table_entry_t *entry_p;
+ table_linear_t linear;
+
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ /* take the first entry */
+ entry_p = first_entry(table_p, &linear);
+ if (entry_p == NULL)
+ return TABLE_ERROR_NOT_FOUND;
+ /*
+ * NOTE: we may want to adjust the linear counters here if the entry
+ * we are deleting is the one we are pointing on or is ahead of the
+ * one in the bucket list
+ */
+
+ /* remove entry from the linked list */
+ table_p->ta_buckets[linear.tl_bucket_c] = entry_p->te_next_p;
+
+ /* free entry */
+ if (key_buf_p != NULL) {
+ if (entry_p->te_key_size == 0)
+ *key_buf_p = NULL;
+ else {
+ /*
+ * if we were storing it compacted, we now need to malloc some
+ * space if the user wants the value after the delete.
+ */
+ *key_buf_p = table_p->ta_malloc(table_p->opt_param,
+ entry_p->te_key_size);
+ if (*key_buf_p == NULL)
+ return TABLE_ERROR_ALLOC;
+ memcpy(*key_buf_p, ENTRY_KEY_BUF(entry_p), entry_p->te_key_size);
+ }
+ }
+ if (key_size_p != NULL)
+ *key_size_p = entry_p->te_key_size;
+ if (data_buf_p != NULL) {
+ if (entry_p->te_data_size == 0)
+ *data_buf_p = NULL;
+ else {
+ /*
+ * if we were storing it compacted, we now need to malloc some
+ * space if the user wants the value after the delete.
+ */
+ *data_buf_p = table_p->ta_malloc(table_p->opt_param,
+ entry_p->te_data_size);
+ if (*data_buf_p == NULL)
+ return TABLE_ERROR_ALLOC;
+ if (table_p->ta_data_align == 0)
+ data_copy_p = ENTRY_DATA_BUF(table_p, entry_p);
+ else
+ data_copy_p = entry_data_buf(table_p, entry_p);
+ memcpy(*data_buf_p, data_copy_p, entry_p->te_data_size);
+ }
+ }
+ if (data_size_p != NULL)
+ *data_size_p = entry_p->te_data_size;
+ table_p->ta_free(table_p->opt_param, entry_p);
+
+ table_p->ta_entry_n--;
+
+ /* do we need auto-adjust down? */
+ if ((table_p->ta_flags & TABLE_FLAG_AUTO_ADJUST)
+ && (table_p->ta_flags & TABLE_FLAG_ADJUST_DOWN)
+ && SHOULD_TABLE_SHRINK(table_p))
+ return table_adjust(table_p, table_p->ta_entry_n);
+ return TABLE_ERROR_NONE;
+}
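+
+/*
+ * Illustrative usage sketch (not part of the original source): draining a
+ * table by repeatedly deleting its first entry.  Passing NULL for the
+ * key/data pointers lets the library free the allocations itself.
+ *
+ *   while (table_delete_first(table_p, NULL, NULL,
+ *                             NULL, NULL) == TABLE_ERROR_NONE)
+ *     ;
+ */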
+
+/*
+ * int table_info
+ *
+ * DESCRIPTION:
+ *
+ * Get some information about a table_p structure.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer from which we are getting
+ * information.
+ *
+ * num_buckets_p - Pointer to an integer which, if not NULL, will
+ * contain the number of buckets in the table.
+ *
+ * num_entries_p - Pointer to an integer which, if not NULL, will
+ * contain the number of entries stored in the table.
+ */
+int table_info(table_t * table_p, int *num_buckets_p, int *num_entries_p)
+{
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ if (num_buckets_p != NULL)
+ *num_buckets_p = table_p->ta_bucket_n;
+ if (num_entries_p != NULL)
+ *num_entries_p = table_p->ta_entry_n;
+ return TABLE_ERROR_NONE;
+}
+
+/*
+ * int table_adjust
+ *
+ * DESCRIPTION:
+ *
+ * Set the number of buckets in a table to a certain value.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer which we are adjusting.
+ *
+ * bucket_n - Number of buckets to adjust the table to.  Set to 0 to
+ * adjust the table to its number of entries.
+ */
+int table_adjust(table_t * table_p, const int bucket_n)
+{
+ table_entry_t *entry_p, *next_p;
+ table_entry_t **buckets, **bucket_p, **bounds_p;
+ int bucket;
+ unsigned int buck_n;
+
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ /*
+ * NOTE: we walk through the entries and rehash them. If we stored
+ * the hash value as a full int in the table-entry, all we would
+ * have to do is remod it.
+ */
+
+ /* normalize to the number of entries */
+ if (bucket_n == 0)
+ buck_n = table_p->ta_entry_n;
+ else
+ buck_n = bucket_n;
+ /* we must have at least 1 bucket */
+ if (buck_n == 0)
+ buck_n = 1;
+ /* make sure we have something to do */
+ if (buck_n <= table_p->ta_bucket_n)
+ return TABLE_ERROR_NONE;
+ /* allocate a new bucket list */
+ buckets = (table_entry_t **)
+ table_p->ta_calloc(table_p->opt_param,
+ buck_n, sizeof(table_entry_t *));
+ if (buckets == NULL)
+ return TABLE_ERROR_ALLOC;
+ /*
+ * run through each of the items in the current table and rehash
+ * them into the newest bucket sizes
+ */
+ bounds_p = table_p->ta_buckets + table_p->ta_bucket_n;
+ for (bucket_p = table_p->ta_buckets; bucket_p < bounds_p; bucket_p++) {
+ for (entry_p = *bucket_p; entry_p != NULL; entry_p = next_p) {
+
+ /* hash the old data into the new table size */
+ bucket = hash(ENTRY_KEY_BUF(entry_p), entry_p->te_key_size, 0) % buck_n;
+
+ /* record the next one now since we overwrite next below */
+ next_p = entry_p->te_next_p;
+
+ /* insert into new list, no need to append */
+ entry_p->te_next_p = buckets[bucket];
+ buckets[bucket] = entry_p;
+
+ /*
+ * NOTE: we may want to adjust the bucket_c linear entry here to
+ * keep it current
+ */
+ }
+ /* remove the old table pointers as we go by */
+ *bucket_p = NULL;
+ }
+
+ /* replace the table buckets with the new ones */
+ table_p->ta_free(table_p->opt_param, table_p->ta_buckets);
+ table_p->ta_buckets = buckets;
+ table_p->ta_bucket_n = buck_n;
+
+ return TABLE_ERROR_NONE;
+}
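+
+/*
+ * Illustrative usage sketch (not part of the original source): enabling
+ * automatic bucket adjustment so the table is resized as entries are
+ * inserted and deleted, instead of calling table_adjust by hand.
+ *
+ *   (void) table_attr(table_p, TABLE_FLAG_AUTO_ADJUST | TABLE_FLAG_ADJUST_DOWN);
+ */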
+
+/*
+ * const char *table_strerror
+ *
+ * DESCRIPTION:
+ *
+ * Return the corresponding string for the error number.
+ *
+ * RETURNS:
+ *
+ * Success - String equivalent of the error.
+ *
+ * Failure - String "invalid error code"
+ *
+ * ARGUMENTS:
+ *
+ * error - Error number that we are converting.
+ */
+const char *table_strerror(const int error)
+{
+ error_str_t *err_p;
+
+ for (err_p = errors; err_p->es_error != 0; err_p++) {
+ if (err_p->es_error == error)
+ return err_p->es_string;
+ }
+
+ return INVALID_ERROR;
+}
+
+/*
+ * int table_type_size
+ *
+ * DESCRIPTION:
+ *
+ * Return the size of the internal table type.
+ *
+ * RETURNS:
+ *
+ * The size of the table_t type.
+ *
+ * ARGUMENTS:
+ *
+ * None.
+ */
+int table_type_size(void)
+{
+ return sizeof(table_t);
+}
+
+/************************* linear access routines ****************************/
+
+/*
+ * int table_first
+ *
+ * DESCRIPTION:
+ *
+ * Find first element in a table and pass back information about the
+ * key/data pair. If any of the key/data pointers are NULL then they
+ * are ignored.
+ *
+ * NOTE: This function is not reentrant.  Two threads cannot be doing
+ * a first and next on the same table at the same time.  Use the
+ * table_first_r version below for this.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer from which we are getting the
+ * first element.
+ *
+ * key_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the storage of the first key that is allocated in the table. If
+ * an (int) is stored as the first key (for example) then key_buf_p
+ * should be (int **) i.e. the address of a (int *).
+ *
+ * key_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the key that is stored in the table and that is
+ * associated with the first key.
+ *
+ * data_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the data storage that is allocated in the table and that is
+ * associated with the first key. If a (long) is stored as the data
+ * (for example) then data_buf_p should be (long **) i.e. the address
+ * of a (long *).
+ *
+ * data_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the data that is stored in the table and that is
+ * associated with the first key.
+ */
+int table_first(table_t * table_p,
+ void **key_buf_p, int *key_size_p,
+ void **data_buf_p, int *data_size_p)
+{
+ table_entry_t *entry_p;
+
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ /* initialize our linear magic number */
+ table_p->ta_linear.tl_magic = LINEAR_MAGIC;
+
+ entry_p = first_entry(table_p, &table_p->ta_linear);
+ if (entry_p == NULL)
+ return TABLE_ERROR_NOT_FOUND;
+ if (key_buf_p != NULL)
+ *key_buf_p = ENTRY_KEY_BUF(entry_p);
+ if (key_size_p != NULL)
+ *key_size_p = entry_p->te_key_size;
+ if (data_buf_p != NULL) {
+ if (entry_p->te_data_size == 0)
+ *data_buf_p = NULL;
+ else {
+ if (table_p->ta_data_align == 0)
+ *data_buf_p = ENTRY_DATA_BUF(table_p, entry_p);
+ else
+ *data_buf_p = entry_data_buf(table_p, entry_p);
+ }
+ }
+ if (data_size_p != NULL)
+ *data_size_p = entry_p->te_data_size;
+ return TABLE_ERROR_NONE;
+}
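+
+/*
+ * Illustrative usage sketch (not part of the original source): walking all
+ * key/data pairs with the non-reentrant first/next interface, assuming
+ * (int) keys and (long) data were stored.
+ *
+ *   int *key_p;
+ *   long *data_p;
+ *   int ret;
+ *   for (ret = table_first(table_p, (void **) &key_p, NULL,
+ *                          (void **) &data_p, NULL);
+ *        ret == TABLE_ERROR_NONE;
+ *        ret = table_next(table_p, (void **) &key_p, NULL,
+ *                         (void **) &data_p, NULL))
+ *     printf("%d -> %ld\n", *key_p, *data_p);
+ */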
+
+/*
+ * int table_next
+ *
+ * DESCRIPTION:
+ *
+ * Find the next element in a table and pass back information about
+ * the key/data pair. If any of the key/data pointers are NULL then
+ * they are ignored.
+ *
+ * NOTE: This function is not reentrant.  Two threads cannot be doing
+ * a first and next on the same table at the same time.  Use the
+ * table_next_r version below for this.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer from which we are getting the
+ * next element.
+ *
+ * key_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the storage of the next key that is allocated in the table. If
+ * an (int) is stored as the next key (for example) then key_buf_p
+ * should be (int **) i.e. the address of a (int *).
+ *
+ * key_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the key that is stored in the table and that is
+ * associated with the next key.
+ *
+ * data_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the data storage that is allocated in the table and that is
+ * associated with the next key. If a (long) is stored as the data
+ * (for example) then data_buf_p should be (long **) i.e. the address
+ * of a (long *).
+ *
+ * data_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the data that is stored in the table and that is
+ * associated with the next key.
+ */
+int table_next(table_t * table_p,
+ void **key_buf_p, int *key_size_p,
+ void **data_buf_p, int *data_size_p)
+{
+ table_entry_t *entry_p;
+ int error;
+
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ if (table_p->ta_linear.tl_magic != LINEAR_MAGIC)
+ return TABLE_ERROR_LINEAR;
+ /* move to the next entry */
+ entry_p = next_entry(table_p, &table_p->ta_linear, &error);
+ if (entry_p == NULL)
+ return error;
+ if (key_buf_p != NULL)
+ *key_buf_p = ENTRY_KEY_BUF(entry_p);
+ if (key_size_p != NULL)
+ *key_size_p = entry_p->te_key_size;
+ if (data_buf_p != NULL) {
+ if (entry_p->te_data_size == 0)
+ *data_buf_p = NULL;
+ else {
+ if (table_p->ta_data_align == 0)
+ *data_buf_p = ENTRY_DATA_BUF(table_p, entry_p);
+ else
+ *data_buf_p = entry_data_buf(table_p, entry_p);
+ }
+ }
+ if (data_size_p != NULL)
+ *data_size_p = entry_p->te_data_size;
+ return TABLE_ERROR_NONE;
+}
+
+/*
+ * int table_this
+ *
+ * DESCRIPTION:
+ *
+ * Find the current element in a table and pass back information about
+ * the key/data pair. If any of the key/data pointers are NULL then
+ * they are ignored.
+ *
+ * NOTE: This function is not reentrant.  Use the table_this_r version
+ * below.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer from which we are getting the
+ * current element.
+ *
+ * key_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the storage of the current key that is allocated in the table.
+ * If an (int) is stored as the current key (for example) then
+ * key_buf_p should be (int **) i.e. the address of a (int *).
+ *
+ * key_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the key that is stored in the table and that is
+ * associated with the current key.
+ *
+ * data_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the data storage that is allocated in the table and that is
+ * associated with the current key. If a (long) is stored as the data
+ * (for example) then data_buf_p should be (long **) i.e. the address
+ * of a (long *).
+ *
+ * data_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the data that is stored in the table and that is
+ * associated with the current key.
+ */
+int table_this(table_t * table_p,
+ void **key_buf_p, int *key_size_p,
+ void **data_buf_p, int *data_size_p)
+{
+ table_entry_t *entry_p = NULL;
+ int entry_c;
+
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ if (table_p->ta_linear.tl_magic != LINEAR_MAGIC)
+ return TABLE_ERROR_LINEAR;
+ /* if we removed an item that shortened the bucket list, we may get this */
+ if (table_p->ta_linear.tl_bucket_c >= table_p->ta_bucket_n) {
+ /*
+ * NOTE: this might happen if we delete an item which shortens the
+ * table bucket numbers.
+ */
+ return TABLE_ERROR_NOT_FOUND;
+ }
+
+ /* find the entry which is the nth in the list */
+ entry_p = table_p->ta_buckets[table_p->ta_linear.tl_bucket_c];
+ /* NOTE: we swap the order here to be more efficient */
+ for (entry_c = table_p->ta_linear.tl_entry_c; entry_c > 0; entry_c--) {
+ /* did we reach the end of the list? */
+ if (entry_p == NULL)
+ break;
+ entry_p = TABLE_POINTER(table_p, table_entry_t *, entry_p)->te_next_p;
+ }
+
+ /* is this a NOT_FOUND or a LINEAR error */
+ if (entry_p == NULL)
+ return TABLE_ERROR_NOT_FOUND;
+ if (key_buf_p != NULL)
+ *key_buf_p = ENTRY_KEY_BUF(entry_p);
+ if (key_size_p != NULL)
+ *key_size_p = entry_p->te_key_size;
+ if (data_buf_p != NULL) {
+ if (entry_p->te_data_size == 0)
+ *data_buf_p = NULL;
+ else {
+ if (table_p->ta_data_align == 0)
+ *data_buf_p = ENTRY_DATA_BUF(table_p, entry_p);
+ else
+ *data_buf_p = entry_data_buf(table_p, entry_p);
+ }
+ }
+ if (data_size_p != NULL)
+ *data_size_p = entry_p->te_data_size;
+ return TABLE_ERROR_NONE;
+}
+
+/*
+ * int table_first_r
+ *
+ * DESCRIPTION:
+ *
+ * Reentrant version of the table_first routine above.  Find the first
+ * element in a table and pass back information about the key/data
+ * pair. If any of the key/data pointers are NULL then they are
+ * ignored.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer from which we are getting the
+ * first element.
+ *
+ * linear_p - Pointer to a table linear structure which is initialized
+ * here. The same pointer should then be passed to table_next_r
+ * below.
+ *
+ * key_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the storage of the first key that is allocated in the table. If
+ * an (int) is stored as the first key (for example) then key_buf_p
+ * should be (int **) i.e. the address of a (int *).
+ *
+ * key_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the key that is stored in the table and that is
+ * associated with the first key.
+ *
+ * data_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the data storage that is allocated in the table and that is
+ * associated with the first key. If a (long) is stored as the data
+ * (for example) then data_buf_p should be (long **) i.e. the address
+ * of a (long *).
+ *
+ * data_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the data that is stored in the table and that is
+ * associated with the first key.
+ */
+int table_first_r(table_t * table_p, table_linear_t * linear_p,
+ void **key_buf_p, int *key_size_p,
+ void **data_buf_p, int *data_size_p)
+{
+ table_entry_t *entry_p;
+
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ if (linear_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ /* initialize our linear magic number */
+ linear_p->tl_magic = LINEAR_MAGIC;
+
+ entry_p = first_entry(table_p, linear_p);
+ if (entry_p == NULL)
+ return TABLE_ERROR_NOT_FOUND;
+ if (key_buf_p != NULL)
+ *key_buf_p = ENTRY_KEY_BUF(entry_p);
+ if (key_size_p != NULL)
+ *key_size_p = entry_p->te_key_size;
+ if (data_buf_p != NULL) {
+ if (entry_p->te_data_size == 0)
+ *data_buf_p = NULL;
+ else {
+ if (table_p->ta_data_align == 0)
+ *data_buf_p = ENTRY_DATA_BUF(table_p, entry_p);
+ else
+ *data_buf_p = entry_data_buf(table_p, entry_p);
+ }
+ }
+ if (data_size_p != NULL)
+ *data_size_p = entry_p->te_data_size;
+ return TABLE_ERROR_NONE;
+}
+
+/*
+ * int table_next_r
+ *
+ * DESCRIPTION:
+ *
+ * Reentrant version of the table_next routine above.  Find the next
+ * element in a table and pass back information about the key/data
+ * pair. If any of the key/data pointers are NULL then they are
+ * ignored.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer from which we are getting the
+ * next element.
+ *
+ * linear_p - Pointer to a table linear structure which is incremented
+ * here. The same pointer must have been passed to table_first_r
+ * first so that it can be initialized.
+ *
+ * key_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the storage of the next key that is allocated in the table. If
+ * an (int) is stored as the next key (for example) then key_buf_p
+ * should be (int **) i.e. the address of a (int *).
+ *
+ * key_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the key that is stored in the table and that is
+ * associated with the next key.
+ *
+ * data_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the data storage that is allocated in the table and that is
+ * associated with the next key. If a (long) is stored as the data
+ * (for example) then data_buf_p should be (long **) i.e. the address
+ * of a (long *).
+ *
+ * data_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the data that is stored in the table and that is
+ * associated with the next key.
+ */
+int table_next_r(table_t * table_p, table_linear_t * linear_p,
+ void **key_buf_p, int *key_size_p,
+ void **data_buf_p, int *data_size_p)
+{
+ table_entry_t *entry_p;
+ int error;
+
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ if (linear_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (linear_p->tl_magic != LINEAR_MAGIC)
+ return TABLE_ERROR_LINEAR;
+ /* move to the next entry */
+ entry_p = next_entry(table_p, linear_p, &error);
+ if (entry_p == NULL)
+ return error;
+ if (key_buf_p != NULL)
+ *key_buf_p = ENTRY_KEY_BUF(entry_p);
+ if (key_size_p != NULL)
+ *key_size_p = entry_p->te_key_size;
+ if (data_buf_p != NULL) {
+ if (entry_p->te_data_size == 0)
+ *data_buf_p = NULL;
+ else {
+ if (table_p->ta_data_align == 0)
+ *data_buf_p = ENTRY_DATA_BUF(table_p, entry_p);
+ else
+ *data_buf_p = entry_data_buf(table_p, entry_p);
+ }
+ }
+ if (data_size_p != NULL)
+ *data_size_p = entry_p->te_data_size;
+ return TABLE_ERROR_NONE;
+}
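+
+/*
+ * Illustrative usage sketch (not part of the original source): the same
+ * walk as table_first/table_next, but with a caller-owned table_linear_t
+ * so that several iterations can run on the table independently.
+ *
+ *   table_linear_t linear;
+ *   int *key_p;
+ *   long *data_p;
+ *   int ret;
+ *   for (ret = table_first_r(table_p, &linear, (void **) &key_p, NULL,
+ *                            (void **) &data_p, NULL);
+ *        ret == TABLE_ERROR_NONE;
+ *        ret = table_next_r(table_p, &linear, (void **) &key_p, NULL,
+ *                           (void **) &data_p, NULL))
+ *     printf("%d -> %ld\n", *key_p, *data_p);
+ */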
+
+/*
+ * int table_this_r
+ *
+ * DESCRIPTION:
+ *
+ * Reentrant version of the table_this routine above.  Find the current
+ * element in a table and pass back information about the key/data
+ * pair. If any of the key/data pointers are NULL then they are
+ * ignored.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer from which we are getting the
+ * current element.
+ *
+ * linear_p - Pointer to a table linear structure which is accessed
+ * here. The same pointer must have been passed to table_first_r
+ * first so that it can be initialized.
+ *
+ * key_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the storage of the current key that is allocated in the table.
+ * If an (int) is stored as the current key (for example) then
+ * key_buf_p should be (int **) i.e. the address of a (int *).
+ *
+ * key_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the key that is stored in the table and that is
+ * associated with the current key.
+ *
+ * data_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the data storage that is allocated in the table and that is
+ * associated with the current key. If a (long) is stored as the data
+ * (for example) then data_buf_p should be (long **) i.e. the address
+ * of a (long *).
+ *
+ * data_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the data that is stored in the table and that is
+ * associated with the current key.
+ */
+int table_this_r(table_t * table_p, table_linear_t * linear_p,
+ void **key_buf_p, int *key_size_p,
+ void **data_buf_p, int *data_size_p)
+{
+ table_entry_t *entry_p;
+ int entry_c;
+
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ if (linear_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (linear_p->tl_magic != LINEAR_MAGIC)
+ return TABLE_ERROR_LINEAR;
+ /* if we removed an item that shortened the bucket list, we may get this */
+ if (linear_p->tl_bucket_c >= table_p->ta_bucket_n) {
+ /*
+ * NOTE: this might happen if we delete an item which shortens the
+ * table bucket numbers.
+ */
+ return TABLE_ERROR_NOT_FOUND;
+ }
+
+ /* find the entry which is the nth in the list */
+ for (entry_c = linear_p->tl_entry_c,
+ entry_p = table_p->ta_buckets[linear_p->tl_bucket_c];
+ entry_p != NULL && entry_c > 0;
+ entry_c--, entry_p = TABLE_POINTER(table_p, table_entry_t *,
+ entry_p)->te_next_p) {
+ }
+
+ if (entry_p == NULL)
+ return TABLE_ERROR_NOT_FOUND;
+ if (key_buf_p != NULL)
+ *key_buf_p = ENTRY_KEY_BUF(entry_p);
+ if (key_size_p != NULL)
+ *key_size_p = entry_p->te_key_size;
+ if (data_buf_p != NULL) {
+ if (entry_p->te_data_size == 0)
+ *data_buf_p = NULL;
+ else {
+ if (table_p->ta_data_align == 0)
+ *data_buf_p = ENTRY_DATA_BUF(table_p, entry_p);
+ else
+ *data_buf_p = entry_data_buf(table_p, entry_p);
+ }
+ }
+ if (data_size_p != NULL)
+ *data_size_p = entry_p->te_data_size;
+ return TABLE_ERROR_NONE;
+}
+
+/******************************** table order ********************************/
+
+/*
+ * table_entry_t *table_order
+ *
+ * DESCRIPTION:
+ *
+ * Order a table by building an array of table entry pointers and then
+ * sorting this array using the qsort function. To retrieve the
+ * sorted entries, you can then use the table_entry_info routine to access
+ * each entry in order.
+ *
+ * NOTE: This routine is now thread safe in that two table_order calls
+ * can now happen at the same time, even on the same table.
+ *
+ * RETURNS:
+ *
+ * An allocated list of entry pointers which must be freed later.
+ * Returns null on error.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Pointer to the table that we are ordering.
+ *
+ * compare - Comparison function defined by the user. Its definition
+ * is at the top of the table.h file. If this is NULL then it will
+ * order the table by memcmp-ing the keys.
+ *
+ * num_entries_p - Pointer to an integer which, if not NULL, will
+ * contain the number of entries in the returned entry pointer array.
+ *
+ * error_p - Pointer to an integer which, if not NULL, will contain a
+ * table error code.
+ */
+table_entry_t **table_order(table_t * table_p, table_compare_t compare,
+ int *num_entries_p, int *error_p)
+{
+ table_entry_t *entry_p, **entries, **entries_p;
+ table_linear_t linear;
+ compare_t comp_func;
+ int error;
+
+ if (table_p == NULL) {
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_ARG_NULL;
+ return NULL;
+ }
+ if (table_p->ta_magic != TABLE_MAGIC) {
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_PNT;
+ return NULL;
+ }
+
+ /* there must be at least 1 element in the table for this to work */
+ if (table_p->ta_entry_n == 0) {
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_EMPTY;
+ return NULL;
+ }
+
+ entries = (table_entry_t **)
+ table_p->ta_malloc(table_p->opt_param,
+ table_p->ta_entry_n * sizeof(table_entry_t *));
+ if (entries == NULL) {
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_ALLOC;
+ return NULL;
+ }
+
+ /* get a pointer to all entries */
+ entry_p = first_entry(table_p, &linear);
+ if (entry_p == NULL) {
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_NOT_FOUND;
+ return NULL;
+ }
+
+ /* add all of the entries to the array */
+ for (entries_p = entries;
+ entry_p != NULL;
+ entry_p = next_entry(table_p, &linear, &error))
+ *entries_p++ = entry_p;
+ if (error != TABLE_ERROR_NOT_FOUND) {
+ if (error_p != NULL)
+ *error_p = error;
+ return NULL;
+ }
+
+ if (compare == NULL) {
+ /* this is regardless of the alignment */
+ comp_func = local_compare;
+ }
+ else if (table_p->ta_data_align == 0)
+ comp_func = external_compare;
+ else
+ comp_func = external_compare_align;
+ /* now qsort the entire entries array from first to last element */
+ split(entries, entries + table_p->ta_entry_n - 1, comp_func, compare,
+ table_p);
+
+ if (num_entries_p != NULL)
+ *num_entries_p = table_p->ta_entry_n;
+ if (error_p != NULL)
+ *error_p = TABLE_ERROR_NONE;
+ return entries;
+}
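+
+/*
+ * Illustrative usage sketch (not part of the original source): ordering a
+ * table with the default key comparison and printing the entries in order.
+ * The returned array must be freed by the caller afterwards, using the
+ * free routine that matches the table's malloc routine.
+ *
+ *   int n, i, err;
+ *   table_entry_t **sorted = table_order(table_p, NULL, &n, &err);
+ *   if (sorted != NULL) {
+ *     for (i = 0; i < n; i++) {
+ *       int *key_p;
+ *       long *data_p;
+ *       table_entry_info(table_p, sorted[i], (void **) &key_p, NULL,
+ *                        (void **) &data_p, NULL);
+ *       printf("%d -> %ld\n", *key_p, *data_p);
+ *     }
+ *   }
+ */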
+
+/*
+ * int table_entry_info
+ *
+ * DESCRIPTION:
+ *
+ * Get information about an element. The element is one from the
+ * array returned by the table_order function. If any of the key/data
+ * pointers are NULL then they are ignored.
+ *
+ * RETURNS:
+ *
+ * Success - TABLE_ERROR_NONE
+ *
+ * Failure - Table error code.
+ *
+ * ARGUMENTS:
+ *
+ * table_p - Table structure pointer from which we are getting the
+ * element.
+ *
+ * entry_p - Pointer to a table entry from the array returned by the
+ * table_order function.
+ *
+ * key_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the key storage of this entry that is allocated in the table.
+ * If an (int) is stored as this entry's key (for example) then
+ * key_buf_p should be (int **) i.e. the address of a (int *).
+ *
+ * key_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the key that is stored in the table.
+ *
+ * data_buf_p - Pointer which, if not NULL, will be set to the address
+ * of the data storage of this entry that is allocated in the table.
+ * If a (long) is stored as this entry's data (for example) then
+ * data_buf_p should be (long **) i.e. the address of a (long *).
+ *
+ * data_size_p - Pointer to an integer which, if not NULL, will be set
+ * to the size of the data that is stored in the table.
+ */
+int table_entry_info(table_t * table_p, table_entry_t * entry_p,
+ void **key_buf_p, int *key_size_p,
+ void **data_buf_p, int *data_size_p)
+{
+ if (table_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (table_p->ta_magic != TABLE_MAGIC)
+ return TABLE_ERROR_PNT;
+ if (entry_p == NULL)
+ return TABLE_ERROR_ARG_NULL;
+ if (key_buf_p != NULL)
+ *key_buf_p = ENTRY_KEY_BUF(entry_p);
+ if (key_size_p != NULL)
+ *key_size_p = entry_p->te_key_size;
+ if (data_buf_p != NULL) {
+ if (entry_p->te_data_size == 0)
+ *data_buf_p = NULL;
+ else {
+ if (table_p->ta_data_align == 0)
+ *data_buf_p = ENTRY_DATA_BUF(table_p, entry_p);
+ else
+ *data_buf_p = entry_data_buf(table_p, entry_p);
+ }
+ }
+ if (data_size_p != NULL)
+ *data_size_p = entry_p->te_data_size;
+ return TABLE_ERROR_NONE;
+}
+
diff --git a/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_table.h b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_table.h
new file mode 100644
index 00000000..33438b2f
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/ssl/ssl_util_table.h
@@ -0,0 +1,152 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* _ _
+ * _ __ ___ ___ __| | ___ ___| | mod_ssl
+ * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+ * | | | | | | (_) | (_| | \__ \__ \ |
+ * |_| |_| |_|\___/ \__,_|___|___/___/_|
+ * |_____|
+ * ssl_util_table.h
+ * High Performance Hash Table Header
+ */
+
+/*
+ * Generic hash table defines
+ * Table 4.1.0 July-28-1998
+ *
+ * This library is a generic open hash table with buckets and
+ * linked lists. It is pretty high performance. Each element
+ * has a key and a data. The user indexes on the key to find the
+ * data.
+ *
+ * Copyright 1998 by Gray Watson <gray@letters.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose and without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies,
+ * and that the name of Gray Watson not be used in advertising or
+ * publicity pertaining to distribution of the document or software
+ * without specific, written prior permission.
+ *
+ * Gray Watson makes no representations about the suitability of the
+ * software described herein for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ */
+
+#ifndef __SSL_UTIL_TABLE_H__
+#define __SSL_UTIL_TABLE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/*
+ * To build a "key" in any of the below routines, pass in a pointer to
+ * the key and its size [i.e. sizeof(int), etc]. With any of the
+ * "key" or "data" arguments, if their size is < 0, it will do an
+ * internal strlen of the item and add 1 for the \0.
+ *
+ * If you are using the firstkey() and nextkey() functions, be careful:
+ * if, after starting your firstkey loop, you use delete or insert, it
+ * will not crash but may produce surprising results.  If you are
+ * deleting from firstkey to NULL it will work fine.
+ */
+
+/* return types for table functions */
+#define TABLE_ERROR_NONE 1 /* no error from function */
+#define TABLE_ERROR_PNT 2 /* bad table pointer */
+#define TABLE_ERROR_ARG_NULL 3 /* buffer args were null */
+#define TABLE_ERROR_SIZE 4 /* size of data was bad */
+#define TABLE_ERROR_OVERWRITE 5 /* key exists and we can't overwrite */
+#define TABLE_ERROR_NOT_FOUND 6 /* key does not exist */
+#define TABLE_ERROR_ALLOC 7 /* memory allocation error */
+#define TABLE_ERROR_LINEAR 8 /* no linear access started */
+#define TABLE_ERROR_OPEN 9 /* could not open file */
+#define TABLE_ERROR_SEEK 10 /* could not seek to pos in file */
+#define TABLE_ERROR_READ 11 /* could not read from file */
+#define TABLE_ERROR_WRITE 12 /* could not write to file */
+#define TABLE_ERROR_EMPTY 13 /* table is empty */
+#define TABLE_ERROR_NOT_EMPTY 14 /* table contains data */
+#define TABLE_ERROR_ALIGNMENT 15 /* invalid alignment value */
+
+/*
+ * Table flags set with table_attr.
+ */
+
+/*
+ * Automatically adjust the number of table buckets on the fly.
+ * Whenever the number of entries gets above some threshold, the
+ * number of buckets is realloced to a new size and each entry is
+ * re-hashed. Although this may take some time when it re-hashes, the
+ * table will perform better over time.
+ */
+#define TABLE_FLAG_AUTO_ADJUST (1<<0)
+
+/*
+ * If the above auto-adjust flag is set, also adjust the number of
+ * table buckets down as we delete entries.
+ */
+#define TABLE_FLAG_ADJUST_DOWN (1<<1)
+
+/* structure to walk through the fields in a linear order */
+typedef struct {
+ unsigned int tl_magic; /* magic structure to ensure correct init */
+ unsigned int tl_bucket_c; /* where in the table buck array we are */
+ unsigned int tl_entry_c; /* in the bucket, which entry we are on */
+} table_linear_t;
+
+typedef int (*table_compare_t)(const void *key1, const int key1_size,
+ const void *data1, const int data1_size,
+ const void *key2, const int key2_size,
+ const void *data2, const int data2_size);
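+
+/*
+ * Illustrative sketch (not part of the original header): a user-supplied
+ * callback matching table_compare_t that orders entries by an (int) key;
+ * pass it as the compare argument of table_order.
+ *
+ *   static int int_key_compare(const void *key1, const int key1_size,
+ *                              const void *data1, const int data1_size,
+ *                              const void *key2, const int key2_size,
+ *                              const void *data2, const int data2_size)
+ *   {
+ *     return *(const int *) key1 - *(const int *) key2;
+ *   }
+ */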
+
+#ifndef TABLE_PRIVATE
+typedef void table_t;
+typedef void table_entry_t;
+#endif
+
+/*
+ * Prototypes
+ */
+extern table_t *table_alloc(const unsigned int bucket_n, int *error_p, void *(*malloc_f)(void *opt_param, size_t size), void *(*calloc_f)(void *opt_param, size_t number, size_t size), void *(*realloc_f)(void *opt_param, void *ptr, size_t size), void (*free_f)(void *opt_param, void *ptr), void *opt_param);
+extern int table_attr(table_t *table_p, const int attr);
+extern int table_set_data_alignment(table_t *table_p, const int alignment);
+extern int table_clear(table_t *table_p);
+extern int table_free(table_t *table_p);
+extern int table_insert_kd(table_t *table_p, const void *key_buf, const int key_size, const void *data_buf, const int data_size, void **key_buf_p, void **data_buf_p, const char overwrite_b);
+extern int table_insert(table_t *table_p, const void *key_buf, const int key_size, const void *data_buf, const int data_size, void **data_buf_p, const char overwrite_b);
+extern int table_retrieve(table_t *table_p, const void *key_buf, const int key_size, void **data_buf_p, int *data_size_p);
+extern int table_delete(table_t *table_p, const void *key_buf, const int key_size, void **data_buf_p, int *data_size_p);
+extern int table_delete_first(table_t *table_p, void **key_buf_p, int *key_size_p, void **data_buf_p, int *data_size_p);
+extern int table_info(table_t *table_p, int *num_buckets_p, int *num_entries_p);
+extern int table_adjust(table_t *table_p, const int bucket_n);
+extern const char *table_strerror(const int error);
+extern int table_type_size(void);
+extern int table_first(table_t *table_p, void **key_buf_p, int *key_size_p, void **data_buf_p, int *data_size_p);
+extern int table_next(table_t *table_p, void **key_buf_p, int *key_size_p, void **data_buf_p, int *data_size_p);
+extern int table_this(table_t *table_p, void **key_buf_p, int *key_size_p, void **data_buf_p, int *data_size_p);
+extern int table_first_r(table_t *table_p, table_linear_t *linear_p, void **key_buf_p, int *key_size_p, void **data_buf_p, int *data_size_p);
+extern int table_next_r(table_t *table_p, table_linear_t *linear_p, void **key_buf_p, int *key_size_p, void **data_buf_p, int *data_size_p);
+extern int table_this_r(table_t *table_p, table_linear_t *linear_p, void **key_buf_p, int *key_size_p, void **data_buf_p, int *data_size_p);
+extern table_entry_t **table_order(table_t *table_p, table_compare_t compare, int *num_entries_p, int *error_p);
+extern int table_entry_info(table_t *table_p, table_entry_t *entry_p, void **key_buf_p, int *key_size_p, void **data_buf_p, int *data_size_p);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __SSL_UTIL_TABLE_H__ */
diff --git a/rubbos/app/httpd-2.0.64/modules/test/.deps b/rubbos/app/httpd-2.0.64/modules/test/.deps
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/.deps
diff --git a/rubbos/app/httpd-2.0.64/modules/test/.indent.pro b/rubbos/app/httpd-2.0.64/modules/test/.indent.pro
new file mode 100644
index 00000000..a9fbe9f9
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/rubbos/app/httpd-2.0.64/modules/test/Makefile b/rubbos/app/httpd-2.0.64/modules/test/Makefile
new file mode 100644
index 00000000..e5fdd0f2
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/Makefile
@@ -0,0 +1,8 @@
+top_srcdir = /bottlenecks/rubbos/app/httpd-2.0.64
+top_builddir = /bottlenecks/rubbos/app/httpd-2.0.64
+srcdir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/test
+builddir = /bottlenecks/rubbos/app/httpd-2.0.64/modules/test
+VPATH = /bottlenecks/rubbos/app/httpd-2.0.64/modules/test
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/test/Makefile.in b/rubbos/app/httpd-2.0.64/modules/test/Makefile.in
new file mode 100644
index 00000000..7c5c149d
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/Makefile.in
@@ -0,0 +1,3 @@
+# a modules Makefile has no explicit targets -- they will be defined by
+# whatever modules are enabled. just grab special.mk to deal with this.
+include $(top_srcdir)/build/special.mk
diff --git a/rubbos/app/httpd-2.0.64/modules/test/README b/rubbos/app/httpd-2.0.64/modules/test/README
new file mode 100644
index 00000000..f122368a
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/README
@@ -0,0 +1 @@
+test modules have moved to httpd-test/perl-framework/c-modules
diff --git a/rubbos/app/httpd-2.0.64/modules/test/config.m4 b/rubbos/app/httpd-2.0.64/modules/test/config.m4
new file mode 100644
index 00000000..337d0957
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/config.m4
@@ -0,0 +1,10 @@
+
+APACHE_MODPATH_INIT(test)
+
+APACHE_MODULE(optional_hook_export, example optional hook exporter, , , no)
+APACHE_MODULE(optional_hook_import, example optional hook importer, , , no)
+APACHE_MODULE(optional_fn_import, example optional function importer, , , no)
+APACHE_MODULE(optional_fn_export, example optional function exporter, , , no)
+APACHE_MODULE(bucketeer, buckets manipulation filter, , , no)
+
+APACHE_MODPATH_FINISH
diff --git a/rubbos/app/httpd-2.0.64/modules/test/mod_bucketeer.c b/rubbos/app/httpd-2.0.64/modules/test/mod_bucketeer.c
new file mode 100644
index 00000000..ec8436b0
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/mod_bucketeer.c
@@ -0,0 +1,181 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_bucketeer.c: split buckets whenever we find a control-char
+ *
+ * Written by Ian Holsman
+ *
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "apr_strings.h"
+#include "apr_general.h"
+#include "util_filter.h"
+#include "apr_buckets.h"
+#include "http_request.h"
+#include "http_protocol.h"
+
+static const char bucketeerFilterName[] = "BUCKETEER";
+module AP_MODULE_DECLARE_DATA bucketeer_module;
+
+typedef struct bucketeer_filter_config_t
+{
+ char bucketdelimiter;
+ char passdelimiter;
+ char flushdelimiter;
+} bucketeer_filter_config_t;
+
+
+static void *create_bucketeer_server_config(apr_pool_t *p, server_rec *s)
+{
+ bucketeer_filter_config_t *c = apr_pcalloc(p, sizeof *c);
+
+ c->bucketdelimiter = 0x02; /* ^B */
+ c->passdelimiter = 0x10; /* ^P */
+ c->flushdelimiter = 0x06; /* ^F */
+
+ return c;
+}
+
+typedef struct bucketeer_ctx_t
+{
+ apr_bucket_brigade *bb;
+} bucketeer_ctx_t;
+
+static apr_status_t bucketeer_out_filter(ap_filter_t *f,
+ apr_bucket_brigade *bb)
+{
+ apr_bucket *e;
+ request_rec *r = f->r;
+ bucketeer_ctx_t *ctx = f->ctx;
+ bucketeer_filter_config_t *c;
+
+ c = ap_get_module_config(r->server->module_config, &bucketeer_module);
+
+ /* If we have a context, it means we've done this before successfully. */
+ if (!ctx) {
+ if (!r->content_type || strncmp(r->content_type, "text/", 5)) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ /* We're cool with filtering this. */
+ ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
+ ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+ apr_table_unset(f->r->headers_out, "Content-Length");
+ }
+
+ APR_BRIGADE_FOREACH(e, bb) {
+ const char *data;
+ apr_size_t len, i, lastpos;
+
+ if (APR_BUCKET_IS_EOS(e)) {
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, e);
+
+ /* Okay, we've seen the EOS.
+ * Time to pass it along down the chain.
+ */
+ return ap_pass_brigade(f->next, ctx->bb);
+ }
+
+ if (APR_BUCKET_IS_FLUSH(e)) {
+ /*
+ * Ignore flush buckets for the moment;
+ * we decide what to stream.
+ */
+ continue;
+ }
+
+ if (APR_BUCKET_IS_METADATA(e)) {
+ /* metadata bucket */
+ apr_bucket *cpy;
+ apr_bucket_copy(e, &cpy);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, cpy);
+ continue;
+ }
+
+ /* read */
+ apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
+
+ if (len > 0) {
+ lastpos = 0;
+ for (i = 0; i < len; i++) {
+ if (data[i] == c->flushdelimiter ||
+ data[i] == c->bucketdelimiter ||
+ data[i] == c->passdelimiter) {
+ apr_bucket *p;
+ if (i - lastpos > 0) {
+ p = apr_bucket_pool_create(apr_pmemdup(f->r->pool,
+ &data[lastpos],
+ i - lastpos),
+ i - lastpos,
+ f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
+ }
+ lastpos = i + 1;
+ if (data[i] == c->flushdelimiter) {
+ p = apr_bucket_flush_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
+ }
+ if (data[i] == c->flushdelimiter ||
+ data[i] == c->passdelimiter) {
+ ap_pass_brigade(f->next, ctx->bb);
+ /* apr_brigade_cleanup(ctx->bb);*/
+ }
+ }
+ }
+ /* XXX: really should append this to the next 'real' bucket */
+ if (lastpos < i) {
+ apr_bucket *p;
+ p = apr_bucket_pool_create(apr_pmemdup(f->r->pool,
+ &data[lastpos],
+ i - lastpos),
+ i - lastpos,
+ f->r->pool,
+ f->c->bucket_alloc);
+ lastpos = i;
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
+ }
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+static void register_hooks(apr_pool_t * p)
+{
+ ap_register_output_filter(bucketeerFilterName, bucketeer_out_filter,
+ NULL, AP_FTYPE_RESOURCE-1);
+}
+
+static const command_rec bucketeer_filter_cmds[] = {
+ {NULL}
+};
+
+module AP_MODULE_DECLARE_DATA bucketeer_module = {
+ STANDARD20_MODULE_STUFF,
+ NULL,
+ NULL,
+ create_bucketeer_server_config,
+ NULL,
+ bucketeer_filter_cmds,
+ register_hooks
+};
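+
+/*
+ * Usage note (not part of the original source): the filter registers itself
+ * under the name "BUCKETEER", so it is typically enabled on a text/*
+ * response with a directive such as
+ *
+ *   SetOutputFilter BUCKETEER
+ *
+ * after which the ^B, ^P and ^F control characters in the body drive the
+ * bucket-splitting, brigade-passing and flushing behaviour set up above.
+ */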
diff --git a/rubbos/app/httpd-2.0.64/modules/test/mod_bucketeer.dsp b/rubbos/app/httpd-2.0.64/modules/test/mod_bucketeer.dsp
new file mode 100644
index 00000000..0a1a1d41
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/mod_bucketeer.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="mod_bucketeer" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_bucketeer - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_bucketeer.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_bucketeer.mak" CFG="mod_bucketeer - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_bucketeer - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_bucketeer - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_bucketeer - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /Zi /O2 /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_bucketeer_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:"Release/mod_bucketeer.so" /base:@..\..\os\win32\BaseAddr.ref,mod_bucketeer.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Release/mod_bucketeer.so" /base:@..\..\os\win32\BaseAddr.ref,mod_bucketeer.so /opt:ref
+
+!ELSEIF "$(CFG)" == "mod_bucketeer - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_bucketeer_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_bucketeer.so" /base:@..\..\os\win32\BaseAddr.ref,mod_bucketeer.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:"Debug/mod_bucketeer.so" /base:@..\..\os\win32\BaseAddr.ref,mod_bucketeer.so
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_bucketeer - Win32 Release"
+# Name "mod_bucketeer - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_bucketeer.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_bucketeer.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\win32ver.awk
+
+!IF "$(CFG)" == "mod_bucketeer - Win32 Release"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_bucketeer.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_bucketeer.so "bucketeer_module for Apache" ../../include/ap_release.h > .\mod_bucketeer.rc
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mod_bucketeer - Win32 Debug"
+
+# PROP Ignore_Default_Tool 1
+# Begin Custom Build - Creating Version Resource
+InputPath=..\..\build\win32\win32ver.awk
+
+".\mod_bucketeer.rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ awk -f ../../build/win32/win32ver.awk mod_bucketeer.so "bucketeer_module for Apache" ../../include/ap_release.h > .\mod_bucketeer.rc
+
+# End Custom Build
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
diff --git a/rubbos/app/httpd-2.0.64/modules/test/mod_optional_fn_export.c b/rubbos/app/httpd-2.0.64/modules/test/mod_optional_fn_export.c
new file mode 100644
index 00000000..27db2cc1
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/mod_optional_fn_export.c
@@ -0,0 +1,48 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "mod_optional_fn_export.h"
+
+/* The alert reader will note a strange mirror-image style resemblance to
+ * mod_optional_hook_import.c. Yes, I _did_ mean import. Think about it.
+ */
+
+static int TestOptionalFn(const char *szStr)
+{
+ ap_log_error(APLOG_MARK,APLOG_ERR,OK,NULL,
+ "Optional function test said: %s",szStr);
+
+ return OK;
+}
+
+static void ExportRegisterHooks(apr_pool_t *p)
+{
+ APR_REGISTER_OPTIONAL_FN(TestOptionalFn);
+}
+
+module AP_MODULE_DECLARE_DATA optional_fn_export_module=
+{
+ STANDARD20_MODULE_STUFF,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ExportRegisterHooks
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/test/mod_optional_fn_export.h b/rubbos/app/httpd-2.0.64/modules/test/mod_optional_fn_export.h
new file mode 100644
index 00000000..f30c0b6c
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/mod_optional_fn_export.h
@@ -0,0 +1,19 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_optional.h"
+
+APR_DECLARE_OPTIONAL_FN(int,TestOptionalFn,(const char *));
diff --git a/rubbos/app/httpd-2.0.64/modules/test/mod_optional_fn_import.c b/rubbos/app/httpd-2.0.64/modules/test/mod_optional_fn_import.c
new file mode 100644
index 00000000..f793fa44
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/mod_optional_fn_import.c
@@ -0,0 +1,55 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "mod_optional_fn_export.h"
+#include "http_protocol.h"
+
+/* The alert reader will note a strange mirror-image style resemblance to
+ * mod_optional_hook_export.c. Yes, I _did_ mean export. Think about it.
+ */
+
+static APR_OPTIONAL_FN_TYPE(TestOptionalFn) *pfn;
+
+static int ImportLogTransaction(request_rec *r)
+{
+ if(pfn)
+ return pfn(r->the_request);
+ return DECLINED;
+}
+
+static void ImportFnRetrieve(void)
+{
+ pfn=APR_RETRIEVE_OPTIONAL_FN(TestOptionalFn);
+}
+
+static void ImportRegisterHooks(apr_pool_t *p)
+{
+ ap_hook_log_transaction(ImportLogTransaction,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_optional_fn_retrieve(ImportFnRetrieve,NULL,NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA optional_fn_import_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ImportRegisterHooks
+};
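
The two modules above are the export/import halves of the optional-function test: the exporter registers TestOptionalFn by name, and the importer retrieves it during the optional_fn_retrieve hook and checks for NULL before calling it, so either module can be loaded without the other. The following is a minimal sketch condensing that pattern; the "example_" names are hypothetical and the module structures are omitted for brevity.

/* Condensed sketch of the optional-function pattern used by the two test
 * modules above (httpd 2.0 / apr_optional.h). Names prefixed "example_"
 * are hypothetical; module structures are omitted for brevity.
 */
#include "httpd.h"
#include "http_config.h"
#include "apr_optional.h"

/* Shared declaration, normally placed in a header both sides include. */
APR_DECLARE_OPTIONAL_FN(int, example_fn, (const char *arg));

/* Exporter side: implement the function and register it by name. */
static int example_fn(const char *arg)
{
    return OK;
}

static void export_register_hooks(apr_pool_t *p)
{
    APR_REGISTER_OPTIONAL_FN(example_fn);
}

/* Importer side: look the function up once every module has had a chance
 * to register its optional functions, and tolerate its absence at call time.
 */
static APR_OPTIONAL_FN_TYPE(example_fn) *example_pfn;

static void import_retrieve(void)
{
    example_pfn = APR_RETRIEVE_OPTIONAL_FN(example_fn);
}

static int import_use(const char *arg)
{
    return example_pfn ? example_pfn(arg) : DECLINED;
}

static void import_register_hooks(apr_pool_t *p)
{
    ap_hook_optional_fn_retrieve(import_retrieve, NULL, NULL, APR_HOOK_MIDDLE);
}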
diff --git a/rubbos/app/httpd-2.0.64/modules/test/mod_optional_hook_export.c b/rubbos/app/httpd-2.0.64/modules/test/mod_optional_hook_export.c
new file mode 100644
index 00000000..b0e6fd53
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/mod_optional_hook_export.c
@@ -0,0 +1,44 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "mod_optional_hook_export.h"
+#include "http_protocol.h"
+
+AP_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(int,optional_hook_test,(const char *szStr),
+ (szStr),OK,DECLINED)
+
+static int ExportLogTransaction(request_rec *r)
+{
+ return ap_run_optional_hook_test(r->the_request);
+}
+
+static void ExportRegisterHooks(apr_pool_t *p)
+{
+ ap_hook_log_transaction(ExportLogTransaction,NULL,NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA optional_hook_export_module =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ExportRegisterHooks
+};
diff --git a/rubbos/app/httpd-2.0.64/modules/test/mod_optional_hook_export.h b/rubbos/app/httpd-2.0.64/modules/test/mod_optional_hook_export.h
new file mode 100644
index 00000000..7af68620
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/mod_optional_hook_export.h
@@ -0,0 +1,24 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MOD_OPTIONAL_HOOK_EXPORT_H
+#define MOD_OPTIONAL_HOOK_EXPORT_H
+
+#include "ap_config.h"
+
+AP_DECLARE_HOOK(int,optional_hook_test,(const char *))
+
+#endif /* def MOD_OPTIONAL_HOOK_EXPORT_H */
diff --git a/rubbos/app/httpd-2.0.64/modules/test/mod_optional_hook_import.c b/rubbos/app/httpd-2.0.64/modules/test/mod_optional_hook_import.c
new file mode 100644
index 00000000..cd485413
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/mod_optional_hook_import.c
@@ -0,0 +1,45 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "mod_optional_hook_export.h"
+
+static int ImportOptionalHookTestHook(const char *szStr)
+{
+ ap_log_error(APLOG_MARK,APLOG_ERR,OK,NULL,"Optional hook test said: %s",
+ szStr);
+
+ return OK;
+}
+
+static void ImportRegisterHooks(apr_pool_t *p)
+{
+ AP_OPTIONAL_HOOK(optional_hook_test,ImportOptionalHookTestHook,NULL,
+ NULL,APR_HOOK_MIDDLE);
+}
+
+module AP_MODULE_DECLARE_DATA optional_hook_import_module=
+{
+ STANDARD20_MODULE_STUFF,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ImportRegisterHooks
+};
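
The optional-hook pair works the same way in mirror image: the exporter declares the hook in a shared header, implements it with AP_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL, and runs it at log_transaction time, while the importer attaches to it by name with AP_OPTIONAL_HOOK; if either side is missing, the hook simply has no callers or no callees. A minimal sketch of the pattern, with hypothetical "example_" names and the module structures again omitted:

/* Condensed sketch of the optional-hook pattern shown above; "example_hook"
 * and the function names are hypothetical, and module structures are
 * omitted for brevity.
 */
#include "httpd.h"
#include "http_config.h"
#include "http_protocol.h"
#include "ap_config.h"

/* Shared header: declare the hook signature so both sides agree on it. */
AP_DECLARE_HOOK(int, example_hook, (const char *arg))

/* Exporter: generate the hook machinery. RUN_ALL calls the registered
 * functions in order, stopping early only if one returns something other
 * than OK or DECLINED.
 */
AP_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(int, example_hook, (const char *arg),
                                   (arg), OK, DECLINED)

static int export_log_transaction(request_rec *r)
{
    return ap_run_example_hook(r->the_request);
}

static void export_register_hooks(apr_pool_t *p)
{
    ap_hook_log_transaction(export_log_transaction, NULL, NULL,
                            APR_HOOK_MIDDLE);
}

/* Importer: attach to the hook by name; if the exporter is not loaded,
 * the registration is simply never invoked.
 */
static int example_hook_impl(const char *arg)
{
    return OK;
}

static void import_register_hooks(apr_pool_t *p)
{
    AP_OPTIONAL_HOOK(example_hook, example_hook_impl, NULL, NULL,
                     APR_HOOK_MIDDLE);
}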
diff --git a/rubbos/app/httpd-2.0.64/modules/test/modules.mk b/rubbos/app/httpd-2.0.64/modules/test/modules.mk
new file mode 100644
index 00000000..ceb52a1b
--- /dev/null
+++ b/rubbos/app/httpd-2.0.64/modules/test/modules.mk
@@ -0,0 +1,3 @@
+DISTCLEAN_TARGETS = modules.mk
+static =
+shared =