Cross-compiling for mips under x86_64 with MSA support - mips

I’m trying to build a toolchain for cross-compiling for MIPS under x86 with crosstool-ng.
According to the GCC 7.2.0 documentation it must support MSA instructions. But when I build an application containing MSA assembly instructions with the built toolchain, the application doesn't execute and fails with "Illegal Instruction". What am I doing wrong?
I'm building the application with the following cflags: -mmsa -mhard-float -mfp64, but without -mnan=2008, because in that case I get an error about a missing gnu/stubs-o32_hard_2008.h
The text of the .config file for crosstool-ng:
#
# Automatically generated file; DO NOT EDIT.
# crosstool-NG crosstool-ng-1.23.0-288-gadaa3a5 Configuration
#
CT_CONFIGURE_has_static_link=y
CT_CONFIGURE_has_wget=y
CT_CONFIGURE_has_stat_flavor_GNU=y
CT_CONFIGURE_has_make_3_81_or_newer=y
CT_CONFIGURE_has_autoconf_2_63_or_newer=y
CT_CONFIGURE_has_autoreconf_2_63_or_newer=y
CT_CONFIGURE_has_automake_1_15_or_newer=y
CT_CONFIGURE_has_gnu_m4_1_4_12_or_newer=y
CT_CONFIGURE_has_git=y
CT_MODULES=y
#
# Paths and misc options
#
#
# crosstool-NG behavior
#
# CT_OBSOLETE is not set
# CT_EXPERIMENTAL is not set
# CT_DEBUG_CT is not set
#
# Paths
#
CT_LOCAL_TARBALLS_DIR="${HOME}/src"
CT_SAVE_TARBALLS=y
CT_WORK_DIR="${CT_TOP_DIR}/.build"
CT_BUILD_TOP_DIR="${CT_WORK_DIR}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}"
CT_PREFIX_DIR="${CT_PREFIX:-${HOME}/x-tools}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}"
CT_RM_RF_PREFIX_DIR=y
CT_REMOVE_DOCS=y
CT_PREFIX_DIR_RO=y
CT_STRIP_HOST_TOOLCHAIN_EXECUTABLES=y
# CT_STRIP_TARGET_TOOLCHAIN_EXECUTABLES is not set
#
# Downloading
#
CT_DOWNLOAD_AGENT_WGET=y
# CT_DOWNLOAD_AGENT_NONE is not set
# CT_FORBID_DOWNLOAD is not set
# CT_FORCE_DOWNLOAD is not set
CT_CONNECT_TIMEOUT=10
CT_DOWNLOAD_WGET_OPTIONS="--passive-ftp --tries=3 -nc --progress=dot:binary"
# CT_ONLY_DOWNLOAD is not set
# CT_USE_MIRROR is not set
CT_VERIFY_DOWNLOAD_DIGEST=y
CT_VERIFY_DOWNLOAD_DIGEST_SHA512=y
CT_MAKE_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
# CT_VERIFY_DOWNLOAD_DIGEST_SHA256 is not set
# CT_VERIFY_DOWNLOAD_DIGEST_SHA1 is not set
# CT_VERIFY_DOWNLOAD_DIGEST_MD5 is not set
CT_VERIFY_DOWNLOAD_DIGEST_ALG="sha512"
# CT_VERIFY_DOWNLOAD_SIGNATURE is not set
#
# Extracting
#
# CT_FORCE_EXTRACT is not set
CT_OVERRIDE_CONFIG_GUESS_SUB=y
# CT_ONLY_EXTRACT is not set
CT_PATCH_BUNDLED=y
# CT_PATCH_BUNDLED_LOCAL is not set
CT_PATCH_ORDER="bundled"
#
# Build behavior
#
CT_PARALLEL_JOBS=0
CT_LOAD=""
CT_USE_PIPES=y
CT_EXTRA_CFLAGS_FOR_BUILD=""
CT_EXTRA_LDFLAGS_FOR_BUILD=""
CT_EXTRA_CFLAGS_FOR_HOST=""
CT_EXTRA_LDFLAGS_FOR_HOST=""
# CT_CONFIG_SHELL_SH is not set
# CT_CONFIG_SHELL_ASH is not set
CT_CONFIG_SHELL_BASH=y
# CT_CONFIG_SHELL_CUSTOM is not set
CT_CONFIG_SHELL="${bash}"
#
# Logging
#
# CT_LOG_ERROR is not set
# CT_LOG_WARN is not set
# CT_LOG_INFO is not set
CT_LOG_EXTRA=y
# CT_LOG_ALL is not set
# CT_LOG_DEBUG is not set
CT_LOG_LEVEL_MAX="EXTRA"
# CT_LOG_SEE_TOOLS_WARN is not set
CT_LOG_PROGRESS_BAR=y
CT_LOG_TO_FILE=y
CT_MAKE_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_LOG_FILE_COMPRESS=y
#
# Target options
#
# CT_ARCH_ALPHA is not set
# CT_ARCH_ARM is not set
# CT_ARCH_AVR is not set
# CT_ARCH_M68K is not set
CT_ARCH_MIPS=y
# CT_ARCH_NIOS2 is not set
# CT_ARCH_POWERPC is not set
# CT_ARCH_S390 is not set
# CT_ARCH_SH is not set
# CT_ARCH_SPARC is not set
# CT_ARCH_X86 is not set
# CT_ARCH_XTENSA is not set
CT_ARCH="mips"
CT_ARCH_CHOICE_KSYM="MIPS"
CT_ARCH_TUNE=""
CT_ARCH_MIPS_PKG_KSYM=""
CT_ARCH_mips_o32=y
CT_ARCH_mips_ABI="32"
CT_ARCH_SUFFIX=""
#
# Generic target options
#
# CT_MULTILIB is not set
CT_DEMULTILIB=y
CT_ARCH_USE_MMU=y
CT_ARCH_SUPPORTS_EITHER_ENDIAN=y
CT_ARCH_DEFAULT_BE=y
CT_ARCH_BE=y
# CT_ARCH_LE is not set
# CT_ARCH_BE_LE is not set
# CT_ARCH_LE_BE is not set
CT_ARCH_ENDIAN="big"
CT_ARCH_SUPPORTS_32=y
CT_ARCH_SUPPORTS_64=y
CT_ARCH_DEFAULT_32=y
CT_ARCH_BITNESS=32
CT_ARCH_32=y
# CT_ARCH_64 is not set
#
# Target optimisations
#
CT_ARCH_SUPPORTS_WITH_ARCH=y
CT_ARCH_SUPPORTS_WITH_TUNE=y
CT_ARCH_SUPPORTS_WITH_FLOAT=y
CT_ARCH_ARCH="mips32r5"
# CT_ARCH_FLOAT_AUTO is not set
CT_ARCH_FLOAT_HW=y
# CT_ARCH_FLOAT_SW is not set
CT_TARGET_CFLAGS=""
CT_TARGET_LDFLAGS=""
CT_ARCH_FLOAT="hard"
#
# Toolchain options
#
#
# General toolchain options
#
CT_FORCE_SYSROOT=y
CT_USE_SYSROOT=y
CT_SYSROOT_NAME="sysroot"
CT_SYSROOT_DIR_PREFIX=""
CT_WANTS_STATIC_LINK=y
CT_WANTS_STATIC_LINK_CXX=y
# CT_STATIC_TOOLCHAIN is not set
CT_SHOW_CT_VERSION=y
CT_TOOLCHAIN_PKGVERSION=""
CT_TOOLCHAIN_BUGURL=""
#
# Tuple completion and aliasing
#
CT_TARGET_VENDOR="unknown"
CT_TARGET_ALIAS_SED_EXPR=""
CT_TARGET_ALIAS=""
#
# Toolchain type
#
CT_CROSS=y
# CT_CANADIAN is not set
CT_TOOLCHAIN_TYPE="cross"
#
# Build system
#
CT_BUILD=""
CT_BUILD_PREFIX=""
CT_BUILD_SUFFIX=""
#
# Misc options
#
# CT_TOOLCHAIN_ENABLE_NLS is not set
#
# Operating System
#
CT_KERNEL_SUPPORTS_SHARED_LIBS=y
# CT_KERNEL_BARE_METAL is not set
CT_KERNEL_LINUX=y
CT_KERNEL="linux"
CT_KERNEL_CHOICE_KSYM="LINUX"
CT_KERNEL_LINUX_PKG_KSYM="LINUX"
CT_LINUX_DIR_NAME="linux"
CT_LINUX_PKG_NAME="linux"
CT_LINUX_SRC_RELEASE=y
# CT_LINUX_V_4_14 is not set
# CT_LINUX_V_4_13 is not set
# CT_LINUX_V_4_12 is not set
# CT_LINUX_V_4_11 is not set
# CT_LINUX_V_4_10 is not set
# CT_LINUX_V_4_9 is not set
CT_LINUX_V_4_4=y
# CT_LINUX_V_4_1 is not set
# CT_LINUX_V_3_16 is not set
# CT_LINUX_V_3_13 is not set
# CT_LINUX_V_3_12 is not set
# CT_LINUX_V_3_10 is not set
# CT_LINUX_V_3_4 is not set
# CT_LINUX_V_3_2 is not set
CT_LINUX_VERSION="4.4.103"
CT_LINUX_MIRRORS="$(CT_Mirrors kernel.org linux ${CT_LINUX_VERSION})"
CT_LINUX_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_LINUX_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_LINUX_ARCHIVE_FORMATS=".tar.xz .tar.gz"
CT_LINUX_SIGNATURE_FORMAT="unpacked/.sign"
CT_LINUX_3_2_or_later=y
CT_LINUX_REQUIRE_3_2_or_later=y
CT_KERNEL_LINUX_VERBOSITY_0=y
# CT_KERNEL_LINUX_VERBOSITY_1 is not set
# CT_KERNEL_LINUX_VERBOSITY_2 is not set
CT_KERNEL_LINUX_VERBOSE_LEVEL=0
CT_KERNEL_LINUX_INSTALL_CHECK=y
#
# Common kernel options
#
CT_SHARED_LIBS=y
#
# Binary utilities
#
CT_ARCH_BINFMT_ELF=y
CT_BINUTILS_BINUTILS=y
CT_BINUTILS="binutils"
CT_BINUTILS_CHOICE_KSYM="BINUTILS"
CT_BINUTILS_BINUTILS_PKG_KSYM="BINUTILS"
CT_BINUTILS_DIR_NAME="binutils"
CT_BINUTILS_USE_GNU=y
CT_BINUTILS_USE="BINUTILS"
CT_BINUTILS_PKG_NAME="binutils"
CT_BINUTILS_SRC_RELEASE=y
# CT_BINUTILS_V_2_29_1 is not set
# CT_BINUTILS_V_2_28_1 is not set
# CT_BINUTILS_V_2_27 is not set
CT_BINUTILS_V_2_26_1=y
CT_BINUTILS_VERSION="2.26.1"
CT_BINUTILS_MIRRORS="$(CT_Mirrors GNU binutils) $(CT_Mirrors sourceware binutils/releases)"
CT_BINUTILS_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_BINUTILS_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_BINUTILS_ARCHIVE_FORMATS=".tar.bz2 .tar.gz"
CT_BINUTILS_SIGNATURE_FORMAT="packed/.sig"
CT_BINUTILS_2_25_or_later=y
CT_BINUTILS_REQUIRE_2_25_or_later=y
CT_BINUTILS_2_23_or_later=y
#
# GNU binutils
#
CT_BINUTILS_HAS_HASH_STYLE=y
CT_BINUTILS_HAS_GOLD=y
CT_BINUTILS_HAS_PLUGINS=y
CT_BINUTILS_HAS_PKGVERSION_BUGURL=y
CT_BINUTILS_FORCE_LD_BFD_DEFAULT=y
CT_BINUTILS_LINKER_LD=y
CT_BINUTILS_LINKERS_LIST="ld"
CT_BINUTILS_LINKER_DEFAULT="bfd"
# CT_BINUTILS_PLUGINS is not set
CT_BINUTILS_EXTRA_CONFIG_ARRAY=""
# CT_BINUTILS_FOR_TARGET is not set
#
# C-library
#
CT_LIBC_GLIBC=y
# CT_LIBC_UCLIBC is not set
CT_LIBC="glibc"
CT_LIBC_CHOICE_KSYM="GLIBC"
CT_THREADS="nptl"
CT_LIBC_GLIBC_PKG_KSYM="GLIBC"
CT_GLIBC_DIR_NAME="glibc"
CT_GLIBC_USE_GNU=y
CT_GLIBC_USE="GLIBC"
CT_GLIBC_PKG_NAME="glibc"
CT_GLIBC_SRC_RELEASE=y
CT_GLIBC_V_2_26=y
# CT_GLIBC_V_2_25 is not set
# CT_GLIBC_V_2_24 is not set
# CT_GLIBC_V_2_23 is not set
# CT_GLIBC_V_2_19 is not set
# CT_GLIBC_V_2_17 is not set
# CT_GLIBC_V_2_12_1 is not set
CT_GLIBC_VERSION="2.26"
CT_GLIBC_MIRRORS="$(CT_Mirrors GNU glibc)"
CT_GLIBC_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_GLIBC_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_GLIBC_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz"
CT_GLIBC_SIGNATURE_FORMAT="packed/.sig"
CT_GLIBC_2_26_or_later=y
CT_GLIBC_2_26_or_older=y
CT_GLIBC_2_24_or_later=y
CT_GLIBC_2_23_or_later=y
CT_GLIBC_2_20_or_later=y
CT_GLIBC_2_17_or_later=y
CT_GLIBC_2_14_or_later=y
CT_GLIBC_DEP_KERNEL_HEADERS_VERSION=y
CT_GLIBC_DEP_BINUTILS=y
CT_GLIBC_DEP_GCC=y
CT_GLIBC_HAS_LIBIDN_ADDON=y
# CT_GLIBC_USE_LIBIDN_ADDON is not set
CT_GLIBC_NO_SPARC_V8=y
CT_GLIBC_HAS_OBSOLETE_RPC=y
CT_GLIBC_EXTRA_CONFIG_ARRAY=""
CT_GLIBC_CONFIGPARMS=""
CT_GLIBC_EXTRA_CFLAGS=""
CT_GLIBC_ENABLE_OBSOLETE_RPC=y
# CT_GLIBC_DISABLE_VERSIONING is not set
CT_GLIBC_OLDEST_ABI=""
CT_GLIBC_FORCE_UNWIND=y
# CT_GLIBC_LOCALES is not set
# CT_GLIBC_KERNEL_VERSION_NONE is not set
CT_GLIBC_KERNEL_VERSION_AS_HEADERS=y
# CT_GLIBC_KERNEL_VERSION_CHOSEN is not set
CT_GLIBC_MIN_KERNEL="4.4.103"
CT_LIBC_SUPPORT_THREADS_ANY=y
CT_LIBC_SUPPORT_THREADS_NATIVE=y
#
# Common C library options
#
CT_THREADS_NATIVE=y
# CT_CREATE_LDSO_CONF is not set
CT_LIBC_XLDD=y
#
# C compiler
#
CT_CC_CORE_PASSES_NEEDED=y
CT_CC_CORE_PASS_1_NEEDED=y
CT_CC_CORE_PASS_2_NEEDED=y
CT_CC_SUPPORT_CXX=y
CT_CC_SUPPORT_FORTRAN=y
CT_CC_SUPPORT_ADA=y
CT_CC_SUPPORT_OBJC=y
CT_CC_SUPPORT_OBJCXX=y
CT_CC_SUPPORT_GOLANG=y
CT_CC_GCC=y
CT_CC="gcc"
CT_CC_CHOICE_KSYM="GCC"
CT_CC_GCC_PKG_KSYM="GCC"
CT_GCC_DIR_NAME="gcc"
CT_GCC_USE_GNU=y
CT_GCC_USE="GCC"
CT_GCC_PKG_NAME="gcc"
CT_GCC_SRC_RELEASE=y
CT_GCC_V_7_2_0=y
# CT_GCC_V_6_4_0 is not set
# CT_GCC_V_5_5_0 is not set
# CT_GCC_V_4_9_4 is not set
CT_GCC_VERSION="7.2.0"
CT_GCC_MIRRORS="$(CT_Mirrors GNU gcc/gcc-${CT_GCC_VERSION}) $(CT_Mirrors sourceware gcc/releases/gcc-${CT_GCC_VERSION})"
CT_GCC_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_GCC_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_GCC_ARCHIVE_FORMATS=".tar.xz .tar.gz"
CT_GCC_SIGNATURE_FORMAT=""
CT_GCC_7_or_later=y
CT_GCC_6_or_later=y
CT_GCC_5_or_later=y
CT_GCC_4_9_2_or_later=y
CT_GCC_4_9_or_later=y
CT_GCC_REQUIRE_4_9_or_later=y
CT_GCC_4_8_or_later=y
CT_CC_GCC_HAS_LIBMPX=y
CT_CC_GCC_ENABLE_CXX_FLAGS=""
CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY=""
CT_CC_GCC_EXTRA_CONFIG_ARRAY=""
CT_CC_GCC_STATIC_LIBSTDCXX=y
# CT_CC_GCC_SYSTEM_ZLIB is not set
CT_CC_GCC_CONFIG_TLS=m
#
# Optimisation features
#
CT_CC_GCC_USE_GRAPHITE=y
CT_CC_GCC_USE_LTO=y
#
# Settings for libraries running on target
#
CT_CC_GCC_ENABLE_TARGET_OPTSPACE=y
# CT_CC_GCC_LIBMUDFLAP is not set
# CT_CC_GCC_LIBGOMP is not set
# CT_CC_GCC_LIBSSP is not set
# CT_CC_GCC_LIBQUADMATH is not set
# CT_CC_GCC_LIBSANITIZER is not set
#
# Misc. obscure options.
#
CT_CC_CXA_ATEXIT=y
# CT_CC_GCC_DISABLE_PCH is not set
CT_CC_GCC_SJLJ_EXCEPTIONS=m
CT_CC_GCC_LDBL_128=m
# CT_CC_GCC_BUILD_ID is not set
CT_CC_GCC_LNK_HASH_STYLE_DEFAULT=y
# CT_CC_GCC_LNK_HASH_STYLE_SYSV is not set
# CT_CC_GCC_LNK_HASH_STYLE_GNU is not set
# CT_CC_GCC_LNK_HASH_STYLE_BOTH is not set
CT_CC_GCC_LNK_HASH_STYLE=""
CT_CC_GCC_DEC_FLOAT_AUTO=y
# CT_CC_GCC_DEC_FLOAT_BID is not set
# CT_CC_GCC_DEC_FLOAT_DPD is not set
# CT_CC_GCC_DEC_FLOATS_NO is not set
CT_CC_GCC_HAS_ARCH_OPTIONS=y
#
# archictecture-specific options
#
CT_CC_GCC_mips_llsc=m
CT_CC_GCC_mips_synci=m
# CT_CC_GCC_mips_plt is not set
#
# Additional supported languages:
#
CT_CC_LANG_CXX=y
CT_CC_LANG_FORTRAN=y
#
# Debug facilities
#
# CT_DEBUG_DUMA is not set
CT_DEBUG_GDB=y
CT_DEBUG_GDB_PKG_KSYM="GDB"
CT_GDB_DIR_NAME="gdb"
CT_GDB_USE_GNU=y
CT_GDB_USE="GDB"
CT_GDB_PKG_NAME="gdb"
CT_GDB_SRC_RELEASE=y
CT_GDB_V_8_0_1=y
# CT_GDB_V_7_12_1 is not set
# CT_GDB_V_7_11_1 is not set
CT_GDB_VERSION="8.0.1"
CT_GDB_MIRRORS="$(CT_Mirrors GNU gdb) $(CT_Mirrors sourceware gdb/releases)"
CT_GDB_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_GDB_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_GDB_ARCHIVE_FORMATS=".tar.xz .tar.gz"
CT_GDB_SIGNATURE_FORMAT=""
CT_GDB_8_0_or_later=y
CT_GDB_7_2_or_later=y
CT_GDB_7_0_or_later=y
CT_GDB_CROSS=y
# CT_GDB_CROSS_STATIC is not set
# CT_GDB_CROSS_SIM is not set
CT_GDB_CROSS_PYTHON=y
CT_GDB_CROSS_PYTHON_BINARY=""
CT_GDB_CROSS_EXTRA_CONFIG_ARRAY=""
# CT_GDB_NATIVE is not set
CT_GDB_GDBSERVER=y
CT_GDB_GDBSERVER_HAS_IPA_LIB=y
# CT_GDB_GDBSERVER_STATIC_LIBSTDCXX is not set
# CT_GDB_GDBSERVER_BUILD_IPA_LIB is not set
CT_GDB_HAS_PKGVERSION_BUGURL=y
CT_GDB_HAS_PYTHON=y
CT_GDB_INSTALL_GDBINIT=y
# CT_DEBUG_LTRACE is not set
# CT_DEBUG_STRACE is not set
#
# Companion libraries
#
# CT_COMPLIBS_CHECK is not set
# CT_COMP_LIBS_CLOOG is not set
CT_COMP_LIBS_EXPAT=y
CT_COMP_LIBS_EXPAT_PKG_KSYM="EXPAT"
CT_EXPAT_DIR_NAME="expat"
CT_EXPAT_PKG_NAME="expat"
CT_EXPAT_SRC_RELEASE=y
CT_EXPAT_V_2_2_5=y
CT_EXPAT_VERSION="2.2.5"
CT_EXPAT_MIRRORS="http://downloads.sourceforge.net/project/expat/expat/${CT_EXPAT_VERSION}"
CT_EXPAT_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_EXPAT_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_EXPAT_ARCHIVE_FORMATS=".tar.bz2"
CT_EXPAT_SIGNATURE_FORMAT=""
CT_COMP_LIBS_GETTEXT=y
CT_COMP_LIBS_GETTEXT_PKG_KSYM="GETTEXT"
CT_GETTEXT_DIR_NAME="gettext"
CT_GETTEXT_PKG_NAME="gettext"
CT_GETTEXT_SRC_RELEASE=y
CT_GETTEXT_V_0_19_8_1=y
CT_GETTEXT_VERSION="0.19.8.1"
CT_GETTEXT_MIRRORS="$(CT_Mirrors GNU gettext)"
CT_GCC_SRC_RELEASE=y
CT_GCC_V_7_2_0=y
CT_GETTEXT_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_GETTEXT_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_GETTEXT_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.gz"
CT_GETTEXT_SIGNATURE_FORMAT="packed/.sig"
CT_COMP_LIBS_GMP=y
CT_COMP_LIBS_GMP_PKG_KSYM="GMP"
CT_GMP_DIR_NAME="gmp"
CT_GMP_PKG_NAME="gmp"
CT_GMP_SRC_RELEASE=y
CT_GMP_V_6_1_2=y
CT_GMP_VERSION="6.1.2"
CT_GMP_MIRRORS="https://gmplib.org/download/gmp https://gmplib.org/download/gmp/archive $(CT_Mirrors GNU gmp)"
CT_GMP_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_GMP_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_GMP_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2"
CT_GMP_SIGNATURE_FORMAT="packed/.sig"
CT_GMP_5_1_or_later=y
CT_COMP_LIBS_ISL=y
CT_COMP_LIBS_ISL_PKG_KSYM="ISL"
CT_ISL_DIR_NAME="isl"
CT_ISL_PKG_NAME="isl"
CT_ISL_SRC_RELEASE=y
CT_GCC_SRC_RELEASE=y
CT_GCC_V_7_2_0=y
CT_ISL_V_0_18=y
# CT_ISL_V_0_17_1 is not set
# CT_ISL_V_0_16_1 is not set
# CT_ISL_V_0_15 is not set
CT_ISL_VERSION="0.18"
CT_ISL_MIRRORS="http://isl.gforge.inria.fr"
CT_ISL_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_ISL_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_ISL_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz"
CT_ISL_SIGNATURE_FORMAT=""
CT_ISL_0_15_or_later=y
CT_ISL_REQUIRE_0_15_or_later=y
CT_ISL_0_14_or_later=y
CT_ISL_REQUIRE_0_14_or_later=y
CT_ISL_0_13_or_later=y
CT_ISL_0_12_or_later=y
CT_ISL_REQUIRE_0_12_or_later=y
# CT_COMP_LIBS_LIBELF is not set
CT_COMP_LIBS_LIBICONV=y
CT_COMP_LIBS_LIBICONV_PKG_KSYM="LIBICONV"
CT_LIBICONV_DIR_NAME="libiconv"
CT_LIBICONV_PKG_NAME="libiconv"
CT_LIBICONV_SRC_RELEASE=y
CT_LIBICONV_V_1_15=y
CT_LIBICONV_VERSION="1.15"
CT_LIBICONV_MIRRORS="$(CT_Mirrors GNU libiconv)"
CT_LIBICONV_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_LIBICONV_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_LIBICONV_ARCHIVE_FORMATS=".tar.gz"
CT_LIBICONV_SIGNATURE_FORMAT="packed/.sig"
CT_COMP_LIBS_MPC=y
CT_COMP_LIBS_MPC_PKG_KSYM="MPC"
CT_MPC_DIR_NAME="mpc"
CT_MPC_PKG_NAME="mpc"
CT_MPC_SRC_RELEASE=y
CT_MPC_V_1_0_3=y
CT_GCC_SRC_RELEASE=y
CT_GCC_V_7_2_0=y
CT_MPC_VERSION="1.0.3"
CT_MPC_MIRRORS="http://www.multiprecision.org/mpc/download $(CT_Mirrors GNU mpc)"
CT_MPC_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_MPC_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_MPC_ARCHIVE_FORMATS=".tar.gz"
CT_MPC_SIGNATURE_FORMAT="packed/.sig"
CT_COMP_LIBS_MPFR=y
CT_COMP_LIBS_MPFR_PKG_KSYM="MPFR"
CT_MPFR_DIR_NAME="mpfr"
CT_MPFR_PKG_NAME="mpfr"
CT_MPFR_SRC_RELEASE=y
CT_MPFR_V_3_1_6=y
CT_MPFR_VERSION="3.1.6"
CT_MPFR_MIRRORS="http://www.mpfr.org/mpfr-${CT_MPFR_VERSION} $(CT_Mirrors GNU mpfr)"
CT_MPFR_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_MPFR_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_MPFR_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz .zip"
CT_MPFR_SIGNATURE_FORMAT="packed/.asc"
CT_COMP_LIBS_NCURSES=y
CT_COMP_LIBS_NCURSES_PKG_KSYM="NCURSES"
CT_NCURSES_DIR_NAME="ncurses"
CT_NCURSES_PKG_NAME="ncurses"
CT_NCURSES_SRC_RELEASE=y
CT_NCURSES_V_6_0=y
CT_NCURSES_VERSION="6.0"
CT_NCURSES_MIRRORS="ftp://invisible-island.net/ncurses $(CT_Mirrors GNU ncurses)"
CT_NCURSES_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_NCURSES_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_NCURSES_ARCHIVE_FORMATS=".tar.gz"
CT_NCURSES_SIGNATURE_FORMAT="packed/.sig"
CT_NCURSES_HOST_CONFIG_ARGS=""
CT_NCURSES_HOST_DISABLE_DB=y
CT_NCURSES_HOST_FALLBACKS="linux,xterm,xterm-color,xterm-256color,vt100"
CT_NCURSES_TARGET_CONFIG_ARGS=""
# CT_NCURSES_TARGET_DISABLE_DB is not set
CT_NCURSES_TARGET_FALLBACKS=""
# CT_COMP_LIBS_ZLIB is not set
CT_LIBICONV_NEEDED=y
CT_GETTEXT_NEEDED=y
CT_GMP_NEEDED=y
CT_MPFR_NEEDED=y
CT_ISL_NEEDED=y
CT_MPC_NEEDED=y
CT_EXPAT_NEEDED=y
CT_NCURSES_NEEDED=y
# CT_ZLIB_NEEDED is not set
CT_LIBICONV=y
CT_GETTEXT=y
CT_GMP=y
CT_MPFR=y
CT_ISL=y
CT_MPC=y
CT_EXPAT=y
CT_NCURSES=y
#
# Companion tools
#
# CT_COMP_TOOLS_FOR_HOST is not set
CT_COMP_TOOLS_AUTOCONF=y
CT_COMP_TOOLS_AUTOCONF_PKG_KSYM="AUTOCONF"
CT_AUTOCONF_DIR_NAME="autoconf"
CT_AUTOCONF_PKG_NAME="autoconf"
CT_AUTOCONF_SRC_RELEASE=y
CT_AUTOCONF_V_2_69=y
# CT_AUTOCONF_V_2_65 is not set
CT_AUTOCONF_VERSION="2.69"
CT_AUTOCONF_MIRRORS="$(CT_Mirrors GNU autoconf)"
CT_AUTOCONF_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_AUTOCONF_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_AUTOCONF_ARCHIVE_FORMATS=".tar.xz .tar.gz"
CT_AUTOCONF_SIGNATURE_FORMAT="packed/.sig"
CT_COMP_TOOLS_AUTOMAKE=y
CT_COMP_TOOLS_AUTOMAKE_PKG_KSYM="AUTOMAKE"
CT_AUTOMAKE_DIR_NAME="automake"
CT_AUTOMAKE_PKG_NAME="automake"
CT_AUTOMAKE_SRC_RELEASE=y
CT_AUTOMAKE_V_1_15_1=y
CT_AUTOMAKE_VERSION="1.15.1"
CT_AUTOMAKE_MIRRORS="$(CT_Mirrors GNU automake)"
CT_AUTOMAKE_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_AUTOMAKE_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_AUTOMAKE_ARCHIVE_FORMATS=".tar.xz .tar.gz"
CT_AUTOMAKE_SIGNATURE_FORMAT="packed/.sig"
CT_COMP_TOOLS_LIBTOOL=y
CT_COMP_TOOLS_LIBTOOL_PKG_KSYM="LIBTOOL"
CT_LIBTOOL_DIR_NAME="libtool"
CT_LIBTOOL_PKG_NAME="libtool"
CT_LIBTOOL_SRC_RELEASE=y
CT_LIBTOOL_V_2_4_6=y
CT_LIBTOOL_VERSION="2.4.6"
CT_LIBTOOL_MIRRORS="$(CT_Mirrors GNU libtool)"
CT_LIBTOOL_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_LIBTOOL_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_LIBTOOL_ARCHIVE_FORMATS=".tar.xz .tar.gz"
CT_LIBTOOL_SIGNATURE_FORMAT="packed/.sig"
# CT_COMP_TOOLS_M4 is not set
CT_COMP_TOOLS_MAKE=y
CT_COMP_TOOLS_MAKE_PKG_KSYM="MAKE"
CT_MAKE_DIR_NAME="make"
CT_MAKE_PKG_NAME="make"
CT_MAKE_SRC_RELEASE=y
CT_MAKE_V_4_2_1=y
CT_MAKE_VERSION="4.2.1"
CT_MAKE_MIRRORS="$(CT_Mirrors GNU make)"
CT_MAKE_ARCHIVE_FILENAME="#{pkg_name}-#{version}"
CT_MAKE_ARCHIVE_DIRNAME="#{pkg_name}-#{version}"
CT_MAKE_ARCHIVE_FORMATS=".tar.bz2 .tar.gz"
CT_MAKE_SIGNATURE_FORMAT="packed/.sig"
# CT_MAKE_GMAKE_SYMLINK is not set

Related

Failed calling executeUserFunction with error {"instanceTree":null,"maxTreeDepth":0}

After upgrading Autodesk Forge Viewer from v6.5 to v7.11, a new console error started to appear every time a DWG is loaded:
Failed calling executeUserFunction with error {"instanceTree":null,"maxTreeDepth":0}
LMV../src/logger/Logger.js.Logger._reportError # viewer3D.js:75372
(anonymous) # Hyperlink.js:857
Promise.catch (async)
HyperlinkTool.loadHyperlinksF2d # Hyperlink.js:854
HyperlinkTool.loadHyperlinks # Hyperlink.js:805
HyperlinkTool.activate # Hyperlink.js:622
ToolController.activateTool # viewer3D.js:83795
Autodesk.Extensions.Hyperlink../extensions/Hyperlink/Hyperlink.js.HyperlinkExtension.load # Hyperlink.js:192
loadExtensionLocal # viewer3D.js:26330
(anonymous) # viewer3D.js:26245
Promise.then (async)
loadExtension # viewer3D.js:26228
(anonymous) # viewer3D.js:62886
setTimeout (async)
LMV../src/gui/GuiViewer3D.js.GuiViewer3D.createUI # viewer3D.js:62874
createUI # viewer3D.js:62737
(anonymous) # viewer3D.js:62749
setTimeout (async)
onSuccessChained # viewer3D.js:62744
_ref2 # viewer3D.js:33850
onParse # viewer3D.js:49394
According to stacktrace it fails to execute function specified in a string variable:
function userFunction(pdb) {
var hyperlinkExists = false;
pdb.enumAttributes(function(i, attrDef, attrRaw) {
var name = attrRaw[0];
if (name === 'hyperlink') {
hyperlinkExists = true;
return true;
}
});
return hyperlinkExists;
}
With disabled Autodesk.Hyperlink extension it works well without any errors. Is it a bug that is not fixed yet? It would be good to have a sample DWG with hyperlinks, because it's not clear how to test hyperlinks as well.
With disabled Autodesk.Hyperlink extension it works well without any errors. Is it a bug that is not fixed yet?
Yes it's a known issue - before that's fixed be sure to have the Hyperlink switched off with:
new Autodesk.Viewing.GuiViewer3D(container, {disabledExtensions:{hyperlink:true}})
Stay tuned to our official blog for release notes of upcoming versions - this should get fixed soon...

Mediawiki MySQL have high cpu loading sometimes

Our MediaWiki's MySQL sometimes has high CPU load.
We host a MediaWiki; it has around 50 people online.
version is 1.31
Here is the LocalSettings.php content:
<?php
# This file was automatically generated by the MediaWiki 1.31.0
# installer. If you make manual changes, please keep track in case you
# need to recreate them later.
#
# See includes/DefaultSettings.php for all configurable settings
# and their default values, but don't forget to make changes in _this_
# file, not there.
#
# Further documentation for configuration settings may be found at:
# https://www.mediawiki.org/wiki/Manual:Configuration_settings
# Protect against web entry
if ( !defined( 'MEDIAWIKI' ) ) {
exit;
}
## Uncomment this to disable output compression
# $wgDisableOutputCompression = true;
$wgSitename = "MyWiki";
## The URL base path to the directory containing the wiki;
## defaults for all runtime URL paths are based off of this.
## For more information on customizing the URLs
## (like /w/index.php/Page_title to /wiki/Page_title) please see:
## https://www.mediawiki.org/wiki/Manual:Short_URL
$wgScriptPath = "";
## The protocol and server name to use in fully-qualified URLs
#$wgServer = "https://wikidecode.org";
$wgServer = "https://xxxxxxx.com";
## The URL path to static resources (images, scripts, etc.)
$wgResourceBasePath = $wgScriptPath;
## The URL path to the logo. Make sure you change this from the default,
## or else you'll overwrite your logo when you upgrade!
$wgLogo = "$wgResourceBasePath/resources/assets/wiki.png";
## UPO means: this is also a user preference option
$wgEnableEmail = true;
$wgEnableUserEmail = true; # UPO
$wgEmergencyContact = "apache#xxxxxxx.com";
$wgPasswordSender = "apache#xxxxxxx.com";
$wgEnotifUserTalk = false; # UPO
$wgEnotifWatchlist = false; # UPO
$wgEmailAuthentication = true;
## Database settings
$wgDBtype = "mysql";
$wgDBserver = "127.0.0.1";
$wgDBname = "mediawikidb";
$wgDBuser = "mediawikidb";
$wgDBpassword = "xxxxxxxxxxxxxxxxx";
# MySQL specific settings
$wgDBprefix = "";
$wgSQLMode = null;
# MySQL table options to use during installation or update
$wgDBTableOptions = "ENGINE=InnoDB, DEFAULT CHARSET=binary";
## Shared memory settings
#$wgUseGzip = true;
$wgEnableSidebarCache = true;
$wgMainCacheType = CACHE_MEMCACHED;
$wgParserCacheType = CACHE_MEMCACHED; # optional
$wgMessageCacheType = CACHE_MEMCACHED; # optional
$wgMemCachedServers = array( "127.0.0.1:11211" );
#$wgUseLocalMessageCache = true;
$wgSessionsInObjectCache = true; # optional
$wgSessionCacheType = CACHE_MEMCACHED; # optional
# File cache
$wgUseFileCache = true;
#$wgFileCacheDirectory = "/var/www/mediawiki/file-cache";
$wgFileCacheDirectory = "$IP/cache";
# NO DB HITS!
# Refer to https://techwelkin.com/slow-mediawiki-optimize-to-enhance-performance-part-1
$wgDisableCounters = false;
$wgHitcounterUpdateFreq = 500;
$wgMiserMode = true;
#$wgJobRunRate = 0; // Set a cronjob via "crontab -e" with "0 0 * * * /usr/bin/php /var/www/mediawiki/maintenance/runJobs.php > /var/log/runJobs.log 2>&1"
## Set $wgCacheDirectory to a writable directory on the web server
## to make your wiki go slightly faster. The directory should not
## be publically accessible from the web.
$wgCacheDirectory = "$IP/cache";
## To enable image uploads, make sure the 'images' directory
## is writable, then set this to true:
$wgEnableUploads = true;
#$wgUseImageMagick = true;
#$wgImageMagickConvertCommand = "/usr/bin/convert";
# InstantCommons allows wiki to use images from https://commons.wikimedia.org
$wgUseInstantCommons = false;
$wgRunJobsAsync = true;
# Periodically send a pingback to https://www.mediawiki.org/ with basic data
# about this MediaWiki instance. The Wikimedia Foundation shares this data
# with MediaWiki developers to help guide future development efforts.
$wgPingback = false;
## If you use ImageMagick (or any other shell command) on a
## Linux server, this will need to be set to the name of an
## available UTF-8 locale
$wgShellLocale = "C.UTF-8";
# Site language code, should be one of the list in ./languages/data/Names.php
$wgLanguageCode = "zh";
$wgSecretKey = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
# Changing this will log out all existing sessions.
$wgAuthenticationTokenVersion = "1";
# Site upgrade key. Must be set to a string (default provided) to turn on the
# web installer while LocalSettings.php is in place
$wgUpgradeKey = "xxxxxxxxxxxxxx";
## For attaching licensing metadata to pages, and displaying an
## appropriate copyright notice / icon. GNU Free Documentation
## License and Creative Commons licenses are supported so far.
$wgRightsPage = ""; # Set to the title of a wiki page that describes your license/copyright
$wgRightsUrl = "";
$wgRightsText = "";
$wgRightsIcon = "";
# Path to the GNU diff3 utility. Used for conflict resolution.
$wgDiff3 = "/usr/bin/diff3";
## Default skin: you can change the default skin. Use the internal symbolic
## names, ie 'vector', 'monobook':
#$wgDefaultSkin = "minerva";
$wgDefaultSkin = "Timeless";
# Enabled skins.
# The following skins were automatically enabled:
wfLoadSkin( 'MonoBook' );
wfLoadSkin( 'Timeless' );
wfLoadSkin( 'Vector' );
# Enabled extensions. Most of the extensions are enabled by adding
# wfLoadExtensions('ExtensionName');
# to LocalSettings.php. Check specific extension documentation for more details.
# The following extensions were automatically enabled:
wfLoadExtension( 'CategoryTree' );
wfLoadExtension( 'Cite' );
wfLoadExtension( 'CiteThisPage' );
wfLoadExtension( 'CodeEditor' );
wfLoadExtension( 'Gadgets' );
#wfLoadExtension( 'ImageMap' );
wfLoadExtension( 'InputBox' );
wfLoadExtension( 'Interwiki' );
wfLoadExtension( 'LocalisationUpdate' );
wfLoadExtension( 'MultimediaViewer' );
wfLoadExtension( 'OATHAuth' );
wfLoadExtension( 'ParserFunctions' );
wfLoadExtension( 'PdfHandler' );
wfLoadExtension( 'Poem' );
wfLoadExtension( 'Renameuser' );
wfLoadExtension( 'ReplaceText' );
wfLoadExtension( 'SpamBlacklist' );
wfLoadExtension( 'SyntaxHighlight_GeSHi' );
wfLoadExtension( 'TitleBlacklist' );
wfLoadExtension( 'Babel' );
wfLoadExtension( 'cldr' );
wfLoadExtension( 'CleanChanges' );
$wgCCTrailerFilter = true;
$wgCCUserFilter = false;
$wgDefaultUserOptions['usenewrc'] = 1;
wfLoadExtension( 'LocalisationUpdate' );
$wgLocalisationUpdateDirectory = "$IP/cache";
require_once "$IP/extensions/Translate/Translate.php";
$wgGroupPermissions['user']['translate'] = true;
$wgGroupPermissions['user']['translate-messagereview'] = true;
$wgGroupPermissions['user']['translate-groupreview'] = true;
$wgGroupPermissions['user']['translate-import'] = true;
$wgGroupPermissions['sysop']['pagetranslation'] = true;
$wgGroupPermissions['sysop']['translate-manage'] = true;
$wgTranslateDocumentationLanguageCode = 'qqq';
$wgExtraLanguageNames['qqq'] = 'Message documentation'; # No linguistic content. Used for documenting messages
wfLoadExtension( 'UniversalLanguageSelector' );
wfLoadExtension( 'WikiEditor' );
wfLoadExtension( 'Scribunto' );
#$wgScribuntoDefaultEngine = 'luastandalone';
#$wgScribuntoEngineConf['luastandalone']['cpuLimit'] = 'ulimit';
#$wgScribuntoEngineConf['luastandalone']['memoryLimit'] = 209715200; # bytes
$wgScribuntoDefaultEngine = 'luasandbox';
$wgScribuntoEngineConf['luasandbox']['cpuLimit'] = 'ulimit';
$wgScribuntoEngineConf['luasandbox']['memoryLimit'] = 209715200; # bytes
wfLoadExtension( 'Nuke' );
#wfLoadExtension( 'VisualEditor' );
// Enable by default for everybody
# $wgDefaultUserOptions['visualeditor-enable'] = 1;
// Optional: Set VisualEditor as the default for anonymous users
// otherwise they will have to switch to VE
# $wgDefaultUserOptions['visualeditor-editor'] = "visualeditor";
// Don't allow users to disable it
# $wgHiddenPrefs[] = 'visualeditor-enable';
# Parsoid for Visual Editor
$wgVirtualRestConfig['modules']['parsoid'] = array(
// URL to the Parsoid instance
// Use port 8142 if you use the Debian package
//'url' => 'http://wikidecode.org:8142',
'url' => 'http://xxxxxxx.com:8142',
// Parsoid "domain", see below (optional)
//'domain' => 'localhost',
// Parsoid "prefix", see below (optional)
//'prefix' => 'localhost'
);
# Wikibase
$wgEnableWikibaseRepo = true;
$wgEnableWikibaseClient = true;
require_once "$IP/extensions/Wikibase/repo/Wikibase.php";
require_once "$IP/extensions/Wikibase/repo/ExampleSettings.php";
require_once "$IP/extensions/Wikibase/client/WikibaseClient.php";
require_once "$IP/extensions/Wikibase/client/ExampleSettings.php";
# Mobile responsive display
wfLoadExtension( 'MobileFrontend' );
$wgMFAutodetectMobileView = true;
wfLoadSkin( 'MinervaNeue' );
$wgMFDefaultSkinClass = 'SkinMinerva';
# End of automatically generated settings.
# Add more configuration options below.
# Auto confirm threshold
$wgAutoConfirmCount=20;
# Tidy # Fix the problem that infobox will show <td? </td>
$wgUseTidy=true;
# Permission (need to comment out to make Visual Editor work)
$wgGroupPermissions['*']['read'] = false; # Disable reading by anonymous users
$wgGroupPermissions['*']['edit'] = false;
$wgGroupPermissions['*']['writeapi'] = false;
$wgGroupPermissions['*']['createpage'] = false;
$wgGroupPermissions['*']['createtalk'] = false;
$wgGroupPermissions['*']['createaccount'] = false; # Prevent new user registrations except by sysops
# Debugging
#$wgShowExceptionDetails = true;
#$wgShowDBErrorBacktrace = true;
#$wgShowSQLErrors = true;
#$wgDebugDumpSql = true;
#$wgDebugComments = true;
$wgDebugLogFile = "/var/log/mediawiki/debug-{$wgDBname}.log";
$wgPFEnableStringFunctions = true;
$wgMaxShellMemory = 204800; # in KB
# TemplateStyles: per-template CSS pages
wfLoadExtension( 'TemplateStyles' );
# YouTube embedding
wfLoadExtension( 'YouTube' );
# Embedded Video
wfLoadExtension( 'EmbedVideo' );
# Permission to hide history entries (revision/log deletion)
$wgGroupPermissions['sysop']['deletelogentry'] = true;
$wgGroupPermissions['sysop']['deleterevision'] = true;
# Notice
# $wgSiteNotice = "Internal..."
# Upload picture option
# allow upload by URL
$wgAllowCopyUploads = true;
$wgCopyUploadsFromSpecialUpload = true;
# Increase Session timeout
$wgCookieExpiration = 86400;
$wgExtendedLoginCookieExpiration = null;
# Proxy WWW
#$wgUseSquid = true;
#$wgSquidServers = [ 'x.x.x.x' ];
#$wgSquidServersNoPurge = [ 'x.x.x.x' ];
# Contribution Scores extension
require_once "$IP/extensions/ContributionScores/ContributionScores.php";
$wgContribScoreIgnoreBots = true; // Exclude Bots from the reporting - Can be omitted.
$wgContribScoreIgnoreBlockedUsers = true; // Exclude Blocked Users from the reporting - Can be omitted.
$wgContribScoresUseRealName = true; // Use real user names when available - Can be omitted. Only for MediaWiki 1.19 and later.
$wgContribScoreDisableCache = true; // Set to true to disable cache for parser function and inclusion of table.
//Each array defines a report - 7,50 is "past 7 days" and "LIMIT 50" - Can be omitted.
$wgContribScoreReports = array(
array(7,20),
array(30,20),
array(0,20));
# Change default user behavior
## Add pages the user edits to their watchlist by default
$wgDefaultUserOptions['watchdefault'] = 0; // do not add edited pages to the user's watchlist
# Allow external links to display images
$wgAllowExternalImages=true;
# Enable the abuse log (AbuseFilter)
wfLoadExtension( 'AbuseFilter' );
$wgGroupPermissions['sysop']['abusefilter-modify'] = true;
$wgGroupPermissions['*']['abusefilter-log-detail'] = true;
##$wgGroupPermissions['*']['abusefilter-view'] = true;
##$wgGroupPermissions['*']['abusefilter-log'] = true;
$wgGroupPermissions['sysop']['abusefilter-private'] = true;
$wgGroupPermissions['sysop']['abusefilter-modify-restricted'] = true;
$wgGroupPermissions['sysop']['abusefilter-revert'] = true;
# Prevent usernames that use confusable / non-standard strings
#wfLoadExtension( 'AntiSpoof' );
#$wgSharedTables[] = 'spoofuser';
# Allow the HTML img tag
$wgAllowImageTag=true;
# Require email confirmation before editing
#$wgEmailConfirmToEdit=true;
# Lockdown Permission
wfLoadExtension( 'Lockdown' );
$wgNamespacePermissionLockdown[NS_TEMPLATE]['*'] = ['bureaucrat'];
$wgNamespacePermissionLockdown[NS_TEMPLATE]['read'] = ['*'];
# Site maintenance....
#$wgReadOnly = 'Dumping Database, Access will be restored shortly';
# Enable reporting (registered users can report, administrators handle the reports)
wfLoadExtension( 'Report' );
# Show who is watching a given page
wfLoadExtension( 'WhoIsWatching' );
$whoiswatching_nametype = "RealName";
# $wgGroupPermissions['sysop']['addpagetoanywatchlist'] = true;
# $wgGroupPermissions['sysop']['seepagewatchers'] = true;
# Track a specific user's login/logout activity
#require_once "$IP/extensions/StalkerLog/StalkerLog.php";
#$wgGroupPermissions['*']['stalkerlog-view-log'] = false;
#$wgGroupPermissions['sysop']['stalkerlog-view-log'] = true;
# Who is online
wfLoadExtension( 'WhosOnline' );
$wgWhosOnlineShowAnons = true;
# Disable the page view counter
# $wgDisableCounters = true;
#wfLoadExtension( 'MatomoAnalytics' );
#$wgMatomoAnalyticsServerURL=false;
#$wgMatomoAnalyticsTokenAuth=false;
# Check User IP (blocking by IP range)
wfLoadExtension( 'CheckUser' );
#$wgGroupPermissions['sysop']['checkuser'] = true;
#$wgGroupPermissions['sysop']['checkuser-log'] = true;
$wgAddGroups['bureaucrat'][] = 'checkuser';
$wgRemoveGroups['bureaucrat'][] = 'checkuser';
# Local S3 for image
# https://github.com/edwardspec/mediawiki-aws-s3
#wfLoadExtension( 'AWS' );
// Configure AWS credentials.
// THIS IS NOT NEEDED if your EC2 instance has an IAM instance profile.
#$wgAWSCredentials = [
# 'key' => 'xxxxxxxxxxxxxx',
# 'secret' => 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
# 'token' => false
#];
#$wgAWSRegion = 'us-west-2'; # Oregon
// Replace <something> with the name of your S3 bucket, e.g. wonderfulbali234.
#$wgAWSBucketName = "mediawiki-uploads";
#$wgUploadDirectory = "$IP/s3mnt";
#$wgUploadPath = "$wgScriptPath/s3mnt";
# Anti-Robot Mechanism
#wfLoadExtension( 'ConfirmEdit' );
#$wgGroupPermissions['*' ]['skipcaptcha'] = false; // Default
#$wgGroupPermissions['user' ]['skipcaptcha'] = false; // Default
#$wgGroupPermissions['autoconfirmed']['skipcaptcha'] = false; // Default
#$wgGroupPermissions['bot' ]['skipcaptcha'] = true; // Default: registered bots
#$wgGroupPermissions['sysop' ]['skipcaptcha'] = true; // Default
#$wgCaptchaTriggers['edit'] = false; // Trigger while editing a page
#$wgCaptchaTriggers['create'] = true; // Default: Trigger while creating a page
#$wgCaptchaTriggers['addurl'] = true; // Default: Trigger while adding a url
#$wgCaptchaTriggers['createaccount'] = true; // Default: Trigger while registering
#$wgCaptchaTriggers['badlogin'] = true; // Default: Trigger while bad login
# Echo to users
wfLoadExtension( 'Echo' );
# Delete a Page permanently
wfLoadExtension( 'DeletePagesForGood' );
$wgGroupPermissions['*']['deleteperm'] = false;
$wgGroupPermissions['user']['deleteperm'] = false;
$wgGroupPermissions['bureaucrat']['deleteperm'] = true;
$wgGroupPermissions['sysop']['deleteperm'] = false;
# Gadgets edit permission
#$wgGroupPermissions['sysop']['gadgets-edit'] = true;
#$wgGroupPermissions['sysop']['gadgets-definition-edit'] = true;
# Addthis
#require_once "$IP/extensions/AddThis/AddThis.php";
#$wgAddThisMain=false;
#$wgAddThisSidebar=false;
# Related Articles
#wfLoadExtension( 'RelatedArticles' );
#$wgRelatedArticlesFooterWhitelistedSkins = ['vector', 'timeless', 'minerva', 'monobook'];
$wgULSGeoService = 'https://freegeoip.app/json/8.8.8.8?callback=?';
# New namespace for private pages
// Define constants for my additional namespaces.
define("NS_FOO", 3000); // This MUST be even.
define("NS_FOO_TALK", 3001); // This MUST be the following odd integer.
// Add namespaces.
$wgExtraNamespaces[NS_FOO] = "Foo";
$wgExtraNamespaces[NS_FOO_TALK] = "Foo_talk"; // Note underscores in the namespace name.
$wgNamespaceProtection[NS_FOO] = array( 'editfoo' ); // permission "editfoo" required to edit the foo namespace
$wgNamespacesWithSubpages[NS_FOO] = true; // subpages enabled for the foo namespace
$wgGroupPermissions['staff']['editfoo'] = true; // permission "editfoo" granted to users in the "staff" group
$wgNamespacePermissionLockdown[NS_FOO]['read'] = ['staff'];
Sometimes mysqld consumes all available CPU resources, and nginx shows a 504 gateway time-out.
here is part of mysql slow query log:
# Time: 2018-12-04T17:13:53.679634Z
# User@Host: root[root] @ localhost [127.0.0.1] Id: 82845
# Query_time: 3.868103 Lock_time: 0.000066 Rows_sent: 6 Rows_examined: 14
SET timestamp=1543943633;
SELECT /* SearchMySQL::searchInternal */ page_id,page_namespace,page_title FROM `page`,`searchindex` WHERE (page_id=si_page) AND ( MATCH(si_text) AGAINST('+\"u8e7ae80 u8c2b7 u8e696b9 u8e8bebe\" ' IN BOOLEAN MODE) ) AND page_namespace IN ('0','120') LIMIT 20;
# Time: 2018-12-04T17:13:57.542743Z
# User@Host: root[root] @ localhost [127.0.0.1] Id: 82845
# Query_time: 3.861802 Lock_time: 0.000104 Rows_sent: 1 Rows_examined: 14
SET timestamp=1543943637;
SELECT /* SearchMySQL::searchInternal */ COUNT(*) as c FROM `page`,`searchindex` WHERE (page_id=si_page) AND ( MATCH(si_text) AGAINST('+\"u8e7ae80 u8c2b7 u8e696b9 u8e8bebe\" ' IN BOOLEAN MODE) ) AND page_namespace IN ('0','120');
I'm not sure what the main problem is — is it the MySQL settings, MediaWiki itself, or do I just have to upgrade the hardware?
I have already tried changing the MySQL settings many times, but it didn't fix the problem.
here is my mysqld.cnf:
#
# The MySQL database server configuration file.
#
# You can copy this to one of:
# - "/etc/mysql/my.cnf" to set global options,
# - "~/.my.cnf" to set user-specific options.
#
# One can use all long options that the program supports.
# Run program with --help to get a list of available options and with
# --print-defaults to see which it would actually understand and use.
#
# For explanations see
# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
# This will be passed to all mysql clients
# It has been reported that passwords should be enclosed with ticks/quotes
# especially if they contain "#" chars...
# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
# Here is entries for some specific programs
# The following values assume you have at least 32M ram
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0
[mysqld]
#
# * Basic Settings
#
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
lc-messages-dir = /usr/share/mysql
skip-external-locking
#
# Instead of skip-networking the default is now to listen only on
# localhost which is more compatible and is not less secure.
bind-address = 127.0.0.1
#
# * Fine Tuning
#
key_buffer_size = 512M
max_allowed_packet = 16M
thread_stack = 192K
thread_cache_size = 32
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched
myisam-recover-options = BACKUP
#max_connections = 100
#table_cache = 64
#thread_concurrency = 10
#
# * Query Cache Configuration
#
query_cache_type = 0
query_cache_limit = 2M
query_cache_size = 0
#
# * Logging and Replication
#
# Both location gets rotated by the cronjob.
# Be aware that this log type is a performance killer.
# As of 5.1 you can enable the log at runtime!
#general_log_file = /var/log/mysql/mysql.log
#general_log = 1
#
# Error log - should be very few entries.
#
log_error = /var/log/mysql/error.log
#
# Here you can see queries with especially long duration
slow_query_log = 1
slow_query_log_file = /var/log/mysql/mysql-slow.log
#log_slow_queries = /var/log/mysql/mysql-slow.log
long_query_time = 1
#log-queries-not-using-indexes
#
# The following can be used as easy to replay backup logs or for replication.
# note: if you are setting up a replication slave, see README.Debian about
# other settings you may need to change.
#server-id = 1
#log_bin = /var/log/mysql/mysql-bin.log
expire_logs_days = 10
max_binlog_size = 100M
#binlog_do_db = include_database_name
#binlog_ignore_db = include_database_name
#
# * InnoDB
#
# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
# Read the manual for more InnoDB related options. There are many!
#
# * Security Features
#
# Read the manual, too, if you want chroot!
# chroot = /var/lib/mysql/
#
# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
#
# ssl-ca=/etc/mysql/cacert.pem
# ssl-cert=/etc/mysql/server-cert.pem
# ssl-key=/etc/mysql/server-key.pem
#add
table_open_cache = 432
innodb_buffer_pool_instances = 1
innodb_buffer_pool_size = 1G
innodb_log_file_size = 128M
hardware is Amazon EC2 c5.large
OS version: Ubuntu 16.04.5
MySQL version: Ver 14.14 Distrib 5.7.24, for Linux (x86_64) using EditLine wrapper
I found the reason.
It may be a MyISAM engine locking problem;
the search index uses the MyISAM engine by default.
I try to change engine to innoDB.
(but your MySQL version should higher than 5.6)
ref: https://www.mediawiki.org/wiki/Manual:Searchindex_table
Rate Per Second=RPS - With the information available at this time, consider these suggestions for your Performance Group [mysqld] section, please
innodb_io_capacity=1800 # from 200 to use more of available IOPS
key_cache_age_threshold=7200 # from 300 seconds to reduce key_reads RPS from ~5 per SECOND
open_files_limit=16384 # from 1024 to accommodate high volume file activity
table_open_cache=10000 # from 431 to reduce opened_tables RPS of more than 2
At Linux command prompt, ulimit -n 24576 to dynamically increase Open File limit from 1024
For this Linux change to persist over shutdown/restart refer to this url
https://glassonionblog.wordpress.com/2013/01/27/increase-ulimit-and-file-descriptors-limit/
Your specifics may be slightly different due to OS version.
Good Luck.
For additional suggestions, view profile, Network profile for contact information.

How to enable automatic code reloading in Rails

Is there a way to do 'hot code reloading' with a Rails application in the development environment?
For example: I'm working on a Rails application, I add a few lines of css in a stylesheet, I look at the browser to see the modified styling. As of right now I have to refresh the page with cmd-r or by clicking the refresh button.
Is there a way to get the page to reload automatically when changes are made?
This works nicely in the Phoenix web framework (and I'm sure Phoenix isn't the only framework in this feature). How could a feature like this be enabled in Ruby on Rails?
I am using this setup; it reloads all assets: JS, CSS, and Ruby files.
in Gemfile
# Live-reload tooling: guard-livereload watches files and pushes reload
# notifications to the browser (available in development and test).
group :development, :test do
gem 'guard-livereload', '~> 2.5', require: false
end
# Development-only gems: file watching (listen/guard), the zeus preloader
# plugin, and the Rack middleware that injects the LiveReload script.
group :development do
gem 'listen'
gem 'guard'
gem 'guard-zeus'
gem 'rack-livereload'
end
insert this in your development.rb
config.middleware.insert_after ActionDispatch::Static, Rack::LiveReload
i have this in my guard file
# A sample Guardfile
# More info at https://github.com/guard/guard#readme
## Uncomment and set this to only include directories you want to watch
# directories %w(app lib config test spec features) \
# .select{|d| Dir.exists?(d) ? d : UI.warning("Directory #{d} does not exist")}
## Note: if you are using the `directories` clause above and you are not
## watching the project directory ('.'), then you will want to move
## the Guardfile to a watched dir and symlink it back, e.g.
#
# $ mkdir config
# $ mv Guardfile config/
# $ ln -s config/Guardfile .
#
# and, you'll have to watch "config/Guardfile" instead of "Guardfile"
# Push a reload notification to the browser whenever a watched file changes.
guard 'livereload' do
# Watched source-file extensions mapped to the compiled asset type that
# LiveReload should refresh in the browser.
extensions = {
css: :css,
scss: :css,
sass: :css,
js: :js,
coffee: :js,
html: :html,
png: :png,
gif: :gif,
jpg: :jpg,
jpeg: :jpeg,
# less: :less, # uncomment if you want LESS stylesheets done in browser
}
# View template extensions that trigger a full page reload (see watch below).
rails_view_exts = %w(erb haml slim)
# file types LiveReload may optimize refresh for
compiled_exts = extensions.values.uniq
# Files already compiled under public/ can be refreshed directly.
watch(%r{public/.+\.(#{compiled_exts * '|'})})
# For each asset extension, map the asset's source path to the URL it is
# served from under /assets, so LiveReload refreshes the right resource.
extensions.each do |ext, type|
watch(%r{
(?:app|vendor)
(?:/assets/\w+/(?<path>[^.]+) # path+base without extension
(?<ext>\.#{ext})) # matching extension (must be first encountered)
(?:\.\w+|$) # other extensions
}x) do |m|
path = m[1]
"/assets/#{path}.#{type}"
end
end
# file needing a full reload of the page anyway
watch(%r{app/views/.+\.(#{rails_view_exts * '|'})$})
watch(%r{app/helpers/.+\.rb})
watch(%r{config/locales/.+\.yml})
end
# Run the zeus Rails preloader under guard. Most of the spec-watching rules
# below are commented out and kept only as examples for a TDD setup.
guard 'zeus' do
require 'ostruct'
# Holder for RSpec-related paths (all usages below are commented out).
rspec = OpenStruct.new
# rspec.spec_dir = 'spec'
# rspec.spec = ->(m) { "#{rspec.spec_dir}/#{m}_spec.rb" }
# rspec.spec_helper = "#{rspec.spec_dir}/spec_helper.rb"
# matchers
# rspec.spec_files = /^#{rspec.spec_dir}\/.+_spec\.rb$/
# Ruby apps
ruby = OpenStruct.new
ruby.lib_files = /^(lib\/.+)\.rb$/
# watch(rspec.spec_files)
# watch(rspec.spec_helper) { rspec.spec_dir }
# watch(ruby.lib_files) { |m| rspec.spec.call(m[1]) }
# Rails example
rails = OpenStruct.new
rails.app_files = /^app\/(.+)\.rb$/
rails.views_n_layouts = /^app\/(.+(?:\.erb|\.haml|\.slim))$/
rails.controllers = %r{^app/controllers/(.+)_controller\.rb$}
# watch(rails.app_files) { |m| rspec.spec.call(m[1]) }
# watch(rails.views_n_layouts) { |m| rspec.spec.call(m[1]) }
# watch(rails.controllers) do |m|
# [
# rspec.spec.call("routing/#{m[1]}_routing"),
# rspec.spec.call("controllers/#{m[1]}_controller"),
# rspec.spec.call("acceptance/#{m[1]}")
# ]
# end
end
I am using zeus instead of spring on this setup.
Run guard
Open localhost:3000 and you are good to go.
This should resolve your question, and have blazing reload times better than browserify.
I commented out the lines where guard watches the test directories; you can uncomment them if you are doing TDD.
CSS hot swapping and auto-reload when HTML/JS changes can be achieved with guard in combination with livereload: https://github.com/guard/guard-livereload
This gem would auto reload when you make changes to js elements(Not css or ruby files).
https://github.com/rmosolgo/react-rails-hot-loader
Never seen css hot code reloading in rails platform.

Parsing HTML - searching links - is it possible to search paragraph that a link is contained in?

I am parsing the links on wikipedia pages of actors, and trying to find links to films they appeared in.
I have a basic method that searches the links and checks for the word film in the link. However, many of the links to films do not actually contain this word.
However, within the paragraphs that the links are contained in, the word film appears, for example:
<p>Dreyfuss's first film part was a small, uncredited role in
<i><a href="/wiki/The_Graduate" title="The Graduate">The Graduate
// Paragraph goes on for a long time.
Here is the block from the method that checks all the links:
# Collect hrefs that look like film links: the href must contain "(film)"
# and must not be a category or PHP (edit/redlink) URL.
# Fix: XPath attribute tests use '@' — '//a[#href]' is invalid XPath and
# must be '//a[@href]' to select anchors that have an href attribute.
all_links = doca.search('//a[@href]')
all_links.each do |link|
  link_info = link['href']
  # Keep only explicit "(film)" links, excluding category and php links.
  if link_info.include?("(film)") && !(link_info.include?("Category:") || link_info.include?("php"))
  then out << link_info end
end
# De-duplicate and strip each href down to the bare film name.
out.uniq.collect {|link| strip_out_name(link)}
Would there be a way of checking the previous text before the link but after the <p> tag for the word film, but being careful not to check other links (and also perhaps limited the search to 50 characters before the link)?
Thanks for any help or suggestions.
Click here, this is the main page that I am testing on
It is possible to search for text inside a tag. See https://stackoverflow.com/a/19816840/128421 for an example.
But, I'd do it something similar to this way:
# Scrape Richard Dreyfuss's filmography table from Wikipedia.
# Finds the table via the #Filmography heading, then converts every data
# row into a {year:, movie_url:, movie_title:, role:} hash.
require 'nokogiri'
require 'open-uri'
doc = Nokogiri::HTML(open('http://en.wikipedia.org/wiki/Richard_Dreyfuss'))
# The element with id "Filmography" sits inside the heading that
# immediately precedes the table, so step up and over to reach the table.
filmography_table = doc.at('#Filmography').parent.next_element
# Skip the header row, then pull the cells out of each remaining row.
films = filmography_table.search('tr')[1..-1].map { |row|
  cells = row.search('td')
  release_year = cells.shift.text
  title_cell = cells.shift
  film_link = title_cell.at('a')
  {
    year: release_year,
    movie_url: film_link['href'],
    movie_title: film_link.text,
    role: cells.shift.text
  }
}
films
# => [{:year=>"1966",
# :movie_url=>"/wiki/Bewitched",
# :movie_title=>"Bewitched",
# :role=>"Rodney"},
# {:year=>"1966",
# :movie_url=>"/wiki/Gidget_(TV_series)",
# :movie_title=>"Gidget",
# :role=>"Durf the Drag"},
# {:year=>"1967",
# :movie_url=>"/wiki/Valley_of_the_Dolls_(film)",
# :movie_title=>"Valley of the Dolls",
# :role=>"Assistant stage manager"},
# {:year=>"1967",
# :movie_url=>"/wiki/The_Graduate",
# :movie_title=>"The Graduate",
# :role=>"Boarding House Resident"},
# {:year=>"1967",
# :movie_url=>"/wiki/The_Big_Valley",
# :movie_title=>"The Big Valley",
# :role=>"Lud Akley"},
# {:year=>"1968",
# :movie_url=>"/wiki/The_Young_Runaways",
# :movie_title=>"The Young Runaways",
# :role=>"Terry"},
# {:year=>"1969",
# :movie_url=>"/wiki/Hello_Down_There",
# :movie_title=>"Hello Down There",
# :role=>"Harold Webster"},
# {:year=>"1970",
# :movie_url=>"/wiki/The_Mod_Squad",
# :movie_title=>"The Mod Squad",
# :role=>"Curtis Bell"},
# {:year=>"1973",
# :movie_url=>"/wiki/American_Graffiti",
# :movie_title=>"American Graffiti",
# :role=>"Curt Henderson"},
# {:year=>"1973",
# :movie_url=>"/wiki/Dillinger_(1973_film)",
# :movie_title=>"Dillinger",
# :role=>"Baby Face Nelson"},
# {:year=>"1974",
# :movie_url=>"/wiki/The_Apprenticeship_of_Duddy_Kravitz_(film)",
# :movie_title=>"The Apprenticeship of Duddy Kravitz",
# :role=>"Duddy"},
# {:year=>"1974",
# :movie_url=>"/wiki/The_Second_Coming_of_Suzanne",
# :movie_title=>"The Second Coming of Suzanne",
# :role=>"Clavius"},
# {:year=>"1975",
# :movie_url=>"/wiki/Inserts_(film)",
# :movie_title=>"Inserts",
# :role=>"The Boy Wonder"},
# {:year=>"1975",
# :movie_url=>"/wiki/Jaws_(film)",
# :movie_title=>"Jaws",
# :role=>"Matt Hooper"},
# {:year=>"1976",
# :movie_url=>"/wiki/Victory_at_Entebbe",
# :movie_title=>"Victory at Entebbe",
# :role=>"Colonel Yonatan 'Yonni' Netanyahu"},
# {:year=>"1977",
# :movie_url=>"/wiki/Close_Encounters_of_the_Third_Kind",
# :movie_title=>"Close Encounters of the Third Kind",
# :role=>"Roy Neary"},
# {:year=>"1977",
# :movie_url=>"/wiki/The_Goodbye_Girl",
# :movie_title=>"The Goodbye Girl",
# :role=>"Elliott Garfield"},
# {:year=>"1978",
# :movie_url=>"/wiki/The_Big_Fix",
# :movie_title=>"The Big Fix",
# :role=>"Moses Wine"},
# {:year=>"1980",
# :movie_url=>"/wiki/The_Competition_(film)",
# :movie_title=>"The Competition",
# :role=>"Paul Dietrich"},
# {:year=>"1981",
# :movie_url=>"/wiki/Whose_Life_Is_It_Anyway%3F_(1981_film)",
# :movie_title=>"Whose Life Is It Anyway?",
# :role=>"Ken Harrison"},
# {:year=>"1984",
# :movie_url=>"/wiki/The_Buddy_System_(film)",
# :movie_title=>"The Buddy System",
# :role=>"Joe"},
# {:year=>"1986",
# :movie_url=>"/wiki/Down_and_Out_in_Beverly_Hills",
# :movie_title=>"Down and Out in Beverly Hills",
# :role=>"David 'Dave' Whiteman"},
# {:year=>"1986",
# :movie_url=>"/wiki/Stand_by_Me_(film)",
# :movie_title=>"Stand by Me",
# :role=>"Narrator/Gordie LaChance (adult)"},
# {:year=>"1987",
# :movie_url=>"/wiki/Tin_Men",
# :movie_title=>"Tin Men",
# :role=>"Bill 'BB' Babowsky"},
# {:year=>"1987",
# :movie_url=>"/wiki/Stakeout_(1987_film)",
# :movie_title=>"Stakeout",
# :role=>"Det. Chris Lecce"},
# {:year=>"1987",
# :movie_url=>"/wiki/Nuts_(film)",
# :movie_title=>"Nuts",
# :role=>"Aaron Levinsky"},
# {:year=>"1988",
# :movie_url=>"/wiki/Moon_Over_Parador",
# :movie_title=>"Moon Over Parador",
# :role=>"Jack Noah/President Alphonse Simms"},
# {:year=>"1989",
# :movie_url=>"/wiki/Let_It_Ride_(film)",
# :movie_title=>"Let It Ride",
# :role=>"Jay Trotter"},
# {:year=>"1989",
# :movie_url=>"/wiki/Always_(1989_film)",
# :movie_title=>"Always",
# :role=>"Pete Sandich"},
# {:year=>"1990",
# :movie_url=>"/wiki/Rosencrantz_%26_Guildenstern_Are_Dead_(film)",
# :movie_title=>"Rosencrantz & Guildenstern Are Dead",
# :role=>"The Player"},
# {:year=>"1990",
# :movie_url=>"/wiki/Postcards_from_the_Edge_(film)",
# :movie_title=>"Postcards from the Edge",
# :role=>"Doctor Frankenthal"},
# {:year=>"1991",
# :movie_url=>"/wiki/Once_Around",
# :movie_title=>"Once Around",
# :role=>"Sam Sharpe"},
# {:year=>"1991",
# :movie_url=>"/wiki/Prisoner_of_Honor",
# :movie_title=>"Prisoner of Honor",
# :role=>"Col. Picquart"},
# {:year=>"1991",
# :movie_url=>"/wiki/What_About_Bob%3F",
# :movie_title=>"What About Bob?",
# :role=>"Dr. Leo Marvin"},
# {:year=>"1993",
# :movie_url=>"/wiki/Lost_in_Yonkers_(film)",
# :movie_title=>"Lost in Yonkers",
# :role=>"Louie Kurnitz"},
# {:year=>"1993",
# :movie_url=>"/wiki/Another_Stakeout",
# :movie_title=>"Another Stakeout",
# :role=>"Detective Chris Lecce"},
# {:year=>"1994",
# :movie_url=>"/wiki/Silent_Fall",
# :movie_title=>"Silent Fall",
# :role=>"Dr. Jake Rainer"},
# {:year=>"1995",
# :movie_url=>
# "/w/index.php?title=The_Last_Word_(1995_film)&action=edit&redlink=1",
# :movie_title=>"The Last Word",
# :role=>"Larry"},
# {:year=>"1995",
# :movie_url=>"/wiki/The_American_President_(film)",
# :movie_title=>"The American President",
# :role=>"Senator Bob Rumson"},
# {:year=>"1995",
# :movie_url=>"/wiki/Mr._Holland%27s_Opus",
# :movie_title=>"Mr. Holland's Opus",
# :role=>"Glenn Holland"},
# {:year=>"1996",
# :movie_url=>"/wiki/James_and_the_Giant_Peach_(film)",
# :movie_title=>"James and the Giant Peach",
# :role=>"Centipede (voice)"},
# {:year=>"1996",
# :movie_url=>"/wiki/Mad_Dog_Time",
# :movie_title=>"Mad Dog Time",
# :role=>"Vic"},
# {:year=>"1997",
# :movie_url=>"/wiki/Night_Falls_on_Manhattan",
# :movie_title=>"Night Falls on Manhattan",
# :role=>"Sam Vigoda"},
# {:year=>"1997",
# :movie_url=>"/wiki/Oliver_Twist_(1997_film)",
# :movie_title=>"Oliver Twist",
# :role=>"Fagin"},
# {:year=>"1998",
# :movie_url=>"/wiki/Krippendorf%27s_Tribe",
# :movie_title=>"Krippendorf's Tribe",
# :role=>"Prof. James Krippendorf"},
# {:year=>"1999",
# :movie_url=>"/wiki/Lansky_(film)",
# :movie_title=>"Lansky",
# :role=>"Meyer Lansky"},
# {:year=>"2000",
# :movie_url=>"/wiki/The_Crew_(2000_film)",
# :movie_title=>"The Crew",
# :role=>"Bobby Bartellemeo/Narrator"},
# {:year=>"2000",
# :movie_url=>"/wiki/Fail_Safe_(2000_TV)",
# :movie_title=>"Fail Safe",
# :role=>"President of the United States"},
# {:year=>"2001",
# :movie_url=>"/wiki/The_Old_Man_Who_Read_Love_Stories",
# :movie_title=>"The Old Man Who Read Love Stories",
# :role=>"Antonio Bolivar"},
# {:year=>"2001",
# :movie_url=>"/wiki/Who_Is_Cletis_Tout%3F",
# :movie_title=>"Who Is Cletis Tout?",
# :role=>"Micah Donnelly"},
# {:year=>"2001",
# :movie_url=>"/wiki/The_Education_of_Max_Bickford",
# :movie_title=>"The Education of Max Bickford",
# :role=>"Max Bickford"},
# {:year=>"2001",
# :movie_url=>"/wiki/The_Day_Reagan_Was_Shot",
# :movie_title=>"The Day Reagan Was Shot",
# :role=>"Alexander Haig"},
# {:year=>"2003",
# :movie_url=>"/wiki/Coast_to_Coast_(TV_film)",
# :movie_title=>"Coast to Coast",
# :role=>"Barnaby Pierce"},
# {:year=>"2004",
# :movie_url=>"/wiki/Silver_City_(2004_film)",
# :movie_title=>"Silver City",
# :role=>"Chuck Raven"},
# {:year=>"2006",
# :movie_url=>"/wiki/Poseidon_(film)",
# :movie_title=>"Poseidon",
# :role=>"Richard Nelson"},
# {:year=>"2007",
# :movie_url=>"/wiki/Tin_Man_(TV_miniseries)",
# :movie_title=>"Tin Man",
# :role=>"Mystic Man"},
# {:year=>"2007",
# :movie_url=>"/wiki/Ocean_of_Fear",
# :movie_title=>"Ocean of Fear",
# :role=>"Narrator"},
# {:year=>"2008",
# :movie_url=>"/wiki/Signs_of_the_Time_(film)",
# :movie_title=>"Signs of the Time",
# :role=>"Narrator"},
# {:year=>"2008",
# :movie_url=>"/wiki/W._(film)",
# :movie_title=>"W.",
# :role=>"Dick Cheney"},
# {:year=>"2008",
# :movie_url=>"/w/index.php?title=America_Betrayed&action=edit&redlink=1",
# :movie_title=>"America Betrayed",
# :role=>"Narrator"},
# {:year=>"2009",
# :movie_url=>"/wiki/My_Life_in_Ruins",
# :movie_title=>"My Life in Ruins",
# :role=>"Irv"},
# {:year=>"2009",
# :movie_url=>"/wiki/Leaves_of_Grass_(film)",
# :movie_title=>"Leaves of Grass",
# :role=>"Pug Rothbaum"},
# {:year=>"2009",
# :movie_url=>"/wiki/The_Lightkeepers",
# :movie_title=>"The Lightkeepers",
# :role=>"Seth"},
# {:year=>"2010",
# :movie_url=>"/wiki/Piranha_3D",
# :movie_title=>"Piranha 3D",
# :role=>"Matthew Boyd"},
# {:year=>"2010",
# :movie_url=>"/wiki/Weeds_(TV_series)",
# :movie_title=>"Weeds",
# :role=>"Warren Schiff"},
# {:year=>"2010",
# :movie_url=>"/wiki/RED_(film)",
# :movie_title=>"RED",
# :role=>"Alexander Dunning"},
# {:year=>"2012",
# :movie_url=>"/wiki/Coma_(U.S._miniseries)",
# :movie_title=>"Coma",
# :role=>"Professor Hillside"},
# {:year=>"2013",
# :movie_url=>"/wiki/Very_Good_Girls",
# :movie_title=>"Very Good Girls",
# :role=>"Danny, Gerry's father"},
# {:year=>"2013",
# :movie_url=>"/wiki/Paranoia_(2013_film)",
# :movie_title=>"Paranoia",
# :role=>"Francis Cassidy"}]
To explain what it's doing:
The "Filmography" table is a good source for the information; it's organized logically, so writing code to walk through it is easy.
doc.at('#Filmography').parent.next_element
finds that table using the <h2> heading just above it, then backs up and looks in the next tag, which is the table itself.
table.search('tr')[1..-1] finds the <tr> rows inside the table, skips the first, then iterates (using map) over the remaining ones.
tds = tr.search('td') finds the cells for the table. From that point on it's a matter of peeling that NodeSet apart like an array, by looking at the elements I want. The rest of the code should be pretty obvious. Once the individual parts are retrieved that are of interest they're bundled into a hash, which is returned as part of an array of hashes by map.
Why not try parsing out the filmography section of the wikipedia article? It seems pretty standard across the few actors that I looked at, and it mentions whether or not it was a TV series so you could filter those out easily.
<tr>
<td>1966</td>
<td><i>Gidget</i></td>
<td>Durf the Drag</td>
<td>TV series 1 episode</td>
</tr>
<tr>
<td>1967</td>
<td><i>Valley of the Dolls</i></td>
<td>Assistant stage manager</td>
<td>Uncredited</td>
</tr>
Looks like you could pull nodes similar to this from the code and save all the info to do what you want with it. The first node could be disregarded since "TV" appears multiple times in the different subnodes.
Hope this helps!
-Larry
Okay So I have tested the code based on your actual request and come up with the following
# For every link on the page, inspect up to 50 characters of the enclosing
# paragraph's text immediately before the link; print the href when that
# window mentions "film".
# Fix: XPath attribute tests use '@' — "//a[#href]" is invalid XPath and
# must be "//a[@href]" to select anchors that have an href attribute.
url = "http://en.wikipedia.org/wiki/Richard_Dreyfuss"
doc = Nokogiri::HTML(open(url))
all_links = doc.search("//a[@href]")
all_links.each do |link|
  # Full text of the paragraph(s) this link is nested in.
  p_text = link.ancestors("p").text
  link_index = p_text.index(link.text)
  unless link_index.nil?
    # Look back at most 50 characters (clamped at the paragraph start).
    search_back = link_index > 50 ? link_index - 50 : 0
    p_text[search_back..link_index].downcase.include?("film") ? puts(link['href']) : nil
  end
end
Output
#=>/wiki/American_Graffiti
/wiki/Jaws_(film)
/wiki/Close_Encounters_of_the_Third_Kind
/wiki/The_Graduate
/wiki/The_Apprenticeship_of_Duddy_Kravitz_(film)
/wiki/Down_And_Out_In_Beverly_Hills
/wiki/Stakeout_(1987_film)
/wiki/Stephen_King
/wiki/The_Body_(novella)
/wiki/Poseidon_(film)
#cite_note-27
/wiki/Jonathan_Tasini
This seems to satisfy the question you were asking but obviously needs to be modified to fit your needs.
Edit
Added your request for looking back 50 characters in the paragraph; the response is much shorter now, but I am not sure the results will be as useful as you'd like. This answers the question but does not capture exactly what you are hoping for — e.g. the last two links are not to films, but they are within 50 characters of the word film.

Sphinx runs queries on DB even when turned off

I've installed Sphinx on my server to try it out. I set up a simple search source, ran the indexer once, and it worked well. Then I stopped the Sphinx process, and it has not been running for a few weeks now:
user@server ~ $ ps aux | grep sphinx
user 5919 0.0 0.0 13584 920 pts/5 S+ 12:07 0:00 grep --colour=auto sphinx
user@server ~ $ ps aux | grep index
user 5921 0.0 0.0 13584 916 pts/5 S+ 12:07 0:00 grep --colour=auto index
user@server ~ $ ps aux | grep search
user 5925 0.0 0.0 13584 916 pts/5 S+ 12:07 0:00 grep --colour=auto search
But yesterday I've noticed an unussually big memory usage on my mysql database server. In show processlist; I saw a query that I programmed in sphinx sources: SELECT id, content FROM articles.
Why is this happening if sphinx is stopped? How to stop sphinx from executing the queries?
My sphinx.conf:
#
# Sphinx configuration file sample
#
# WARNING! While this sample file mentions all available options,
# it contains (very) short helper descriptions only. Please refer to
# doc/sphinx.html for details.
#
#############################################################################
## data source definition
#############################################################################
source src1
{
# data source type. mandatory, no default value
# known types are mysql, pgsql, mssql, xmlpipe, xmlpipe2, odbc
type = mysql
#####################################################################
## SQL settings (for 'mysql' and 'pgsql' types)
#####################################################################
# some straightforward parameters for SQL source types
sql_host = 85.254.49.181
sql_user = root
sql_pass = #########
sql_db = articles_db
sql_port = 3306 # optional, default is 3306
# UNIX socket name
# optional, default is empty (reuse client library defaults)
# usually '/var/lib/mysql/mysql.sock' on Linux
# usually '/tmp/mysql.sock' on FreeBSD
#
# sql_sock = /tmp/mysql.sock
# MySQL specific client connection flags
# optional, default is 0
#
# mysql_connect_flags = 32 # enable compression
# MySQL specific SSL certificate settings
# optional, defaults are empty
#
# mysql_ssl_cert = /etc/ssl/client-cert.pem
# mysql_ssl_key = /etc/ssl/client-key.pem
# mysql_ssl_ca = /etc/ssl/cacert.pem
# MS SQL specific Windows authentication mode flag
# MUST be in sync with charset_type index-level setting
# optional, default is 0
#
# mssql_winauth = 1 # use currently logged on user credentials
# MS SQL specific Unicode indexing flag
# optional, default is 0 (request SBCS data)
#
# mssql_unicode = 1 # request Unicode data from server
# ODBC specific DSN (data source name)
# mandatory for odbc source type, no default value
#
# odbc_dsn = DBQ=C:\data;DefaultDir=C:\data;Driver={Microsoft Text Driver (*.txt; *.csv)};
# sql_query = SELECT id, data FROM documents.csv
# pre-query, executed before the main fetch query
# multi-value, optional, default is empty list of queries
#
sql_query_pre = SET NAMES utf8
# sql_query_pre = SET SESSION query_cache_type=OFF
# main document fetch query
# mandatory, integer document ID field MUST be the first selected column
sql_query = \
SELECT id, content \
FROM articles
# range query setup, query that must return min and max ID values
# optional, default is empty
#
# sql_query will need to reference $start and $end boundaries
# if using ranged query:
#
# sql_query = \
# SELECT doc.id, doc.id AS group, doc.title, doc.data \
# FROM documents doc \
# WHERE id>=$start AND id<=$end
#
# sql_query_range = SELECT MIN(id),MAX(id) FROM documents
# range query step
# optional, default is 1024
#
# sql_range_step = 1000
# unsigned integer attribute declaration
# multi-value (an arbitrary number of attributes is allowed), optional
# optional bit size can be specified, default is 32
#
# sql_attr_uint = author_id
# sql_attr_uint = forum_id:9 # 9 bits for forum_id
#sql_attr_uint = id
# boolean attribute declaration
# multi-value (an arbitrary number of attributes is allowed), optional
# equivalent to sql_attr_uint with 1-bit size
#
# sql_attr_bool = is_deleted
# bigint attribute declaration
# multi-value (an arbitrary number of attributes is allowed), optional
# declares a signed (unlike uint!) 64-bit attribute
#
# sql_attr_bigint = my_bigint_id
# UNIX timestamp attribute declaration
# multi-value (an arbitrary number of attributes is allowed), optional
# similar to integer, but can also be used in date functions
#
# sql_attr_timestamp = posted_ts
# sql_attr_timestamp = last_edited_ts
#sql_attr_timestamp = date_added
# string ordinal attribute declaration
# multi-value (an arbitrary number of attributes is allowed), optional
# sorts strings (bytewise), and stores their indexes in the sorted list
# sorting by this attr is equivalent to sorting by the original strings
#
# sql_attr_str2ordinal = author_name
# floating point attribute declaration
# multi-value (an arbitrary number of attributes is allowed), optional
# values are stored in single precision, 32-bit IEEE 754 format
#
# sql_attr_float = lat_radians
# sql_attr_float = long_radians
# multi-valued attribute (MVA) attribute declaration
# multi-value (an arbitrary number of attributes is allowed), optional
# MVA values are variable length lists of unsigned 32-bit integers
#
# syntax is ATTR-TYPE ATTR-NAME 'from' SOURCE-TYPE [;QUERY] [;RANGE-QUERY]
# ATTR-TYPE is 'uint' or 'timestamp'
# SOURCE-TYPE is 'field', 'query', or 'ranged-query'
# QUERY is SQL query used to fetch all ( docid, attrvalue ) pairs
# RANGE-QUERY is SQL query used to fetch min and max ID values, similar to 'sql_query_range'
#
# sql_attr_multi = uint tag from query; SELECT id, tag FROM tags
# sql_attr_multi = uint tag from ranged-query; \
# SELECT id, tag FROM tags WHERE id>=$start AND id<=$end; \
# SELECT MIN(id), MAX(id) FROM tags
# post-query, executed on sql_query completion
# optional, default is empty
#
# sql_query_post =
# post-index-query, executed on successful indexing completion
# optional, default is empty
# $maxid expands to max document ID actually fetched from DB
#
# sql_query_post_index = REPLACE INTO counters ( id, val ) \
# VALUES ( 'max_indexed_id', $maxid )
# ranged query throttling, in milliseconds
# optional, default is 0 which means no delay
# enforces given delay before each query step
sql_ranged_throttle = 0
# document info query, ONLY for CLI search (i.e. testing and debugging)
# optional, default is empty
# must contain $id macro and must fetch the document by that id
sql_query_info = SELECT * FROM articles WHERE id=$id
# kill-list query, fetches the document IDs for kill-list
# k-list will suppress matches from preceding indexes in the same query
# optional, default is empty
#
# sql_query_killlist = SELECT id FROM documents WHERE edited>=#last_reindex
# columns to unpack on indexer side when indexing
# multi-value, optional, default is empty list
#
# unpack_zlib = zlib_column
# unpack_mysqlcompress = compressed_column
# unpack_mysqlcompress = compressed_column_2
# maximum unpacked length allowed in MySQL COMPRESS() unpacker
# optional, default is 16M
#
# unpack_mysqlcompress_maxsize = 16M
#####################################################################
## xmlpipe settings
#####################################################################
# type = xmlpipe
# shell command to invoke xmlpipe stream producer
# mandatory
#
# xmlpipe_command = cat /var/lib/sphinxsearch/test.xml
#####################################################################
## xmlpipe2 settings
#####################################################################
# type = xmlpipe2
# xmlpipe_command = cat /var/lib/sphinxsearch/test2.xml
# xmlpipe2 field declaration
# multi-value, optional, default is empty
#
# xmlpipe_field = subject
# xmlpipe_field = content
# xmlpipe2 attribute declaration
# multi-value, optional, default is empty
# all xmlpipe_attr_XXX options are fully similar to sql_attr_XXX
#
# xmlpipe_attr_timestamp = published
# xmlpipe_attr_uint = author_id
# perform UTF-8 validation, and filter out incorrect codes
# avoids XML parser choking on non-UTF-8 documents
# optional, default is 0
#
# xmlpipe_fixup_utf8 = 1
}
# inherited source example
#
# all the parameters are copied from the parent source,
# and may then be overridden in this source definition
source src1throttled : src1
{
sql_ranged_throttle = 100
}
#############################################################################
## index definition
#############################################################################
# local index example
#
# this is an index which is stored locally in the filesystem
#
# all indexing-time options (such as morphology and charsets)
# are configured per local index
index articles
{
# document source(s) to index
# multi-value, mandatory
# document IDs must be globally unique across all sources
source = src1
# index files path and file name, without extension
# mandatory, path must be writable, extensions will be auto-appended
path = /var/lib/sphinxsearch/data/parts
# document attribute values (docinfo) storage mode
# optional, default is 'extern'
# known values are 'none', 'extern' and 'inline'
docinfo = extern
# memory locking for cached data (.spa and .spi), to prevent swapping
# optional, default is 0 (do not mlock)
# requires searchd to be run from root
mlock = 0
# a list of morphology preprocessors to apply
# optional, default is empty
#
# builtin preprocessors are 'none', 'stem_en', 'stem_ru', 'stem_enru',
# 'soundex', and 'metaphone'; additional preprocessors available from
# libstemmer are 'libstemmer_XXX', where XXX is algorithm code
# (see libstemmer_c/libstemmer/modules.txt)
#
# morphology = stem_en, stem_ru, soundex
# morphology = libstemmer_german
# morphology = libstemmer_sv
morphology = stem_ru
# minimum word length at which to enable stemming
# optional, default is 1 (stem everything)
#
# min_stemming_len = 1
# stopword files list (space separated)
# optional, default is empty
# contents are plain text, charset_table and stemming are both applied
#
# stopwords = /var/lib/sphinxsearch/data/stopwords.txt
# wordforms file, in "mapfrom > mapto" plain text format
# optional, default is empty
#
# wordforms = /var/lib/sphinxsearch/data/wordforms.txt
# tokenizing exceptions file
# optional, default is empty
#
# plain text, case sensitive, space insensitive in map-from part
# one "Map Several Words => ToASingleOne" entry per line
#
# exceptions = /var/lib/sphinxsearch/data/exceptions.txt
# minimum indexed word length
# default is 1 (index everything)
min_word_len = 1
# charset encoding type
# optional, default is 'sbcs'
# known types are 'sbcs' (Single Byte CharSet) and 'utf-8'
charset_type = utf-8
# charset definition and case folding rules "table"
# optional, default value depends on charset_type
#
# defaults are configured to include English and Russian characters only
# you need to change the table to include additional ones
# this behavior MAY change in future versions
#
# 'sbcs' default value is
# charset_table = 0..9, A..Z->a..z, _, a..z, U+A8->U+B8, U+B8, U+C0..U+DF->U+E0..U+FF, U+E0..U+FF
#
# 'utf-8' default value is
# charset_table = 0..9, A..Z->a..z, _, a..z, U+410..U+42F->U+430..U+44F, U+430..U+44F
# ignored characters list
# optional, default value is empty
#
# ignore_chars = U+00AD
# minimum word prefix length to index
# optional, default is 0 (do not index prefixes)
#
# min_prefix_len = 0
# minimum word infix length to index
# optional, default is 0 (do not index infixes)
#
# min_infix_len = 0
# list of fields to limit prefix/infix indexing to
# optional, default value is empty (index all fields in prefix/infix mode)
#
# prefix_fields = filename
# infix_fields = url, domain
# enable star-syntax (wildcards) when searching prefix/infix indexes
# known values are 0 and 1
# optional, default is 0 (do not use wildcard syntax)
#
# enable_star = 1
# n-gram length to index, for CJK indexing
# only supports 0 and 1 for now, other lengths to be implemented
# optional, default is 0 (disable n-grams)
#
# ngram_len = 1
# n-gram characters list, for CJK indexing
# optional, default is empty
#
# ngram_chars = U+3000..U+2FA1F
# phrase boundary characters list
# optional, default is empty
#
# phrase_boundary = ., ?, !, U+2026 # horizontal ellipsis
# phrase boundary word position increment
# optional, default is 0
#
# phrase_boundary_step = 100
# whether to strip HTML tags from incoming documents
# known values are 0 (do not strip) and 1 (do strip)
# optional, default is 0
html_strip = 0
# what HTML attributes to index if stripping HTML
# optional, default is empty (do not index anything)
#
# html_index_attrs = img=alt,title; a=title;
# what HTML elements contents to strip
# optional, default is empty (do not strip element contents)
#
# html_remove_elements = style, script
# whether to preopen index data files on startup
# optional, default is 0 (do not preopen), searchd-only
#
# preopen = 1
# whether to keep dictionary (.spi) on disk, or cache it in RAM
# optional, default is 0 (cache in RAM), searchd-only
#
# ondisk_dict = 1
# whether to enable in-place inversion (2x less disk, 90-95% speed)
# optional, default is 0 (use separate temporary files), indexer-only
#
# inplace_enable = 1
# in-place fine-tuning options
# optional, defaults are listed below
#
# inplace_hit_gap = 0 # preallocated hitlist gap size
# inplace_docinfo_gap = 0 # preallocated docinfo gap size
# inplace_reloc_factor = 0.1 # relocation buffer size within arena
# inplace_write_factor = 0.1 # write buffer size within arena
# whether to index original keywords along with stemmed versions
# enables "=exactform" operator to work
# optional, default is 0
#
# index_exact_words = 1
# position increment on overshort (less than min_word_len) words
# optional, allowed values are 0 and 1, default is 1
#
# overshort_step = 1
# position increment on stopword
# optional, allowed values are 0 and 1, default is 1
#
# stopword_step = 1
}
# inherited index example
#
# all the parameters are copied from the parent index,
# and may then be overridden in this index definition
#index test1stemmed : test1
#{
# path = /var/lib/sphinxsearch/data/test1stemmed
# morphology = stem_en
#}
# distributed index example
#
# this is a virtual index which can NOT be directly indexed,
# and only contains references to other local and/or remote indexes
#############################################################################
## indexer settings
#############################################################################
indexer
{
# memory limit, in bytes, kilobytes (16384K) or megabytes (256M)
# optional, default is 32M, max is 2047M, recommended is 256M to 1024M
mem_limit = 32M
# maximum IO calls per second (for I/O throttling)
# optional, default is 0 (unlimited)
#
# max_iops = 40
# maximum IO call size, bytes (for I/O throttling)
# optional, default is 0 (unlimited)
#
# max_iosize = 1048576
# maximum xmlpipe2 field length, bytes
# optional, default is 2M
#
# max_xmlpipe2_field = 4M
# write buffer size, bytes
# several (currently up to 4) buffers will be allocated
# write buffers are allocated in addition to mem_limit
# optional, default is 1M
#
# write_buffer = 1M
}
#############################################################################
## searchd settings
#############################################################################
searchd
{
# hostname, port, or hostname:port, or /unix/socket/path to listen on
# multi-value, multiple listen points are allowed
# optional, default is 0.0.0.0:9312 (listen on all interfaces, port 9312)
#
#listen = localhost:9312
#listen = 0.0.0.0:9306:mysql41
# listen = 192.168.0.1:9312
# listen = 9312
# listen = /var/run/searchd.sock
listen = 0.0.0.0:9306:mysql41
# log file, searchd run info is logged here
# optional, default is 'searchd.log'
log = /var/log/sphinxsearch/searchd.log
# query log file, all search queries are logged here
# optional, default is empty (do not log queries)
query_log = /var/log/sphinxsearch/query.log
# client read timeout, seconds
# optional, default is 5
read_timeout = 5
# request timeout, seconds
# optional, default is 5 minutes
client_timeout = 300
# maximum amount of children to fork (concurrent searches to run)
# optional, default is 0 (unlimited)
max_children = 30
# PID file, searchd process ID file name
# mandatory
pid_file = /var/run/sphinxsearch/searchd.pid
# max amount of matches the daemon ever keeps in RAM, per-index
# WARNING, THERE'S ALSO PER-QUERY LIMIT, SEE SetLimits() API CALL
# default is 1000 (just like Google)
max_matches = 1000
# seamless rotate, prevents rotate stalls if precaching huge datasets
# optional, default is 1
seamless_rotate = 1
# whether to forcibly preopen all indexes on startup
# optional, default is 0 (do not preopen)
preopen_indexes = 0
# whether to unlink .old index copies on successful rotation.
# optional, default is 1 (do unlink)
unlink_old = 1
# attribute updates periodic flush timeout, seconds
# updates will be automatically dumped to disk this frequently
# optional, default is 0 (disable periodic flush)
#
# attr_flush_period = 900
# instance-wide ondisk_dict defaults (per-index value take precedence)
# optional, default is 0 (precache all dictionaries in RAM)
#
# ondisk_dict_default = 1
# MVA updates pool size
# shared between all instances of searchd, disables attr flushes!
# optional, default size is 1M
mva_updates_pool = 1M
# max allowed network packet size
# limits both query packets from clients, and responses from agents
# optional, default size is 8M
max_packet_size = 8M
# crash log path
# searchd will (try to) log crashed query to 'crash_log_path.PID' file
# optional, default is empty (do not create crash logs)
#
# crash_log_path = /var/log/sphinxsearch/crash
# max allowed per-query filter count
# optional, default is 256
max_filters = 256
# max allowed per-filter values count
# optional, default is 4096
max_filter_values = 4096
# socket listen queue length
# optional, default is 5
#
# listen_backlog = 5
# per-keyword read buffer size
# optional, default is 256K
#
# read_buffer = 256K
# unhinted read size (currently used when reading hits)
# optional, default is 32K
#
# read_unhinted = 32K
}
# --eof--
Sphinx itself — which generally means searchd (the daemon) — never runs queries on its own.
indexer is the tool that actually runs queries, and it has no facility to run automatically; i.e. it only runs when something invokes it.
Are you sure you didn't add it to cron/crontab — even just for testing?