"""
ldap.schema.tokenizer - Low-level parsing functions for schema element strings,
written by Michael Stroeder <michael@stroeder.com>

See http://python-ldap.sourceforge.net for details.

$Id: tokenizer.py,v 1.8 2003/11/22 20:15:44 stroeder Exp $
"""


def split_tokens(s):
  """
  Returns a list of syntax elements with quotes and spaces
  stripped.
  """
  result = []
  result_append = result.append # Cache bound method for speed
  s_len = len(s)
  i = 0
  while i < s_len:
    start = i
    # Scan unquoted text, splitting on parentheses and runs of spaces
    while i < s_len and s[i] != "'":
      if s[i] == "(" or s[i] == ")":
        if i > start:
          result_append(s[start:i])
        result_append(s[i])
        i += 1 # Consume parenthesis
        start = i
      elif s[i] == " ":
        if i > start:
          result_append(s[start:i])
        # Consume space chars
        while i < s_len and s[i] == " ":
          i += 1
        start = i
      else:
        i += 1
    if i > start:
      result_append(s[start:i])
    i += 1 # Consume opening quote
    if i >= s_len:
      break
    # Scan quoted value up to the closing quote
    start = i
    while i < s_len and s[i] != "'":
      i += 1
    if i >= start:
      result_append(s[start:i]) # Quoted value may be empty
    i += 1 # Consume closing quote
  return result # split_tokens()
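
# A quick sketch of split_tokens() on an illustrative attribute-type
# definition (the input below is an example, not taken from this module):
# quoted values lose their quotes and parentheses become separate elements.
#
#   split_tokens("( 2.5.4.3 NAME ( 'cn' 'commonName' ) )")
#   -> ['(', '2.5.4.3', 'NAME', '(', 'cn', 'commonName', ')', ')']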


def extract_tokens(l, known_tokens):
  """
  Returns a dictionary mapping each known token to the tuple
  of values found for it.
  """
  assert l[0].strip() == "(" and l[-1].strip() == ")", ValueError(l)
  result = {}
  result_has_key = result.has_key # Cache bound method for speed
  result.update(known_tokens)
  i = 0
  l_len = len(l)
  while i < l_len:
    if result_has_key(l[i]):
      token = l[i]
      i += 1 # Consume token
      if i < l_len:
        if result_has_key(l[i]):
          # Non-valued token: the next element is itself a known token
          result[token] = ()
        elif l[i] == "(":
          # Multi-valued token: collect elements up to the closing
          # parenthesis, dropping '$' separators
          i += 1 # Consume left parenthesis
          start = i
          while i < l_len and l[i] != ")":
            i += 1
          result[token] = tuple(filter(lambda v: v != '$', l[start:i]))
          i += 1 # Consume right parenthesis
        else:
          # Single-valued token
          result[token] = (l[i],)
          i += 1 # Consume single value
    else:
      i += 1 # Consume unrecognized item
  return result # extract_tokens()
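
# A sketch of how extract_tokens() consumes split_tokens() output
# (illustrative inputs, not from this module); known_tokens supplies the
# recognized keywords with their default values:
#
#   l = split_tokens("( 2.5.4.3 NAME ( 'cn' 'commonName' ) )")
#   extract_tokens(l, {'NAME': ()})
#   -> {'NAME': ('cn', 'commonName')}            # multi-valued
#
#   l = split_tokens("( 1.2.3 SINGLE-VALUE DESC 'a foo' )")
#   extract_tokens(l, {'SINGLE-VALUE': (), 'DESC': ()})
#   -> {'SINGLE-VALUE': (), 'DESC': ('a foo',)}  # non-valued and single-valued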

