import time
import os 
"""histry score db"""

#FILE_PATH = ''
# quick_search: load
def qs_load(filepath):
    """Load the quick-search history database from *filepath*.

    File format — one record per line, five ';'-separated fields:
        ctime; atime; score; type; value
    where type marks what the value is:
        -f  file
        -d  dir
        -z  z-file
        t/  z-thread
        u/  z-user file

    Returns a dict mapping value -> [ctime, atime, score, type]
    (ctime/atime/score as floats).  A missing file yields an empty
    dict; malformed or corrupted lines are skipped silently.
    """
    db = {}
    if not os.path.exists(filepath):
        return db

    with open(filepath, 'r') as f:
        for line in f:
            # maxsplit=4 keeps any ';' that appears inside the value field.
            parts = line.split(';', 4)
            if len(parts) != 5:
                continue
            try:
                ctime = float(parts[0].strip())
                atime = float(parts[1].strip())
                score = float(parts[2].strip())
            except ValueError:
                # Corrupted numeric field — skip the line rather than crash.
                continue
            _type = parts[3].strip()
            _value = parts[4].strip()
            db[_value] = [ctime, atime, score, _type]

    return db

# quick search update
def qs_update(db, _type, _value, filepath):
    """Record a hit on *_value*: decay-and-bump an existing score or insert a new entry.

    db maps value -> [ctime, atime, score, type].  Every update is also
    appended to *filepath* (and a click log at *filepath* + '.log') so
    there is a permanent on-disk record for recovery.

    Blank values are ignored.  For an existing entry the stored type and
    ctime are kept; for a new entry the *_type* argument is stored
    (previously it was unconditionally clobbered with '/').
    """
    _value = _value.strip()
    if not _value:
        return

    now = time.time()
    ctime = now              # new entries are created "now"
    day_in_sec = 86400
    new_score = 100          # points awarded for this hit

    if _value in db:
        # Existing entry: keep its original ctime and stored type.
        ctime, atime, score, _type = db[_value]
        days_ago = (now - atime) / day_in_sec
        # Decay the stored score before adding this hit's points,
        # so stale entries lose weight over time.
        if days_ago > 30:
            score *= 0.5
        elif days_ago > 7:
            score *= 0.8
        new_score += score

    db[_value] = [ctime, now, new_score, _type]

    # Synchronous append-only backup (very fast) — permanent record of
    # every click for recovery.
    with open(filepath, 'a', encoding='utf-8') as f:
        f.write(f" {ctime}; {now}; {new_score}; {_type}; {_value} \n")

    # Human-readable click log.
    with open(filepath + '.log', 'a', encoding='utf-8') as f:
        f.write(f"- {now}; {_type} {_value} \n")
        
#   quick search sugg 
def qs_sugg(db, _type, query=None, limit=5):
    """Return up to *limit* matching values, best score first.

    A db entry [ctime, atime, score, type] matches when its type field
    contains *_type* (substring match) and the value contains every
    '&'-separated token of *query*, case-insensitively.  A None or
    empty query matches everything of the requested type (previously a
    None query crashed on query.lower()).
    """
    # Normalise the query; '' yields the single token '' which is a
    # substring of every value, i.e. "match all".
    tokens = (query or '').lower().split('&')

    matches = []
    for value, data in db.items():
        # data[-1] is the type field; keep the original substring test.
        if _type not in data[-1]:
            continue
        if all(token in value.lower() for token in tokens):
            # Rank by score (data[2]).  The original indexed data[1]
            # (atime), which contradicts the "top paths" contract of a
            # score database.
            matches.append((value, data[2]))

    matches.sort(key=lambda m: m[1], reverse=True)

    # Apply the advertised limit (previously declared but never used).
    return [value for value, _score in matches[:limit]]



