import * as plugins from './plugins.js';
import * as aiDocsClasses from './aidocs_classes/index.js';
export class AiDoc {
 | 
						|
  private openaiToken: string;
 | 
						|
 | 
						|
  public npmextraKV: plugins.npmextra.KeyValueStore;
 | 
						|
  public qenvInstance: plugins.qenv.Qenv;
 | 
						|
  public aidocInteract: plugins.smartinteract.SmartInteract;
 | 
						|
  public openaiInstance: plugins.smartai.OpenAiProvider;
 | 
						|
 | 
						|
  argvArg: any;
 | 
						|
 | 
						|
  constructor(argvArg?: any) {
 | 
						|
    this.argvArg = argvArg;
 | 
						|
  }
 | 
						|
 | 
						|
  private printSanitizedToken() {
 | 
						|
    // Check if the token length is greater than the sum of startLength and endLength
 | 
						|
    let printToken: string;
 | 
						|
    if (this.openaiToken.length > 6) {
 | 
						|
      // Extract the beginning and end parts of the token
 | 
						|
      const start = this.openaiToken.substring(0, 3);
 | 
						|
      const end = this.openaiToken.substring(this.openaiToken.length - 3);
 | 
						|
      printToken = `${start}...${end}`;
 | 
						|
    } else {
 | 
						|
      // If the token is not long enough, return it as is
 | 
						|
      printToken = this.openaiToken;
 | 
						|
    }
 | 
						|
    console.log(`OpenAI Token on record: ${printToken}`);
 | 
						|
  }
 | 
						|
 | 
						|
  public async start() {
 | 
						|
    // lets care about prerequisites
 | 
						|
    this.aidocInteract = new plugins.smartinteract.SmartInteract();
 | 
						|
    this.qenvInstance = new plugins.qenv.Qenv();
 | 
						|
    if (!(await this.qenvInstance.getEnvVarOnDemand('OPENAI_TOKEN'))) {
 | 
						|
      this.npmextraKV = new plugins.npmextra.KeyValueStore({
 | 
						|
        typeArg: 'userHomeDir',
 | 
						|
        identityArg: 'tsdoc',
 | 
						|
        mandatoryKeys: ['OPENAI_TOKEN'],
 | 
						|
      });
 | 
						|
 | 
						|
      const missingKeys = await this.npmextraKV.getMissingMandatoryKeys();
 | 
						|
      if (missingKeys.length > 0) {
 | 
						|
        // lets try argv
 | 
						|
        if (this.argvArg?.OPENAI_TOKEN) {
 | 
						|
          this.openaiToken = this.argvArg.OPENAI_TOKEN;
 | 
						|
        } else {
 | 
						|
          // lets try smartinteract
 | 
						|
          // wait for a second until OpenAI fixes punycode problem...
 | 
						|
          await plugins.smartdelay.delayFor(1000);
 | 
						|
          const answerObject = await this.aidocInteract.askQuestion({
 | 
						|
            type: 'input',
 | 
						|
            message: `Please provide your OpenAI token. This will be persisted in your home directory.`,
 | 
						|
            name: 'OPENAI_TOKEN',
 | 
						|
            default: '',
 | 
						|
          });
 | 
						|
          this.openaiToken = answerObject.value;
 | 
						|
        }
 | 
						|
 | 
						|
        this.printSanitizedToken();
 | 
						|
        await this.npmextraKV.writeKey('OPENAI_TOKEN', this.openaiToken);
 | 
						|
      }
 | 
						|
    }
 | 
						|
    if (!this.openaiToken) {
 | 
						|
      this.openaiToken = await this.npmextraKV.readKey('OPENAI_TOKEN');
 | 
						|
    }
 | 
						|
 | 
						|
    // lets assume we have an OPENAI_Token now
 | 
						|
    this.openaiInstance = new plugins.smartai.OpenAiProvider({
 | 
						|
      openaiToken: this.openaiToken,
 | 
						|
    });
 | 
						|
    await this.openaiInstance.start();
 | 
						|
  }
 | 
						|
 | 
						|
  public async stop() {
 | 
						|
    await this.openaiInstance.stop();
 | 
						|
  }
 | 
						|
 | 
						|
  public async buildReadme(projectDirArg: string) {
 | 
						|
    const readmeInstance = new aiDocsClasses.Readme(this, projectDirArg);
 | 
						|
    return await readmeInstance.build();
 | 
						|
  }
 | 
						|
 | 
						|
  public async buildDescription(projectDirArg: string) {
 | 
						|
    const descriptionInstance = new aiDocsClasses.Description(this, projectDirArg);
 | 
						|
    return await descriptionInstance.build();
 | 
						|
  }
 | 
						|
 | 
						|
  public async buildNextCommitObject(projectDirArg: string) {
 | 
						|
    const commitInstance = new aiDocsClasses.Commit(this, projectDirArg);
 | 
						|
    return await commitInstance.buildNextCommitObject();
 | 
						|
  }
 | 
						|
 | 
						|
  public async getProjectContext(projectDirArg: string) {
 | 
						|
    const projectContextInstance = new aiDocsClasses.ProjectContext(projectDirArg);
 | 
						|
    return await projectContextInstance.gatherFiles();
 | 
						|
  }
 | 
						|
  
 | 
						|
  /**
 | 
						|
   * Get the context with token count information
 | 
						|
   * @param projectDirArg The path to the project directory
 | 
						|
   * @returns An object containing the context string and its token count
 | 
						|
   */
 | 
						|
  public async getProjectContextWithTokenCount(projectDirArg: string) {
 | 
						|
    const projectContextInstance = new aiDocsClasses.ProjectContext(projectDirArg);
 | 
						|
    await projectContextInstance.update();
 | 
						|
    return projectContextInstance.getContextWithTokenCount();
 | 
						|
  }
 | 
						|
  
 | 
						|
  /**
 | 
						|
   * Get just the token count for a project's context
 | 
						|
   * @param projectDirArg The path to the project directory
 | 
						|
   * @returns The number of tokens in the project context
 | 
						|
   */
 | 
						|
  public async getProjectContextTokenCount(projectDirArg: string) {
 | 
						|
    const projectContextInstance = new aiDocsClasses.ProjectContext(projectDirArg);
 | 
						|
    await projectContextInstance.update();
 | 
						|
    return projectContextInstance.getTokenCount();
 | 
						|
  }
 | 
						|
  
 | 
						|
  /**
 | 
						|
   * Count tokens in a text string using GPT tokenizer
 | 
						|
   * @param text The text to count tokens for
 | 
						|
   * @param model The model to use for tokenization (default: gpt-3.5-turbo)
 | 
						|
   * @returns The number of tokens in the text
 | 
						|
   */
 | 
						|
  public countTokens(text: string, model: string = 'gpt-3.5-turbo'): number {
 | 
						|
    const projectContextInstance = new aiDocsClasses.ProjectContext('');
 | 
						|
    return projectContextInstance.countTokens(text, model);
 | 
						|
  }
 | 
						|
}
 |