
Commit 6547e01

Merge pull request #133 from ipa-lab/restructuring
Integrate the RAG-prototype into the LinuxPrivEsc prototype
2 parents 9bd9432 + a812c04 commit 6547e01

File tree: 14 files changed (+60 / -428 lines)


src/hackingBuddyGPT/strategies.py

Lines changed: 10 additions & 2 deletions
@@ -36,6 +36,8 @@ class CommandStrategy(UseCase, abc.ABC):
 
     disable_history: bool = False
 
+    enable_compressed_history: bool = False
+
     def before_run(self):
         pass
 
@@ -59,7 +61,10 @@ def init(self):
     def get_next_command(self) -> tuple[str, int]:
         history = ""
         if not self.disable_history:
-            history = self._sliding_history.get_history(self._max_history_size - self.get_state_size())
+            if self.enable_compressed_history:
+                history = self._sliding_history.get_commands_and_last_output(self._max_history_size - self.get_state_size())
+            else:
+                history = self._sliding_history.get_history(self._max_history_size - self.get_state_size())
 
         self._template_params.update({"history": history})
         cmd = self.llm.get_response(self._template, **self._template_params)
@@ -111,7 +116,10 @@ def perform_round(self, turn: int) -> bool:
 
         # store the results in our local history
         if not self.disable_history:
-            self._sliding_history.add_command(cmd, result)
+            if self.enable_compressed_history:
+                self._sliding_history.add_command_only(cmds, result)
+            else:
+                self._sliding_history.add_command(cmds, result)
 
         # signal if we were successful in our task
         return task_successful
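
The compressed-history helpers (`add_command_only`, `get_commands_and_last_output`) are only called here; their implementation is not part of this diff. A minimal sketch of what such a history could look like, assuming the idea is to keep every command but only the most recent output, trimmed to a size budget (class name, constructor, and the `count_tokens` hook are assumptions, not the project's actual code):

```python
# Hypothetical sketch -- the commit only shows the call sites; all names below
# except add_command_only / get_commands_and_last_output are assumptions.
class CompressedSlidingHistory:
    """Keep every executed command, but only the output of the most recent one."""

    def __init__(self, count_tokens=len):
        # the real code presumably measures size in LLM tokens; len() is a stand-in
        self.count_tokens = count_tokens
        self.commands: list[str] = []
        self.last_output: str = ""

    def add_command_only(self, cmd: str, result: str) -> None:
        # remember the command itself, drop the outputs of older commands
        self.commands.append(cmd)
        self.last_output = result

    def get_commands_and_last_output(self, target_size: int) -> str:
        history = "\n".join(self.commands) + "\n\n" + self.last_output
        # trim the oldest commands until the history fits the given budget
        while self.commands and self.count_tokens(history) > target_size:
            self.commands.pop(0)
            history = "\n".join(self.commands) + "\n\n" + self.last_output
        return history
```

This matches the intent of `enable_compressed_history`: the prompt still lists everything that has been tried, but carries only one full command output.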
src/hackingBuddyGPT/usecases/__init__.py

Lines changed: 0 additions & 1 deletion

@@ -1,7 +1,6 @@
 from .web import *
 from .web_api_testing import *
 from .viewer import *
-from .rag import *
 from .minimal_linux_privesc import *
 from .call_usecase_from_usecase import *
 from .linux_privesc import *

src/hackingBuddyGPT/usecases/linux_privesc.py

Lines changed: 27 additions & 8 deletions
@@ -9,14 +9,25 @@
 from hackingBuddyGPT.usecases.base import use_case
 from hackingBuddyGPT.utils import SSHConnection, llm_util
 from hackingBuddyGPT.utils.logging import log_conversation
+from hackingBuddyGPT.utils.rag import RagBackground
 
-template_analyze = Template("""You executed the command '${cmd}' and retrieved the following result:
+template_analyze = Template("""Your task is to analyze the result of an executed command to determine
+a way to escalate your privileges into a root shell. Describe your findings including all needed
+information while being as concise as possible.
+
+% if len(rag) > 0:
+You also have the following background information:
+---
+${rag}
+---
+%endif
+
+You executed the command '${cmd}' and retrieved the following result:
 
 ~~~ bash
 ${resp}
 ~~~
-
-Analyze if this response allows you to determine a way to escalate your privileges into a root shell. Be as concise as possible.""")
+""")
 
 template_update_state = Template("""Your current list of known facts relevant for privilege escalation is:
 
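
The `% if len(rag) > 0:` block means the background section is only rendered when retrieval actually returned something. A quick way to see this behaviour, assuming `Template` here is `mako.template.Template` (which the `${...}` / `% if` syntax suggests):

```python
from mako.template import Template

# trimmed-down version of the conditional block from template_analyze
snippet = Template("""% if len(rag) > 0:
You also have the following background information:
---
${rag}
---
%endif
You executed the command '${cmd}'.""")

# with rag == "" the background block is skipped entirely
print(snippet.render(cmd="id", rag=""))

# with retrieved text, the block appears between the --- markers
print(snippet.render(cmd="id", rag="some retrieved GTFOBins notes"))
```

So passing `rag=''` from the use case keeps the analysis prompt effectively unchanged from before.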
@@ -91,12 +102,14 @@ class PrivEscLinux(CommandStrategy):
 
     enable_structured_guidance: bool = False
 
-    enable_rag : bool = False
-
     enable_cot: bool = False
 
+    rag_path: str = ''
+
     _state: str = ""
 
+    _enable_rag: bool = False
+
     def init(self):
         super().init()
 
@@ -118,6 +131,10 @@ def init(self):
 
         guidance = []
 
+        if self.rag_path != '':
+            self._enable_rag = True
+            self._rag_data = RagBackground(self.rag_path, self.llm)
+
         if self.enable_cot:
             self._template_params['cot'] = template_cot
 
@@ -214,16 +231,18 @@ def get_rag_query(self, cmd, result):
     @log_conversation("Analyze its result...", start_section=True)
     def analyze_result(self, cmd, result):
 
-        if self.enable_rag:
-            # TODO: do the RAG query here and add it to the prompt
+        relevant_document_data = ''
+        if self._enable_rag:
             queries = self.get_rag_query(cmd, result)
             print("QUERIES: " + queries.result)
+            relevant_document_data = self._rag_data.get_relevant_documents(queries.result)
+            print("RELEVANT DOCUMENT DATA: " + relevant_document_data)
 
         state_size = self.get_state_size()
         target_size = self.llm.context_size - llm_util.SAFETY_MARGIN - state_size
 
         # ugly, but cut down result to fit context size
         result = llm_util.trim_result_front(self.llm, target_size, result)
-        answer = self.llm.get_response(template_analyze, cmd=cmd, resp=result, facts=self._state)
+        answer = self.llm.get_response(template_analyze, cmd=cmd, resp=result, facts=self._state, rag=relevant_document_data)
         self.log.call_response(answer)
         self._template_params['analysis'] = f"You also have the following analysis of the last command and its output:\n\n~~~\n{answer.result}\n~~~"
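
`RagBackground` itself is not shown in this commit: it now lives under `hackingBuddyGPT.utils.rag`, while the old `usecases/rag/common.py` is deleted below. Purely as an illustration of the interface the use case relies on here (a constructor taking a document path and an LLM handle, plus `get_relevant_documents(query)` returning text that is spliced into the prompt as `rag`), a hypothetical stand-in using naive keyword overlap instead of real retrieval:

```python
import os
import re


class NaiveRagBackground:
    """Hypothetical stand-in for RagBackground; the real class is not part of this diff."""

    def __init__(self, path: str, llm=None, top_k: int = 3):
        self.llm = llm  # the real implementation presumably uses this for embeddings or query rewriting
        self.top_k = top_k
        self.documents: dict[str, str] = {}
        for name in os.listdir(path):
            full = os.path.join(path, name)
            if os.path.isfile(full):
                with open(full, encoding="utf-8", errors="ignore") as f:
                    self.documents[name] = f.read()

    def get_relevant_documents(self, queries: str) -> str:
        # score each document by how many query tokens it contains
        tokens = set(re.findall(r"\w+", queries.lower()))
        scored = sorted(
            self.documents.items(),
            key=lambda kv: sum(tok in kv[1].lower() for tok in tokens),
            reverse=True,
        )
        # concatenate the best matches into one prompt-ready block
        return "\n\n".join(text for _, text in scored[: self.top_k])
```

Whatever this returns is passed to `template_analyze` as `rag`, so an empty string (RAG disabled, or nothing retrieved) leaves the prompt unchanged.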

src/hackingBuddyGPT/usecases/rag/README.md

Lines changed: 0 additions & 32 deletions
This file was deleted.

src/hackingBuddyGPT/usecases/rag/__init__.py

Lines changed: 0 additions & 1 deletion
This file was deleted.

src/hackingBuddyGPT/usecases/rag/common.py

Lines changed: 0 additions & 234 deletions
This file was deleted.
