| path (string, 15–77 chars) | type (string, 1 class) | project (string, 1 class) | commit_hash (string, 40 chars) | commit_message (string, 15–198 chars) | ground_truth (string, 26–155 chars) | main_code (string, 176–2.5k chars) | context (string, 91–9.37k chars) |
|---|---|---|---|---|---|---|---|
| 
	tests.coeditor.test_code_change/test_change_scope | 
	Modified | 
	temp-1 | 
	88a80dd462a4273154c266f5ef7d055f0b4fbfaf | 
	Fix new scope headers. Fix unit tests. | 
	 <0>:<add>     assert_str_equal(inner_class_code, indent(inner_attr1_expect, " " * 4))
 | 
	      # module: tests.coeditor.test_code_change
      def test_change_scope():
      <s>code, indent(f1_expect, " " * 4))
    -     assert_str_equal(f1_code, f1_expect)
      
          f2_expect = dedent(
              """\
              @annotated
              def f2():
                  return 1
              """
          )
    +     f2_code = scope.subscopes[ProjectPath("code1", "f2")].all_code
    -     f2_code = scope.subscopes[ProjectPath("code1", "f2")].spans_code
          assert_str_equal(f2_code, f2_expect)
      
          attr1_expect = dedent(
              """\
              attr1: int
              """
          )
          attr1_code = scope.subscopes[ProjectPath("code1", "A")].spans_code
          assert_str_equal(attr1_code, indent(attr1_expect, " " * 4))
      
          method1_expect = dedent(
              """\
              @staticmethod
              def method1():
                  return 1
              """
          )
          method1_code = (
              scope.subscopes[ProjectPath("code1", "A")]
              .subscopes[ProjectPath("code1", "A.method1")]
    +         .all_code
    -         .spans_code
          )
          assert_str_equal(method1_code, indent(method1_expect, " " * 4))
      
          inner_attr1_expect = dedent(
              """\
    +         class B:
    +             inner_attr1: int
    -         inner_attr1: int
              """
          )
    +     inner_class_code = (
    -     inner_attr1_code = (
              scope.subscopes[ProjectPath("code1", "A")]
              .subscopes[ProjectPath("code1", "A.B")]
    +         .all_code
    -         .spans_code
          )
    -     assert_str_equal(inner_attr1_code, indent(inner_attr1_expect, " " * 8))
 <0>  
       | 
	===========above chunk 0===========
    # module: tests.coeditor.test_code_change
    def test_change_scope():
    # offset: -1
        code1 = dedent(
            """\
            import os
            
            x = 1
            y = x + 1
            
            def f1():
                global x
                x *= 5
                return x
                
            if __name__ == "__main__":
                print(f1() + x)
                    
            @annotated
            def f2():
                return 1
                
            @dataclass
            class A:
                attr1: int
                
                @staticmethod
                def method1():
                    return 1
                    
                class B:
                    inner_attr1: int
            """
        )
        mod_tree = code_to_module(code1)
        scope = ChangeScope.from_tree(ProjectPath("code1", ""), mod_tree)
        global_spans = [
            dedent(
                """\
                x = 1
                y = x + 1
                """
            ),
            dedent(
                """\
                if __name__ == "__main__":
                    print(f1() + x)
                """
            ),
        ]
        for i, code in enumerate(global_spans):
            assert_str_equal(scope.spans[i].code, code)
    
        f1_expect = dedent(
            """\
  -         def f1():
  +         global x
  -             global x
  +         x *= 5
  -             x *= 5
  +         return x
  -             return x
            """
        )
        f1_code = scope.subscopes[ProjectPath("code1", "f1")].spans_code
  +     assert_str_equal(f1_code, indent(f1_expect, " " * 4))
  -     assert_str_equal(f1_code, f1</s>
===========unchanged ref 0===========
    at: textwrap
        indent(text: str, prefix: str, predicate: Optional[Callable[[str], bool]]=...) -> str
    
     | 
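
The test in the record above builds indentation-sensitive expectations with `textwrap.dedent` and re-indents them to the nesting level of the scope being checked. A minimal, self-contained sketch of that pattern (standard library only):

```python
# `dedent` strips the common leading whitespace from a triple-quoted literal;
# `indent` re-adds the indentation at which the snippet appears in its class.
from textwrap import dedent, indent

inner_attr1_expect = dedent(
    """\
    class B:
        inner_attr1: int
    """
)
# Members of class A sit one level (4 spaces) deep in the source, so the
# expectation is indented before comparison against the extracted code.
print(indent(inner_attr1_expect, " " * 4))
```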
| 
	tests.coeditor.test_code_change/TestChangedSpan.test_comments_change | 
	Modified | 
	temp-1 | 
	88a80dd462a4273154c266f5ef7d055f0b4fbfaf | 
	Fix new scope headers. Fix unit tests. | 
	 <0>:<add>             (Modified, 1),
 | 
	      # module: tests.coeditor.test_code_change
      class TestChangedSpan:
          def test_comments_change(self):
              # have to update code as well for the changes to count
              code2 = dedent(
                  """\
                  import os
                  
                  x = 1
                  # belongs to f1
                  
                  def f1():
                      "added doc string"
                      global x
                      x *= 5
                      return x + 1
                  
                  # belongs to main    
                  if __name__ == "__main__":
                      print(f1() + x + 1)  # belongs to print
                  """
              )
              scope2 = ChangeScope.from_tree(ProjectPath("code1", ""), code_to_module(code2))
              self.check_changed_spans(
                  get_changed_spans(Modified(self.scope1, scope2)),
                  (Modified, -1),
    -             (Modified, 3),
 <0>              (Modified, 1),
              )
      
       | 
	===========unchanged ref 0===========
    at: tests.coeditor.test_code_change.TestChangedSpan
        code1 = dedent(
                """\
                import os
            
                x = 1
                y = x + 1
            
                def f1():
                    global x
                    x *= 5
                    return x
                
                if __name__ == "__main__":
                    print(f1() + x)
                """
            )
    
        scope1 = ChangeScope.from_tree(ProjectPath("code1", ""), code_to_module(code1))
    
        check_changed_spans(changed_spans: Sequence[ChangedSpan], *expects: tuple[type, int])
    
    
===========changed ref 0===========
    # module: tests.coeditor.test_code_change
    def test_change_scope():
        code1 = dedent(
            """\
            import os
            
            x = 1
            y = x + 1
            
            def f1():
                global x
                x *= 5
                return x
                
            if __name__ == "__main__":
                print(f1() + x)
                    
            @annotated
            def f2():
                return 1
                
            @dataclass
            class A:
                attr1: int
                
                @staticmethod
                def method1():
                    return 1
                    
                class B:
                    inner_attr1: int
            """
        )
        mod_tree = code_to_module(code1)
        scope = ChangeScope.from_tree(ProjectPath("code1", ""), mod_tree)
        global_spans = [
            dedent(
                """\
                x = 1
                y = x + 1
                """
            ),
            dedent(
                """\
                if __name__ == "__main__":
                    print(f1() + x)
                """
            ),
        ]
        for i, code in enumerate(global_spans):
            assert_str_equal(scope.spans[i].code, code)
    
        f1_expect = dedent(
            """\
  -         def f1():
  +         global x
  -             global x
  +         x *= 5
  -             x *= 5
  +         return x
  -             return x
            """
        )
        f1_code = scope.subscopes[ProjectPath("code1", "f1")].spans_code
  +     assert_str_equal(f1_code, indent(f1_expect, " " * 4))
  -     assert_str_equal(f1_code, f1_expect)
    
        f2_expect = dedent(
            """\
            @annotated
            def f2():
                return 1
            """
        )
  +     f2_code = scope.subscopes[ProjectPath("code1", "f2")].all_code
  -     f2_code = scope.</s>
===========changed ref 1===========
    # module: tests.coeditor.test_code_change
    def test_change_scope():
    # offset: 1
    <s> = scope.subscopes[ProjectPath("code1", "f2")].all_code
  -     f2_code = scope.subscopes[ProjectPath("code1", "f2")].spans_code
        assert_str_equal(f2_code, f2_expect)
    
        attr1_expect = dedent(
            """\
            attr1: int
            """
        )
        attr1_code = scope.subscopes[ProjectPath("code1", "A")].spans_code
        assert_str_equal(attr1_code, indent(attr1_expect, " " * 4))
    
        method1_expect = dedent(
            """\
            @staticmethod
            def method1():
                return 1
            """
        )
        method1_code = (
            scope.subscopes[ProjectPath("code1", "A")]
            .subscopes[ProjectPath("code1", "A.method1")]
  +         .all_code
  -         .spans_code
        )
        assert_str_equal(method1_code, indent(method1_expect, " " * 4))
    
        inner_attr1_expect = dedent(
            """\
  +         class B:
  +             inner_attr1: int
  -         inner_attr1: int
            """
        )
  +     inner_class_code = (
  -     inner_attr1_code = (
            scope.subscopes[ProjectPath("code1", "A")]
            .subscopes[ProjectPath("code1", "A.B")]
  +         .all_code
  -         .spans_code
        )
  +     assert_str_equal(inner_class_code, indent(inner_attr1_expect, " " * 4))
  -     assert_str_equal(inner_attr1_code, indent(inner_attr1_expect, " " * 8))
     | 
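
The `(Modified, n)` expectations in the record above pair a change category with a net line delta. As a hedged illustration (the helper `line_delta` below is made up for this sketch, not repository code), such a signed delta can be computed with the standard `difflib` module:

```python
import difflib

def line_delta(before: str, after: str) -> int:
    """Net number of lines added (positive) or removed (negative)."""
    delta = 0
    for line in difflib.ndiff(before.splitlines(), after.splitlines()):
        if line.startswith("+ "):
            delta += 1
        elif line.startswith("- "):
            delta -= 1
    return delta

# One added line yields +1, matching the (Modified, 1) expectation style.
assert line_delta("x = 1\n", "x = 1\ny = 2\n") == 1
```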
| 
	coeditor.code_change/ChangeScope.header_code | 
	Modified | 
	temp-1 | 
	88a80dd462a4273154c266f5ef7d055f0b4fbfaf | 
	Fix new scope headers. Fix unit tests. | 
	 <0>:<add>         return "".join(snippets)
 | 
	      # module: coeditor.code_change
      @dataclass
      class ChangeScope:
          @cached_property
          def header_code(self) -> str:
              if isinstance(self.tree, ptree.Module):
                  return f"# module: {self.path.module}"
              # get the first non-empty line of self.tree
    +         tree = self.tree
    +         to_visit = []
    +         parent = not_none(tree.parent)
    +         while parent.type in ("decorated", "async_funcdef"):
    +             to_visit.insert(0, parent.children[0])
    +             parent = not_none(parent.parent)
    +         to_visit.extend(tree.children)
    + 
    +         snippets = list[str]()
    -         first_line = "#" if isinstance(self.tree, ptree.Function) else ""
    +         for i, c in enumerate(to_visit):
    -         for i, c in enumerate(self.tree.children):
    +             if c.type == "suite":
    +                 break
                  snippet = cast(str, c.get_code())
                  if i == 0:
                      # remove leading newlines
                      snippet = snippet.lstrip("\n")
                  assert isinstance(snippet, str)
    -             if count_lines(snippet) == 1:
    -                 first_line += snippet
    -             else:
    -                 first_line += snippet.splitlines()[0]
    -                 break
    -         return first_line
    +             snippets.append(snippet)
 <0>  
       | 
	===========unchanged ref 0===========
    at: coeditor._utils
        not_none(x: Optional[T1]) -> T1
    
    at: coeditor.code_change.ChangeScope
        path: ProjectPath
    
        tree: ScopeTree
    
        spans: Sequence["StatementSpan"]
    
        subscopes: Mapping[ProjectPath, Self]
    
    at: functools
        cached_property(func: Callable[[Any], _T])
    
    at: parso.python.tree
        Module(children)
    
    at: parso.tree.BaseNode.__init__
        self.children = children
    
        self.parent: Optional[BaseNode] = None
    
    at: parso.tree.NodeOrLeaf
        __slots__ = ('parent',)
    
        type: str
    
        parent: 'Optional[BaseNode]'
    
    at: spot.static_analysis.ProjectPath
        module: ModuleName
    
        path: ElemPath
    
    
===========changed ref 0===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
  +     @cached_property
  +     def all_code(self) -> str:
  +         return f"{self.header_code}\n{self.spans_code}"
  + 
===========changed ref 1===========
    # module: coeditor.code_change
  + ScopeTree = ptree.Function | ptree.Class | ptree.Module
  - ScopeTree = ptree.Module | ptree.Function | ptree.Class
    PyNode = ptree.PythonBaseNode | ptree.PythonNode
    
    _tlogger = TimeLogger()
    
===========changed ref 2===========
    # module: tests.coeditor.test_code_change
    class TestChangedSpan:
        def test_comments_change(self):
            # have to update code as well for the changes to count
            code2 = dedent(
                """\
                import os
                
                x = 1
                # belongs to f1
                
                def f1():
                    "added doc string"
                    global x
                    x *= 5
                    return x + 1
                
                # belongs to main    
                if __name__ == "__main__":
                    print(f1() + x + 1)  # belongs to print
                """
            )
            scope2 = ChangeScope.from_tree(ProjectPath("code1", ""), code_to_module(code2))
            self.check_changed_spans(
                get_changed_spans(Modified(self.scope1, scope2)),
                (Modified, -1),
  +             (Modified, 1),
  -             (Modified, 3),
                (Modified, 1),
            )
    
===========changed ref 3===========
    # module: tests.coeditor.test_code_change
    def test_change_scope():
        code1 = dedent(
            """\
            import os
            
            x = 1
            y = x + 1
            
            def f1():
                global x
                x *= 5
                return x
                
            if __name__ == "__main__":
                print(f1() + x)
                    
            @annotated
            def f2():
                return 1
                
            @dataclass
            class A:
                attr1: int
                
                @staticmethod
                def method1():
                    return 1
                    
                class B:
                    inner_attr1: int
            """
        )
        mod_tree = code_to_module(code1)
        scope = ChangeScope.from_tree(ProjectPath("code1", ""), mod_tree)
        global_spans = [
            dedent(
                """\
                x = 1
                y = x + 1
                """
            ),
            dedent(
                """\
                if __name__ == "__main__":
                    print(f1() + x)
                """
            ),
        ]
        for i, code in enumerate(global_spans):
            assert_str_equal(scope.spans[i].code, code)
    
        f1_expect = dedent(
            """\
  -         def f1():
  +         global x
  -             global x
  +         x *= 5
  -             x *= 5
  +         return x
  -             return x
            """
        )
        f1_code = scope.subscopes[ProjectPath("code1", "f1")].spans_code
  +     assert_str_equal(f1_code, indent(f1_expect, " " * 4))
  -     assert_str_equal(f1_code, f1_expect)
    
        f2_expect = dedent(
            """\
            @annotated
            def f2():
                return 1
            """
        )
  +     f2_code = scope.subscopes[ProjectPath("code1", "f2")].all_code
  -     f2_code = scope.</s>
===========changed ref 4===========
    # module: tests.coeditor.test_code_change
    def test_change_scope():
    # offset: 1
    <s> = scope.subscopes[ProjectPath("code1", "f2")].all_code
  -     f2_code = scope.subscopes[ProjectPath("code1", "f2")].spans_code
        assert_str_equal(f2_code, f2_expect)
    
        attr1_expect = dedent(
            """\
            attr1: int
            """
        )
        attr1_code = scope.subscopes[ProjectPath("code1", "A")].spans_code
        assert_str_equal(attr1_code, indent(attr1_expect, " " * 4))
    
        method1_expect = dedent(
            """\
            @staticmethod
            def method1():
                return 1
            """
        )
        method1_code = (
            scope.subscopes[ProjectPath("code1", "A")]
            .subscopes[ProjectPath("code1", "A.method1")]
  +         .all_code
  -         .spans_code
        )
        assert_str_equal(method1_code, indent(method1_expect, " " * 4))
    
        inner_attr1_expect = dedent(
            """\
  +         class B:
  +             inner_attr1: int
  -         inner_attr1: int
            """
        )
  +     inner_class_code = (
  -     inner_attr1_code = (
            scope.subscopes[ProjectPath("code1", "A")]
            .subscopes[ProjectPath("code1", "A.B")]
  +         .all_code
  -         .spans_code
        )
  +     assert_str_equal(inner_class_code, indent(inner_attr1_expect, " " * 4))
  -     assert_str_equal(inner_attr1_code, indent(inner_attr1_expect, " " * 8))
     | 
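
The `header_code` rewrite above walks from the function node up through `decorated` (and `async_funcdef`) parents so that decorators end up in the scope header. A short sketch (assuming the `parso` package, which the code above already uses) of the tree shape being exploited:

```python
import parso

module = parso.parse(
    "@staticmethod\n"
    "def method1():\n"
    "    return 1\n"
)
func = next(module.iter_funcdefs())
# A decorated def is wrapped in a `decorated` node whose first child is the
# decorator itself, which is why header_code prepends parent.children[0].
print(func.parent.type)                    # decorated
print(func.parent.children[0].get_code())  # @staticmethod
```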
| 
	coeditor.code_change/ChangeScope.from_tree | 
	Modified | 
	temp-1 | 
	88a80dd462a4273154c266f5ef7d055f0b4fbfaf | 
	Fix new scope headers. Fix unit tests. | 
	 <0>:<add>             subscopes[spath] = subscope
 | 
	      # module: coeditor.code_change
      @dataclass
      class ChangeScope:
          @staticmethod
          def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
      <s>as_any(s)):
    -                     current_stmts.append(s)
    -                 else:
    -                     if current_stmts:
    -                         spans.append(StatementSpan(current_stmts))
    -                         current_stmts = []
    -             if current_stmts:
    -                 spans.append(StatementSpan(current_stmts))
      
    +         current_stmts = []
    +         content = (
    +             tree.children
    +             if isinstance(tree, ptree.Module)
    +             else cast(ptree.PythonNode, tree.get_suite()).children
    +         )
    +         for s in content:
    +             # we don't create inner scopes for function contents
    +             if is_func or _is_scope_statement(as_any(s)):
    +                 current_stmts.append(s)
    +             else:
    +                 if current_stmts:
    +                     spans.append(StatementSpan(current_stmts))
    +                     current_stmts = []
    +         if current_stmts:
    +             spans.append(StatementSpan(current_stmts))
    + 
    +         if is_func:
    +             # we don't create inner scopes for function contents
    +             return scope
    +         for stree in tree._search_in_scope(ptree.Function.type, ptree.Class.type):
    -             for stree in tree._search_in_scope(ptree.Function.type, ptree.Class.type):
    +             stree: ptree.Function | ptree.Class
    -                 stree: ptree.Function | ptree.Class
    +             spath = path.append(cast(ptree.Name, stree.name).value)
    -                 spath = path.append(cast(ptree.Name, stree.name).value)
    +             subscope = ChangeScope.from_tree(spath, stree)
    -                 subscope = ChangeScope.from_tree(spath, stree)
    -                 subscopes[spath] = subscope
 <0>          return scope
      
       | 
	===========above chunk 0===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @staticmethod
        def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
    # offset: -1
            spans = []
            subscopes = dict()
            scope = ChangeScope(path, tree, spans, subscopes)
  +         assert isinstance(tree, ScopeTree)
  +         is_func = isinstance(tree, ptree.Function)
  -         if isinstance(tree, ptree.Function):
  -             span = StatementSpan([_to_decorated(tree)])
  -             spans.append(span)
  -         else:
  -             assert isinstance(tree, (ptree.Module, ptree.Class))
  -             current_stmts = []
  -             content = (
  -                 tree.children
  -                 if isinstance(tree, ptree.Module)
  -                 else cast(ptree.PythonNode, tree.get_suite()).children
  -             )
  -             for s in content:
  -                 if _is_scope_statement(as_any(s)):
  -                     current_stmts.append(s)
  -                 else:
  -                     if current_stmts</s>
===========unchanged ref 0===========
    at: coeditor._utils
        as_any(x) -> Any
    
    at: coeditor.code_change
        ScopeTree = ptree.Function | ptree.Class | ptree.Module
    
        ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[ProjectPath, Self])
    
        _is_scope_statement(stmt: PyNode) -> bool
    
        StatementSpan(statements: Sequence[PyNode])
    
    at: coeditor.code_change.ChangeScope.header_code
        snippets = list[str]()
    
    at: parso.python.tree
        PythonNode()
    
        Module(children)
    
        Function(children)
    
    at: parso.python.tree.Scope
        __slots__ = ()
    
        get_suite()
    
    at: parso.tree.BaseNode.__init__
        self.children = children
    
    at: parso.tree.NodeOrLeaf
        type: str
    
        get_code(include_prefix=True)
    
    at: spot.static_analysis
        ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
    
    at: typing
        cast(typ: Type[_T], val: Any) -> _T
        cast(typ: str, val: Any) -> Any
        cast(typ: object, val: Any) -> Any
    
    
===========changed ref 0===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
  +     @cached_property
  +     def all_code(self) -> str:
  +         return f"{self.header_code}\n{self.spans_code}"
  + 
===========changed ref 1===========
    # module: coeditor.code_change
  + ScopeTree = ptree.Function | ptree.Class | ptree.Module
  - ScopeTree = ptree.Module | ptree.Function | ptree.Class
    PyNode = ptree.PythonBaseNode | ptree.PythonNode
    
    _tlogger = TimeLogger()
    
===========changed ref 2===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @cached_property
        def header_code(self) -> str:
            if isinstance(self.tree, ptree.Module):
                return f"# module: {self.path.module}"
            # get the first non-empty line of self.tree
  +         tree = self.tree
  +         to_visit = []
  +         parent = not_none(tree.parent)
  +         while parent.type in ("decorated", "async_funcdef"):
  +             to_visit.insert(0, parent.children[0])
  +             parent = not_none(parent.parent)
  +         to_visit.extend(tree.children)
  + 
  +         snippets = list[str]()
  -         first_line = "#" if isinstance(self.tree, ptree.Function) else ""
  +         for i, c in enumerate(to_visit):
  -         for i, c in enumerate(self.tree.children):
  +             if c.type == "suite":
  +                 break
                snippet = cast(str, c.get_code())
                if i == 0:
                    # remove leading newlines
                    snippet = snippet.lstrip("\n")
                assert isinstance(snippet, str)
  -             if count_lines(snippet) == 1:
  -                 first_line += snippet
  -             else:
  -                 first_line += snippet.splitlines()[0]
  -                 break
  -         return first_line
  +             snippets.append(snippet)
  +         return "".join(snippets)
    
===========changed ref 3===========
    # module: tests.coeditor.test_code_change
    class TestChangedSpan:
        def test_comments_change(self):
            # have to update code as well for the changes to count
            code2 = dedent(
                """\
                import os
                
                x = 1
                # belongs to f1
                
                def f1():
                    "added doc string"
                    global x
                    x *= 5
                    return x + 1
                
                # belongs to main    
                if __name__ == "__main__":
                    print(f1() + x + 1)  # belongs to print
                """
            )
            scope2 = ChangeScope.from_tree(ProjectPath("code1", ""), code_to_module(code2))
            self.check_changed_spans(
                get_changed_spans(Modified(self.scope1, scope2)),
                (Modified, -1),
  +             (Modified, 1),
  -             (Modified, 3),
                (Modified, 1),
            )
    
===========changed ref 4===========
    # module: tests.coeditor.test_code_change
    def test_change_scope():
        code1 = dedent(
            """\
            import os
            
            x = 1
            y = x + 1
            
            def f1():
                global x
                x *= 5
                return x
                
            if __name__ == "__main__":
                print(f1() + x)
                    
            @annotated
            def f2():
                return 1
                
            @dataclass
            class A:
                attr1: int
                
                @staticmethod
                def method1():
                    return 1
                    
                class B:
                    inner_attr1: int
            """
        )
        mod_tree = code_to_module(code1)
        scope = ChangeScope.from_tree(ProjectPath("code1", ""), mod_tree)
        global_spans = [
            dedent(
                """\
                x = 1
                y = x + 1
                """
            ),
            dedent(
                """\
                if __name__ == "__main__":
                    print(f1() + x)
                """
            ),
        ]
        for i, code in enumerate(global_spans):
            assert_str_equal(scope.spans[i].code, code)
    
        f1_expect = dedent(
            """\
  -         def f1():
  +         global x
  -             global x
  +         x *= 5
  -             x *= 5
  +         return x
  -             return x
            """
        )
        f1_code = scope.subscopes[ProjectPath("code1", "f1")].spans_code
  +     assert_str_equal(f1_code, indent(f1_expect, " " * 4))
  -     assert_str_equal(f1_code, f1_expect)
    
        f2_expect = dedent(
            """\
            @annotated
            def f2():
                return 1
            """
        )
  +     f2_code = scope.subscopes[ProjectPath("code1", "f2")].all_code
  -     f2_code = scope.</s> | 
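
The `from_tree` rewrite above buffers consecutive statements into a span and flushes the buffer whenever a nested scope interrupts the run. Here is the grouping logic in isolation (a generic sketch; `group_spans` is an illustrative name, not repository code):

```python
from typing import Callable, Iterable, TypeVar

T = TypeVar("T")

def group_spans(items: Iterable[T], keep: Callable[[T], bool]) -> list[list[T]]:
    """Group consecutive kept items; non-kept items act as span separators."""
    spans: list[list[T]] = []
    current: list[T] = []
    for item in items:
        if keep(item):
            current.append(item)
        elif current:
            spans.append(current)
            current = []
    if current:  # flush the trailing buffer, as from_tree does
        spans.append(current)
    return spans

# Statement 3 opens a nested scope, so it splits the surrounding statements.
assert group_spans([1, 2, 3, 4, 5], keep=lambda x: x != 3) == [[1, 2], [4, 5]]
```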
| 
	coeditor.ctx_change_encoder/TkCtxCodeChangeEncoder._encode_parent_scopes | 
	Modified | 
	temp-1 | 
	88a80dd462a4273154c266f5ef7d055f0b4fbfaf | 
	Fix new scope headers. Fix unit tests. | 
	 <0>:<add>         return scope_tks
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class TkCtxCodeChangeEncoder:
          def _encode_parent_scopes(
              self, scope_changes: Sequence[Change[ChangeScope]], offset: int
          ) -> TokenSeq:
              scope_tks = join_list(
                  (self._encode_scope_change(c) for c in scope_changes), sep=Newline_id
              )
              if offset != 0:
                  ending = encode_basic(f"\n# offset: {offset}\n")
              else:
                  ending = [Newline_id]
    +         scope_tks = truncate_section(
    +             scope_tks + ending, TruncateAt.Left, self.max_scope_tks
    +         )
    -         return scope_tks + ending
 <0>  
       | 
	===========unchanged ref 0===========
    at: coeditor.common
        TokenSeq = list[Token]
    
        join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder
        VERSION = "0.0"
    
        max_ref_tks: int = 512
    
        max_query_tks: int = 512
    
        max_output_tks: int = 256
    
        max_scope_tks: int = 128
    
        max_lines_to_edit: int = 20
    
        ref_chunk_overlap: int = 32
    
        max_chunks_per_ref: int = 4
    
        max_lines_per_function: int = 500
    
        skip_unchanged_problems: bool = True
    
        _encode_scope_change(c: Change[ChangeScope]) -> TokenSeq
    
    at: coeditor.encoding
        Newline_id = get_tk_id("\n")
    
        truncate_section(sec: TokenSeq, direction: TruncateAt.Value, limit: int, add_bos: bool=True, inplace: bool=False) -> TokenSeq
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeEncoder:
        VERSION = "0.0"
        max_ref_tks: int = 512
        max_query_tks: int = 512
        max_output_tks: int = 256
  +     max_scope_tks: int = 128
  -     max_scope_tks: int = 50
        max_lines_to_edit: int = 20
        ref_chunk_overlap: int = 32
        max_chunks_per_ref: int = 4
        max_lines_per_function: int = 500
        skip_unchanged_problems: bool = True
    
===========changed ref 1===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
  +     @cached_property
  +     def all_code(self) -> str:
  +         return f"{self.header_code}\n{self.spans_code}"
  + 
===========changed ref 2===========
    # module: coeditor.code_change
  + ScopeTree = ptree.Function | ptree.Class | ptree.Module
  - ScopeTree = ptree.Module | ptree.Function | ptree.Class
    PyNode = ptree.PythonBaseNode | ptree.PythonNode
    
    _tlogger = TimeLogger()
    
===========changed ref 3===========
    # module: tests.coeditor.test_code_change
    class TestChangedSpan:
        def test_comments_change(self):
            # have to update code as well for the changes to count
            code2 = dedent(
                """\
                import os
                
                x = 1
                # belongs to f1
                
                def f1():
                    "added doc string"
                    global x
                    x *= 5
                    return x + 1
                
                # belongs to main    
                if __name__ == "__main__":
                    print(f1() + x + 1)  # belongs to print
                """
            )
            scope2 = ChangeScope.from_tree(ProjectPath("code1", ""), code_to_module(code2))
            self.check_changed_spans(
                get_changed_spans(Modified(self.scope1, scope2)),
                (Modified, -1),
  +             (Modified, 1),
  -             (Modified, 3),
                (Modified, 1),
            )
    
===========changed ref 4===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @cached_property
        def header_code(self) -> str:
            if isinstance(self.tree, ptree.Module):
                return f"# module: {self.path.module}"
            # get the first non-empty line of self.tree
  +         tree = self.tree
  +         to_visit = []
  +         parent = not_none(tree.parent)
  +         while parent.type in ("decorated", "async_funcdef"):
  +             to_visit.insert(0, parent.children[0])
  +             parent = not_none(parent.parent)
  +         to_visit.extend(tree.children)
  + 
  +         snippets = list[str]()
  -         first_line = "#" if isinstance(self.tree, ptree.Function) else ""
  +         for i, c in enumerate(to_visit):
  -         for i, c in enumerate(self.tree.children):
  +             if c.type == "suite":
  +                 break
                snippet = cast(str, c.get_code())
                if i == 0:
                    # remove leading newlines
                    snippet = snippet.lstrip("\n")
                assert isinstance(snippet, str)
  -             if count_lines(snippet) == 1:
  -                 first_line += snippet
  -             else:
  -                 first_line += snippet.splitlines()[0]
  -                 break
  -         return first_line
  +             snippets.append(snippet)
  +         return "".join(snippets)
    
===========changed ref 5===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @staticmethod
        def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
            spans = []
            subscopes = dict()
            scope = ChangeScope(path, tree, spans, subscopes)
  +         assert isinstance(tree, ScopeTree)
  +         is_func = isinstance(tree, ptree.Function)
  -         if isinstance(tree, ptree.Function):
  -             span = StatementSpan([_to_decorated(tree)])
  -             spans.append(span)
  -         else:
  -             assert isinstance(tree, (ptree.Module, ptree.Class))
  -             current_stmts = []
  -             content = (
  -                 tree.children
  -                 if isinstance(tree, ptree.Module)
  -                 else cast(ptree.PythonNode, tree.get_suite()).children
  -             )
  -             for s in content:
  -                 if _is_scope_statement(as_any(s)):
  -                     current_stmts.append(s)
  -                 else:
  -                     if current_stmts:
  -                         spans.append(StatementSpan(current_stmts))
  -                         current_stmts = []
  -             if current_stmts:
  -                 spans.append(StatementSpan(current_stmts))
    
  +         current_stmts = []
  +         content = (
  +             tree.children
  +             if isinstance(tree, ptree.Module)
  +             else cast(ptree.PythonNode, tree.get_suite()).children
  +         )
  +         for s in content:
  +             # we don't create inner scopes for function contents
  +             if is_func or _is_scope_statement(as_any(s)):
  +                 current_stmts.append(s)
  +             else:
  +                 if current_stmts:
  +                     spans.append(StatementSpan(current_stmts))
  +                     current_stmts = []
  +         if current_stmts:
  +             spans.append(StatementSpan(current_stmts))
  + 
  +         if is_func:
  +             # we don't create inner scopes for function contents
  +             return scope</s>
===========changed ref 6===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @staticmethod
        def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
    # offset: 1
    <s>stmts))
  + 
  +         if is_func:
  +             # we don't create inner scopes for function contents
  +             return scope
  +         for stree in tree._search_in_scope(ptree.Function.type, ptree.Class.type):
  -             for stree in tree._search_in_scope(ptree.Function.type, ptree.Class.type):
  +             stree: ptree.Function | ptree.Class
  -                 stree: ptree.Function | ptree.Class
  +             spath = path.append(cast(ptree.Name, stree.name).value)
  -                 spath = path.append(cast(ptree.Name, stree.name).value)
  +             subscope = ChangeScope.from_tree(spath, stree)
  -                 subscope = ChangeScope.from_tree(spath, stree)
  +             subscopes[spath] = subscope
  -                 subscopes[spath] = subscope
            return scope
     | 
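
The `_encode_parent_scopes` change above caps the scope header at `max_scope_tks` by truncating from the left, keeping the innermost (rightmost) scopes that sit closest to the edited code. A hedged sketch of left truncation (`BOS_id` is a placeholder for whatever sentinel the real `truncate_section` inserts):

```python
BOS_id = 0  # hypothetical sentinel marking a truncated prefix

def truncate_left(tks: list[int], limit: int) -> list[int]:
    if len(tks) <= limit:
        return tks
    # Drop tokens from the left, reserving one slot for the sentinel.
    return [BOS_id] + tks[len(tks) - limit + 1 :]

assert truncate_left(list(range(10)), limit=4) == [BOS_id, 7, 8, 9]
```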
| 
	coeditor.ctx_change_encoder/CtxCodeChangeProblemGenerator.encode_change | 
	Modified | 
	temp-1 | 
	02aa6e2654a126be45b9b14e746d5bf2ad329e65 | 
	Fix static analysis. | 
	 <0>:<add>                 result.append((ProjectPath(used.full_name, ""), str(used)))
 | 
	      # module: coeditor.ctx_change_encoder
      class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
          def encode_change(
              self,
              pchange: JProjectChange,
              mod2usages: Mapping[ModuleName, LineUsageAnalysis],
              module_order: Sequence[ModuleName],
          ) -> Iterable[CtxCodeChangeProblem]:
      <s> _get_relevant(span: ChangedSpan):
                  if isinstance(span.change, Added):
                      # nothing to analyze
                      return []
                  path = span.parent_scopes[-1].earlier().path
                  line_usages = mod2usages[path.module]
    +             all_used = set[PyDefinition]()
    -             all_used = set[PyFullName]()
                  l_start, l_end = span.line_range
                  for l in range(l_start, l_end + 1):
    +                 for pydef in line_usages.line2usages.get(l, set()):
    +                     if (
    +                         pydef.module == path.module
    +                         and l_start <= pydef.start_pos[0] <= l_end
    +                     ):
    +                         # skip self references
    +                         print(f"Skip: {pydef}")
    +                         continue
    +                     all_used.add(pydef)
    -                 all_used.update(line_usages.line2usages.get(l, tuple()))
      
                  result = list[tuple[ProjectPath, str]]()
                  for used in all_used:
    -                 result.append((ProjectPath("?", used), used))
 <0>              return result
      
              sorted_cspans = list[ChangedSpan]()
              for m in module_order:
                  if (mchange := pchange.changed.get(m)) is None:
                      continue
                  for span in mchange.changed.values():
                      if span.change.as_char() == Modified.as_char():
                          yield CtxCodeChangeProblem(
                              span,
                              relevant_changes=sorted_cspans.copy(),
                              relevant_unchanged=_get_relevant(span),
                          )
                      sorted_cspans.append(span)
      
       | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def encode_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: -1
            def _get_relevant(span: ChangedSpan):
                if isinstance(span.change, Added):
                    # nothing to analyze
                    return</s>
===========unchanged ref 0===========
    at: coeditor.common
        RelPath = NewType("RelPath", Path)
    
    at: coeditor.ctx_change_encoder
        CtxCodeChangeProblem(span: ChangedSpan, relevant_changes: list[ChangedSpan], relevant_unchanged: list[tuple[ProjectPath, str]])
    
        LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
    
        fast_goto(script: jedi.Script, tree_name: tree.Name, *, follow_imports=False, follow_builtin_imports=False, only_stubs=False, prefer_stubs=False) -> set[classes.Name]
    
    at: coeditor.ctx_change_encoder.CtxCodeChangeProblemGenerator.__init__
        self.analysis = analysis
    
    at: coeditor.ctx_change_encoder.CtxCodeChangeProblemGenerator.pre_edit_analysis
        jmod = modules[mod_path]
    
        script = jedi.Script(path=project.path / mod_path, project=project)
    
    at: coeditor.ctx_change_encoder.JediUsageAnalysis
        follow_imports: bool = True
    
        only_same_project_usages: bool = False
    
        get_module_usages(self, script: jedi.Script, proj_root: Path, silent: bool=False)
        get_module_usages(script: jedi.Script, proj_root: Path, silent: bool=False)
    
    at: jedi.api
        Script(code=None, *, path=None, environment=None, project=None)
    
    at: jedi.api.Script.__init__
        self._module_node, code = self._inference_state.parse_and_get_code(
                    code=code,
                    path=self.path,
                    use_latest_grammar=path and path.suffix == '.pyi',
                    cache=False,  # No disk cache, because the current script often changes.
                    diff_cache=settings.fast_parser,
                    cache_path=settings.cache_directory,
                )
    
    
===========unchanged ref 1===========
    at: jedi.api.project
        Project(path, *, environment_path=None, load_unsafe_extensions=False, sys_path=None, added_sys_path=(), smart_sys_path=True)
    
    at: jedi.api.project.Project
        _environment = None
    
    at: jedi.api.project.Project.__init__
        self._path = path
    
    at: pathlib.Path
        __slots__ = ()
    
        exists() -> bool
    
    at: spot.static_analysis
        ModuleName = str
    
        sort_modules_by_imports(imports: Mapping[ModuleName, set[ModuleName]]) -> list[ModuleName]
    
    at: typing
        Iterable = _alias(collections.abc.Iterable, 1)
    
        Mapping = _alias(collections.abc.Mapping, 2)
    
        Sequence = _alias(collections.abc.Sequence, 1)
    
    at: typing.Mapping
        items() -> AbstractSet[Tuple[_KT, _VT_co]]
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class LineUsageAnalysis:
  +     line2usages: Mapping[int, set[PyDefinition]]
  -     line2usages: dict[int, set[PyFullName]]
    
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
  + @dataclass(unsafe_hash=True)
  + class PyDefinition:
  +     full_name: PyFullName
  +     module: ModuleName
  +     file: Path
  +     start_pos: tuple[int, int]
  +     end_pos: tuple[int, int]
  + 
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
  + @dataclass(unsafe_hash=True)
  + class PyDefinition:
  +     @staticmethod
  +     def from_signatures(
  +         name: classes.BaseName, project: Path | None = None
  +     ) -> Iterable["PyDefinition"]:
  +         if name.in_builtin_module():
  +             return
  +         for sig in name.get_signatures():
  +             if (
  +                 not sig.in_builtin_module()
  +                 and (full_name := sig.full_name)
  +                 and (file := sig.module_path)
  +                 and (project in file.parents)
  +                 and (module := sig.module_name)
  +                 and (start_pos := sig.get_definition_start_position())
  +                 and (end_pos := sig.get_definition_end_position())
  +             ):
  +                 full_name = PyFullName(full_name)
  +                 yield PyDefinition(full_name, module, file, start_pos, end_pos)
  +  | 
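
`PyDefinition.from_signatures` above chains walrus assignments inside a single boolean expression, so a definition is yielded only when every field resolves to a truthy value. The short-circuit pattern in isolation (illustrative names only):

```python
from typing import Optional

def lookup(record: dict[str, Optional[str]]) -> Optional[tuple[str, str]]:
    # Each walrus binds a name; any falsy field aborts the whole chain.
    if (name := record.get("name")) and (path := record.get("path")):
        return name, path
    return None

assert lookup({"name": "f1", "path": "src/code1.py"}) == ("f1", "src/code1.py")
assert lookup({"name": "f1", "path": None}) is None
```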
| 
	coeditor.ctx_change_encoder/JediUsageAnalysis.get_module_usages | 
	Modified | 
	temp-1 | 
	02aa6e2654a126be45b9b14e746d5bf2ad329e65 | 
	Fix static analysis. | 
	 <0>:<add>                     errors[repr(e)] = errors.setdefault(str(e), 0) + 1
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class JediUsageAnalysis:
          def get_module_usages(
              self, script: jedi.Script, proj_root: Path, silent: bool = False
          ):
      <s> all_names.sort(key=lambda x: x.start_pos)
              errors = self.error_counts
    +         resolve_cache = dict[_ObjId, set[PyDefinition]]()
              for name in tqdm(all_names, f"Analyzing {script.path}", disable=silent):
                  name: tree.Name
                  line = name.start_pos[0]
                  usages = line2usages.setdefault(line, set())
                  try:
                      defs = fast_goto(
                          script,
                          name,
                          follow_imports=self.follow_imports,
                          follow_builtin_imports=False,
                      )
                      for d in defs:
    -                     if (
    -                         d.module_path
    -                         and d.full_name
    -                         and (
    -                             not self.only_same_project_usages
    -                             or (proj_root in d.module_path.parents)
    -                         )
    -                     ):
    -                         usages.add(PyFullName(d.full_name))
    +                     key = _ObjId(id(d))
    +                     if (defs := resolve_cache.get(key)) is None:
    +                         defs = set(PyDefinition.from_signatures(d, proj_root))
    +                         resolve_cache[key] = defs
    +                     usages.update(defs)
    + 
                  except (AttributeError, AssertionError) as e:
    +                 text = repr(e)
    -                 text = str(e)
                      errors[text] = errors.setdefault(text, 0) + 1
                  except ValueError as e:
                      # if the message is "not enough values to unpack"
                      if "not enough values to unpack (expected 2" in str(e):
    -                     errors[str(e)] = errors.setdefault(str(e), 0) + 1
 <0>                  else:
                          raise
              return LineUsageAnalysis(line2usages)
      
       | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class JediUsageAnalysis:
        def get_module_usages(
            self, script: jedi.Script, proj_root: Path, silent: bool = False
        ):
    # offset: -1
            jmod: tree.Module = script._module_node
  +         line2usages = dict[int, set[PyDefinition]]()
  -         line2usages = dict[int, set[PyFullName]]()
            all_names = [
                name for k, names in jmod.get_used_names()._dict.items() for name in names
            ]
            all_names.sort(key=lambda x: x.start_pos)
            errors = self.error_counts
  +         resolve</s>
===========unchanged ref 0===========
    at: coeditor._utils
        TimeLogger(times: dict[str, list[float]]=field(default_factory=dict))
    
    at: coeditor.ctx_change_encoder
        PyDefinition(full_name: PyFullName, module: ModuleName, file: Path, start_pos: tuple[int, int], end_pos: tuple[int, int])
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder
        VERSION = "0.0"
    
        max_ref_tks: int = 512
    
        max_query_tks: int = 512
    
        max_output_tks: int = 256
    
        max_scope_tks: int = 128
    
        max_lines_to_edit: int = 20
    
        ref_chunk_overlap: int = 32
    
        max_chunks_per_ref: int = 4
    
        max_lines_per_function: int = 500
    
        skip_unchanged_problems: bool = True
    
        _encode_parent_scopes(scope_changes: Sequence[Change[ChangeScope]], offset: int) -> TokenSeq
    
        _encode_change(change: Change[str]) -> TokenSeq
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder._group_encode_changed_refs
        all_chunks = list[TokenSeq]()
    
        file_tks = TokenSeq()
    
    at: coeditor.encoding
        Newline_id = get_tk_id("\n")
    
        break_into_chunks(tks: TokenSeq, header_f: Callable[[int], TokenSeq], chunk_size: int, overlap: int, right_to_left: bool=False, add_bos: bool=True, max_return_chunks: int | None=None) -> list[TokenSeq]
    
    
===========unchanged ref 1===========
    at: dataclasses
        dataclass(_cls: Type[_T]) -> Type[_T]
        dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
        dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
    
    at: jedi.api
        Script(code=None, *, path=None, environment=None, project=None)
    
    at: jedi.api.Script.__init__
        self._module_node, code = self._inference_state.parse_and_get_code(
                    code=code,
                    path=self.path,
                    use_latest_grammar=path and path.suffix == '.pyi',
                    cache=False,  # No disk cache, because the current script often changes.
                    diff_cache=settings.fast_parser,
                    cache_path=settings.cache_directory,
                )
    
    at: parso.python.tree
        Module(children)
    
    at: pathlib
        Path()
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
  + @dataclass(unsafe_hash=True)
  + class PyDefinition:
  +     full_name: PyFullName
  +     module: ModuleName
  +     file: Path
  +     start_pos: tuple[int, int]
  +     end_pos: tuple[int, int]
  + 
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class JediUsageAnalysis:
        follow_imports: bool = True
  +     only_same_project_usages: bool = False
  -     only_same_project_usages: bool = True
    
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class LineUsageAnalysis:
  +     line2usages: Mapping[int, set[PyDefinition]]
  -     line2usages: dict[int, set[PyFullName]]
    
===========changed ref 3===========
    # module: coeditor.ctx_change_encoder
  + @dataclass(unsafe_hash=True)
  + class PyDefinition:
  +     @staticmethod
  +     def from_signatures(
  +         name: classes.BaseName, project: Path | None = None
  +     ) -> Iterable["PyDefinition"]:
  +         if name.in_builtin_module():
  +             return
  +         for sig in name.get_signatures():
  +             if (
  +                 not sig.in_builtin_module()
  +                 and (full_name := sig.full_name)
  +                 and (file := sig.module_path)
  +                 and (project in file.parents)
  +                 and (module := sig.module_name)
  +                 and (start_pos := sig.get_definition_start_position())
  +                 and (end_pos := sig.get_definition_end_position())
  +             ):
  +                 full_name = PyFullName(full_name)
  +                 yield PyDefinition(full_name, module, file, start_pos, end_pos)
  + 
===========changed ref 4===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def encode_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
            def _get_relevant(span: ChangedSpan):
                if isinstance(span.change, Added):
                    # nothing to analyze
                    return []
                path = span.parent_scopes[-1].earlier().path
                line_usages = mod2usages[path.module]
  +             all_used = set[PyDefinition]()
  -             all_used = set[PyFullName]()
                l_start, l_end = span.line_range
                for l in range(l_start, l_end + 1):
  +                 for pydef in line_usages.line2usages.get(l, set()):
  +                     if (
  +                         pydef.module == path.module
  +                         and l_start <= pydef.start_pos[0] <= l_end
  +                     ):
  +                         # skip self references
  +                         print(f"Skip: {pydef}")
  +                         continue
  +                     all_used.add(pydef)
  -                 all_used.update(line_usages.line2usages.get(l, tuple()))
    
                result = list[tuple[ProjectPath, str]]()
                for used in all_used:
  +                 result.append((ProjectPath(used.full_name, ""), str(used)))
  -                 result.append((ProjectPath("?", used), used))
                return result
    
            sorted_cspans = list[ChangedSpan]()
            for m in module_order:
                if (mchange := pchange.changed.get(m)) is None:
                    continue
                for span in mchange.changed.values():
                    if span.change.as_char() == Modified.as_char():
                        yield CtxCodeChangeProblem(
                            span,
                            relevant_changes=sorted_cspans.copy(),
                            relevant_unchanged=</s>
===========changed ref 5===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def encode_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: 1
    <s> yield CtxCodeChangeProblem(
                            span,
                            relevant_changes=sorted_cspans.copy(),
                            relevant_unchanged=_get_relevant(span),
                        )
                    sorted_cspans.append(span)
     | 
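
The `get_module_usages` change above memoizes definition resolution per jedi `Name` object, keyed by `id()`. A reduced sketch of that cache (the `resolve` helper and its payload are stand-ins, not repository code); note that `id()` keys are only safe while the keyed objects stay alive, which holds here because `all_names` keeps them referenced for the whole loop:

```python
class _ObjId(int):
    """Wrapper type documenting that the key is an object identity."""

resolve_cache: dict[_ObjId, set[str]] = {}

def resolve(d: object) -> set[str]:
    key = _ObjId(id(d))
    if (cached := resolve_cache.get(key)) is None:
        cached = {repr(d)}  # stand-in for set(PyDefinition.from_signatures(d))
        resolve_cache[key] = cached
    return cached

obj = object()
assert resolve(obj) is resolve(obj)  # second call hits the cache
```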
| 
	coeditor.code_change/_edits_from_commit_history | 
	Modified | 
	temp-1 | 
	3fc7535603679d696adc71d9db9ffc11f0ba700e | 
	Analyzing only changed lines to speed up. | 
	 <0>:<add>         checkout_commit(commit_next.hash)
 | 
	      # module: coeditor.code_change
      def _edits_from_commit_history(
          project: Path,
          history: Sequence[CommitInfo],
          change_encoder: ProjectChangeProcessor[TEnc],
          ignore_dirs: set[str],
          silent: bool,
      ) -> Iterable[TEnc]:
      <s>_path = RelPath(path.relative_to(project))
                  match path_change:
                      case Added():
                          mod = parse_module(path)
                          new_path2module[rel_path] = mod
                          changed[mod.mname] = JModuleChange.from_modules(Added(mod))
                      case Deleted():
                          mod = new_path2module.pop(rel_path)
                          changed[mod.mname] = JModuleChange.from_modules(Deleted(mod))
                      case Modified(path1, path2):
                          assert path1 == path2
                          mod_old = new_path2module[rel_path]
                          new_path2module[rel_path] = mod_new = parse_module(path)
                          changed[mod_new.mname] = JModuleChange.from_modules(
                              Modified(mod_old, mod_new)
                          )
      
              with _tlogger.timed("post_edit_analysis"):
                  post_analysis = change_encoder.post_edit_analysis(
                      proj,
                      new_path2module,
    +                 changed,
    -                 path_changes,
                  )
    + 
    +         # now go backwards in time to perform pre-edit analysis
    +         checkout_commit(commit_now.hash)
    +         with _tlogger.timed("pre_edit_analysis"):
    +             pre_analysis = change_encoder.pre_edit_analysis(
    +                 proj,
    +                 path2module,
    +                 changed,
    +             )
 <0>  
              pchange = JProjectChange(changed, commit_next)
      
              with _tlogger.timed("encode_change"):
                  encs = change_encoder.encode_change(pchange, pre_analysis, post_analysis)
                  yield from encs
              commit_now = commit_next
              path2module = new_path2module
      
       | 
	===========above chunk 0===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_encoder: ProjectChangeProcessor[TEnc],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Iterable[TEnc]:
    # offset: -1
    <s>) == 2:
                    tag, path = segs
                    if not is_src(path):
                        continue
                    if tag.endswith("A"):
                        path_changes.append(Added(path))
                    elif tag.endswith("D"):
                        path_changes.append(Deleted(path))
                    if tag.endswith("M"):
                        path_changes.append(Modified(path, path))
                elif len(segs) == 3:
                    tag, path1, path2 = segs
                    assert tag.startswith("R")
                    if not is_src(path1) or not is_src(path2):
                        continue
                    path_changes.append(Deleted(path1))
                    path_changes.append(Added(path2))
    
  -         with _tlogger.timed("pre_edit_analysis"):
  -             pre_analysis = change_encoder.pre_edit_analysis(
  -                 proj,
  -                 path2module,
  -                 path_changes,
  -             )
  +         checkout_commit(commit_next.hash)
    
  -         # check out commit_next
  -         with _tlogger.timed("checkout"):
  -             subprocess.run(
  -                 ["git", "checkout", commit_next.hash],
  -                 cwd=project,
  -                 capture_output=True,
  -                 check=True,
  -             )
            proj = jedi.Project(path=project, added_sys_path=[project / "src"])
    
            new_path2module = path2module.copy()
            changed = dict[ModuleName, JModuleChange]()
            for path_change in path_changes:
                path = project / path_change.earlier()
                rel_path = RelPath(path.relative_to(project))
                match path_change:
                    case Added():
                        mod =</s>
===========above chunk 1===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_encoder: ProjectChangeProcessor[TEnc],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Iterable[TEnc]:
    # offset: -2
    <s>logger.timed("checkout"):
  -         subprocess.run(
  -             ["git", "checkout", "-f", commit_now.hash],
  -             cwd=project,
  -             capture_output=True,
  -             check=True,
  -         )
  +     checkout_commit(commit_now.hash, force=True)
        proj = jedi.Project(path=project, added_sys_path=[project / "src"])
    
    # now we can get the first project state; it's not needed yet,
    # but we'll use it later for pre-edit analysis
        path2module = {
            RelPath(f): parse_module(project / f) for f in get_python_files(project)
        }
    
        def is_src(path_s: str) -> bool:
            path = Path(path_s)
            return path.suffix == ".py" and all(p not in ignore_dirs for p in path.parts)
    
        future_commits = list(reversed(history[:-1]))
        for commit_next in tqdm(
            future_commits, smoothing=0, desc="processing commits", disable=silent
        ):
            # get changed files
            changed_files = run_command(
                ["git", "diff", commit_now.hash, commit_next.hash, "--name-status"],
                cwd=project,
            ).splitlines()
    
            path_changes = list[Change[str]]()
    
            for line in changed_files:
                segs = line.split("\t")
                if len(segs) == 2:
                    tag, path = segs
                    if not is_src(path):
                        continue
                    if tag.ends</s>
===========above chunk 2===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_encoder: ProjectChangeProcessor[TEnc],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Iterable[TEnc]:
    # offset: -3
    <s>)
                mcontext = s._get_module_context()
                assert isinstance(mcontext, ModuleContext)
                mname = cast(str, mcontext.py__name__())
                if mname.startswith("src."):
                    e = ValueError(f"Bad module name: {mname}")
                    files = list(project.iterdir())
                    print_err(f"project: {proj}", file=sys.stderr)
                    print_err(f"files in root: {files}", file=sys.stderr)
                    raise e
            m = copy.deepcopy(s._module_node)  # deep copy needed because jedi reuses module nodes
                assert isinstance(m, ptree.Module)
                # mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
                # m = parso.parse(path.read_text())
                return JModule(mname, m)
    
  +     def checkout_commit(commit_hash: str, force: bool = False):
  +         with _tlogger.timed("checkout"):
  +             subprocess.run(
  +                 ["git", "checkout", "-f", commit_hash],
  +                 cwd=project,
  +                 capture_output=True,
  +                 check=True,
  +             )
  -     # turn this off so we don't have to deep copy the Modules
  -     # jedi.settings.fast_parser = False
  -     # Update: Have to use deep copy for now due to a bug in jedi: https://github.com/davidhalter/jedi/issues/1888
    
        # checkout to the first commit
        commit_now = history[-1]
  -     with</s>
===========above chunk 3===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_encoder: ProjectChangeProcessor[TEnc],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Iterable[TEnc]:
    # offset: -4
        def parse_module(path: Path):
            with _tlogger.timed("parse_module"):
                s = jedi.Script(path=path, project</s> | 
| 
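The record above folds the two inline `git checkout` subprocess calls into a single `checkout_commit` helper so the loop can hop forward to `commit_next` and back to `commit_now` for pre-edit analysis. A minimal standalone sketch of that helper, with the `_tlogger` timing wrapper omitted and the `project` path passed explicitly:

    import subprocess
    from pathlib import Path

    def checkout_commit(project: Path, commit_hash: str) -> None:
        # Force-checkout so stray working-tree edits never block the walk.
        subprocess.run(
            ["git", "checkout", "-f", commit_hash],
            cwd=project,
            capture_output=True,
            check=True,
        )

Note that the recorded version also accepts a `force` flag but passes `-f` unconditionally.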
	coeditor.ctx_change_encoder/PyDefinition.from_signatures | 
	Modified | 
	temp-1 | 
	3fc7535603679d696adc71d9db9ffc11f0ba700e | 
	Analyzing only changed lines to speed up. | 
	 <0>:<add>             yield PyDefinition(full_name, import_module, start_pos, end_pos)
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass(unsafe_hash=True)
      class PyDefinition:
          @staticmethod
    -     def from_signatures(
    -         name: classes.BaseName, project: Path | None = None
    -     ) -> Iterable["PyDefinition"]:
    +     def from_signatures(name: classes.BaseName) -> Iterable["PyDefinition"]:
    +         cast(classes.Name, name).is_definition()
    +         if (
    +             not name.in_builtin_module()
    -         if name.in_builtin_module():
    -             return
    -         for sig in name.get_signatures():
    -             if (
    -                 not sig.in_builtin_module()
    +             and (full_name := name.full_name)
    -                 and (full_name := sig.full_name)
    -                 and (file := sig.module_path)
    -                 and (project in file.parents)
    +             and (import_module := name.module_name)
    -                 and (module := sig.module_name)
    +             and (start_pos := name.get_definition_start_position())
    -                 and (start_pos := sig.get_definition_start_position())
    +             and (end_pos := name.get_definition_end_position())
    -                 and (end_pos := sig.get_definition_end_position())
    +         ):
    -             ):
    +             full_name = PyFullName(full_name)
    -                 full_name = PyFullName(full_name)
    -                 yield PyDefinition(full_name, module, file, start_pos, end_pos)
 <0>  
       | 
	===========unchanged ref 0===========
    at: coeditor.ctx_change_encoder
        PyFullName = NewType("PyPathStr", str)
    
        PyDefinition(full_name: PyFullName, import_module: ModuleName, start_pos: tuple[int, int], end_pos: tuple[int, int])
    
    at: coeditor.ctx_change_encoder.PyDefinition
        full_name: PyFullName
    
        import_module: ModuleName
    
    at: dataclasses
        dataclass(_cls: Type[_T]) -> Type[_T]
        dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
        dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
    
    at: jedi.api.classes
        BaseName(inference_state, name)
    
        Name(inference_state, definition)
    
    at: jedi.api.classes.BaseName
        _mapping = {
                'posixpath': 'os.path',
                'riscospath': 'os.path',
                'ntpath': 'os.path',
                'os2emxpath': 'os.path',
                'macpath': 'os.path',
                'genericpath': 'os.path',
                'posix': 'os',
                '_io': 'io',
                '_functools': 'functools',
                '_collections': 'collections',
                '_socket': 'socket',
                '_sqlite3': 'sqlite3',
            }
    
        _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
                'argparse._ActionsContainer': 'argparse.ArgumentParser',
            }.items())
    
        in_builtin_module()
    
        get_definition_start_position()
    
        get_definition_end_position()
    
    at: jedi.api.classes.Name
        is_definition()
    
    
===========unchanged ref 1===========
    at: typing
        cast(typ: Type[_T], val: Any) -> _T
        cast(typ: str, val: Any) -> Any
        cast(typ: object, val: Any) -> Any
    
        Iterable = _alias(collections.abc.Iterable, 1)
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass(unsafe_hash=True)
    class PyDefinition:
  +     """Note that the module and positions can be referring to either the import
  +     statement or the actual definition."""
  + 
        full_name: PyFullName
  +     import_module: ModuleName
  -     module: ModuleName
  -     file: Path
        start_pos: tuple[int, int]
        end_pos: tuple[int, int]
    
===========changed ref 1===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_encoder: ProjectChangeProcessor[TEnc],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Iterable[TEnc]:
        def parse_module(path: Path):
            with _tlogger.timed("parse_module"):
                s = jedi.Script(path=path, project=proj)
                mcontext = s._get_module_context()
                assert isinstance(mcontext, ModuleContext)
                mname = cast(str, mcontext.py__name__())
                if mname.startswith("src."):
                    e = ValueError(f"Bad module name: {mname}")
                    files = list(project.iterdir())
                    print_err(f"project: {proj}", file=sys.stderr)
                    print_err(f"files in root: {files}", file=sys.stderr)
                    raise e
            m = copy.deepcopy(s._module_node)  # deep copy needed because jedi reuses module nodes
                assert isinstance(m, ptree.Module)
                # mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
                # m = parso.parse(path.read_text())
                return JModule(mname, m)
    
  +     def checkout_commit(commit_hash: str, force: bool = False):
  +         with _tlogger.timed("checkout"):
  +             subprocess.run(
  +                 ["git", "checkout", "-f", commit_hash],
  +                 cwd=project,
  +                 capture_output=True,
  +                 check=True,
  +             )
  -     # turn this off so we don't have to deep copy the Modules
  -     # jedi.settings.fast_parser = False
  -     # Update: Have to use deep copy for now due to a bug in jedi: https://github.com/davidhalter/jedi/issues/1888
    
        # checkout to the first commit
        commit_now = history[-1]
  - </s>
===========changed ref 2===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_encoder: ProjectChangeProcessor[TEnc],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Iterable[TEnc]:
    # offset: 1
    <s>/jedi/issues/1888
    
        # checkout to the first commit
        commit_now = history[-1]
  -     with _tlogger.timed("checkout"):
  -         subprocess.run(
  -             ["git", "checkout", "-f", commit_now.hash],
  -             cwd=project,
  -             capture_output=True,
  -             check=True,
  -         )
  +     checkout_commit(commit_now.hash, force=True)
        proj = jedi.Project(path=project, added_sys_path=[project / "src"])
    
    # now we can get the first project state; it's not needed yet,
    # but we'll use it later for pre-edit analysis
        path2module = {
            RelPath(f): parse_module(project / f) for f in get_python_files(project)
        }
    
        def is_src(path_s: str) -> bool:
            path = Path(path_s)
            return path.suffix == ".py" and all(p not in ignore_dirs for p in path.parts)
    
        future_commits = list(reversed(history[:-1]))
        for commit_next in tqdm(
            future_commits, smoothing=0, desc="processing commits", disable=silent
        ):
            # get changed files
            changed_files = run_command(
                ["git", "diff", commit_now.hash, commit_next.hash, "--name-status"],
                cwd=project,
            ).splitlines()
    
            path_changes = list[Change[str]]()
    
            for line in changed_files:
                segs = line.split("\t")
                if len</s> | 
| 
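The rewritten `from_signatures` above chains walrus assignments inside one `and` expression: the chain short-circuits as soon as any lookup yields a falsy value, so a `PyDefinition` is produced only when every field is present. A reduced sketch of the same pattern, with `name` standing in for jedi's `classes.BaseName`:

    from dataclasses import dataclass

    @dataclass(unsafe_hash=True)
    class PyDefinition:
        full_name: str
        import_module: str
        start_pos: tuple[int, int]
        end_pos: tuple[int, int]

    def from_name(name) -> "PyDefinition | None":
        # Each `:=` binds one field; a None/empty result anywhere aborts the chain.
        if (
            not name.in_builtin_module()
            and (full_name := name.full_name)
            and (module := name.module_name)
            and (start := name.get_definition_start_position())
            and (end := name.get_definition_end_position())
        ):
            return PyDefinition(full_name, module, start, end)
        return None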
	coeditor.ctx_change_encoder/CtxCodeChangeProblemGenerator.pre_edit_analysis | 
	Modified | 
	temp-1 | 
	3fc7535603679d696adc71d9db9ffc11f0ba700e | 
	Analyzing only changed lines to speed up. | 
	 <0>:<add>             result[mname] = line_usages
 | 
	      # module: coeditor.ctx_change_encoder
      class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
          def pre_edit_analysis(
              self,
              project: jedi.Project,
              modules: Mapping[RelPath, JModule],
    +         changes: Mapping[ModuleName, JModuleChange],
    -         file_changes: Sequence[Change[str]],
          ) -> Mapping[ModuleName, LineUsageAnalysis]:
      <s>path)
              result = dict[ModuleName, LineUsageAnalysis]()
    + 
    +         src_map = {m.mname: f for f, m in modules.items()}
    +         for mname, mchange in changes.items():
    -         for change in file_changes:
    +             if not isinstance(mchange.module_change, Modified):
    -             if not isinstance(change, Modified):
                      continue
    + 
    +             lines_to_analyze = set[int]()
    +             for span in mchange.changed.values():
    +                 if isinstance(span.change, Added):
    +                     continue
    +                 start, end = span.line_range
    +                 lines_to_analyze.update(range(start, end + 1))
    + 
    +             mod_path = src_map[mname]
    +             assert (
    +                 src_file := project.path / mod_path
    +             ).exists(), f"src file missing: {src_file}"
    -             mod_path = RelPath(Path(change.before))
    -             jmod = modules[mod_path]
    -             assert (project.path / mod_path).exists()
    +             script = jedi.Script(path=src_file, project=project)
    -             script = jedi.Script(path=project.path / mod_path, project=project)
    +             line_usages = self.analysis.get_line_usages(
    -             line_usages = self.analysis.get_module_usages(
    +                 script, project.path, lines_to_analyze, silent=True
    -                 script, project.path, silent=True
                  )
    -             result[jmod.mname] = line_usages
 <0>          return result
      
       | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def pre_edit_analysis(
            self,
            project: jedi.Project,
            modules: Mapping[RelPath, JModule],
  +         changes: Mapping[ModuleName, JModuleChange],
  -         file_changes: Sequence[Change[str]],
        ) -> Mapping[ModuleName, LineUsageAnalysis]:
    # offset: -1
            "Return the definition usages of each line."
            # proot = Path(project._path)
            result = dict[ModuleName, LineUsageAnalysis]()
  + 
  +         src_map = {m.mname:</s>
===========unchanged ref 0===========
    at: coeditor.common
        RelPath = NewType("RelPath", Path)
    
    at: coeditor.ctx_change_encoder
        LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
    
    at: spot.static_analysis
        ModuleName = str
    
    at: typing
        Mapping = _alias(collections.abc.Mapping, 2)
    
    at: typing.Mapping
        items() -> AbstractSet[Tuple[_KT, _VT_co]]
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass(unsafe_hash=True)
    class PyDefinition:
  +     """Note that the module and positions can be referring to either the import
  +     statement or the actual definition."""
  + 
        full_name: PyFullName
  +     import_module: ModuleName
  -     module: ModuleName
  -     file: Path
        start_pos: tuple[int, int]
        end_pos: tuple[int, int]
    
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    @dataclass(unsafe_hash=True)
    class PyDefinition:
        @staticmethod
  -     def from_signatures(
  -         name: classes.BaseName, project: Path | None = None
  -     ) -> Iterable["PyDefinition"]:
  +     def from_signatures(name: classes.BaseName) -> Iterable["PyDefinition"]:
  +         cast(classes.Name, name).is_definition()
  +         if (
  +             not name.in_builtin_module()
  -         if name.in_builtin_module():
  -             return
  -         for sig in name.get_signatures():
  -             if (
  -                 not sig.in_builtin_module()
  +             and (full_name := name.full_name)
  -                 and (full_name := sig.full_name)
  -                 and (file := sig.module_path)
  -                 and (project in file.parents)
  +             and (import_module := name.module_name)
  -                 and (module := sig.module_name)
  +             and (start_pos := name.get_definition_start_position())
  -                 and (start_pos := sig.get_definition_start_position())
  +             and (end_pos := name.get_definition_end_position())
  -                 and (end_pos := sig.get_definition_end_position())
  +         ):
  -             ):
  +             full_name = PyFullName(full_name)
  -                 full_name = PyFullName(full_name)
  +             yield PyDefinition(full_name, import_module, start_pos, end_pos)
  -                 yield PyDefinition(full_name, module, file, start_pos, end_pos)
    
===========changed ref 2===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_encoder: ProjectChangeProcessor[TEnc],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Iterable[TEnc]:
        def parse_module(path: Path):
            with _tlogger.timed("parse_module"):
                s = jedi.Script(path=path, project=proj)
                mcontext = s._get_module_context()
                assert isinstance(mcontext, ModuleContext)
                mname = cast(str, mcontext.py__name__())
                if mname.startswith("src."):
                    e = ValueError(f"Bad module name: {mname}")
                    files = list(project.iterdir())
                    print_err(f"project: {proj}", file=sys.stderr)
                    print_err(f"files in root: {files}", file=sys.stderr)
                    raise e
            m = copy.deepcopy(s._module_node)  # deep copy needed because jedi reuses module nodes
                assert isinstance(m, ptree.Module)
                # mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
                # m = parso.parse(path.read_text())
                return JModule(mname, m)
    
  +     def checkout_commit(commit_hash: str, force: bool = False):
  +         with _tlogger.timed("checkout"):
  +             subprocess.run(
  +                 ["git", "checkout", "-f", commit_hash],
  +                 cwd=project,
  +                 capture_output=True,
  +                 check=True,
  +             )
  -     # turn this off so we don't have to deep copy the Modules
  -     # jedi.settings.fast_parser = False
  -     # Update: Have to use deep copy for now due to a bug in jedi: https://github.com/davidhalter/jedi/issues/1888
    
        # checkout to the first commit
        commit_now = history[-1]
  - </s>
===========changed ref 3===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_encoder: ProjectChangeProcessor[TEnc],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Iterable[TEnc]:
    # offset: 1
    <s>/jedi/issues/1888
    
        # checkout to the first commit
        commit_now = history[-1]
  -     with _tlogger.timed("checkout"):
  -         subprocess.run(
  -             ["git", "checkout", "-f", commit_now.hash],
  -             cwd=project,
  -             capture_output=True,
  -             check=True,
  -         )
  +     checkout_commit(commit_now.hash, force=True)
        proj = jedi.Project(path=project, added_sys_path=[project / "src"])
    
    # now we can get the first project state; it's not needed yet,
    # but we'll use it later for pre-edit analysis
        path2module = {
            RelPath(f): parse_module(project / f) for f in get_python_files(project)
        }
    
        def is_src(path_s: str) -> bool:
            path = Path(path_s)
            return path.suffix == ".py" and all(p not in ignore_dirs for p in path.parts)
    
        future_commits = list(reversed(history[:-1]))
        for commit_next in tqdm(
            future_commits, smoothing=0, desc="processing commits", disable=silent
        ):
            # get changed files
            changed_files = run_command(
                ["git", "diff", commit_now.hash, commit_next.hash, "--name-status"],
                cwd=project,
            ).splitlines()
    
            path_changes = list[Change[str]]()
    
            for line in changed_files:
                segs = line.split("\t")
                if len</s> | 
| 
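The pre-edit analysis above now receives per-module `JModuleChange`s instead of raw path changes, so it can restrict jedi to just the lines touched by each change. As recorded here the ranges are treated as inclusive (`range(start, end + 1)`); a later commit in this history switches to half-open ranges via `range(*span.line_range)`. A minimal sketch of the line-collection step as written above:

    def collect_changed_lines(spans) -> set[int]:
        # Union of all changed line numbers, treating ranges as inclusive.
        lines: set[int] = set()
        for span in spans:
            start, end = span.line_range
            lines.update(range(start, end + 1))
        return lines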
	coeditor.code_change/ChangeScope.all_code | 
	Modified | 
	temp-1 | 
	b2f78bf15287dca9f1312cdd7720e6e9175fdeef | 
	Fix line ranges. Improve analysis and add tests. | 
	 <0>:<add>         return self.header_code + self.spans_code
 | 
	      # module: coeditor.code_change
      @dataclass
      class ChangeScope:
          @cached_property
          def all_code(self) -> str:
    -         return f"{self.header_code}\n{self.spans_code}"
 <0>  
       | 
	===========changed ref 0===========
    # module: coeditor.code_change
  + def _line_range(start: int, end: int, can_be_empty: bool = False) -> _LineRange:
  +     if not can_be_empty and start >= end:
  +         raise ValueError(f"Bad line range: {start=}, {end=}")
  +     return _LineRange((start, end))
  + 
===========changed ref 1===========
    # module: coeditor.code_change
    ScopeTree = ptree.Function | ptree.Class | ptree.Module
    PyNode = ptree.PythonBaseNode | ptree.PythonNode
  + _LineRange = NewType("LineRange", tuple[int, int])
    
    _tlogger = TimeLogger()
    
===========changed ref 2===========
    # module: coeditor.code_change
  + def _strip_empty_lines(s: str):
  +     s1 = s.lstrip("\n")
  +     s2 = s1.rstrip("\n")
  +     e_lines_left = len(s) - len(s1)
  +     e_lines_right = len(s1) - len(s2)
  +     return s2, e_lines_left, e_lines_right
  + 
===========changed ref 3===========
  + # module: tests.coeditor.testcases.usages
  + 
  + 
===========changed ref 4===========
  + # module: tests.coeditor.testcases.defs
  + 
  + 
===========changed ref 5===========
  + # module: tests.coeditor.testcases.usages
  + def get_named_changes(*args):
  +     raise NotImplementedError
  + 
===========changed ref 6===========
  + # module: tests.coeditor.testcases.usages
  + @dataclass
  + class JModule:
  +     def iter_imports(self, tree):
  +         raise NotImplementedError
  + 
===========changed ref 7===========
  + # module: tests.coeditor.testcases.usages
  + get_modified_spans = as_any(None)
  + 
===========changed ref 8===========
  + # module: tests.coeditor.testcases.defs
  + @dataclass
  + class ChangeScope:
  +     @cached_property
  +     def spans_code(self) -> str:
  +         return "\n".join(s.code for s in self.spans)
  + 
===========changed ref 9===========
  + # module: tests.coeditor.testcases.usages
  + @dataclass
  + class JModule:
  +     def _to_scope(self) -> ChangeScope:
  +         return ChangeScope.from_tree(ProjectPath(self.mname, ""), self.tree)
  + 
===========changed ref 10===========
  + # module: tests.coeditor.testcases.usages
  + @dataclass
  + class JModule:
  +     "A light wrapper around a jedi module."
  +     mname: ModuleName
  +     tree: ptree.Module
  + 
===========changed ref 11===========
  + # module: tests.coeditor.testcases.defs
  + ScopeTree = ptree.Function | ptree.Class | ptree.Module
  + ChangedSpan = NewType("ChangedSpan", str)
  + 
===========changed ref 12===========
  + # module: tests.coeditor.testcases.defs
  + @dataclass
  + class ChangeScope:
  +     """
  +     A change scope is a python module, a non-hidden function, or a non-hidden class.
  +         - functions and classes that are inside a parent function are considered hidden.
  +     """
  + 
  +     path: ProjectPath
  +     tree: ScopeTree
  +     spans: Sequence
  +     subscopes: Mapping[ProjectPath, Self]
  + 
===========changed ref 13===========
  + # module: tests.coeditor.testcases.defs
  + @dataclass
  + class ChangeScope:
  +     @staticmethod
  +     def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
  +         spans = []
  +         subscopes = dict()
  +         scope = ChangeScope(path, tree, spans, subscopes)
  +         assert isinstance(tree, ScopeTree)
  +         is_func = isinstance(tree, ptree.Function)
  + 
  +         current_stmts = []
  +         content = (
  +             tree.children
  +             if isinstance(tree, ptree.Module)
  +             else cast(ptree.PythonNode, tree.get_suite()).children
  +         )
  +         raise NotImplementedError
  + 
===========changed ref 14===========
  + # module: tests.coeditor.testcases.usages
  + @dataclass
  + class JModule:
  +     @cached_property
  +     def imported_names(self):
  +         names = set[ptree.Name]()
  +         for stmt in self.iter_imports(self.tree):
  +             if isinstance(stmt, ptree.ImportFrom):
  +                 for n in stmt.get_from_names():
  +                     assert isinstance(n, ptree.Name)
  +                     names.add(n)
  +             elif isinstance(stmt, ptree.ImportName):
  +                 for n in stmt.get_defined_names():
  +                     assert isinstance(n, ptree.Name)
  +                     names.add(n)
  +         return names
  + 
===========changed ref 15===========
  + # module: tests.coeditor.testcases.usages
  + def recurse(scope_change: Change[ChangeScope], parent_changes) -> Iterable[ChangedSpan]:
  +     parent_changes = (*parent_changes, scope_change)
  +     match scope_change:
  +         case Modified(old_scope, new_scope):
  +             # compute statement differences
  +             yield from get_modified_spans(old_scope, new_scope, parent_changes)
  +             for sub_change in get_named_changes(
  +                 old_scope.subscopes, new_scope.subscopes
  +             ).values():
  +                 yield from recurse(sub_change, parent_changes)
  +         case Added(scope) | Deleted(scope):
  +             for span in scope.spans:
  +                 code_change = scope_change.new_value(span.code)
  +                 yield ChangedSpan(
  +                     code_change,
  +                     parent_changes,
  +                     span.line_range,
  +                 )
  +             for s in scope.subscopes.values():
  +                 s_change = scope_change.new_value(s)
  +                 yield from recurse(s_change, parent_changes)
  +  | 
| 
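The one-line change to `all_code` above removes an f-string that inserted an extra `"\n"` between header and body. Judging by the updated test expectations in this history (`class B:` followed directly by the indented `inner_attr1: int`), `header_code` already ends with a newline, so the old version produced a spurious blank line. A tiny illustration under that assumption:

    header_code = "class B:\n"
    spans_code = "    inner_attr1: int\n"

    old = f"{header_code}\n{spans_code}"  # extra blank line after the header
    new = header_code + spans_code        # header and body join cleanly
    assert old == "class B:\n\n    inner_attr1: int\n"
    assert new == "class B:\n    inner_attr1: int\n"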
	coeditor.code_change/_edits_from_commit_history | 
	Modified | 
	temp-1 | 
	b2f78bf15287dca9f1312cdd7720e6e9175fdeef | 
	Fix line ranges. Improve analysis and add tests. | 
	 <0>:<add>         pchange = JProjectChange(changed, modules_mod, commit_next)
 | 
	      # module: coeditor.code_change
      def _edits_from_commit_history(
          project: Path,
          history: Sequence[CommitInfo],
          change_encoder: ProjectChangeProcessor[TEnc],
          ignore_dirs: set[str],
          silent: bool,
      ) -> Iterable[TEnc]:
      <s>change:
                      case Added():
                          mod = parse_module(path)
                          new_path2module[rel_path] = mod
                          changed[mod.mname] = JModuleChange.from_modules(Added(mod))
                      case Deleted():
                          mod = new_path2module.pop(rel_path)
                          changed[mod.mname] = JModuleChange.from_modules(Deleted(mod))
                      case Modified(path1, path2):
                          assert path1 == path2
                          mod_old = new_path2module[rel_path]
                          new_path2module[rel_path] = mod_new = parse_module(path)
                          changed[mod_new.mname] = JModuleChange.from_modules(
                              Modified(mod_old, mod_new)
                          )
      
              with _tlogger.timed("post_edit_analysis"):
                  post_analysis = change_encoder.post_edit_analysis(
                      proj,
                      new_path2module,
                      changed,
                  )
      
              # now go backwards in time to perform pre-edit analysis
              checkout_commit(commit_now.hash)
              with _tlogger.timed("pre_edit_analysis"):
                  pre_analysis = change_encoder.pre_edit_analysis(
                      proj,
                      path2module,
                      changed,
                  )
              checkout_commit(commit_next.hash)
      
    +         modules_mod = Modified(path2module.values(), new_path2module.values())
    -         pchange = JProjectChange(changed, commit_next)
 <0>  
              with _tlogger.timed("encode_change"):
                  encs = change_encoder.encode_change(pchange, pre_analysis, post_analysis)
                  yield from encs
              commit_now = commit_next
              path2module = new_path2module
      
       | 
	===========above chunk 0===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_encoder: ProjectChangeProcessor[TEnc],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Iterable[TEnc]:
    # offset: -1
    <s>0, desc="processing commits", disable=silent
        ):
            # get changed files
            changed_files = run_command(
                ["git", "diff", commit_now.hash, commit_next.hash, "--name-status"],
                cwd=project,
            ).splitlines()
    
            path_changes = list[Change[str]]()
    
            for line in changed_files:
                segs = line.split("\t")
                if len(segs) == 2:
                    tag, path = segs
                    if not is_src(path):
                        continue
                    if tag.endswith("A"):
                        path_changes.append(Added(path))
                    elif tag.endswith("D"):
                        path_changes.append(Deleted(path))
                    if tag.endswith("M"):
                        path_changes.append(Modified(path, path))
                elif len(segs) == 3:
                    tag, path1, path2 = segs
                    assert tag.startswith("R")
                    if not is_src(path1) or not is_src(path2):
                        continue
                    path_changes.append(Deleted(path1))
                    path_changes.append(Added(path2))
    
            checkout_commit(commit_next.hash)
    
            proj = jedi.Project(path=project, added_sys_path=[project / "src"])
    
            new_path2module = path2module.copy()
            changed = dict[ModuleName, JModuleChange]()
            for path_change in path_changes:
                path = project / path_change.earlier()
                rel_path = RelPath(path.relative_to(project))
                match path_change:
                    case Added():
                        mod = parse_module(path)
                        new_path2module[rel_path] =</s>
===========above chunk 1===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_encoder: ProjectChangeProcessor[TEnc],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Iterable[TEnc]:
    # offset: -2
    <s> # m = parso.parse(path.read_text())
                return JModule(mname, m)
    
        def checkout_commit(commit_hash: str, force: bool = False):
            with _tlogger.timed("checkout"):
                subprocess.run(
                    ["git", "checkout", "-f", commit_hash],
                    cwd=project,
                    capture_output=True,
                    check=True,
                )
    
        # checkout to the first commit
        commit_now = history[-1]
        checkout_commit(commit_now.hash, force=True)
        proj = jedi.Project(path=project, added_sys_path=[project / "src"])
    
    # now we can get the first project state; it's not needed yet,
    # but we'll use it later for pre-edit analysis
        path2module = {
  +         RelPath(f): parse_module(project / f)
  +         for f in tqdm(
  +             get_python_files(project), desc="building initial project", disable=silent
  +         )
  -         RelPath(f): parse_module(project / f) for f in get_python_files(project)
        }
    
        def is_src(path_s: str) -> bool:
            path = Path(path_s)
            return path.suffix == ".py" and all(p not in ignore_dirs for p in path.parts)
    
        future_commits = list(reversed(history[:-1]))
        for commit_next in tqdm(
            future_commits, smoothing=0, desc="processing commits", disable=silent
        ):
            # get changed files
            changed_files = run_command(</s>
===========above chunk 2===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_encoder: ProjectChangeProcessor[TEnc],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Iterable[TEnc]:
    # offset: -3
        def parse_module(path: Path):
            with _tlogger.timed("parse_module"):
                s = jedi.Script(path=path, project=proj)
                mcontext = s._get_module_context()
                assert isinstance(mcontext, ModuleContext)
                mname = cast(str, mcontext.py__name__())
                if mname.startswith("src."):
                    e = ValueError(f"Bad module name: {mname}")
                    files = list(project.iterdir())
                    print_err(f"project: {proj}", file=sys.stderr)
                    print_err(f"files in root: {files}", file=sys.stderr)
                    raise e
            m = copy.deepcopy(s._module_node)  # deep copy needed because jedi reuses module nodes
                assert isinstance(m, ptree.Module)
                # mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))</s>
===========unchanged ref 0===========
    at: coeditor._utils.TimeLogger
        times: dict[str, list[float]] = field(default_factory=dict)
    
        timed(name: str)
    
    at: coeditor.code_change
        _tlogger = TimeLogger()
    
        JModule(mname: ModuleName, tree: ptree.Module)
    
        JModuleChange(module_change: Change[JModule], changed: Mapping[ProjectPath, ChangedSpan])
    
        get_python_files(project: Path)
    
        DefaultIgnoreDirs = {".venv", ".mypy_cache", ".git", "venv", "build"}
    
        TEnc = TypeVar("TEnc", covariant=True)
    
        ProjectChangeProcessor()
    
        NoProcessing()
    
    at: coeditor.code_change.JModuleChange
        module_change: Change[JModule]
    
        changed: Mapping[ProjectPath, ChangedSpan]
    
        from_modules(module_change: Change[JModule])
    
    at: coeditor.common
        RelPath = NewType("RelPath", Path)
    
        run_command(args: Sequence[str], cwd: str | Path) -> str
    
        print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
    
    at: copy
        deepcopy(x: _T, memo: Optional[Dict[int, Any]]=..., _nil: Any=...) -> _T
    
    at: jedi.api
        Script(code=None, *, path=None, environment=None, project=None)
    
    at: jedi.api.Script
        _get_module_context()
    
    at: jedi.api.Script.__init__
        self._module_node, code = self._inference_state.parse_and_get_code(
                    code=code,
                    path=self.path,
                    use_latest_grammar=path and path.suffix == '.pyi',
                    cache=False,  # No disk cache, because the current script often changes.
                    diff_cache=settings.fast_parser,
                    cache_path=settings.cache_directory,
                )
    
     | 
| 
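The loop in the record above pattern-matches on a small algebraic change type. A self-contained sketch of that shape, matching how `path_changes` are consumed (the real classes in the coeditor codebase carry extra helpers such as `earlier()` and `new_value()`):

    from dataclasses import dataclass
    from typing import Generic, TypeVar

    T = TypeVar("T")

    @dataclass
    class Added(Generic[T]):
        after: T

    @dataclass
    class Deleted(Generic[T]):
        before: T

    @dataclass
    class Modified(Generic[T]):
        before: T
        after: T

    def describe(change) -> str:
        # Positional patterns work because dataclasses define __match_args__.
        match change:
            case Added(after):
                return f"added {after}"
            case Deleted(before):
                return f"deleted {before}"
            case Modified(before, after):
                return f"modified {before} -> {after}"
            case _:
                raise ValueError(f"Invalid change type: {change}.")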
	coeditor.ctx_change_encoder/CtxCodeChangeProblemGenerator.__init__ | 
	Modified | 
	temp-1 | 
	b2f78bf15287dca9f1312cdd7720e6e9175fdeef | 
	Fix line ranges. Improve analysis and add tests. | 
	 <0>:<add>             analysis = JediUsageAnalyzer()
 | 
	      # module: coeditor.ctx_change_encoder
      class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
    +     def __init__(self, analysis: "JediUsageAnalyzer | None"):
    -     def __init__(self, analysis: "JediUsageAnalysis | None"):
              if analysis is None:
    -             analysis = JediUsageAnalysis()
 <0>          self.analysis = analysis
      
       | 
	===========unchanged ref 0===========
    at: coeditor.ctx_change_encoder
        CtxCodeChangeProblem(span: ChangedSpan, relevant_changes: list[ChangedSpan], relevant_unchanged: list[tuple[ProjectPath, str]])
    
        JediUsageAnalyzer(follow_imports: bool=True)
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
  - jedi.cache.clear_time_caches = lambda: None
  - 
===========changed ref 1===========
  + # module: tests.coeditor.test_analysis
  + 
  + 
===========changed ref 2===========
  + # module: tests.coeditor.testcases.usages
  + 
  + 
===========changed ref 3===========
  + # module: tests.coeditor.testcases.defs
  + 
  + 
===========changed ref 4===========
  + # module: tests.coeditor.testcases.usages
  + def get_named_changes(*args):
  +     raise NotImplementedError
  + 
===========changed ref 5===========
  + # module: tests.coeditor.testcases.usages
  + @dataclass
  + class JModule:
  +     def iter_imports(self, tree):
  +         raise NotImplementedError
  + 
===========changed ref 6===========
  + # module: tests.coeditor.testcases.usages
  + get_modified_spans = as_any(None)
  + 
===========changed ref 7===========
  + # module: tests.coeditor.testcases.defs
  + @dataclass
  + class ChangeScope:
  +     @cached_property
  +     def spans_code(self) -> str:
  +         return "\n".join(s.code for s in self.spans)
  + 
===========changed ref 8===========
  + # module: tests.coeditor.test_analysis
  + testcase_root = proj_root() / "tests" / "coeditor" / "testcases"
  + 
===========changed ref 9===========
  + # module: tests.coeditor.testcases.usages
  + @dataclass
  + class JModule:
  +     def _to_scope(self) -> ChangeScope:
  +         return ChangeScope.from_tree(ProjectPath(self.mname, ""), self.tree)
  + 
===========changed ref 10===========
  + # module: tests.coeditor.testcases.usages
  + @dataclass
  + class JModule:
  +     "A light wrapper around a jedi module."
  +     mname: ModuleName
  +     tree: ptree.Module
  + 
===========changed ref 11===========
  + # module: tests.coeditor.testcases.defs
  + ScopeTree = ptree.Function | ptree.Class | ptree.Module
  + ChangedSpan = NewType("ChangedSpan", str)
  + 
===========changed ref 12===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @cached_property
        def all_code(self) -> str:
  +         return self.header_code + self.spans_code
  -         return f"{self.header_code}\n{self.spans_code}"
    
===========changed ref 13===========
    # module: coeditor.code_change
    @dataclass
    class JProjectChange:
        changed: Mapping[ModuleName, JModuleChange]
  +     all_modules: Modified[Collection[JModule]]
        commit_info: "CommitInfo | None"
    
===========changed ref 14===========
    # module: coeditor.code_change
    @dataclass
    class ChangedSpan:
  +     @property
  +     def header_line_range(self) -> _LineRange:
  +         parent_scope = self.parent_scopes[-1].earlier()
  +         hrange = parent_scope.header_line_range
  +         return hrange
  + 
===========changed ref 15===========
    # module: coeditor.code_change
  + def _line_range(start: int, end: int, can_be_empty: bool = False) -> _LineRange:
  +     if not can_be_empty and start >= end:
  +         raise ValueError(f"Bad line range: {start=}, {end=}")
  +     return _LineRange((start, end))
  + 
===========changed ref 16===========
  + # module: tests.coeditor.test_analysis
  + def assert_has_usages(defs: Collection[PyDefinition], *full_names: str):
  +     nameset = list(d.full_name for d in defs)
  +     for name in full_names:
  +         if PyFullName(name) not in nameset:
  +             raise AssertionError(f"{name} not in {nameset}")
  + 
===========changed ref 17===========
  + # module: tests.coeditor.test_analysis
  + def assert_no_usages(defs: Collection[PyDefinition], *full_names: str):
  +     nameset = list(d.full_name for d in defs)
  +     for name in full_names:
  +         if PyFullName(name) in nameset:
  +             raise AssertionError(f"{name} should not be in {nameset}")
  + 
===========changed ref 18===========
    # module: coeditor.code_change
    ScopeTree = ptree.Function | ptree.Class | ptree.Module
    PyNode = ptree.PythonBaseNode | ptree.PythonNode
  + _LineRange = NewType("LineRange", tuple[int, int])
    
    _tlogger = TimeLogger()
    
===========changed ref 19===========
    # module: coeditor.code_change
    @dataclass
    class ChangedSpan:
        "Represents the changes made to a statement span."
        change: Change[str]
        parent_scopes: Sequence[Change[ChangeScope]]
  -     line_range: tuple[int, int]
  -     old_statements: Sequence[PyNode]
  +     line_range: _LineRange
    
===========changed ref 20===========
    # module: coeditor.code_change
  + def _strip_empty_lines(s: str):
  +     s1 = s.lstrip("\n")
  +     s2 = s1.rstrip("\n")
  +     e_lines_left = len(s) - len(s1)
  +     e_lines_right = len(s1) - len(s2)
  +     return s2, e_lines_left, e_lines_right
  + 
===========changed ref 21===========
  + # module: tests.coeditor.testcases.defs
  + @dataclass
  + class ChangeScope:
  +     """
  +     A change scope is a python module, a non-hidden function, or a non-hidden class.
  +         - functions and classes that are inside a parent function are considered hidden.
  +     """
  + 
  +     path: ProjectPath
  +     tree: ScopeTree
  +     spans: Sequence
  +     subscopes: Mapping[ProjectPath, Self]
  + 
===========changed ref 22===========
  + # module: tests.coeditor.testcases.defs
  + @dataclass
  + class ChangeScope:
  +     @staticmethod
  +     def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
  +         spans = []
  +         subscopes = dict()
  +         scope = ChangeScope(path, tree, spans, subscopes)
  +         assert isinstance(tree, ScopeTree)
  +         is_func = isinstance(tree, ptree.Function)
  + 
  +         current_stmts = []
  +         content = (
  +             tree.children
  +             if isinstance(tree, ptree.Module)
  +             else cast(ptree.PythonNode, tree.get_suite()).children
  +         )
  +         raise NotImplementedError
  + 
===========changed ref 23===========
  + # module: tests.coeditor.testcases.usages
  + @dataclass
  + class JModule:
  +     @cached_property
  +     def imported_names(self):
  +         names = set[ptree.Name]()
  +         for stmt in self.iter_imports(self.tree):
  +             if isinstance(stmt, ptree.ImportFrom):
  +                 for n in stmt.get_from_names():
  +                     assert isinstance(n, ptree.Name)
  +                     names.add(n)
  +             elif isinstance(stmt, ptree.ImportName):
  +                 for n in stmt.get_defined_names():
  +                     assert isinstance(n, ptree.Name)
  +                     names.add(n)
  +         return names
  +  | 
| 
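`_strip_empty_lines`, shown in the refs above, returns the stripped string together with the number of leading and trailing newlines removed, which lets callers re-derive line offsets after stripping. A quick usage check, assuming the definition exactly as recorded:

    def _strip_empty_lines(s: str):
        s1 = s.lstrip("\n")
        s2 = s1.rstrip("\n")
        return s2, len(s) - len(s1), len(s1) - len(s2)

    code, n_left, n_right = _strip_empty_lines("\n\nx = 1\n")
    assert (code, n_left, n_right) == ("x = 1", 2, 1)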
	coeditor.ctx_change_encoder/CtxCodeChangeProblemGenerator.pre_edit_analysis | 
	Modified | 
	temp-1 | 
	b2f78bf15287dca9f1312cdd7720e6e9175fdeef | 
	Fix line ranges. Improve analysis and add tests. | 
	 <0>:<add>                 lines_to_analyze.update(range(*span.header_line_range))
 | 
	      # module: coeditor.ctx_change_encoder
      class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
          def pre_edit_analysis(
              self,
              project: jedi.Project,
              modules: Mapping[RelPath, JModule],
              changes: Mapping[ModuleName, JModuleChange],
          ) -> Mapping[ModuleName, LineUsageAnalysis]:
              "Return the definition usages of each line."
              # proot = Path(project._path)
              result = dict[ModuleName, LineUsageAnalysis]()
      
              src_map = {m.mname: f for f, m in modules.items()}
              for mname, mchange in changes.items():
                  if not isinstance(mchange.module_change, Modified):
                      continue
      
                  lines_to_analyze = set[int]()
                  for span in mchange.changed.values():
                    if isinstance(span.change, Added):
                          continue
    -                 start, end = span.line_range
    +                 lines_to_analyze.update(range(*span.line_range))
    -                 lines_to_analyze.update(range(start, end + 1))
 <0>  
                  mod_path = src_map[mname]
                  assert (
                      src_file := project.path / mod_path
                  ).exists(), f"src file missing: {src_file}"
                  script = jedi.Script(path=src_file, project=project)
                  line_usages = self.analysis.get_line_usages(
                      script, project.path, lines_to_analyze, silent=True
                  )
                  result[mname] = line_usages
              return result
      
       | 
	===========unchanged ref 0===========
    at: coeditor.common
        RelPath = NewType("RelPath", Path)
    
    at: coeditor.ctx_change_encoder
        LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
    
    at: coeditor.ctx_change_encoder.CtxCodeChangeProblemGenerator.__init__
        analysis = JediUsageAnalyzer()
        self.analysis = analysis
    
    at: coeditor.ctx_change_encoder.JediUsageAnalyzer
        follow_imports: bool = True
    
        get_line_usages(script: jedi.Script, proj_root: Path, lines_to_analyze: Collection[int], silent: bool=False)
    
    at: jedi.api
        Script(code=None, *, path=None, environment=None, project=None)
    
    at: jedi.api.project
        Project(path, *, environment_path=None, load_unsafe_extensions=False, sys_path=None, added_sys_path=(), smart_sys_path=True)
    
    at: jedi.api.project.Project
        _environment = None
    
    at: pathlib.Path
        __slots__ = ()
    
        exists() -> bool
    
    at: spot.static_analysis
        ModuleName = str
    
    at: typing
        Mapping = _alias(collections.abc.Mapping, 2)
    
    at: typing.Mapping
        items() -> AbstractSet[Tuple[_KT, _VT_co]]
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
  +     def __init__(self, analysis: "JediUsageAnalyzer | None"):
  -     def __init__(self, analysis: "JediUsageAnalysis | None"):
            if analysis is None:
  +             analysis = JediUsageAnalyzer()
  -             analysis = JediUsageAnalysis()
            self.analysis = analysis
    
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
  - jedi.cache.clear_time_caches = lambda: None
  - 
===========changed ref 2===========
  + # module: tests.coeditor.test_analysis
  + 
  + 
===========changed ref 3===========
  + # module: tests.coeditor.testcases.usages
  + 
  + 
===========changed ref 4===========
  + # module: tests.coeditor.testcases.defs
  + 
  + 
===========changed ref 5===========
  + # module: tests.coeditor.testcases.usages
  + def get_named_changes(*args):
  +     raise NotImplementedError
  + 
===========changed ref 6===========
  + # module: tests.coeditor.testcases.usages
  + @dataclass
  + class JModule:
  +     def iter_imports(self, tree):
  +         raise NotImplementedError
  + 
===========changed ref 7===========
  + # module: tests.coeditor.testcases.usages
  + get_modified_spans = as_any(None)
  + 
===========changed ref 8===========
  + # module: tests.coeditor.testcases.defs
  + @dataclass
  + class ChangeScope:
  +     @cached_property
  +     def spans_code(self) -> str:
  +         return "\n".join(s.code for s in self.spans)
  + 
===========changed ref 9===========
  + # module: tests.coeditor.test_analysis
  + testcase_root = proj_root() / "tests" / "coeditor" / "testcases"
  + 
===========changed ref 10===========
  + # module: tests.coeditor.testcases.usages
  + @dataclass
  + class JModule:
  +     def _to_scope(self) -> ChangeScope:
  +         return ChangeScope.from_tree(ProjectPath(self.mname, ""), self.tree)
  + 
===========changed ref 11===========
  + # module: tests.coeditor.testcases.usages
  + @dataclass
  + class JModule:
  +     "A light wrapper around a jedi module."
  +     mname: ModuleName
  +     tree: ptree.Module
  + 
===========changed ref 12===========
  + # module: tests.coeditor.testcases.defs
  + ScopeTree = ptree.Function | ptree.Class | ptree.Module
  + ChangedSpan = NewType("ChangedSpan", str)
  + 
===========changed ref 13===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @cached_property
        def all_code(self) -> str:
  +         return self.header_code + self.spans_code
  -         return f"{self.header_code}\n{self.spans_code}"
    
===========changed ref 14===========
    # module: coeditor.code_change
    @dataclass
    class JProjectChange:
        changed: Mapping[ModuleName, JModuleChange]
  +     all_modules: Modified[Collection[JModule]]
        commit_info: "CommitInfo | None"
    
===========changed ref 15===========
    # module: coeditor.code_change
    @dataclass
    class ChangedSpan:
  +     @property
  +     def header_line_range(self) -> _LineRange:
  +         parent_scope = self.parent_scopes[-1].earlier()
  +         hrange = parent_scope.header_line_range
  +         return hrange
  + 
===========changed ref 16===========
    # module: coeditor.code_change
  + def _line_range(start: int, end: int, can_be_empty: bool = False) -> _LineRange:
  +     if not can_be_empty and start >= end:
  +         raise ValueError(f"Bad line range: {start=}, {end=}")
  +     return _LineRange((start, end))
  + 
===========changed ref 17===========
  + # module: tests.coeditor.test_analysis
  + def assert_has_usages(defs: Collection[PyDefinition], *full_names: str):
  +     nameset = list(d.full_name for d in defs)
  +     for name in full_names:
  +         if PyFullName(name) not in nameset:
  +             raise AssertionError(f"{name} not in {nameset}")
  + 
===========changed ref 18===========
  + # module: tests.coeditor.test_analysis
  + def assert_no_usages(defs: Collection[PyDefinition], *full_names: str):
  +     nameset = list(d.full_name for d in defs)
  +     for name in full_names:
  +         if PyFullName(name) in nameset:
  +             raise AssertionError(f"{name} should not be in {nameset}")
  + 
===========changed ref 19===========
    # module: coeditor.code_change
    ScopeTree = ptree.Function | ptree.Class | ptree.Module
    PyNode = ptree.PythonBaseNode | ptree.PythonNode
  + _LineRange = NewType("LineRange", tuple[int, int])
    
    _tlogger = TimeLogger()
    
===========changed ref 20===========
    # module: coeditor.code_change
    @dataclass
    class ChangedSpan:
        "Represents the changes made to a statement span."
        change: Change[str]
        parent_scopes: Sequence[Change[ChangeScope]]
  -     line_range: tuple[int, int]
  -     old_statements: Sequence[PyNode]
  +     line_range: _LineRange
    
===========changed ref 21===========
    # module: coeditor.code_change
  + def _strip_empty_lines(s: str):
  +     s1 = s.lstrip("\n")
  +     s2 = s1.rstrip("\n")
  +     e_lines_left = len(s) - len(s1)
  +     e_lines_right = len(s1) - len(s2)
  +     return s2, e_lines_left, e_lines_right
  + 
===========changed ref 22===========
  + # module: tests.coeditor.testcases.defs
  + @dataclass
  + class ChangeScope:
  +     """
  +     A change scope is a python module, a non-hidden function, or a non-hidden class.
  +         - functions and classes that are inside a parent function are considered hidden.
  +     """
  + 
  +     path: ProjectPath
  +     tree: ScopeTree
  +     spans: Sequence
  +     subscopes: Mapping[ProjectPath, Self]
  + 
===========changed ref 23===========
  + # module: tests.coeditor.testcases.defs
  + @dataclass
  + class ChangeScope:
  +     @staticmethod
  +     def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
  +         spans = []
  +         subscopes = dict()
  +         scope = ChangeScope(path, tree, spans, subscopes)
  +         assert isinstance(tree, ScopeTree)
  +         is_func = isinstance(tree, ptree.Function)
  + 
  +         current_stmts = []
  +         content = (
  +             tree.children
  +             if isinstance(tree, ptree.Module)
  +             else cast(ptree.PythonNode, tree.get_suite()).children
  +         )
  +         raise NotImplementedError
  +  | 
| 
	coeditor.encoding/change_to_line_diffs | 
	Modified | 
	temp-1 | 
	d0fe36e93da8ca33161e524783335898bd47ebbc | 
	- More analysis bug fix. - Encode unchanged using CSpan. | 
	 <0>:<add>                 diffs = compute_line_diffs(splitlines(before), splitlines(after))
 | 
	      # module: coeditor.encoding
      def change_to_line_diffs(change: Change[str]) -> list[str]:
          "Encode a change as a token sequence."
          match change:
              case Modified(before, after):
    +             if change.unchanged:
    +                 diffs = []
    +             else:
    -             diffs = compute_line_diffs(splitlines(before), splitlines(after))
 <0>              # rearrange_diffs_(diffs)
                  if not diffs:
                      # as a special case, `unified_diff` would return an empty when there is no change.
                      diffs = [" " + l for l in splitlines(before)]
              case Added(after):
                  diffs = ["+" + l for l in splitlines(after)]
              case Deleted(before):
                  diffs = ["-" + l for l in splitlines(before)]
              case _:
                  raise ValueError(f"Invalid change type: {change}.")
          return diffs
      
       | 
	===========unchanged ref 0===========
    at: coeditor._utils
        compute_line_diffs(before: Sequence[str], after: Sequence[str], keep_explain_lines: bool=False)
    
    at: coeditor.common
        splitlines(text: str) -> list[str]
    
     | 
| 
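`change_to_line_diffs` above delegates to `compute_line_diffs`, whose body is not shown in this excerpt. A plausible stand-in built on the standard library is sketched below; the header filter is the fragile part, since a content line that itself starts with `---` would be dropped by this naive check:

    import difflib

    def compute_line_diffs(before: list[str], after: list[str]) -> list[str]:
        # Large n makes the whole file one hunk, keeping full context lines.
        out = []
        for line in difflib.unified_diff(before, after, n=10**6, lineterm=""):
            if line.startswith(("---", "+++", "@@")):
                continue  # drop file headers and hunk markers
            out.append(line)
        return out

    assert compute_line_diffs(["a", "b"], ["a", "c"]) == [" a", "-b", "+c"]
    # Matches the special case noted above: no change yields an empty list.
    assert compute_line_diffs(["a"], ["a"]) == []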
	tests.coeditor.test_code_change/test_change_scope | 
	Modified | 
	temp-1 | 
	d0fe36e93da8ca33161e524783335898bd47ebbc | 
	- More analysis bug fix. - Encode unchanged using CSpan. | 
	 <0>:<add>     inner_class_code = scope.subscopes["A"].subscopes["B"].all_code
 | 
	      # module: tests.coeditor.test_code_change
      def test_change_scope():
      <s>
          assert_str_equal(f1_code, indent(f1_expect, " " * 4))
      
          f2_expect = dedent(
              """\
              @annotated
              def f2():
                  return 1
              """
          )
    +     f2_code = scope.subscopes["f2"].all_code
    -     f2_code = scope.subscopes[ProjectPath("code1", "f2")].all_code
          assert_str_equal(f2_code, f2_expect)
      
          attr1_expect = dedent(
              """\
              attr1: int
              """
          )
    +     attr1_code = scope.subscopes["A"].spans_code
    -     attr1_code = scope.subscopes[ProjectPath("code1", "A")].spans_code
          assert_str_equal(attr1_code, indent(attr1_expect, " " * 4))
      
          method1_expect = dedent(
              """\
              @staticmethod
              def method1():
                  return 1
              """
          )
    -     method1_code = (
    -         scope.subscopes[ProjectPath("code1", "A")]
    -         .subscopes[ProjectPath("code1", "A.method1")]
    -         .all_code
    -     )
    +     method1_code = scope.subscopes["A"].subscopes["method1"].all_code
          assert_str_equal(method1_code, indent(method1_expect, " " * 4))
      
          inner_attr1_expect = dedent(
              """\
              class B:
                  inner_attr1: int
              """
          )
    -     inner_class_code = (
    -         scope.subscopes[ProjectPath("code1", "A")]
    -         .subscopes[ProjectPath("code1", "A.B")]
    -         .all_code
    -     )
 <0>      assert_str_equal(inner_class_code, indent(inner_attr1_expect, " " * 4))
      
       | 
	===========above chunk 0===========
    # module: tests.coeditor.test_code_change
    def test_change_scope():
    # offset: -1
        code1 = dedent(
            """\
            import os
            
            x = 1
            y = x + 1
            
            def f1():
                global x
                x *= 5
                return x
                
            if __name__ == "__main__":
                print(f1() + x)
                    
            @annotated
            def f2():
                return 1
                
            @dataclass
            class A:
                attr1: int
                
                @staticmethod
                def method1():
                    return 1
                    
                class B:
                    inner_attr1: int
            """
        )
        mod_tree = code_to_module(code1)
        scope = ChangeScope.from_tree(ProjectPath("code1", ""), mod_tree)
        global_spans = [
            dedent(
                """\
                x = 1
                y = x + 1
                """
            ),
            dedent(
                """\
                if __name__ == "__main__":
                    print(f1() + x)
                """
            ),
        ]
        for i, code in enumerate(global_spans):
            assert_str_equal(scope.spans[i].code, code)
    
        f1_expect = dedent(
            """\
            global x
            x *= 5
            return x
            """
        )
  +     f1_code = scope.subscopes["f1"].spans_code
  -     f1_code = scope.subscopes[ProjectPath("code1", "f1")].spans_code
        assert_str_equal(f1_code, indent(f1_expect, " " * 4))
    
        f2</s>
===========unchanged ref 0===========
    at: tests.coeditor.test_code_change.TestChangedSpan
        scope1 = ChangeScope.from_tree(ProjectPath("code1", ""), code_to_module(code1))
    
    at: textwrap
        indent(text: str, prefix: str, predicate: Optional[Callable[[str], bool]]=...) -> str
    
    
===========changed ref 0===========
    # module: coeditor.encoding
    def change_to_line_diffs(change: Change[str]) -> list[str]:
        "Encode a change as a token sequence."
        match change:
            case Modified(before, after):
  +             if change.unchanged:
  +                 diffs = []
  +             else:
  +                 diffs = compute_line_diffs(splitlines(before), splitlines(after))
  -             diffs = compute_line_diffs(splitlines(before), splitlines(after))
                # rearrange_diffs_(diffs)
                if not diffs:
                    # as a special case, `unified_diff` would return an empty list when there is no change.
                    diffs = [" " + l for l in splitlines(before)]
            case Added(after):
                diffs = ["+" + l for l in splitlines(after)]
            case Deleted(before):
                diffs = ["-" + l for l in splitlines(before)]
            case _:
                raise ValueError(f"Invalid change type: {change}.")
        return diffs
     | 
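
This commit also threads an `unchanged` flag through `Modified` (see `Modified.from_unchanged` in the changed refs above) so that reference spans whose before and after text are identical can skip diff computation and be rendered as pure context. A stripped-down illustration of that short-circuit follows; `to_context_lines` is a hypothetical helper, not part of the project.

    from dataclasses import dataclass

    @dataclass
    class Modified:
        before: str
        after: str
        unchanged: bool = False

        @staticmethod
        def from_unchanged(v: str) -> "Modified":
            return Modified(v, v, unchanged=True)

    def to_context_lines(c: Modified) -> list[str]:
        # an unchanged span needs no diff: every line is context
        assert c.unchanged
        return [" " + l for l in c.before.split("\n")]

    assert to_context_lines(Modified.from_unchanged("x = 1")) == [" x = 1"]
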
| 
	coeditor.code_change/ChangeScope.from_tree | 
	Modified | 
	temp-1 | 
	d0fe36e93da8ca33161e524783335898bd47ebbc | 
	- More analysis bug fix. - Encode unchanged using CSpan. | 
	 <0>:<add>             subscopes[name] = subscope
 | 
	      # module: coeditor.code_change
      @dataclass
      class ChangeScope:
          @staticmethod
          def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
              spans = []
              subscopes = dict()
    +         scope = ChangeScope(path, tree, spans, subscopes, None)
    -         scope = ChangeScope(path, tree, spans, subscopes)
              assert isinstance(tree, ScopeTree)
              is_func = isinstance(tree, ptree.Function)
      
              current_stmts = []
              content = (
                  tree.children
                  if isinstance(tree, ptree.Module)
                  else cast(ptree.PythonNode, tree.get_suite()).children
              )
              for s in content:
                  # we don't create inner scopes for function contents
                  if is_func or _is_scope_statement(as_any(s)):
                      current_stmts.append(s)
                  else:
                      if current_stmts:
    +                     spans.append(StatementSpan(current_stmts, scope))
    -                     spans.append(StatementSpan(current_stmts))
                          current_stmts = []
              if current_stmts:
    +             spans.append(StatementSpan(current_stmts, scope))
    -             spans.append(StatementSpan(current_stmts))
      
              if is_func:
                  # we don't create inner scopes for function contents
                  return scope
              for stree in tree._search_in_scope(ptree.Function.type, ptree.Class.type):
                  stree: ptree.Function | ptree.Class
    +             name = cast(ptree.Name, stree.name).value
    -             spath = path.append(cast(ptree.Name, stree.name).value)
    +             spath = path.append(name)
                  subscope = ChangeScope.from_tree(spath, stree)
    +             subscope.parent_scope = scope
    -             subscopes[spath] = subscope
 <0>          return scope
      
       | 
	===========unchanged ref 0===========
    at: coeditor._utils
        not_none(x: Optional[T1]) -> T1
    
        assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
    
    at: coeditor.code_change
        _LineRange = NewType("LineRange", tuple[int, int])
    
        _line_range(start: int, end: int, can_be_empty: bool=False) -> _LineRange
    
        _strip_empty_lines(s: str)
    
    at: coeditor.code_change.ChangeScope
        path: ProjectPath
    
        tree: ScopeTree
    
        spans: Sequence["StatementSpan"]
    
        subscopes: Mapping[str, Self]
    
        parent_scope: "ChangeScope | None"
    
    at: coeditor.code_change.ChangeScope.__post_init__
        tree = self.tree
    
    at: coeditor.code_change.StatementSpan.__post_init__
        self.line_range: _LineRange = _line_range(start, end)
    
    at: coeditor.common
        count_lines(text: str) -> int
    
    at: parso.tree
        NodeOrLeaf()
    
    at: parso.tree.BaseNode.__init__
        self.children = children
    
        self.parent: Optional[BaseNode] = None
    
    at: parso.tree.NodeOrLeaf
        __slots__ = ('parent',)
    
        type: str
    
        parent: 'Optional[BaseNode]'
    
        get_code(include_prefix=True)
    
    at: typing
        cast(typ: Type[_T], val: Any) -> _T
        cast(typ: str, val: Any) -> Any
        cast(typ: object, val: Any) -> Any
    
    
===========changed ref 0===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
  +     def _search_span(self, line: int) -> "StatementSpan | None":
  +         for span in self.spans:
  +             if span.line_range[0] <= line < span.line_range[1]:
  +                 return span
  +         return None
  + 
===========changed ref 1===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
  +     def _search(self, path: ElemPath, line: int) -> Self | "StatementSpan":
  +         scope = self._search_scope(path)
  +         if scope.header_line_range[0] <= line < scope.header_line_range[1]:
  +             return scope
  +         span = scope._search_span(line)
  +         return span or scope
  + 
===========changed ref 2===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
  +     def _search_scope(self, path: ElemPath) -> Self:
  +         """Find the scope that can potentially contain the given path. Follow the
  +         path segments until no more subscopes are found."""
  +         segs = path.split(".")
  +         scope = self
  +         for s in segs:
  +             if s in scope.subscopes:
  +                 scope = scope.subscopes[s]
  +             else:
  +                 break
  +         return scope
  + 
===========changed ref 3===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        """
        A change scope is a python module, a non-hidden function, or a non-hidden class.
            - functions and classes that are inside a parent function are considered hidden.
        """
    
        path: ProjectPath
        tree: ScopeTree
        spans: Sequence["StatementSpan"]
  +     subscopes: Mapping[str, Self]
  -     subscopes: Mapping[ProjectPath, Self]
  +     parent_scope: "ChangeScope | None"
    
===========changed ref 4===========
    # module: coeditor.history
    @dataclass
    class Modified(_ChangeBase[E1]):
  +     @staticmethod
  +     def from_unchanged(v: T1) -> "Modified[T1]":
  +         return Modified(v, v, unchanged=True)
  + 
===========changed ref 5===========
    # module: coeditor.history
    @dataclass
    class Modified(_ChangeBase[E1]):
        before: E1
        after: E1
  +     unchanged: bool = False
    
===========changed ref 6===========
    # module: coeditor.encoding
    def change_to_line_diffs(change: Change[str]) -> list[str]:
        "Encode a change as a token sequence."
        match change:
            case Modified(before, after):
  +             if change.unchanged:
  +                 diffs = []
  +             else:
  +                 diffs = compute_line_diffs(splitlines(before), splitlines(after))
  -             diffs = compute_line_diffs(splitlines(before), splitlines(after))
                # rearrange_diffs_(diffs)
                if not diffs:
                    # as a special case, `unified_diff` would return an empty list when there is no change.
                    diffs = [" " + l for l in splitlines(before)]
            case Added(after):
                diffs = ["+" + l for l in splitlines(after)]
            case Deleted(before):
                diffs = ["-" + l for l in splitlines(before)]
            case _:
                raise ValueError(f"Invalid change type: {change}.")
        return diffs
    
===========changed ref 7===========
    # module: tests.coeditor.test_code_change
    def test_change_scope():
        code1 = dedent(
            """\
            import os
            
            x = 1
            y = x + 1
            
            def f1():
                global x
                x *= 5
                return x
                
            if __name__ == "__main__":
                print(f1() + x)
                    
            @annotated
            def f2():
                return 1
                
            @dataclass
            class A:
                attr1: int
                
                @staticmethod
                def method1():
                    return 1
                    
                class B:
                    inner_attr1: int
            """
        )
        mod_tree = code_to_module(code1)
        scope = ChangeScope.from_tree(ProjectPath("code1", ""), mod_tree)
        global_spans = [
            dedent(
                """\
                x = 1
                y = x + 1
                """
            ),
            dedent(
                """\
                if __name__ == "__main__":
                    print(f1() + x)
                """
            ),
        ]
        for i, code in enumerate(global_spans):
            assert_str_equal(scope.spans[i].code, code)
    
        f1_expect = dedent(
            """\
            global x
            x *= 5
            return x
            """
        )
  +     f1_code = scope.subscopes["f1"].spans_code
  -     f1_code = scope.subscopes[ProjectPath("code1", "f1")].spans_code
        assert_str_equal(f1_code, indent(f1_expect, " " * 4))
    
        f2_expect = dedent(
            """\
            @annotated
            def f2():
                return 1
            """
        )
  +     f2_code = scope.subscopes["f2"].all_code
  -     f2_code = scope.subscopes[ProjectPath("code1", "f2")].all_code
        assert_str_equal(f2_code, f2</s> | 
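
Keying `subscopes` by simple name rather than by full `ProjectPath` is what enables the `_search_scope` lookup shown in the changed refs above: a dotted element path is resolved one segment at a time, stopping at the last segment that still resolves. A runnable toy of that lookup, with `Node` standing in for `ChangeScope`:

    from dataclasses import dataclass, field

    @dataclass
    class Node:
        name: str
        subscopes: dict[str, "Node"] = field(default_factory=dict)

        def search_scope(self, path: str) -> "Node":
            # follow dotted segments while they resolve; keep the last hit
            scope = self
            for seg in path.split("."):
                if seg in scope.subscopes:
                    scope = scope.subscopes[seg]
                else:
                    break
            return scope

    mod = Node("mod", {"A": Node("A", {"method1": Node("method1")})})
    assert mod.search_scope("A.method1").name == "method1"
    assert mod.search_scope("A.no_such").name == "A"   # closest enclosing scope
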
| 
	coeditor.code_change/JModuleChange.from_modules | 
	Modified | 
	temp-1 | 
	d0fe36e93da8ca33161e524783335898bd47ebbc | 
	- More analysis bug fix. - Encode unchanged using CSpan. | 
	 <0>:<add>                 module_change.map(lambda m: m.as_scope), tuple()
 | 
	      # module: coeditor.code_change
      @dataclass
      class JModuleChange:
          @staticmethod
          def from_modules(module_change: Change[JModule]):
              "Compute the change spans from two versions of the same module."
              with _tlogger.timed("JModuleChange.from_modules"):
                  changed = dict[ProjectPath, ChangedSpan]()
                  for cspan in get_changed_spans(
    -                 module_change.map(lambda m: m._to_scope()), tuple()
 <0>              ):
                      path = cspan.parent_scopes[-1].earlier().path
                      changed[path] = cspan
                  return JModuleChange(module_change, changed)
      
       | 
	===========unchanged ref 0===========
    at: coeditor.code_change.ChangedSpan
        change: Change[str]
    
        parent_scopes: Sequence[Change[ChangeScope]]
    
        line_range: _LineRange
    
    at: dataclasses
        dataclass(_cls: Type[_T]) -> Type[_T]
        dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
        dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
    
    at: functools
        cached_property(func: Callable[[Any], _T])
    
    at: parso.python.tree
        Module(children)
    
    at: spot.static_analysis
        ModuleName = str
    
    
===========changed ref 0===========
    # module: coeditor.code_change
    @dataclass
    class JModule:
  -     def _to_scope(self) -> ChangeScope:
  -         return ChangeScope.from_tree(ProjectPath(self.mname, ""), self.tree)
  - 
===========changed ref 1===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
  +     def ancestors(self) -> list[Self]:
  +         scope = self
  +         result = [scope]
  +         while scope := scope.parent_scope:
  +             result.append(scope)
  +         result.reverse()
  +         return result
  + 
===========changed ref 2===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
  +     def _search_span(self, line: int) -> "StatementSpan | None":
  +         for span in self.spans:
  +             if span.line_range[0] <= line < span.line_range[1]:
  +                 return span
  +         return None
  + 
===========changed ref 3===========
    # module: coeditor.code_change
    @dataclass
    class StatementSpan:
        """
        A statement span is a set of lines inside the same change scope. It is the basic unit of code changes handled by our model.
            - For a modified function, the span is the function itself.
            - For a modified module, the spans are the regions between the functions and classes plus
            the spans recursively generated.
            - For a modified class, the spans are the regions between methods plus
            the spans recursively generated.
        """
    
        statements: Sequence[PyNode]
  +     scope: ChangeScope
    
===========changed ref 4===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
  +     def _search(self, path: ElemPath, line: int) -> Self | "StatementSpan":
  +         scope = self._search_scope(path)
  +         if scope.header_line_range[0] <= line < scope.header_line_range[1]:
  +             return scope
  +         span = scope._search_span(line)
  +         return span or scope
  + 
===========changed ref 5===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
  +     def _search_scope(self, path: ElemPath) -> Self:
  +         """Find the scope that can potentially contain the given path. Follow the
  +         path segments until no more subscopes are found."""
  +         segs = path.split(".")
  +         scope = self
  +         for s in segs:
  +             if s in scope.subscopes:
  +                 scope = scope.subscopes[s]
  +             else:
  +                 break
  +         return scope
  + 
===========changed ref 6===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        """
        A change scope is a python module, a non-hidden function, or a non-hidden class.
            - functions and classes that are inside a parent function are considered hidden.
        """
    
        path: ProjectPath
        tree: ScopeTree
        spans: Sequence["StatementSpan"]
  +     subscopes: Mapping[str, Self]
  -     subscopes: Mapping[ProjectPath, Self]
  +     parent_scope: "ChangeScope | None"
    
===========changed ref 7===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @staticmethod
        def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
            spans = []
            subscopes = dict()
  +         scope = ChangeScope(path, tree, spans, subscopes, None)
  -         scope = ChangeScope(path, tree, spans, subscopes)
            assert isinstance(tree, ScopeTree)
            is_func = isinstance(tree, ptree.Function)
    
            current_stmts = []
            content = (
                tree.children
                if isinstance(tree, ptree.Module)
                else cast(ptree.PythonNode, tree.get_suite()).children
            )
            for s in content:
                # we don't create inner scopes for function contents
                if is_func or _is_scope_statement(as_any(s)):
                    current_stmts.append(s)
                else:
                    if current_stmts:
  +                     spans.append(StatementSpan(current_stmts, scope))
  -                     spans.append(StatementSpan(current_stmts))
                        current_stmts = []
            if current_stmts:
  +             spans.append(StatementSpan(current_stmts, scope))
  -             spans.append(StatementSpan(current_stmts))
    
            if is_func:
                # we don't create inner scopes for function contents
                return scope
            for stree in tree._search_in_scope(ptree.Function.type, ptree.Class.type):
                stree: ptree.Function | ptree.Class
  +             name = cast(ptree.Name, stree.name).value
  -             spath = path.append(cast(ptree.Name, stree.name).value)
  +             spath = path.append(name)
                subscope = ChangeScope.from_tree(spath, stree)
  +             subscope.parent_scope = scope
  +             subscopes[name] = subscope
  -             subscopes[spath] = subscope
            return scope
    
===========changed ref 8===========
    # module: coeditor.history
    @dataclass
    class Modified(_ChangeBase[E1]):
  +     @staticmethod
  +     def from_unchanged(v: T1) -> "Modified[T1]":
  +         return Modified(v, v, unchanged=True)
  + 
===========changed ref 9===========
    # module: coeditor.history
    @dataclass
    class Modified(_ChangeBase[E1]):
        before: E1
        after: E1
  +     unchanged: bool = False
    
===========changed ref 10===========
    # module: coeditor.encoding
    def change_to_line_diffs(change: Change[str]) -> list[str]:
        "Encode a change as a token sequence."
        match change:
            case Modified(before, after):
  +             if change.unchanged:
  +                 diffs = []
  +             else:
  +                 diffs = compute_line_diffs(splitlines(before), splitlines(after))
  -             diffs = compute_line_diffs(splitlines(before), splitlines(after))
                # rearrange_diffs_(diffs)
                if not diffs:
                    # as a special case, `unified_diff` would return an empty list when there is no change.
                    diffs = [" " + l for l in splitlines(before)]
            case Added(after):
                diffs = ["+" + l for l in splitlines(after)]
            case Deleted(before):
                diffs = ["-" + l for l in splitlines(before)]
            case _:
                raise ValueError(f"Invalid change type: {change}.")
        return diffs
     | 
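
The `ancestors` helper added above is what later lets each `ChangedSpan` carry its chain of enclosing scopes, from the module scope down to the changed element itself. A self-contained toy of the walk, with `Scope` as a simplified stand-in:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Scope:
        name: str
        parent_scope: "Optional[Scope]" = None

        def ancestors(self) -> list["Scope"]:
            # walk parent links upward, then reverse: module first, self last
            scope, result = self, [self]
            while scope := scope.parent_scope:
                result.append(scope)
            result.reverse()
            return result

    mod = Scope("mod")
    a = Scope("A", mod)
    m = Scope("method1", a)
    assert [s.name for s in m.ancestors()] == ["mod", "A", "method1"]
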
| 
	coeditor.ctx_change_encoder/CtxCodeChangeProblemGenerator.encode_change | 
	Modified | 
	temp-1 | 
	d0fe36e93da8ca33161e524783335898bd47ebbc | 
	- More analysis bug fix. - Encode unchanged using CSpan. | 
	 <0>:<add>                         result.append(cspan)
 | 
	      # module: coeditor.ctx_change_encoder
      class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
          def encode_change(
              self,
              pchange: JProjectChange,
              mod2usages: Mapping[ModuleName, LineUsageAnalysis],
              module_order: Sequence[ModuleName],
          ) -> Iterable[CtxCodeChangeProblem]:
      <s>("."))
                      if path is None:
                          continue
    +                 jmod = before_mod_map[path.module]
    +                 scope = jmod.as_scope
    +                 elem = scope._search_scope(path.path)
    +                 match elem:
    +                     case ChangeScope(path=path, tree=ptree.Function()):
    +                         ancestors = elem.ancestors()
    +                         body_code = "    " * len(ancestors) + "..."
    +                         h_end = elem.header_line_range[1]
    +                         cspan = ChangedSpan(
    +                             Modified.from_unchanged(body_code),
    +                             [Modified.from_unchanged(s) for s in ancestors],
    +                             _line_range(h_end, h_end + 1),
    +                         )
    +                         result.append(cspan)
    +                     case _:
    +                         cspan = ChangedSpan(
    +                             Modified.from_unchanged(f"path={path}"),
    +                             [Modified.from_unchanged(s) for s in elem.ancestors()],
    +                             _line_range(0, 1),
    +                         )
    -                 result.append((ProjectPath(used.full_name, ""), str(used)))
 <0>              return result
      
              sorted_cspans = list[ChangedSpan]()
              for m in module_order:
                  if (mchange := pchange.changed.get(m)) is None:
                      continue
                  for span in mchange.changed.values():
                      if span.change.as_char() == Modified.as_char():
                          yield CtxCodeChangeProblem(
                              span,
                              relevant_changes=sorted_cspans.copy(),
                              relevant_unchanged=_get_relevant(span),
                          )
                      sorted_cspans.append(span)
      
       | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def encode_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: -1
            before_mod_map = {m.mname: m for m in pchange.all_modules.before}
            mod_hier = ModuleHierarchy.from_modules(before_mod_map)
    
            def _get_relevant(span: ChangedSpan):
                if isinstance(span.change, Added):
                    # nothing to analyze
                    return []
                path = span.parent_scopes[-1].earlier().path
                line_usages = mod2usages[path.module]
                all_used = set[PyDefinition]()
                all_lines = set(range(*span.line_range))
                all_lines.update(range(*span.header_line_range))
                for l in all_lines:
                    for pydef in line_usages.line2usages.get(l, set()):
                        if (
  +                         pydef.full_name.startswith(path.module)
  -                         pydef.full_name.startswith(pydef.import_module)
                            and pydef.start_pos[0] in all_lines
                        ):
                            # skip self references
                            continue
                        all_used.add(pydef)
    
  +             result = list[ChangedSpan]()
  -             result = list[tuple[ProjectPath, str]]()
                for used in all_used:
                    path = mod_hier.resolve_path(used.full_name.split("."))
                    if path is None:
                        continue
  +                 jmod = before_mod_map[path.module]
    </s>
===========unchanged ref 0===========
    at: coeditor.code_change
        _line_range(start: int, end: int, can_be_empty: bool=False) -> _LineRange
    
        ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[ProjectPath, Self])
    
        ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: _LineRange)
    
    at: coeditor.code_change.ChangeScope
        path: ProjectPath
    
        tree: ScopeTree
    
        spans: Sequence["StatementSpan"]
    
        subscopes: Mapping[ProjectPath, Self]
    
    at: coeditor.code_change.ChangedSpan
        change: Change[str]
    
        parent_scopes: Sequence[Change[ChangeScope]]
    
        line_range: _LineRange
    
    at: coeditor.ctx_change_encoder
        CtxCodeChangeProblem(span: ChangedSpan, relevant_changes: list[ChangedSpan], relevant_unchanged: list[ChangedSpan])
    
        PyDefinition(full_name: PyFullName, import_module: ModuleName, start_pos: tuple[int, int], end_pos: tuple[int, int])
    
        LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
    
    at: coeditor.ctx_change_encoder.CtxCodeChangeProblemGenerator.post_edit_analysis
        module_order = sort_modules_by_imports(module_deps)
    
    at: coeditor.ctx_change_encoder.LineUsageAnalysis
        line2usages: Mapping[int, set[PyDefinition]]
    
    at: coeditor.ctx_change_encoder.PyDefinition
        full_name: PyFullName
    
        import_module: ModuleName
    
        start_pos: tuple[int, int]
    
        end_pos: tuple[int, int]
    
    at: parso.python.tree
        Function(children)
    
    at: spot.static_analysis
        ModuleName = str
    
        ModuleHierarchy()
    
    at: spot.static_analysis.ModuleHierarchy
        resolve_path(segs: list[str]) -> ProjectPath | None
    
    
===========unchanged ref 1===========
        from_modules(modules: Iterable[str]) -> "ModuleHierarchy"
    
    at: spot.static_analysis.ProjectPath
        module: ModuleName
    
        path: ElemPath
    
    at: typing
        Iterable = _alias(collections.abc.Iterable, 1)
    
        Mapping = _alias(collections.abc.Mapping, 2)
    
        Sequence = _alias(collections.abc.Sequence, 1)
    
    at: typing.Mapping
        get(key: _KT) -> Optional[_VT_co]
        get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    # jedi.cache.clear_time_caches = lambda: None
    
    
    @dataclass
    class CtxCodeChangeProblem:
        span: ChangedSpan
        # most relevant to least relevant
        relevant_changes: list[ChangedSpan]
        # most relevant to least relevant
  +     relevant_unchanged: list[ChangedSpan]
  -     relevant_unchanged: list[tuple[ProjectPath, str]]
    
===========changed ref 1===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        """
        A change scope is a python module, a non-hidden function, or a non-hidden class.
            - functions and classes that are inside a parent function are considered hidden.
        """
    
        path: ProjectPath
        tree: ScopeTree
        spans: Sequence["StatementSpan"]
  +     subscopes: Mapping[str, Self]
  -     subscopes: Mapping[ProjectPath, Self]
  +     parent_scope: "ChangeScope | None"
    
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    @dataclass(unsafe_hash=True)
    class PyDefinition:
  +     @staticmethod
  +     def from_name(name: classes.BaseName) -> Iterable["PyDefinition"]:
  +         cast(classes.Name, name).is_definition()
  +         if (
  +             not name.in_builtin_module()
  +             and (full_name := name.full_name)
  +             and (import_module := name.module_name)
  +             and (start_pos := name.get_definition_start_position())
  +             and (end_pos := name.get_definition_end_position())
  +         ):
  +             full_name = PyFullName(full_name)
  +             yield PyDefinition(full_name, import_module, start_pos, end_pos)
  + 
===========changed ref 3===========
    # module: coeditor.ctx_change_encoder
    @dataclass(unsafe_hash=True)
    class PyDefinition:
  -     @staticmethod
  -     def from_signatures(name: classes.BaseName) -> Iterable["PyDefinition"]:
  -         cast(classes.Name, name).is_definition()
  -         if (
  -             not name.in_builtin_module()
  -             and (full_name := name.full_name)
  -             and (import_module := name.module_name)
  -             and (start_pos := name.get_definition_start_position())
  -             and (end_pos := name.get_definition_end_position())
  -         ):
  -             full_name = PyFullName(full_name)
  -             yield PyDefinition(full_name, import_module, start_pos, end_pos)
  - 
===========changed ref 4===========
    # module: coeditor.history
    @dataclass
    class Modified(_ChangeBase[E1]):
  +     @staticmethod
  +     def from_unchanged(v: T1) -> "Modified[T1]":
  +         return Modified(v, v, unchanged=True)
  + 
===========changed ref 5===========
    # module: coeditor.history
    @dataclass
    class Modified(_ChangeBase[E1]):
        before: E1
        after: E1
  +     unchanged: bool = False
    
===========changed ref 6===========
    # module: coeditor.code_change
    @dataclass
    class JModule:
  +     @cached_property
  +     def as_scope(self) -> ChangeScope:
  +         return ChangeScope.from_tree(ProjectPath(self.mname, ""), self.tree)
  + 
===========changed ref 7===========
    # module: coeditor.code_change
    @dataclass
    class JModule:
  -     def _to_scope(self) -> ChangeScope:
  -         return ChangeScope.from_tree(ProjectPath(self.mname, ""), self.tree)
  -  | 
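
For an unchanged function that a changed span merely uses, the encoder above does not embed the whole body: it emits an indented "..." placeholder (one indent level per enclosing scope) and pins the span to the line right after the function's header, so the parent-scope headers carry the signal. A sketch of just that placeholder construction, with a plain tuple in place of `ChangedSpan`:

    def unchanged_function_span(ancestors: list[str], header_end: int):
        # body elided to an indented "...", span pinned just after the header
        body_code = "    " * len(ancestors) + "..."
        line_range = (header_end, header_end + 1)
        return body_code, line_range

    body, rng = unchanged_function_span(["mod", "A", "method1"], 42)
    assert body == "            ..." and rng == (42, 43)
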
| 
	coeditor.ctx_change_encoder/JediUsageAnalyzer.get_line_usages | 
	Modified | 
	temp-1 | 
	d0fe36e93da8ca33161e524783335898bd47ebbc | 
	- More analysis bug fix. - Encode unchanged using CSpan. | 
	 <0>:<add>                     usages.update(PyDefinition.from_name(d))
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class JediUsageAnalyzer:
          def get_line_usages(
              self,
              script: jedi.Script,
              proj_root: Path,
              lines_to_analyze: Collection[int],
              silent: bool = False,
          ):
              jmod: tree.Module = script._module_node
              line2usages = dict[int, set[PyDefinition]]()
              all_names = [
                  name for k, names in jmod.get_used_names()._dict.items() for name in names
              ]
              all_names.sort(key=lambda x: x.start_pos)
              errors = self.error_counts
              resolve_cache = dict[_ObjId, set[PyDefinition]]()
              for name in tqdm(all_names, f"Analyzing {script.path}", disable=silent):
                  name: tree.Name
                  line = name.start_pos[0]
                  if line not in lines_to_analyze:
                      continue
                  usages = line2usages.setdefault(line, set())
                  try:
                      defs = fast_goto(
                          script,
                          name,
                          follow_imports=self.follow_imports,
                          follow_builtin_imports=False,
                      )
                      for d in defs:
    -                     usages.update(PyDefinition.from_signatures(d))
 <0>  
                  except (AttributeError, AssertionError) as e:
                      text = repr(e)
                      errors[text] = errors.setdefault(text, 0) + 1
                  except ValueError as e:
                      # if the message is "not enough values to unpack"
                      if "not enough values to unpack (expected 2" in str(e):
                          errors[repr(e)] = errors.setdefault(repr(e), 0) + 1
                      else:
                          raise
              return LineUsageAnalysis(line2usages)
      
       | 
	===========unchanged ref 0===========
    at: coeditor._utils
        TimeLogger(times: dict[str, list[float]]=field(default_factory=dict))
    
    at: coeditor.ctx_change_encoder
        PyDefinition(full_name: PyFullName, import_module: ModuleName, start_pos: tuple[int, int], end_pos: tuple[int, int])
    
        _ObjId = NewType("_ObjId", int)
    
        fast_goto(script: jedi.Script, tree_name: tree.Name, *, follow_imports=False, follow_builtin_imports=False, only_stubs=False, prefer_stubs=False) -> set[classes.Name]
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder
        max_chunks_per_ref: int = 4
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder._group_encode_changed_refs
        all_chunks = list[TokenSeq]()
    
        mod_chunks = break_into_chunks(
                        file_tks,
                        lambda i: self._encode_parent_scopes(mod_change, i),
                        self.max_ref_tks,
                        overlap=self.ref_chunk_overlap,
                        max_return_chunks=self.max_chunks_per_ref,
                    )
    
    at: dataclasses
        dataclass(_cls: Type[_T]) -> Type[_T]
        dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
        dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
    
    at: jedi.api
        Script(code=None, *, path=None, environment=None, project=None)
    
    at: jedi.api.Script.__init__
        self.path = path.absolute() if path else None
    
    
===========unchanged ref 1===========
        self._module_node, code = self._inference_state.parse_and_get_code(
                    code=code,
                    path=self.path,
                    use_latest_grammar=path and path.suffix == '.pyi',
                    cache=False,  # No disk cache, because the current script often changes.
                    diff_cache=settings.fast_parser,
                    cache_path=settings.cache_directory,
                )
    
    at: parso.python.tree
        Name()
    
        Module(children)
    
    at: parso.python.tree.Module
        __slots__ = ('_used_names',)
    
        type = 'file_input'
    
        get_used_names()
    
    at: parso.python.tree.UsedNamesMapping.__init__
        self._dict = dct
    
    at: parso.tree.Leaf.__init__
        self.start_pos = start_pos
    
    at: pathlib
        Path()
    
    at: tqdm.std
        tqdm(iterable=None, desc=None, total=None, leave=True, file=None, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None, unit_divisor=1000, write_bytes=False, lock_args=None, nrows=None, colour=None, delay=0, gui=False, **kwargs)
    
    at: typing
        Collection = _alias(collections.abc.Collection, 1)
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeEncoder:
  +     def _group_encode_unchanged_refs(
  +         self, elems: Sequence[ChangedSpan]
  +     ) -> Sequence[TokenSeq]:
  +         return self._group_encode_changed_refs(elems)
  + 
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeEncoder:
  -     def _encode_unchanged_ref(
  -         self, path: ProjectPath, content: str
  -     ) -> Iterable[TokenSeq]:
  -         if (key := (path, content)) in self._value_cache:
  -             return self._value_cache[key]
  -         main_tks = encode_basic(f"#{str(path)}\n{content}")
  -         ref_chunks = (truncate_section(main_tks, TruncateAt.Right, self.max_ref_tks),)
  -         self._value_cache[key] = ref_chunks
  -         return ref_chunks
  - 
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeEncoder:
        def _encode_parent_scopes(
            self, scope_changes: Sequence[Change[ChangeScope]], offset: int
        ) -> TokenSeq:
            scope_tks = join_list((self._encode_scope_change(c) for c in scope_changes))
            if offset != 0:
  +             scope_tks.extend(encode_basic(f"# offset: {offset}\n"))
  -             ending = encode_basic(f"\n# offset: {offset}\n")
  -         else:
  -             ending = [Newline_id]
  -         scope_tks = truncate_section(
  +         scope_tks = truncate_section(scope_tks, TruncateAt.Left, self.max_scope_tks)
  -             scope_tks + ending, TruncateAt.Left, self.max_scope_tks
  -         )
            return scope_tks
    
===========changed ref 3===========
    # module: coeditor.ctx_change_encoder
    # jedi.cache.clear_time_caches = lambda: None
    
    
    @dataclass
    class CtxCodeChangeProblem:
        span: ChangedSpan
        # most relevant to least relevant
        relevant_changes: list[ChangedSpan]
        # most relevant to least relevant
  +     relevant_unchanged: list[ChangedSpan]
  -     relevant_unchanged: list[tuple[ProjectPath, str]]
    
===========changed ref 4===========
    # module: coeditor.ctx_change_encoder
    @dataclass(unsafe_hash=True)
    class PyDefinition:
  +     @staticmethod
  +     def from_name(name: classes.BaseName) -> Iterable["PyDefinition"]:
  +         cast(classes.Name, name).is_definition()
  +         if (
  +             not name.in_builtin_module()
  +             and (full_name := name.full_name)
  +             and (import_module := name.module_name)
  +             and (start_pos := name.get_definition_start_position())
  +             and (end_pos := name.get_definition_end_position())
  +         ):
  +             full_name = PyFullName(full_name)
  +             yield PyDefinition(full_name, import_module, start_pos, end_pos)
  + 
===========changed ref 5===========
    # module: coeditor.ctx_change_encoder
    @dataclass(unsafe_hash=True)
    class PyDefinition:
  -     @staticmethod
  -     def from_signatures(name: classes.BaseName) -> Iterable["PyDefinition"]:
  -         cast(classes.Name, name).is_definition()
  -         if (
  -             not name.in_builtin_module()
  -             and (full_name := name.full_name)
  -             and (import_module := name.module_name)
  -             and (start_pos := name.get_definition_start_position())
  -             and (end_pos := name.get_definition_end_position())
  -         ):
  -             full_name = PyFullName(full_name)
  -             yield PyDefinition(full_name, import_module, start_pos, end_pos)
  - 
===========changed ref 6===========
    # module: coeditor.history
    @dataclass
    class Modified(_ChangeBase[E1]):
  +     @staticmethod
  +     def from_unchanged(v: T1) -> "Modified[T1]":
  +         return Modified(v, v, unchanged=True)
  + 
===========changed ref 7===========
    # module: coeditor.history
    @dataclass
    class Modified(_ChangeBase[E1]):
        before: E1
        after: E1
  +     unchanged: bool = False
     | 
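
`JediUsageAnalyzer` above drives an internal `fast_goto` helper, but the shape of the analysis is plain jedi: resolve every syntactic name on the lines of interest to its definition(s) and bucket them per line. A minimal sketch against jedi's public API (the printed output is illustrative, not exact):

    import jedi

    code = "import os\np = os.path.join('a', 'b')\n"
    script = jedi.Script(code)
    line2usages: dict[int, set[str]] = {}
    for name in script.get_names(all_scopes=True, references=True):
        # resolve each name occurrence to its definition(s)
        for d in script.goto(name.line, name.column, follow_imports=True):
            if d.full_name and not d.in_builtin_module():
                line2usages.setdefault(name.line, set()).add(d.full_name)
    print(line2usages)  # e.g. {1: {'os'}, 2: {'os', 'os.path.join', ...}}
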
| 
	coeditor.code_change/ChangeScope.__post_init__ | 
	Modified | 
	temp-1 | 
	d58fffa6f57746cb134bd98b5de3a8e668c4311d | 
	Finish unchanged ref encoding. | 
	 <0>:<add>         self.header_line_range: LineRange = header_line_range
 | 
	      # module: coeditor.code_change
      @dataclass
      class ChangeScope:
          def __post_init__(self):
      <s> = f"# module: {self.path.module}"
    +             header_line_range = line_range(0, 0, can_be_empty=True)
    -             header_line_range = _line_range(0, 0, can_be_empty=True)
              else:
                  h_start, h_end = 0, 0
                  tree = self.tree
                  to_visit = list[NodeOrLeaf]()
                  parent = not_none(tree.parent)
                  while parent.type in ("decorated", "async_funcdef"):
                      to_visit.insert(0, parent.children[0])
                      parent = not_none(parent.parent)
                  to_visit.extend(tree.children)
                  visited = list[NodeOrLeaf]()
                  for c in to_visit:
                      if c.type == "suite":
                          break
                      visited.append(c)
                  header_code = "".join(cast(str, c.get_code()) for c in visited)
                  header_code, _, e_right = _strip_empty_lines(header_code)
                  h_start = visited[0].start_pos[0]
                  h_end = visited[-1].end_pos[0] + 1 - e_right
                  assert_eq(count_lines(header_code), h_end - h_start)
    +             header_line_range = line_range(h_start, h_end)
    -             header_line_range = _line_range(h_start, h_end)
                  if self.spans and h_end > self.spans[0].line_range[0]:
                      raise ValueError(
                          f"Header covers the fisrt span: {self.path=}, {h_start=}, {h_end=} "
                          f"{self.spans[0].line_range=}"
                      )
      
              self.header_code: str = header_code + "\n"
    -         self.header_line_range: _LineRange = header_line_range
 <0>  
       | 
	===========above chunk 0===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        def __post_init__(self):
    # offset: -1
            # compute header
            if isinstance(self.tree, ptree.Module):
                header_code = f"# module: {self.path.module}"
  +             header_line_range = line_range(0, 0,</s>
===========unchanged ref 0===========
    at: coeditor._utils
        not_none(x: Optional[T1]) -> T1
    
        assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
    
    at: coeditor.code_change
        LineRange = NewType("LineRange", tuple[int, int])
    
        line_range(start: int, end: int, can_be_empty: bool=False) -> LineRange
    
        _strip_empty_lines(s: str)
    
    at: coeditor.code_change.ChangeScope
        path: ProjectPath
    
        tree: ScopeTree
    
        spans: Sequence["StatementSpan"]
    
        subscopes: Mapping[str, Self]
    
        parent_scope: "ChangeScope | None"
    
    at: coeditor.code_change.StatementSpan.__post_init__
        self.line_range: LineRange = line_range(start, end)
    
    at: coeditor.common
        count_lines(text: str) -> int
    
    at: parso.python.tree
        Module(children)
    
    at: parso.tree
        NodeOrLeaf()
    
    at: parso.tree.BaseNode.__init__
        self.children = children
    
        self.parent: Optional[BaseNode] = None
    
    at: parso.tree.NodeOrLeaf
        __slots__ = ('parent',)
    
        type: str
    
        parent: 'Optional[BaseNode]'
    
        get_code(include_prefix=True)
    
    at: spot.static_analysis.ProjectPath
        module: ModuleName
    
        path: ElemPath
    
    at: typing
        cast(typ: Type[_T], val: Any) -> _T
        cast(typ: str, val: Any) -> Any
        cast(typ: object, val: Any) -> Any
    
    
===========changed ref 0===========
    # module: coeditor.code_change
  - def _line_range(start: int, end: int, can_be_empty: bool = False) -> _LineRange:
  -     if not can_be_empty and start >= end:
  -         raise ValueError(f"Bad line range: {start=}, {end=}")
  -     return _LineRange((start, end))
  - 
===========changed ref 1===========
    # module: coeditor.code_change
    ScopeTree = ptree.Function | ptree.Class | ptree.Module
    PyNode = ptree.PythonBaseNode | ptree.PythonNode
  + LineRange = NewType("LineRange", tuple[int, int])
  - _LineRange = NewType("LineRange", tuple[int, int])
    
    _tlogger = TimeLogger()
    
===========changed ref 2===========
    # module: tests.coeditor.test_edits
    class TestChangeIdentities:
        cases = {
            "empty": Modified("", ""),
            "generation": Modified("", "123"),
            "no change": Modified(
                dedent(
                    """\
                    def f1():
                        x = 1
                    """
                ),
                dedent(
                    """\
                    def f1():
                        x = 1
                    """
                ),
            ),
  +         "unchanged=True": Modified.from_unchanged(
  +             dedent(
  +                 """\
  +                 def f1():
  +                     x = 1
  +                 """
  +             ),
  +         ),
  +         # this test case cannot pass for some reason. Tokenizer bug?
  +         # "leading_whitespace": Modified.from_unchanged("    ..."),
            "replace last": Modified(
                dedent(
                    """\
                    def f1():
                        x = 1"""
                ),
                dedent(
                    """\
                    def f1():
                        x = 2
                        return x * 2"""
                ),
            ),
            "no special tokens": Modified(
                dedent(
                    """\
                    def f1():
                        x = 1
                        y = 2
                        z = x + y
                        return z
    
                    def f2():
                        f1()"""
                ),
                dedent(
                    """\
                    # new comment
                    def f_new():
                        x = 1
                        if x > 0:
                            y = 2 * x
                        y *= 2
                        z = x + y
                        return z
    
                    def f2():
                        f1()
                        return f_new() + a
                    
                    new_var = 0
                    """
                ),
            ),
            "with special tokens": Modified(
                dedent(
                    """\
                    def f1():
                        x = "<add>"
                        y = "<del>\tx"
                        return x + y
    
                    """
                ),
                dedent(
                    """\
                    # new comment 1
                    # new comment 2
                    def f1():
                        if newcond:
                            x = "<add</s>
===========changed ref 3===========
    # module: tests.coeditor.test_edits
    class TestChangeIdentities:
    # offset: 1
    <s>
                    # new comment 1
                    # new comment 2
                    def f1():
                        if newcond:
                            x = "<add>"
                        new_var = 5
                        y = "<del>"
                        return x + new_var + y
                    """
                ),
            ),
            "super long": Modified(
                "\n".join(f"x = {i}" for i in range(0, 200)),
                "\n".join(f"x = {2* (i // 2)}" for i in range(0, 200)),
            ),
        }
     | 
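
`__post_init__` above leans on `_strip_empty_lines`, whose body is not shown in this record; from the call sites (`h_end = visited[-1].end_pos[0] + 1 - e_right`) it evidently returns the stripped code together with the number of blank lines removed on each side. The sketch below is a guess at that contract, an assumption rather than the project's implementation:

    def strip_empty_lines(s: str) -> tuple[str, int, int]:
        # returns (stripped_code, n_blank_left, n_blank_right)
        lines = s.split("\n")
        left = 0
        while left < len(lines) and not lines[left].strip():
            left += 1
        right = 0
        while right < len(lines) - left and not lines[-1 - right].strip():
            right += 1
        return "\n".join(lines[left : len(lines) - right]), left, right

    code, e_left, e_right = strip_empty_lines("\n\ndef f():\n    pass\n\n")
    assert (code, e_left, e_right) == ("def f():\n    pass", 2, 2)
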
| 
	coeditor.code_change/ChangeScope.from_tree | 
	Modified | 
	temp-1 | 
	d58fffa6f57746cb134bd98b5de3a8e668c4311d | 
	Finish unchanged ref encoding. | 
	 <0>:<add>             spans.append(StatementSpan(len(spans), current_stmts, scope))
 | 
	      # module: coeditor.code_change
      @dataclass
      class ChangeScope:
          @staticmethod
          def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
              spans = []
              subscopes = dict()
              scope = ChangeScope(path, tree, spans, subscopes, None)
              assert isinstance(tree, ScopeTree)
              is_func = isinstance(tree, ptree.Function)
      
              current_stmts = []
              content = (
                  tree.children
                  if isinstance(tree, ptree.Module)
                  else cast(ptree.PythonNode, tree.get_suite()).children
              )
              for s in content:
                  # we don't create inner scopes for function contents
                  if is_func or _is_scope_statement(as_any(s)):
                      current_stmts.append(s)
                  else:
                      if current_stmts:
    +                     spans.append(StatementSpan(len(spans), current_stmts, scope))
    -                     spans.append(StatementSpan(current_stmts, scope))
                          current_stmts = []
              if current_stmts:
    -             spans.append(StatementSpan(current_stmts, scope))
 <0>  
              if is_func:
                  # we don't create inner scopes for function contents
                  return scope
              for stree in tree._search_in_scope(ptree.Function.type, ptree.Class.type):
                  stree: ptree.Function | ptree.Class
                  name = cast(ptree.Name, stree.name).value
                  spath = path.append(name)
                  subscope = ChangeScope.from_tree(spath, stree)
                  subscope.parent_scope = scope
                  subscopes[name] = subscope
              return scope
      
       | 
	===========unchanged ref 0===========
    at: coeditor._utils
        as_any(x) -> Any
    
    at: coeditor.code_change
        ScopeTree = ptree.Function | ptree.Class | ptree.Module
    
        ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None")
    
        _is_scope_statement(stmt: PyNode) -> bool
    
        StatementSpan(nth_in_parent: int, statements: Sequence[PyNode], scope: ChangeScope)
    
    at: coeditor.code_change.ChangeScope
        parent_scope: "ChangeScope | None"
    
    at: parso.python.tree
        PythonNode()
    
        Name()
    
        Module(children)
    
        Class(children)
    
        Function(children)
    
    at: parso.python.tree.Class
        type = 'classdef'
    
        __slots__ = ()
    
    at: parso.python.tree.ClassOrFunc
        __slots__ = ()
    
    at: parso.python.tree.Function
        type = 'funcdef'
    
    at: parso.python.tree.Scope
        __slots__ = ()
    
        _search_in_scope(*names)
    
        get_suite()
    
    at: parso.tree.BaseNode.__init__
        self.children = children
    
    at: parso.tree.Leaf.__init__
        self.value = value
    
    at: spot.static_analysis
        ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
    
    at: spot.static_analysis.ProjectPath
        append(path: ElemPath) -> "ProjectPath"
    
    at: typing
        cast(typ: Type[_T], val: Any) -> _T
        cast(typ: str, val: Any) -> Any
        cast(typ: object, val: Any) -> Any
    
    
===========changed ref 0===========
    # module: coeditor.code_change
  - def _line_range(start: int, end: int, can_be_empty: bool = False) -> _LineRange:
  -     if not can_be_empty and start >= end:
  -         raise ValueError(f"Bad line range: {start=}, {end=}")
  -     return _LineRange((start, end))
  - 
===========changed ref 1===========
    # module: coeditor.code_change
    ScopeTree = ptree.Function | ptree.Class | ptree.Module
    PyNode = ptree.PythonBaseNode | ptree.PythonNode
  + LineRange = NewType("LineRange", tuple[int, int])
  - _LineRange = NewType("LineRange", tuple[int, int])
    
    _tlogger = TimeLogger()
    
===========changed ref 2===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        def __post_init__(self):
            # compute header
            if isinstance(self.tree, ptree.Module):
                header_code = f"# module: {self.path.module}"
  +             header_line_range = line_range(0, 0, can_be_empty=True)
  -             header_line_range = _line_range(0, 0, can_be_empty=True)
            else:
                h_start, h_end = 0, 0
                tree = self.tree
                to_visit = list[NodeOrLeaf]()
                parent = not_none(tree.parent)
                while parent.type in ("decorated", "async_funcdef"):
                    to_visit.insert(0, parent.children[0])
                    parent = not_none(parent.parent)
                to_visit.extend(tree.children)
                visited = list[NodeOrLeaf]()
                for c in to_visit:
                    if c.type == "suite":
                        break
                    visited.append(c)
                header_code = "".join(cast(str, c.get_code()) for c in visited)
                header_code, _, e_right = _strip_empty_lines(header_code)
                h_start = visited[0].start_pos[0]
                h_end = visited[-1].end_pos[0] + 1 - e_right
                assert_eq(count_lines(header_code), h_end - h_start)
  +             header_line_range = line_range(h_start, h_end)
  -             header_line_range = _line_range(h_start, h_end)
                if self.spans and h_end > self.spans[0].line_range[0]:
                    raise ValueError(
                        f"Header covers the fisrt span: {self.path=}, {h_start=}, {h_end=} "
                        f"{self.spans[0].line_range=}"
                    )
    
            self.header_code: str = header_code + "\n"</s>
===========changed ref 3===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        def __post_init__(self):
    # offset: 1
    <s>spans[0].line_range=}"
                    )
    
            self.header_code: str = header_code + "\n"
  +         self.header_line_range: LineRange = header_line_range
  -         self.header_line_range: _LineRange = header_line_range
    
===========changed ref 4===========
    # module: tests.coeditor.test_edits
    class TestChangeIdentities:
        cases = {
            "empty": Modified("", ""),
            "generation": Modified("", "123"),
            "no change": Modified(
                dedent(
                    """\
                    def f1():
                        x = 1
                    """
                ),
                dedent(
                    """\
                    def f1():
                        x = 1
                    """
                ),
            ),
  +         "unchanged=True": Modified.from_unchanged(
  +             dedent(
  +                 """\
  +                 def f1():
  +                     x = 1
  +                 """
  +             ),
  +         ),
  +         # this test case cannot pass for some reason. Tokenizer bug?
  +         # "leading_whitespace": Modified.from_unchanged("    ..."),
            "replace last": Modified(
                dedent(
                    """\
                    def f1():
                        x = 1"""
                ),
                dedent(
                    """\
                    def f1():
                        x = 2
                        return x * 2"""
                ),
            ),
            "no special tokens": Modified(
                dedent(
                    """\
                    def f1():
                        x = 1
                        y = 2
                        z = x + y
                        return z
    
                    def f2():
                        f1()"""
                ),
                dedent(
                    """\
                    # new comment
                    def f_new():
                        x = 1
                        if x > 0:
                            y = 2 * x
                        y *= 2
                        z = x + y
                        return z
    
                    def f2():
                        f1()
                        return f_new() + a
                    
                    new_var = 0
                    """
                ),
            ),
            "with special tokens": Modified(
                dedent(
                    """\
                    def f1():
                        x = "<add>"
                        y = "<del>\tx"
                        return x + y
    
                    """
                ),
                dedent(
                    """\
                    # new comment 1
                    # new comment 2
                    def f1():
                        if newcond:
                            x = "<add</s> | 
| 
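For reference, the rename in the refs above turns the private `_line_range` helper into a public `line_range` while keeping the `LineRange` newtype. A minimal sketch of the resulting helper, assuming the validation body is unchanged from the deleted `_line_range` shown in the diff:

    from typing import NewType

    LineRange = NewType("LineRange", tuple[int, int])

    def line_range(start: int, end: int, can_be_empty: bool = False) -> LineRange:
        # Reject inverted (or, unless explicitly allowed, empty) ranges early.
        if not can_be_empty and start >= end:
            raise ValueError(f"Bad line range: {start=}, {end=}")
        return LineRange((start, end))

    line_range(3, 7)                     # a LineRange covering lines [3, 7)
    line_range(0, 0, can_be_empty=True)  # empty range, as used for module headers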
	coeditor.code_change/StatementSpan.__post_init__ | 
	Modified | 
	temp-1 | 
	d58fffa6f57746cb134bd98b5de3a8e668c4311d | 
	Finish unchanged ref encoding. | 
	 <0>:<add>             self.line_range: LineRange = line_range(start, end)
 | 
	      # module: coeditor.code_change
      @dataclass
      class StatementSpan:
          def __post_init__(self):
              assert self.statements
    +         # remove leading newlines
    +         n_leading_newlines = 0
    +         stmts = self.statements
    +         for s in stmts:
    +             if s.type == ptree.Newline.type:
    +                 n_leading_newlines += 1
    +             else:
    +                 break
    +         if n_leading_newlines:
    +             self.statements = stmts[n_leading_newlines:]
    + 
              origin_code = "".join(s.get_code() for s in self.statements)
              code, _, e_right = _strip_empty_lines(origin_code)
              start = self.statements[0].start_pos[0]
              end = self.statements[-1].end_pos[0] + 1 - e_right
      
              self.code: str = code + "\n"
              try:
    -             self.line_range: _LineRange = _line_range(start, end)
 <0>          except ValueError:
                  print_err(f"{origin_code=}, {e_right=}, {start=}, {end=}")
                  raise
      
       | 
	===========unchanged ref 0===========
    at: coeditor.code_change.StatementSpan
        nth_in_parent: int
    
        statements: Sequence[PyNode]
    
        scope: ChangeScope
    
    at: parso.python.tree
        Newline()
    
    at: parso.python.tree.Newline
        __slots__ = ()
    
        type = 'newline'
    
    
===========changed ref 0===========
    # module: coeditor.code_change
    @dataclass
    class StatementSpan:
        """
        A statement span is a set of lines inside the same change scope. It is the basic unit of code changes handled by our model.
            - For a modified function, the span is the function itself.
            - For a modified module, the spans are the regions between the functions and classes plus
            the spans recursively generated.
            - For a modified class, the spans are the regions between methods plus
            the spans recursively generated.
        """
    
  +     nth_in_parent: int
        statements: Sequence[PyNode]
        scope: ChangeScope
    
===========changed ref 1===========
    # module: coeditor.code_change
  - def _line_range(start: int, end: int, can_be_empty: bool = False) -> _LineRange:
  -     if not can_be_empty and start >= end:
  -         raise ValueError(f"Bad line range: {start=}, {end=}")
  -     return _LineRange((start, end))
  - 
===========changed ref 2===========
    # module: coeditor.code_change
  - def _line_range(start: int, end: int, can_be_empty: bool = False) -> _LineRange:
  -     if not can_be_empty and start >= end:
  -         raise ValueError(f"Bad line range: {start=}, {end=}")
  -     return _LineRange((start, end))
  - 
===========changed ref 3===========
    # module: coeditor.code_change
    ScopeTree = ptree.Function | ptree.Class | ptree.Module
    PyNode = ptree.PythonBaseNode | ptree.PythonNode
  + LineRange = NewType("LineRange", tuple[int, int])
  - _LineRange = NewType("LineRange", tuple[int, int])
    
    _tlogger = TimeLogger()
    
===========changed ref 4===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @staticmethod
        def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
            spans = []
            subscopes = dict()
            scope = ChangeScope(path, tree, spans, subscopes, None)
            assert isinstance(tree, ScopeTree)
            is_func = isinstance(tree, ptree.Function)
    
            current_stmts = []
            content = (
                tree.children
                if isinstance(tree, ptree.Module)
                else cast(ptree.PythonNode, tree.get_suite()).children
            )
            for s in content:
                # we don't create inner scopes for function contents
                if is_func or _is_scope_statement(as_any(s)):
                    current_stmts.append(s)
                else:
                    if current_stmts:
  +                     spans.append(StatementSpan(len(spans), current_stmts, scope))
  -                     spans.append(StatementSpan(current_stmts, scope))
                        current_stmts = []
            if current_stmts:
  +             spans.append(StatementSpan(len(spans), current_stmts, scope))
  -             spans.append(StatementSpan(current_stmts, scope))
    
            if is_func:
                # we don't create inner scopes for function contents
                return scope
            for stree in tree._search_in_scope(ptree.Function.type, ptree.Class.type):
                stree: ptree.Function | ptree.Class
                name = cast(ptree.Name, stree.name).value
                spath = path.append(name)
                subscope = ChangeScope.from_tree(spath, stree)
                subscope.parent_scope = scope
                subscopes[name] = subscope
            return scope
    
===========changed ref 5===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        def __post_init__(self):
            # compute header
            if isinstance(self.tree, ptree.Module):
                header_code = f"# module: {self.path.module}"
  +             header_line_range = line_range(0, 0, can_be_empty=True)
  -             header_line_range = _line_range(0, 0, can_be_empty=True)
            else:
                h_start, h_end = 0, 0
                tree = self.tree
                to_visit = list[NodeOrLeaf]()
                parent = not_none(tree.parent)
                while parent.type in ("decorated", "async_funcdef"):
                    to_visit.insert(0, parent.children[0])
                    parent = not_none(parent.parent)
                to_visit.extend(tree.children)
                visited = list[NodeOrLeaf]()
                for c in to_visit:
                    if c.type == "suite":
                        break
                    visited.append(c)
                header_code = "".join(cast(str, c.get_code()) for c in visited)
                header_code, _, e_right = _strip_empty_lines(header_code)
                h_start = visited[0].start_pos[0]
                h_end = visited[-1].end_pos[0] + 1 - e_right
                assert_eq(count_lines(header_code) == h_end - h_start)
  +             header_line_range = line_range(h_start, h_end)
  -             header_line_range = _line_range(h_start, h_end)
                if self.spans and h_end > self.spans[0].line_range[0]:
                    raise ValueError(
                        f"Header covers the fisrt span: {self.path=}, {h_start=}, {h_end=} "
                        f"{self.spans[0].line_range=}"
                    )
    
            self.header_code: str = header_code + "\n"</s>
===========changed ref 6===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        def __post_init__(self):
    # offset: 1
    <s>spans[0].line_range=}"
                    )
    
            self.header_code: str = header_code + "\n"
  +         self.header_line_range: LineRange = header_line_range
  -         self.header_line_range: _LineRange = header_line_range
     | 
| 
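The `__post_init__` fix above filters out leading `Newline` nodes before computing a span's code and line range. A standalone sketch of the same filtering against parso's tree; the suite layout noted in the comments is my assumption about parso's output, not taken from this diff:

    import parso
    from parso.python import tree as ptree

    def drop_leading_newlines(stmts):
        # Skip Newline nodes at the front so the span starts at real code,
        # mirroring the new loop in StatementSpan.__post_init__.
        n = 0
        for s in stmts:
            if s.type == ptree.Newline.type:
                n += 1
            else:
                break
        return stmts[n:]

    suite = parso.parse("def f():\n    x = 1\n").children[0].get_suite()
    print([s.type for s in suite.children])                         # ['newline', 'simple_stmt']
    print([s.type for s in drop_leading_newlines(suite.children)])  # ['simple_stmt']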
	coeditor.ctx_change_encoder/PyDefinition.from_name | 
	Modified | 
	temp-1 | 
	d58fffa6f57746cb134bd98b5de3a8e668c4311d | 
	Finish unchanged ref encoding. | 
	 <0>:<add>                 raise ValueError(f"Inconsistent module: {full_name=}, {import_module=}")
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass(unsafe_hash=True)
      class PyDefinition:
          @staticmethod
          def from_name(name: classes.BaseName) -> Iterable["PyDefinition"]:
    -         cast(classes.Name, name).is_definition()
              if (
                  not name.in_builtin_module()
                  and (full_name := name.full_name)
                  and (import_module := name.module_name)
                  and (start_pos := name.get_definition_start_position())
                  and (end_pos := name.get_definition_end_position())
              ):
                  full_name = PyFullName(full_name)
    +             if not full_name.startswith(import_module):
 <0>              yield PyDefinition(full_name, import_module, start_pos, end_pos)
      
       | 
	===========unchanged ref 0===========
    at: coeditor.ctx_change_encoder
        PyFullName = NewType("PyFullName", str)
    
    at: coeditor.ctx_change_encoder.PyDefinition
        full_name: PyFullName
    
        import_module: ModuleName
    
        start_pos: tuple[int, int]
    
        end_pos: tuple[int, int]
    
    at: jedi.api.classes
        BaseName(inference_state, name)
    
    at: jedi.api.classes.BaseName
        _mapping = {
                'posixpath': 'os.path',
                'riscospath': 'os.path',
                'ntpath': 'os.path',
                'os2emxpath': 'os.path',
                'macpath': 'os.path',
                'genericpath': 'os.path',
                'posix': 'os',
                '_io': 'io',
                '_functools': 'functools',
                '_collections': 'collections',
                '_socket': 'socket',
                '_sqlite3': 'sqlite3',
            }
    
        _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
                'argparse._ActionsContainer': 'argparse.ArgumentParser',
            }.items())
    
        in_builtin_module()
    
        get_definition_start_position()
    
        get_definition_end_position()
    
    at: typing
        Iterable = _alias(collections.abc.Iterable, 1)
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
  + PyFullName = NewType("PyFullName", str)
  - PyFullName = NewType("PyPathStr", str)
    
===========changed ref 1===========
    # module: coeditor.code_change
  - def _line_range(start: int, end: int, can_be_empty: bool = False) -> _LineRange:
  -     if not can_be_empty and start >= end:
  -         raise ValueError(f"Bad line range: {start=}, {end=}")
  -     return _LineRange((start, end))
  - 
===========changed ref 2===========
    # module: coeditor.code_change
  - def _line_range(start: int, end: int, can_be_empty: bool = False) -> _LineRange:
  -     if not can_be_empty and start >= end:
  -         raise ValueError(f"Bad line range: {start=}, {end=}")
  -     return _LineRange((start, end))
  - 
===========changed ref 3===========
    # module: coeditor.code_change
    @dataclass
    class ChangedSpan:
        "Represents the changes made to a statement span."
        change: Change[str]
        parent_scopes: Sequence[Change[ChangeScope]]
  +     line_range: LineRange
  -     line_range: _LineRange
    
===========changed ref 4===========
    # module: coeditor.code_change
    ScopeTree = ptree.Function | ptree.Class | ptree.Module
    PyNode = ptree.PythonBaseNode | ptree.PythonNode
  + LineRange = NewType("LineRange", tuple[int, int])
  - _LineRange = NewType("LineRange", tuple[int, int])
    
    _tlogger = TimeLogger()
    
===========changed ref 5===========
    # module: coeditor.code_change
    @dataclass
    class StatementSpan:
        """
        A statement span is a set of lines inside the same change scope. It is the basic unit of code changes handled by our model.
            - For a modified function, the span is the function itself.
            - For a modified module, the spans are the regions between the functions and classes plus
            the spans recursively generated.
            - For a modified class, the spans are the regions between methods plus
            the spans recursively generated.
        """
    
  +     nth_in_parent: int
        statements: Sequence[PyNode]
        scope: ChangeScope
    
===========changed ref 6===========
    # module: coeditor.code_change
    @dataclass
    class StatementSpan:
        def __post_init__(self):
            assert self.statements
  +         # remove leading newlines
  +         n_leading_newlines = 0
  +         stmts = self.statements
  +         for s in stmts:
  +             if s.type == ptree.Newline.type:
  +                 n_leading_newlines += 1
  +             else:
  +                 break
  +         if n_leading_newlines:
  +             self.statements = stmts[n_leading_newlines:]
  + 
            origin_code = "".join(s.get_code() for s in self.statements)
            code, _, e_right = _strip_empty_lines(origin_code)
            start = self.statements[0].start_pos[0]
            end = self.statements[-1].end_pos[0] + 1 - e_right
    
            self.code: str = code + "\n"
            try:
  +             self.line_range: LineRange = line_range(start, end)
  -             self.line_range: _LineRange = _line_range(start, end)
            except ValueError:
                print_err(f"{origin_code=}, {e_right=}, {start=}, {end=}")
                raise
    
===========changed ref 7===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @staticmethod
        def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
            spans = []
            subscopes = dict()
            scope = ChangeScope(path, tree, spans, subscopes, None)
            assert isinstance(tree, ScopeTree)
            is_func = isinstance(tree, ptree.Function)
    
            current_stmts = []
            content = (
                tree.children
                if isinstance(tree, ptree.Module)
                else cast(ptree.PythonNode, tree.get_suite()).children
            )
            for s in content:
                # we don't create inner scopes for function contents
                if is_func or _is_scope_statement(as_any(s)):
                    current_stmts.append(s)
                else:
                    if current_stmts:
  +                     spans.append(StatementSpan(len(spans), current_stmts, scope))
  -                     spans.append(StatementSpan(current_stmts, scope))
                        current_stmts = []
            if current_stmts:
  +             spans.append(StatementSpan(len(spans), current_stmts, scope))
  -             spans.append(StatementSpan(current_stmts, scope))
    
            if is_func:
                # we don't create inner scopes for function contents
                return scope
            for stree in tree._search_in_scope(ptree.Function.type, ptree.Class.type):
                stree: ptree.Function | ptree.Class
                name = cast(ptree.Name, stree.name).value
                spath = path.append(name)
                subscope = ChangeScope.from_tree(spath, stree)
                subscope.parent_scope = scope
                subscopes[name] = subscope
            return scope
     | 
| 
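The guard added above rejects jedi results whose `full_name` does not live under the reported module (jedi aliases some stdlib modules, as the `_mapping` table in the unchanged refs shows). A self-contained sketch of just that check; `checked_full_name` and the sample names are illustrative, not from the project:

    from typing import NewType

    PyFullName = NewType("PyFullName", str)

    def checked_full_name(full_name: str, import_module: str) -> PyFullName:
        # Raise instead of silently yielding an inconsistent definition.
        name = PyFullName(full_name)
        if not name.startswith(import_module):
            raise ValueError(f"Inconsistent module: {full_name=}, {import_module=}")
        return name

    checked_full_name("coeditor.common.run_command", "coeditor.common")  # ok
    # checked_full_name("os.path.join", "posixpath")  # would raise ValueError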
	coeditor.ctx_change_encoder/TkCtxCodeChangeEncoder.encode_problem | 
	Modified | 
	temp-1 | 
	d58fffa6f57746cb134bd98b5de3a8e668c4311d | 
	Finish unchanged ref encoding. | 
	 <0>:<add>             named_references.append((f"changed ref {i}", chunk))
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class TkCtxCodeChangeEncoder:
          def encode_problem(
              self,
              problem: CtxCodeChangeProblem,
          ) -> Iterable[TkCtxCodeChangeProblem]:
              span = problem.span
              named_references = list[tuple[str, TokenSeq]]()
              # compute the references that are relevant to this span
    -         # FIXME
    +         relevant_chunks = self._group_encode_changed_refs(problem.relevant_changes)
    -         # relevant_chunks = self._group_encode_changed_refs(problem.relevant_changes)
    +         for i, chunk in enumerate(relevant_chunks):
    -         # for i, chunk in enumerate(relevant_chunks):
    -         #     named_references.append((f"changed ref {i}", chunk))
 <0>          relevant_chunks = self._group_encode_unchanged_refs(problem.relevant_unchanged)
              for i, chunk in enumerate(relevant_chunks):
                  named_references.append((f"unchanged ref {i}", chunk))
      
              diffs = change_to_line_diffs(span.change)
              original, delta = line_diffs_to_original_delta(diffs)
              origin_lines = split_list(encode_basic(original), Newline_id)
              tk_delta = delta.to_tk_delta()
              chunk_id = 0
              chunk_start_l = 0
              scope_tks = self._encode_parent_scopes(span.parent_scopes, 0)
              chunk_input = TokenSeq()
              input_limit = self.max_query_tks - len(scope_tks)
              chunk_lines = 0
              chunk_output = TokenSeq()
              prev_change_tks = TokenSeq()
      
              def get_problem(chunk_input, chunk_output):
                  # try move some prev_change_tks into the input
                  above_tks = prev_change_tks
                  below_tks = join_list(origin_lines[l:], Newline_id)
                  chunk_input, above_tks, below_tks = self._inline_some_</s> | 
	===========below chunk 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeEncoder:
        def encode_problem(
            self,
            problem: CtxCodeChangeProblem,
        ) -> Iterable[TkCtxCodeChangeProblem]:
    # offset: 1
    <s>l:], Newline_id)
                chunk_input, above_tks, below_tks = self._inline_some_context(
                    chunk_input, above_tks, below_tks, input_limit
                )
    
                # limit the input size if it's too long (can happen for later chunks)
                chunk_input = truncate_section(chunk_input, TruncateAt.Right, input_limit)
                chunk_output = truncate_output_tks(chunk_input, chunk_output)
                chunk_output = truncate_section(
                    chunk_output, TruncateAt.Right, self.max_output_tks, add_bos=False
                )
    
                above_chunks = break_into_chunks(
                    above_tks,
                    lambda i: self._encode_parent_scopes(span.parent_scopes, -1 - i),
                    chunk_size=self.max_ref_tks,
                    overlap=self.ref_chunk_overlap,
                    right_to_left=True,
                )
                if finished:
                    below_chunks = []
                else:
                    below_chunks = break_into_chunks(
                        below_tks,
                        lambda i: self._encode_parent_scopes(span.parent_scopes, i + 1),
                        chunk_size=self.max_ref_tks,
                        overlap=self.ref_chunk_overlap,
                    )
                above_chunks = [
                    (f"above chunk {i}", chunk) for i, chunk in enumerate(above_chunks)
                ]
                below_chunks = [
                    (f"below chunk {i}", chunk) for i, chunk in enumerate(below_chunks)
                ]
                return TkCtxCodeChangeProblem(
                    scope_tks + chunk_input,
                    chunk_output,
                    path=span.parent_scopes[-1].earlier().path,
                    change_type=</s>
===========below chunk 1===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeEncoder:
        def encode_problem(
            self,
            problem: CtxCodeChangeProblem,
        ) -> Iterable[TkCtxCodeChangeProblem]:
    # offset: 2
    <s>,
                    chunk_output,
                    path=span.parent_scopes[-1].earlier().path,
                    change_type=span.change.map(lambda _: None),
                    named_references=above_chunks + below_chunks + named_references,
                )
    
            for l in range(len(tk_delta.deltas) + 1):
                finished = l == len(tk_delta.deltas)
                input_growth = len(origin_lines[l]) + 2 if l < len(origin_lines) else 1
                if (
                    finished
                    or chunk_lines >= self.max_lines_to_edit
                    or len(chunk_input) + input_growth > input_limit
                ):
                    if has_change(chunk_output):
                        yield get_problem(chunk_input, chunk_output)
    
                    if finished:
                        break
    
                    chunk_main_input = join_list(origin_lines[chunk_start_l:l], Newline_id)
                    chunk_main_delta = tk_delta.for_input_range((chunk_start_l, l))
                    chunk_main_change = chunk_main_delta.to_change_tks(chunk_main_input)
                    prev_change_tks.extend(chunk_main_change)
                    prev_change_tks.append(Newline_id)
                    chunk_id += 1
                    chunk_input = TokenSeq()
                    chunk_lines = 0
                    chunk_output = TokenSeq()
                    chunk_start_l = l
    
                chunk_input.append(get_extra_id(chunk_lines))
                if l < len(origin_lines):
                    chunk_input.extend(origin_lines[l])
                    chunk_input.append(</s>
===========below chunk 2===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeEncoder:
        def encode_problem(
            self,
            problem: CtxCodeChangeProblem,
        ) -> Iterable[TkCtxCodeChangeProblem]:
    # offset: 3
    <s>id)
                line_change = join_list(tk_delta.deltas[l], Newline_id)
                chunk_output.append(get_extra_id(chunk_lines))
                chunk_output.extend(line_change)
                if line_change and line_change[-1] != Del_id:
                    chunk_output.append(Newline_id)
                chunk_lines += 1
    
    
===========unchanged ref 0===========
    at: cachetools
        FIFOCache(maxsize, getsizeof=None)
    
    at: coeditor.code_change.ChangedSpan
        change: Change[str]
    
        parent_scopes: Sequence[Change[ChangeScope]]
    
        line_range: _LineRange
    
    at: coeditor.common
        TokenSeq = list[Token]
    
        split_list(lst: list[T1], sep: T1) -> list[list[T1]]
    
        join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
    
    at: coeditor.ctx_change_encoder
        CtxCodeChangeProblem(span: ChangedSpan, relevant_changes: list[ChangedSpan], relevant_unchanged: list[ChangedSpan])
    
        TkCtxCodeChangeProblem(input_tks: TokenSeq, output_tks: TokenSeq, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TokenSeq]])
    
    at: coeditor.ctx_change_encoder.CtxCodeChangeProblem
        span: ChangedSpan
    
        relevant_changes: list[ChangedSpan]
    
        relevant_unchanged: list[ChangedSpan]
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder
        _encode_parent_scopes(scope_changes: Sequence[Change[ChangeScope]], offset: int) -> TokenSeq
    
        _inline_some_context(input: TokenSeq, above_ctx: TokenSeq, below_ctx: TokenSeq, size_limit: int) -> tuple[TokenSeq, TokenSeq, TokenSeq]
    
        _group_encode_unchanged_refs(elems: Sequence[ChangedSpan]) -> Sequence[TokenSeq]
    
        _group_encode_changed_refs(changes: Sequence[ChangedSpan]) -> Sequence[TokenSeq]
        _group_encode_changed_refs(self, changes: Sequence[ChangedSpan]) -> Sequence[TokenSeq]
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder.encode_problem
        finished = l == len(tk_delta.deltas)
    
     | 
| 
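The chunks named `changed ref {i}` / `unchanged ref {i}` above come from splitting long reference token sequences into overlapping windows (`break_into_chunks` with `max_ref_tks` and `ref_chunk_overlap`). That helper's body is not shown in this excerpt; the sketch below only illustrates the windowing idea, with `chunk_with_overlap` as a hypothetical stand-in:

    def chunk_with_overlap(tks: list[int], chunk_size: int, overlap: int) -> list[list[int]]:
        # Each window repeats the last `overlap` tokens of its predecessor,
        # so content near a boundary keeps some surrounding context.
        chunks, start = [], 0
        while start < len(tks):
            chunks.append(tks[start : start + chunk_size])
            if start + chunk_size >= len(tks):
                break
            start += chunk_size - overlap
        return chunks

    refs = chunk_with_overlap(list(range(10)), chunk_size=4, overlap=1)
    named = [(f"changed ref {i}", c) for i, c in enumerate(refs)]
    # [('changed ref 0', [0, 1, 2, 3]), ('changed ref 1', [3, 4, 5, 6]), ...]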
	coeditor.ctx_change_encoder/JediUsageAnalyzer.get_line_usages | 
	Modified | 
	temp-1 | 
	d58fffa6f57746cb134bd98b5de3a8e668c4311d | 
	Finish unchanged ref encoding. | 
	 <0>:<add>                     follow_imports=True,
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class JediUsageAnalyzer:
          def get_line_usages(
              self,
              script: jedi.Script,
              proj_root: Path,
              lines_to_analyze: Collection[int],
              silent: bool = False,
          ):
              jmod: tree.Module = script._module_node
              line2usages = dict[int, set[PyDefinition]]()
              all_names = [
                  name for k, names in jmod.get_used_names()._dict.items() for name in names
              ]
              all_names.sort(key=lambda x: x.start_pos)
              errors = self.error_counts
    -         resolve_cache = dict[_ObjId, set[PyDefinition]]()
              for name in tqdm(all_names, f"Analyzing {script.path}", disable=silent):
                  name: tree.Name
                  line = name.start_pos[0]
                  if line not in lines_to_analyze:
                      continue
                  usages = line2usages.setdefault(line, set())
                  try:
                      defs = fast_goto(
                          script,
                          name,
    -                     follow_imports=self.follow_imports,
 <0>                      follow_builtin_imports=False,
                      )
                      for d in defs:
                          usages.update(PyDefinition.from_name(d))
      
                  except (AttributeError, AssertionError) as e:
                      text = repr(e)
                      errors[text] = errors.setdefault(text, 0) + 1
                  except ValueError as e:
                      # if the message is "not enough values to unpack"
                      if "not enough values to unpack (expected 2" in str(e):
                          errors[repr(e)] = errors.setdefault(str(e), 0) + 1
                      else:
                          raise
              return LineUsageAnalysis(line2usages)
      
       | 
	===========unchanged ref 0===========
    at: coeditor._utils
        groupby(iterable: Iterable[T1], keyfunc: Callable[[T1], T2]) -> dict[T2, list[T1]]
    
    at: coeditor.code_change
        ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: _LineRange)
    
    at: coeditor.code_change.ChangedSpan
        parent_scopes: Sequence[Change[ChangeScope]]
    
    at: coeditor.common
        TokenSeq = list[Token]
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder
        max_ref_tks: int = 512
    
        ref_chunk_overlap: int = 32
    
        max_chunks_per_ref: int = 4
    
        _encode_parent_scopes(scope_changes: Sequence[Change[ChangeScope]], offset: int) -> TokenSeq
    
        _encode_change(change: Change[str]) -> TokenSeq
    
    at: coeditor.encoding
        Newline_id = get_tk_id("\n")
    
        break_into_chunks(tks: TokenSeq, header_f: Callable[[int], TokenSeq], chunk_size: int, overlap: int, right_to_left: bool=False, add_bos: bool=True, max_return_chunks: int | None=None) -> list[TokenSeq]
    
    at: dataclasses
        dataclass(_cls: Type[_T]) -> Type[_T]
        dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
        dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: coeditor.code_change
    @dataclass
    class ChangedSpan:
        "Represents the changes made to a statement span."
        change: Change[str]
        parent_scopes: Sequence[Change[ChangeScope]]
  +     line_range: LineRange
  -     line_range: _LineRange
    
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class JediUsageAnalyzer:
  -     follow_imports: bool = True
  - 
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
  + PyFullName = NewType("PyFullName", str)
  - PyFullName = NewType("PyPathStr", str)
    
===========changed ref 3===========
    # module: coeditor.ctx_change_encoder
    @dataclass(unsafe_hash=True)
    class PyDefinition:
        @staticmethod
        def from_name(name: classes.BaseName) -> Iterable["PyDefinition"]:
  -         cast(classes.Name, name).is_definition()
            if (
                not name.in_builtin_module()
                and (full_name := name.full_name)
                and (import_module := name.module_name)
                and (start_pos := name.get_definition_start_position())
                and (end_pos := name.get_definition_end_position())
            ):
                full_name = PyFullName(full_name)
  +             if not full_name.startswith(import_module):
  +                 raise ValueError(f"Inconsistent module: {full_name=}, {import_module=}")
                yield PyDefinition(full_name, import_module, start_pos, end_pos)
    
===========changed ref 4===========
    # module: coeditor.code_change
  - def _line_range(start: int, end: int, can_be_empty: bool = False) -> _LineRange:
  -     if not can_be_empty and start >= end:
  -         raise ValueError(f"Bad line range: {start=}, {end=}")
  -     return _LineRange((start, end))
  - 
===========changed ref 5===========
    # module: coeditor.code_change
  - def _line_range(start: int, end: int, can_be_empty: bool = False) -> _LineRange:
  -     if not can_be_empty and start >= end:
  -         raise ValueError(f"Bad line range: {start=}, {end=}")
  -     return _LineRange((start, end))
  - 
===========changed ref 6===========
    # module: coeditor.code_change
    ScopeTree = ptree.Function | ptree.Class | ptree.Module
    PyNode = ptree.PythonBaseNode | ptree.PythonNode
  + LineRange = NewType("LineRange", tuple[int, int])
  - _LineRange = NewType("LineRange", tuple[int, int])
    
    _tlogger = TimeLogger()
    
===========changed ref 7===========
    # module: coeditor.code_change
    @dataclass
    class StatementSpan:
        """
        A statement span is a set of lines inside the same change scope. It is the basic unit of code changes handled by our model.
            - For a modified function, the span is the function itself.
            - For a modified module, the spans are the regions between the functions and classes plus
            the spans recursively generated.
            - For a modified class, the spans are the regions between methods plus
            the spans recursively generated.
        """
    
  +     nth_in_parent: int
        statements: Sequence[PyNode]
        scope: ChangeScope
    
===========changed ref 8===========
    # module: coeditor.code_change
    @dataclass
    class StatementSpan:
        def __post_init__(self):
            assert self.statements
  +         # remove leading newlines
  +         n_leading_newlines = 0
  +         stmts = self.statements
  +         for s in stmts:
  +             if s.type == ptree.Newline.type:
  +                 n_leading_newlines += 1
  +             else:
  +                 break
  +         if n_leading_newlines:
  +             self.statements = stmts[n_leading_newlines:]
  + 
            origin_code = "".join(s.get_code() for s in self.statements)
            code, _, e_right = _strip_empty_lines(origin_code)
            start = self.statements[0].start_pos[0]
            end = self.statements[-1].end_pos[0] + 1 - e_right
    
            self.code: str = code + "\n"
            try:
  +             self.line_range: LineRange = line_range(start, end)
  -             self.line_range: _LineRange = _line_range(start, end)
            except ValueError:
                print_err(f"{origin_code=}, {e_right=}, {start=}, {end=}")
                raise
    
===========changed ref 9===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @staticmethod
        def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
            spans = []
            subscopes = dict()
            scope = ChangeScope(path, tree, spans, subscopes, None)
            assert isinstance(tree, ScopeTree)
            is_func = isinstance(tree, ptree.Function)
    
            current_stmts = []
            content = (
                tree.children
                if isinstance(tree, ptree.Module)
                else cast(ptree.PythonNode, tree.get_suite()).children
            )
            for s in content:
                # we don't create inner scopes for function contents
                if is_func or _is_scope_statement(as_any(s)):
                    current_stmts.append(s)
                else:
                    if current_stmts:
  +                     spans.append(StatementSpan(len(spans), current_stmts, scope))
  -                     spans.append(StatementSpan(current_stmts, scope))
                        current_stmts = []
            if current_stmts:
  +             spans.append(StatementSpan(len(spans), current_stmts, scope))
  -             spans.append(StatementSpan(current_stmts, scope))
    
            if is_func:
                # we don't create inner scopes for function contents
                return scope
            for stree in tree._search_in_scope(ptree.Function.type, ptree.Class.type):
                stree: ptree.Function | ptree.Class
                name = cast(ptree.Name, stree.name).value
                spath = path.append(name)
                subscope = ChangeScope.from_tree(spath, stree)
                subscope.parent_scope = scope
                subscopes[name] = subscope
            return scope
     | 
| 
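The diff above removes the configurable `follow_imports` field and hard-codes `follow_imports=True` in the project-internal `fast_goto` call. jedi's public `Script.goto` accepts the same flags; a minimal usage sketch with an illustrative source snippet and coordinates:

    import jedi

    source = "import os\np = os.path.join('a', 'b')\n"
    script = jedi.Script(source, path="example.py")
    # Resolve the name at line 2, column 8 (inside `path`); following imports
    # lands on the real definition rather than on the import statement itself.
    for d in script.goto(2, 8, follow_imports=True, follow_builtin_imports=False):
        print(d.full_name, d.module_name)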
	coeditor.code_change/get_python_files | 
	Modified | 
	temp-1 | 
	c5644c5542948a30b4d5a99c74d50454fb62315a | 
	Speed up parsing with on-demand deepcopying. | 
	 <0>:<add>         files.append(to_rel_path(Path(f.path).relative_to(project)))
 | 
	      # module: coeditor.code_change
    + def get_python_files(project: RelPath) -> list[RelPath]:
    - def get_python_files(project: Path):
    +     files = list[RelPath]()
    -     files = list[Path]()
          for f in recurse_find_python_files(FolderIO(str(project))):
              f: FileIO
    -         files.append(Path(f.path).relative_to(project))
 <0>      return files
      
       | 
	===========unchanged ref 0===========
    at: coeditor.common
        RelPath = NewType("RelPath", Path)
    
        to_rel_path(path: os.PathLike | str) -> RelPath
    
    at: jedi.file_io
        FolderIO(path)
    
        FileIO(path: Union[os.PathLike, str])
    
    at: jedi.inference.references
        recurse_find_python_files(folder_io, except_paths=())
    
    at: parso.file_io.FileIO.__init__
        self.path = path
    
    at: pathlib
        Path()
    
    at: pathlib.PurePath
        __slots__ = (
                '_drv', '_root', '_parts',
                '_str', '_hash', '_pparts', '_cached_cparts',
            )
    
        drive = property(attrgetter('_drv'),
                             doc="""The drive prefix (letter or UNC path), if any.""")
    
        root = property(attrgetter('_root'),
                            doc="""The root of the path, if any.""")
    
        relative_to(*other: Union[str, _PathLike]) -> _P
    
    
===========changed ref 0===========
    # module: coeditor.common
  + def to_rel_path(path: Path) -> RelPath:
  +     if path.is_absolute():
  +         raise ValueError(f"Expected a relative path, got: {path}")
  +     return RelPath(path)
  + 
===========changed ref 1===========
    # module: coeditor.common
  + def to_abs_path(path: Path) -> AbsPath:
  +     return AbsPath(path.resolve())
  +  | 
| 
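The `to_rel_path` / `to_abs_path` helpers from the changed refs above are thin `NewType` wrappers that make path kinds explicit in signatures like `get_python_files(project: RelPath)`. Restated together with a usage line:

    from pathlib import Path
    from typing import NewType

    RelPath = NewType("RelPath", Path)
    AbsPath = NewType("AbsPath", Path)

    def to_rel_path(path: Path) -> RelPath:
        if path.is_absolute():
            raise ValueError(f"Expected a relative path, got: {path}")
        return RelPath(path)

    def to_abs_path(path: Path) -> AbsPath:
        return AbsPath(path.resolve())

    print(to_rel_path(Path("src/app.py")))  # src/app.py
    print(to_abs_path(Path("src/app.py")))  # absolute, resolved form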
	coeditor.code_change/edits_from_commit_history | 
	Modified | 
	temp-1 | 
	c5644c5542948a30b4d5a99c74d50454fb62315a | 
	Speed up parsing with on-demand deepcopying. | 
	 <0>:<add>             tempdir, history, change_processor, edit_encoder, ignore_dirs, silent
 | 
	      <s>_from_commit_history(
          project_dir: Path,
          history: Sequence[CommitInfo],
          tempdir: Path,
    +     change_processor: ProjectChangeProcessor[TProb] = NoProcessing(),
    -     change_encoder: ProjectChangeProcessor[TEnc] = NoProcessing(),
    +     edit_encoder: Callable[[TProb], Iterable[T1]] = lambda x: [x],
          ignore_dirs=DefaultIgnoreDirs,
          silent: bool = False,
    + ) -> Sequence[T1]:
    - ) -> Iterable[TEnc]:
          """Incrementally compute the edits to a project from the git history.
          Note that this will change the file states in the project directory, so
          you should make a copy of the project before calling this function.
    - 
    -     Note that this returns an iterator, and the file state cleaning up will
    -     only happen when the iterator is exhausted.
          """
          tempdir = tempdir.resolve()
          if tempdir.exists():
              raise FileExistsError(f"Workdir '{tempdir}' already exists.")
          tempdir.mkdir(parents=True, exist_ok=False)
          use_fast_parser = jedi.settings.fast_parser
          try:
              run_command(
                  ["cp", "-r", str(project_dir / ".git"), str(tempdir)],
                  cwd=project_dir.parent,
              )
      
    +         return _edits_from_commit_history(
    -         yield from _edits_from_commit_history(
    -             tempdir, history, change_encoder, ignore_dirs, silent
 <0>          )
          finally:
              run_command(["rm", "-rf", str(tempdir)], cwd=tempdir.parent)
              jedi.settings.fast_parser = use_fast_parser
      
       | 
	===========unchanged ref 0===========
    at: coeditor.code_change
        DefaultIgnoreDirs = {".venv", ".mypy_cache", ".git", "venv", "build"}
    
        TProb = TypeVar("TProb", covariant=True)
    
        ProjectChangeProcessor()
    
        NoProcessing()
    
        _edits_from_commit_history(project: Path, history: Sequence[CommitInfo], change_processor: ProjectChangeProcessor[TProb], edit_encoder: Callable[[TProb], Iterable[T1]], ignore_dirs: set[str], silent: bool) -> Sequence[T1]
    
    at: coeditor.common
        T1 = TypeVar("T1")
    
        run_command(args: Sequence[str], cwd: str | Path) -> str
    
    at: jedi.settings
        fast_parser = True
    
    at: pathlib
        Path()
    
    at: pathlib.Path
        __slots__ = ()
    
        resolve(strict: bool=...) -> _P
    
        mkdir(mode: int=..., parents: bool=..., exist_ok: bool=...) -> None
    
        exists() -> bool
    
    at: typing
        Iterable = _alias(collections.abc.Iterable, 1)
    
        Callable = _CallableType(collections.abc.Callable, 2)
    
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: coeditor.code_change
    class NoProcessing(ProjectChangeProcessor[JProjectChange]):
  -     def encode_change(
  -         self,
  -         pchange: JProjectChange,
  -         pre_analysis,
  -         post_analysis,
  -     ) -> Iterable[JProjectChange]:
  -         yield pchange
  - 
===========changed ref 1===========
    # module: coeditor.code_change
    class NoProcessing(ProjectChangeProcessor[JProjectChange]):
  -     def encode_change(
  -         self,
  -         pchange: JProjectChange,
  -         pre_analysis,
  -         post_analysis,
  -     ) -> Iterable[JProjectChange]:
  -         yield pchange
  - 
===========changed ref 2===========
    # module: coeditor.code_change
  + class ProjectChangeProcessor(Generic[TProb]):
  - class ProjectChangeProcessor(Generic[TEnc]):
  -     def encode_change(
  -         self, pchange: "JProjectChange", pre_analysis: Any, post_analysis: Any
  -     ) -> Iterable[TEnc]:
  -         ...
  - 
===========changed ref 3===========
    # module: coeditor.code_change
  + class ProjectChangeProcessor(Generic[TProb]):
  - class ProjectChangeProcessor(Generic[TEnc]):
  -     def encode_change(
  -         self, pchange: "JProjectChange", pre_analysis: Any, post_analysis: Any
  -     ) -> Iterable[TEnc]:
  -         ...
  - 
===========changed ref 4===========
    # module: coeditor.code_change
  + TProb = TypeVar("TProb", covariant=True)
  - TEnc = TypeVar("TEnc", covariant=True)
    
===========changed ref 5===========
    # module: coeditor.code_change
  + def get_python_files(project: RelPath) -> list[RelPath]:
  - def get_python_files(project: Path):
  +     files = list[RelPath]()
  -     files = list[Path]()
        for f in recurse_find_python_files(FolderIO(str(project))):
            f: FileIO
  +         files.append(to_rel_path(Path(f.path).relative_to(project)))
  -         files.append(Path(f.path).relative_to(project))
        return files
    
===========changed ref 6===========
    # module: coeditor.common
  + def to_abs_path(path: Path) -> AbsPath:
  +     return AbsPath(path.resolve())
  + 
===========changed ref 7===========
    # module: coeditor.common
  + def to_rel_path(path: Path) -> RelPath:
  +     if path.is_absolute():
  +         raise ValueError(f"Expected a relative path, got: {path}")
  +     return RelPath(path)
  +  | 
| 
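The driver above copies only the project's `.git` directory into a scratch workdir and replays commits there, so the caller's checkout is never mutated. A condensed sketch of that loop; `replay_history` is a hypothetical wrapper, and the per-commit analysis is elided:

    import subprocess
    from pathlib import Path

    def run_command(args: list[str], cwd: Path) -> str:
        return subprocess.run(
            args, cwd=cwd, capture_output=True, check=True, text=True
        ).stdout

    def replay_history(project_dir: Path, tempdir: Path, hashes: list[str]) -> None:
        tempdir.mkdir(parents=True, exist_ok=False)
        try:
            # Copying .git alone suffices: `git checkout -f` rebuilds the worktree.
            run_command(
                ["cp", "-r", str(project_dir / ".git"), str(tempdir)],
                cwd=project_dir.parent,
            )
            for h in hashes:
                run_command(["git", "checkout", "-f", h], cwd=tempdir)
                # ... analyze the files at commit h here ...
        finally:
            run_command(["rm", "-rf", str(tempdir)], cwd=tempdir.parent)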
	coeditor.ctx_change_encoder/CtxCodeChangeProblemGenerator.pre_edit_analysis | 
	Modified | 
	temp-1 | 
	65a052117c316099d5beea02c38dd4ed3cd1b187 | 
	Speed up analyses using script caching. | 
	 <0>:<add>             script = pstate.scripts[mod_path]
 | 
	      # module: coeditor.ctx_change_encoder
      class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
          def pre_edit_analysis(
              self,
    +         pstate: ProjectState,
    -         project: jedi.Project,
              modules: Mapping[RelPath, JModule],
              changes: Mapping[ModuleName, JModuleChange],
          ) -> Mapping[ModuleName, LineUsageAnalysis]:
              "Return the definition usages of each line."
    +         project = pstate.project
    -         # proot = Path(project._path)
              result = dict[ModuleName, LineUsageAnalysis]()
      
              src_map = {m.mname: f for f, m in modules.items()}
              for mname, mchange in changes.items():
                  if not isinstance(mchange.module_change, Modified):
                      continue
      
                  lines_to_analyze = set[int]()
                  for span in mchange.changed.values():
                      if span.change is Added:
                          continue
                      lines_to_analyze.update(range(*span.line_range))
                      lines_to_analyze.update(range(*span.header_line_range))
      
                  mod_path = src_map[mname]
    -             assert (
    -                 src_file := project.path / mod_path
    -             ).exists(), f"src file missing: {src_file}"
    -             script = jedi.Script(path=src_file, project=project)
 <0>              line_usages = self.analysis.get_line_usages(
                      script, project.path, lines_to_analyze, silent=True
                  )
                  result[mname] = line_usages
              return result
      
       | 
	===========unchanged ref 0===========
    at: coeditor.code_change.ChangedSpan
        change: Change[str]
    
        parent_scopes: Sequence[Change[ChangeScope]]
    
        line_range: LineRange
    
    at: coeditor.code_change.JModule
        mname: ModuleName
    
        tree: ptree.Module
    
    at: coeditor.code_change.JModuleChange
        module_change: Change[JModule]
    
        changed: Mapping[ProjectPath, ChangedSpan]
    
    at: coeditor.common
        RelPath = NewType("RelPath", Path)
    
    at: coeditor.ctx_change_encoder
        LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
    
    at: coeditor.ctx_change_encoder.CtxCodeChangeProblemGenerator.__init__
        self.analysis = analysis
    
    at: coeditor.ctx_change_encoder.JediUsageAnalyzer
        get_line_usages(script: jedi.Script, proj_root: Path, lines_to_analyze: Collection[int], silent: bool=False)
    
    at: spot.static_analysis
        ModuleName = str
    
    at: typing
        Mapping = _alias(collections.abc.Mapping, 2)
    
    at: typing.Mapping
        items() -> AbstractSet[Tuple[_KT, _VT_co]]
    
        values() -> ValuesView[_VT_co]
    
    
===========changed ref 0===========
    # module: coeditor.code_change
  + @dataclass
  + class ProjectState:
  +     project: jedi.Project
  +     scripts: Mapping[RelPath, jedi.Script]
  + 
===========changed ref 1===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_processor: ProjectChangeProcessor[TProb],
        edit_encoder: Callable[[TProb], Iterable[T1]],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Sequence[T1]:
  +     scripts = dict[RelPath, jedi.Script]()
  + 
  +     def parse_module(path: Path):
  -     def parse_module(path: Path, deep_copy: bool = False):
            with _tlogger.timed("parse_module"):
                s = jedi.Script(path=path, project=proj)
  +             scripts[to_rel_path(path.relative_to(proj._path))] = s
                mcontext = s._get_module_context()
                assert isinstance(mcontext, ModuleContext)
                mname = cast(str, mcontext.py__name__())
                if mname.startswith("src."):
                    e = ValueError(f"Bad module name: {mname}")
                    files = list(project.iterdir())
                    print_err(f"project: {proj}", file=sys.stderr)
                    print_err(f"files in root: {files}", file=sys.stderr)
                    raise e
                m = s._module_node
  -             if deep_copy:
  -                 m = copy.deepcopy(m)
  - 
                assert isinstance(m, ptree.Module)
                # mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
                # m = parso.parse(path.read_text())
                return JModule(mname, m)
    
        def checkout_commit(commit_hash: str, force: bool = False):
            with _tlogger.timed("checkout"):
                subprocess.run(
                    ["git", "checkout", "-f", commit_hash],
                    cwd=project,
                    capture_output=True,
                    check=True,
                )
    
        # checkout to the first commit
        commit</s>
===========changed ref 2===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_processor: ProjectChangeProcessor[TProb],
        edit_encoder: Callable[[TProb], Iterable[T1]],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Sequence[T1]:
    # offset: 1
    <s>,
                    capture_output=True,
                    check=True,
                )
    
        # checkout to the first commit
        commit_now = history[-1]
        checkout_commit(commit_now.hash, force=True)
        proj = jedi.Project(path=project, added_sys_path=[project / "src"])
  +     pstate = ProjectState(proj, scripts)
    
        # now we can get the first project state, although this not needed for now
        # but we'll use it later for pre-edit analysis
        path2module = {
  +         f: parse_module(project / f)
  -         f: parse_module(project / f, deep_copy=False)
            for f in tqdm(
                get_python_files(project), desc="building initial project", disable=silent
            )
        }
    
        def is_src(path_s: str) -> bool:
            path = Path(path_s)
            return path.suffix == ".py" and all(p not in ignore_dirs for p in path.parts)
    
        future_commits = list(reversed(history[:-1]))
        results = list[T1]()
        for commit_next in tqdm(
            future_commits, smoothing=0, desc="processing commits", disable=silent
        ):
            # get changed files
            changed_files = run_command(
                ["git", "diff", commit_now.hash, commit_next.hash, "--name-status"],
                cwd=project,
            ).splitlines()
    
            path_changes = list[Change[str]]()
    
            for line</s>
===========changed ref 3===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_processor: ProjectChangeProcessor[TProb],
        edit_encoder: Callable[[TProb], Iterable[T1]],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Sequence[T1]:
    # offset: 2
    <s>_files:
                segs = line.split("\t")
                if len(segs) == 2:
                    tag, path = segs
                    if not is_src(path):
                        continue
                    if tag.endswith("A"):
                        path_changes.append(Added(path))
                    elif tag.endswith("D"):
                        path_changes.append(Deleted(path))
                    if tag.endswith("M"):
                        path_changes.append(Modified(path, path))
                elif len(segs) == 3:
                    tag, path1, path2 = segs
                    assert tag.startswith("R")
                    if not is_src(path1) or not is_src(path2):
                        continue
                    path_changes.append(Deleted(path1))
                    path_changes.append(Added(path2))
    
            # make deep copies of changed modules
            to_copy = {
                to_rel_path(Path(path_change.before))
                for path_change in path_changes
                if not isinstance(path_change, Added)
            }
            _deep_copy_subset_(path2module, to_copy)
    
            checkout_commit(commit_next.hash)
    
  -         proj = jedi.Project(path=project, added_sys_path=[project / "src"])
  - 
            new_path2module = path2module.copy()
            changed = dict[ModuleName, JModuleChange]()
            for path_change in path_changes:
                path = project / path_change.earlier()
                rel_path = to_rel_path(path.relative</s> | 
| 
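The speed-up threads a `ProjectState` (a `jedi.Project` plus the `jedi.Script` objects built during parsing) into `pre_edit_analysis`, replacing the per-module `jedi.Script(path=src_file, ...)` construction. A stripped-down sketch of the caching idea; `ScriptCache` is a hypothetical simplification of `ProjectState`, and `project._path` is the private attribute the module itself relies on:

    from dataclasses import dataclass, field
    from pathlib import Path
    import jedi

    @dataclass
    class ScriptCache:
        project: jedi.Project
        scripts: dict[Path, jedi.Script] = field(default_factory=dict)

        def get(self, rel_path: Path) -> jedi.Script:
            # Reuse the Script created when the module was parsed; building
            # a fresh one re-reads and re-parses the file.
            if rel_path not in self.scripts:
                self.scripts[rel_path] = jedi.Script(
                    path=Path(self.project._path) / rel_path, project=self.project
                )
            return self.scripts[rel_path]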
	coeditor.code_change/ChangeScope.__post_init__ | 
	Modified | 
	temp-1 | 
	74b3e2333e9961157b553323ce501210bcaaebda | 
	Fix span ranges. | 
	 <0>:<add>             h_start = not_none(visited[0].get_start_pos_of_prefix())[0] + e_left
 | 
	      # module: coeditor.code_change
      @dataclass
      class ChangeScope:
          def __post_init__(self):
              # compute header
              if isinstance(self.tree, ptree.Module):
                  header_code = f"# module: {self.path.module}"
                  header_line_range = line_range(0, 0, can_be_empty=True)
              else:
                  h_start, h_end = 0, 0
                  tree = self.tree
                  to_visit = list[NodeOrLeaf]()
                  parent = not_none(tree.parent)
                  while parent.type in ("decorated", "async_funcdef"):
                      to_visit.insert(0, parent.children[0])
                      parent = not_none(parent.parent)
                  to_visit.extend(tree.children)
                  visited = list[NodeOrLeaf]()
                  for c in to_visit:
                      if c.type == "suite":
                          break
                      visited.append(c)
                  header_code = "".join(cast(str, c.get_code()) for c in visited)
    +             header_code, e_left, e_right = _strip_empty_lines(header_code)
    -             header_code, _, e_right = _strip_empty_lines(header_code)
    -             h_start = visited[0].start_pos[0]
 <0>              h_end = visited[-1].end_pos[0] + 1 - e_right
                  assert_eq(count_lines(header_code) == h_end - h_start)
                  header_line_range = line_range(h_start, h_end)
                  if self.spans and h_end > self.spans[0].line_range[0]:
                      raise ValueError(
                          f"Header covers the fisrt span: {self.path=}, {h_start=}, {h_end=} "
                          f"{self.spans[0].line_range=}"
                      )
      
              self.header_code: str = header_code + "\n"
              self.header_line_range: LineRange = header_line_range
      
       | 
	===========unchanged ref 0===========
    at: coeditor.code_change.ChangeScope
        path: ProjectPath
    
        tree: ScopeTree
    
        spans: Sequence["StatementSpan"]
    
        subscopes: Mapping[str, Self]
    
        parent_scope: "ChangeScope | None"
    
        _search_span(line: int) -> "StatementSpan | None"
    
    at: coeditor.code_change.ChangeScope.__post_init__
        self.header_code: str = header_code + "\n"
    
        self.header_line_range: LineRange = header_line_range
    
    at: coeditor.code_change.StatementSpan.__post_init__
        code, e_left, e_right = _strip_empty_lines(origin_code)
        self.code: str = code + "\n"
    
    at: functools
        cached_property(func: Callable[[Any], _T])
    
    at: spot.static_analysis
        ElemPath = str
    
     | 
| 
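The header fix above starts using `e_left` (the number of empty lines stripped on the left) together with `get_start_pos_of_prefix()` to anchor `h_start`. `_strip_empty_lines` itself is not shown in this excerpt; the sketch below is a plausible reconstruction consistent with how `(code, e_left, e_right)` is consumed in the surrounding code:

    def strip_empty_lines(code: str) -> tuple[str, int, int]:
        """Return (stripped, n_left, n_right): empty lines removed at each end."""
        lines = code.split("\n")
        n_left = 0
        while n_left < len(lines) and lines[n_left].strip() == "":
            n_left += 1
        n_right = 0
        while n_right < len(lines) - n_left and lines[-1 - n_right].strip() == "":
            n_right += 1
        stripped = "\n".join(lines[n_left : len(lines) - n_right])
        return stripped, n_left, n_right

    code, e_left, e_right = strip_empty_lines("\n\ndef f():\n    pass\n\n")
    print(repr(code), e_left, e_right)  # 'def f():\n    pass' 2 2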
	coeditor.code_change/ChangeScope.from_tree | 
	Modified | 
	temp-1 | 
	74b3e2333e9961157b553323ce501210bcaaebda | 
	Fix span ranges. | 
	 <0>:<add>                 raise ValueError(f"Function with no spans: {path=}, {tree.get_code()=}")
 | 
	      # module: coeditor.code_change
      @dataclass
      class ChangeScope:
          @staticmethod
          def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
      <s>tree, ptree.Function)
    + 
    +         def mk_span(stmts):
    +             # remove leading newlines
    +             n_leading_newlines = 0
    +             for s in stmts:
    +                 if s.type == ptree.Newline.type:
    +                     n_leading_newlines += 1
    +                 else:
    +                     break
    +             if n_leading_newlines:
    +                 stmts = stmts[n_leading_newlines:]
    +             if stmts:
    +                 yield StatementSpan(len(spans), stmts, scope)
      
              current_stmts = []
              content = (
                  tree.children
                  if isinstance(tree, ptree.Module)
                  else cast(ptree.PythonNode, tree.get_suite()).children
              )
              for s in content:
                  # we don't create inner scopes for function contents
                  if is_func or _is_scope_statement(as_any(s)):
                      current_stmts.append(s)
                  else:
                      if current_stmts:
    +                     spans.extend(mk_span(current_stmts))
    -                     spans.append(StatementSpan(len(spans), current_stmts, scope))
                          current_stmts = []
              if current_stmts:
    +             spans.extend(mk_span(current_stmts))
    -             spans.append(StatementSpan(len(spans), current_stmts, scope))
      
              if is_func:
                  # we don't create inner scopes for function contents
    +             if not spans:
 <0>              return scope
              for stree in tree._search_in_scope(ptree.Function.type, ptree.Class.type):
                  stree: ptree.Function | ptree.Class
                  name = cast(ptree.Name, stree.name).value
                  spath = path.append(name)
                  subscope = ChangeScope.from_tree(spath, stree)
                  subscope.parent_scope = scope
                  subscopes[name] = subscope
              return scope
      
       | 
	===========above chunk 0===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @staticmethod
        def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
    # offset: -1
            spans = []
            subscopes = dict()
            scope = ChangeScope(path, tree, spans, subscopes, None)
            assert isinstance(tree, ScopeTree)
            is_func = isinstance(tree, ptree.Function)
  + 
  +         def mk_span(stmts):
  +             # remove leading newlines
  +             n</s>
===========unchanged ref 0===========
    at: coeditor._utils
        as_any(x) -> Any
    
    at: coeditor.code_change
        ScopeTree = ptree.Function | ptree.Class | ptree.Module
    
        ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None")
    
        _is_scope_statement(stmt: PyNode) -> bool
    
        StatementSpan(nth_in_parent: int, statements: Sequence[PyNode], scope: ChangeScope)
    
    at: parso.python.tree
        PythonNode()
    
        Newline()
    
        Module(children)
    
        Function(children)
    
    at: parso.python.tree.Newline
        __slots__ = ()
    
        type = 'newline'
    
    at: parso.python.tree.Scope
        __slots__ = ()
    
        get_suite()
    
    at: parso.tree.BaseNode.__init__
        self.children = children
    
    at: parso.tree.NodeOrLeaf
        __slots__ = ('parent',)
    
        type: str
    
        parent: 'Optional[BaseNode]'
    
    at: spot.static_analysis
        ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
    
    at: typing
        cast(typ: Type[_T], val: Any) -> _T
        cast(typ: str, val: Any) -> Any
        cast(typ: object, val: Any) -> Any
    
    
===========changed ref 0===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        def __post_init__(self):
            # compute header
            if isinstance(self.tree, ptree.Module):
                header_code = f"# module: {self.path.module}"
                header_line_range = line_range(0, 0, can_be_empty=True)
            else:
                h_start, h_end = 0, 0
                tree = self.tree
                to_visit = list[NodeOrLeaf]()
                parent = not_none(tree.parent)
                while parent.type in ("decorated", "async_funcdef"):
                    to_visit.insert(0, parent.children[0])
                    parent = not_none(parent.parent)
                to_visit.extend(tree.children)
                visited = list[NodeOrLeaf]()
                for c in to_visit:
                    if c.type == "suite":
                        break
                    visited.append(c)
                header_code = "".join(cast(str, c.get_code()) for c in visited)
  +             header_code, e_left, e_right = _strip_empty_lines(header_code)
  -             header_code, _, e_right = _strip_empty_lines(header_code)
  +             h_start = not_none(visited[0].get_start_pos_of_prefix())[0] + e_left
  -             h_start = visited[0].start_pos[0]
                h_end = visited[-1].end_pos[0] + 1 - e_right
              assert_eq(count_lines(header_code), h_end - h_start)
                header_line_range = line_range(h_start, h_end)
                if self.spans and h_end > self.spans[0].line_range[0]:
                    raise ValueError(
                        f"Header covers the fisrt span: {self.path=}, {h_start=}, {h_end=} "
                        f"{self.spans[0].line_range=}"
                    )
    
            self.header_code</s>
===========changed ref 1===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        def __post_init__(self):
    # offset: 1
    <s>end=} "
                        f"{self.spans[0].line_range=}"
                    )
    
            self.header_code: str = header_code + "\n"
            self.header_line_range: LineRange = header_line_range
     | 
| 
	coeditor.code_change/StatementSpan.__post_init__ | 
	Modified | 
	temp-1 | 
	74b3e2333e9961157b553323ce501210bcaaebda | 
	Fix span ranges. | 
	 <0>:<add>                 print_err(s)
 | 
	      # module: coeditor.code_change
      @dataclass
      class StatementSpan:
          def __post_init__(self):
              assert self.statements
    -         # remove leading newlines
    -         n_leading_newlines = 0
    -         stmts = self.statements
    -         for s in stmts:
    -             if s.type == ptree.Newline.type:
    -                 n_leading_newlines += 1
    -             else:
    -                 break
    -         if n_leading_newlines:
    -             self.statements = stmts[n_leading_newlines:]
    - 
              origin_code = "".join(s.get_code() for s in self.statements)
    +         code, e_left, e_right = _strip_empty_lines(origin_code)
    -         code, _, e_right = _strip_empty_lines(origin_code)
    +         start = not_none(self.statements[0].get_start_pos_of_prefix())[0] + e_left
    -         start = self.statements[0].start_pos[0]
              end = self.statements[-1].end_pos[0] + 1 - e_right
      
              self.code: str = code + "\n"
              try:
                  self.line_range: LineRange = line_range(start, end)
              except ValueError:
    +             print_err(f"{e_right=}, {start=}, {end=}")
    -             print_err(f"{origin_code=}, {e_right=}, {start=}, {end=}")
    +             print_err("Origin code:")
    +             print_err(origin_code)
    +             print_err("Stmts:")
    +             for s in self.statements:
 <0>              raise
      
       | 
	===========unchanged ref 0===========
    at: coeditor._utils
        not_none(x: Optional[T1]) -> T1
    
    at: coeditor.code_change
        PyNode = ptree.PythonBaseNode | ptree.PythonNode
    
        _strip_empty_lines(s: str)
    
        ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None")
    
    at: dataclasses
        dataclass(_cls: Type[_T]) -> Type[_T]
        dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
        dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
    
    at: parso.tree.BaseNode
        __slots__ = ('children',)
    
        get_start_pos_of_prefix()
    
        get_code(include_prefix=True)
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
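
The `try/except ValueError` around `line_range(start, end)` in `StatementSpan.__post_init__` above suggests the helper validates its bounds. A plausible sketch, assuming a half-open `[start, end)` range that may only be empty when explicitly allowed (cf. `line_range(0, 0, can_be_empty=True)` for module headers):

    from typing import NewType

    LineRange = NewType("LineRange", tuple[int, int])

    def line_range(start: int, end: int, can_be_empty: bool = False) -> LineRange:
        # Reject empty or inverted ranges unless the caller opts in.
        if not can_be_empty and end <= start:
            raise ValueError(f"Invalid line range: {start=}, {end=}")
        return LineRange((start, end))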
    
    
===========changed ref 0===========
    # module: coeditor.code_change
  + _non_scope_stmt_types = {
  +     "decorated",
  +     "async_stmt",
  +     ptree.Class.type,
  +     ptree.Function.type,
  + }
  - _non_scope_stmt_types = {"decorated", "async_stmt"}
    
===========changed ref 1===========
    # module: coeditor.code_change
    def _is_scope_statement(stmt: PyNode) -> bool:
  -     match stmt:
  -         case ptree.PythonNode(type=node_type) if node_type not in _non_scope_stmt_types:
  -             return stmt.children[0].type not in ptree._IMPORTS
  -         case ptree.Flow():
  -             return True
  -         case _:
  +     """Will only return False for functions, classes, and import statments"""
  +     if stmt.type in _non_scope_stmt_types:
  +         return False
  -             return False
  +     if stmt.type == "simple_stmt" and stmt.children[0].type in ptree._IMPORTS:
  +         return False
  +     return True
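
As a quick illustration of the node types this predicate distinguishes, parso can be inspected directly (a hedged aside using only parso's public API):

    import parso

    code = "import os\nx = 1\n\ndef f():\n    pass\n"
    for node in parso.parse(code).children:
        kind = node.type
        if kind == "simple_stmt":
            kind += f" -> {node.children[0].type}"
        print(kind)
    # simple_stmt -> import_name, simple_stmt -> expr_stmt, funcdef, endmarker

Here the import (`import_name`) and the `funcdef` are filtered out, while the `expr_stmt` line is kept as a scope statement.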
    
===========changed ref 2===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @staticmethod
        def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
            spans = []
            subscopes = dict()
            scope = ChangeScope(path, tree, spans, subscopes, None)
            assert isinstance(tree, ScopeTree)
            is_func = isinstance(tree, ptree.Function)
  + 
  +         def mk_span(stmts):
  +             # remove leading newlines
  +             n_leading_newlines = 0
  +             for s in stmts:
  +                 if s.type == ptree.Newline.type:
  +                     n_leading_newlines += 1
  +                 else:
  +                     break
  +             if n_leading_newlines:
  +                 stmts = stmts[n_leading_newlines:]
  +             if stmts:
  +                 yield StatementSpan(len(spans), stmts, scope)
    
            current_stmts = []
            content = (
                tree.children
                if isinstance(tree, ptree.Module)
                else cast(ptree.PythonNode, tree.get_suite()).children
            )
            for s in content:
                # we don't create inner scopes for function contents
                if is_func or _is_scope_statement(as_any(s)):
                    current_stmts.append(s)
                else:
                    if current_stmts:
  +                     spans.extend(mk_span(current_stmts))
  -                     spans.append(StatementSpan(len(spans), current_stmts, scope))
                        current_stmts = []
            if current_stmts:
  +             spans.extend(mk_span(current_stmts))
  -             spans.append(StatementSpan(len(spans), current_stmts, scope))
    
            if is_func:
                # we don't create inner scopes for function contents
  +             if not spans:
  +                 raise ValueError(f"Function with no spans: {path=}, {tree.get_code()=}")
                return scope
            for stree in tree._search_in_scope(ptree.Function.type, ptree.Class.type):
                stree: ptree.Function | ptree.</s>
===========changed ref 3===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @staticmethod
        def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
    # offset: 1
    <s>_scope(ptree.Function.type, ptree.Class.type):
                stree: ptree.Function | ptree.Class
                name = cast(ptree.Name, stree.name).value
                spath = path.append(name)
                subscope = ChangeScope.from_tree(spath, stree)
                subscope.parent_scope = scope
                subscopes[name] = subscope
            return scope
    
===========changed ref 4===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        def __post_init__(self):
            # compute header
            if isinstance(self.tree, ptree.Module):
                header_code = f"# module: {self.path.module}"
                header_line_range = line_range(0, 0, can_be_empty=True)
            else:
                h_start, h_end = 0, 0
                tree = self.tree
                to_visit = list[NodeOrLeaf]()
                parent = not_none(tree.parent)
                while parent.type in ("decorated", "async_funcdef"):
                    to_visit.insert(0, parent.children[0])
                    parent = not_none(parent.parent)
                to_visit.extend(tree.children)
                visited = list[NodeOrLeaf]()
                for c in to_visit:
                    if c.type == "suite":
                        break
                    visited.append(c)
                header_code = "".join(cast(str, c.get_code()) for c in visited)
  +             header_code, e_left, e_right = _strip_empty_lines(header_code)
  -             header_code, _, e_right = _strip_empty_lines(header_code)
  +             h_start = not_none(visited[0].get_start_pos_of_prefix())[0] + e_left
  -             h_start = visited[0].start_pos[0]
                h_end = visited[-1].end_pos[0] + 1 - e_right
              assert_eq(count_lines(header_code), h_end - h_start)
                header_line_range = line_range(h_start, h_end)
                if self.spans and h_end > self.spans[0].line_range[0]:
                    raise ValueError(
                        f"Header covers the fisrt span: {self.path=}, {h_start=}, {h_end=} "
                        f"{self.spans[0].line_range=}"
                    )
    
            self.header_code</s>
===========changed ref 5===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        def __post_init__(self):
    # offset: 1
    <s>end=} "
                        f"{self.spans[0].line_range=}"
                    )
    
            self.header_code: str = header_code + "\n"
            self.header_line_range: LineRange = header_line_range
     | 
| 
	coeditor.code_change/_edits_from_commit_history | 
	Modified | 
	temp-1 | 
	74b3e2333e9961157b553323ce501210bcaaebda | 
	Fix span ranges. | 
	 <0>:<add>     results = list[TEnc]()
 | 
	      # module: coeditor.code_change
      def _edits_from_commit_history(
          project: Path,
          history: Sequence[CommitInfo],
          change_processor: ProjectChangeProcessor[TProb],
    +     edit_encoder: Callable[[TProb], Iterable[TEnc]],
    -     edit_encoder: Callable[[TProb], Iterable[T1]],
          ignore_dirs: set[str],
          silent: bool,
    + ) -> Sequence[TEnc]:
    - ) -> Sequence[T1]:
      <s>_sys_path=[project / "src"])
          pstate = ProjectState(proj, scripts)
      
          # now we can get the first project state, although this is not needed for now
          # but we'll use it later for pre-edit analysis
          path2module = {
              f: parse_module(project / f)
              for f in tqdm(
                  get_python_files(project), desc="building initial project", disable=silent
              )
          }
      
          def is_src(path_s: str) -> bool:
              path = Path(path_s)
              return path.suffix == ".py" and all(p not in ignore_dirs for p in path.parts)
      
          future_commits = list(reversed(history[:-1]))
    -     results = list[T1]()
 <0>      for commit_next in tqdm(
              future_commits, smoothing=0, desc="processing commits", disable=silent
          ):
              # get changed files
              changed_files = run_command(
                  ["git", "diff", commit_now.hash, commit_next.hash, "--name-status"],
                  cwd=project,
              ).splitlines()
      
              path_changes = list[Change[str]]()
      
              for line in changed_files:
                  segs = line.split("\t")
                  if len(segs) == 2:
                      tag, path = segs
                      if not is_src(path):
                          continue
                      if tag.endswith("A"):
                          path_changes.append(Added(path))
                      elif tag.endswith("D"):
                          path_changes.append(Deleted(path))
                      if</s> | 
	===========above chunk 0===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_processor: ProjectChangeProcessor[TProb],
  +     edit_encoder: Callable[[TProb], Iterable[TEnc]],
  -     edit_encoder: Callable[[TProb], Iterable[T1]],
        ignore_dirs: set[str],
        silent: bool,
  + ) -> Sequence[TEnc]:
  - ) -> Sequence[T1]:
    # offset: -1
    <s> name: {mname}")
                    files = list(project.iterdir())
                    print_err(f"project: {proj}", file=sys.stderr)
                    print_err(f"files in root: {files}", file=sys.stderr)
                    raise e
                m = s._module_node
                assert isinstance(m, ptree.Module)
                # mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
                # m = parso.parse(path.read_text())
                return JModule(mname, m)
    
  +     def checkout_commit(commit_hash: str):
  -     def checkout_commit(commit_hash: str, force: bool = False):
            with _tlogger.timed("checkout"):
                subprocess.run(
                    ["git", "checkout", "-f", commit_hash],
                    cwd=project,
                    capture_output=True,
                    check=True,
                )
    
  +     # to ensure we are not accidentally overriding real code changes
  +     if list(project.iterdir()) != [project / ".git"]:
  +         raise FileExistsError(f"Directory '{project}' should contain only '.git'.")
  + 
        # checkout to the first commit
        commit_now = history[-1]
  +     checkout_commit(commit_now.hash)
  -     checkout_commit(commit_now.hash, force=True)
        proj = jedi.Project(path=project, added_sys_path=[project / "src"])
        pstate = ProjectState(proj, scripts)
    
        # now we can</s>
===========above chunk 1===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_processor: ProjectChangeProcessor[TProb],
  +     edit_encoder: Callable[[TProb], Iterable[TEnc]],
  -     edit_encoder: Callable[[TProb], Iterable[T1]],
        ignore_dirs: set[str],
        silent: bool,
  + ) -> Sequence[TEnc]:
  - ) -> Sequence[T1]:
    # offset: -2
        scripts = dict[RelPath, jedi.Script]()
    
        def parse_module(path: Path):
            with _tlogger.timed("parse_module"):
                s = jedi.Script(path=path, project=proj)
                scripts[to_rel_path(path.relative_to(proj._path))] = s
                mcontext = s._get_module_context()
                assert isinstance(mcontext, ModuleContext)
                mname = cast(str, mcontext.py__name__())
                if mname.startswith("src."):
                    e = ValueError(f"Bad module name: {mname}")
                    files = list(project.iterdir())
                    print_err(f"project: {proj</s>
===========below chunk 0===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_processor: ProjectChangeProcessor[TProb],
  +     edit_encoder: Callable[[TProb], Iterable[TEnc]],
  -     edit_encoder: Callable[[TProb], Iterable[T1]],
        ignore_dirs: set[str],
        silent: bool,
  + ) -> Sequence[TEnc]:
  - ) -> Sequence[T1]:
    # offset: 1
    <s>Added(path))
                    elif tag.endswith("D"):
                        path_changes.append(Deleted(path))
                    if tag.endswith("M"):
                        path_changes.append(Modified(path, path))
                elif len(segs) == 3:
                    tag, path1, path2 = segs
                    assert tag.startswith("R")
                    if not is_src(path1) or not is_src(path2):
                        continue
                    path_changes.append(Deleted(path1))
                    path_changes.append(Added(path2))
    
        # make deep copies of changed modules
            to_copy = {
                to_rel_path(Path(path_change.before))
                for path_change in path_changes
                if not isinstance(path_change, Added)
            }
            _deep_copy_subset_(path2module, to_copy)
    
            checkout_commit(commit_next.hash)
    
            new_path2module = path2module.copy()
            changed = dict[ModuleName, JModuleChange]()
            for path_change in path_changes:
                path = project / path_change.earlier()
                rel_path = to_rel_path(path.relative_to(project))
                match path_change:
                    case Added():
                        mod = parse_module(path)
                        new_path2module[rel_path] = mod
                        changed[mod.mname] = JModuleChange.from_modules(Added(mod))
                    case Deleted():
                        mod = new_path2module.pop(rel_path)
                        changed[mod.mname] = J</s>
===========below chunk 1===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_processor: ProjectChangeProcessor[TProb],
  +     edit_encoder: Callable[[TProb], Iterable[TEnc]],
  -     edit_encoder: Callable[[TProb], Iterable[T1]],
        ignore_dirs: set[str],
        silent: bool,
  + ) -> Sequence[TEnc]:
  - ) -> Sequence[T1]:
    # offset: 2
    <s> case Deleted():
                        mod = new_path2module.pop(rel_path)
                        changed[mod.mname] = JModuleChange.from_modules(Deleted(mod))
                    case Modified(path1, path2):
                        assert path1 == path2
                        mod_old = new_path2module[rel_path]
                        new_path2module[rel_path] = mod_new = parse_module(path)
                        changed[mod_new.mname] = JModuleChange.from_modules(
                            Modified(mod_old, mod_new)
                        )
    
            with _tlogger.timed("post_edit_analysis"):
                post_analysis = change_processor.post_edit_analysis(
                    pstate,
                    new_path2module,
                    changed,
                )
    
            # now go backwards in time to perform pre-edit analysis
            checkout_commit(commit_now.hash)
            with _tlogger.timed("pre_edit_analysis"):
                pre_analysis = change_processor.pre_edit_analysis(
                    pstate,
                    path2module,
                    changed,
                )
            checkout_commit(commit_next.hash)
    
            modules_mod = Modified(path2module.values(), new_path2module.values())
            pchange = JProjectChange(changed, modules_mod, commit_next)
    
            with _tlogger.timed("process_change"):
                processed = list(
                    change_processor.process_change(pchange, pre</s>
===========below chunk 2===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_processor: ProjectChangeProcessor[TProb],
  +     edit_encoder: Callable[[TProb], Iterable[TEnc]],
  -     edit_encoder: Callable[[TProb], Iterable[T1]],
        ignore_dirs: set[str],
        silent: bool,
  + ) -> Sequence[TEnc]:
  - ) -> Sequence[T1]:
    # offset: 3
    <s>, post_analysis)
                )
            with _tlogger.timed("change_encoder"):
                for change in processed:
                    results.extend(edit_encoder(change))
            commit_now = commit_next
            path2module = new_path2module
        return results
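
The `--name-status` parsing in the loop above is easy to get wrong around renames; here is a self-contained sketch of just that step (a hypothetical helper mirroring the tag handling shown, not part of the module):

    import subprocess
    from pathlib import Path

    def changed_py_files(project: Path, a: str, b: str) -> list[tuple[str, str]]:
        # (tag, path) pairs between two commits; a rename "R<score>\told\tnew"
        # arrives as three tab-separated fields and becomes a delete plus an add.
        out = subprocess.run(
            ["git", "diff", a, b, "--name-status"],
            cwd=project, capture_output=True, text=True, check=True,
        ).stdout
        results: list[tuple[str, str]] = []
        for line in out.splitlines():
            segs = line.split("\t")
            if len(segs) == 2 and segs[1].endswith(".py"):
                results.append((segs[0], segs[1]))
            elif len(segs) == 3 and segs[0].startswith("R"):
                results.append(("D", segs[1]))
                results.append(("A", segs[2]))
        return results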
    
     | 
| 
	coeditor.ctx_change_encoder/PyDefinition.from_name | 
	Modified | 
	temp-1 | 
	74b3e2333e9961157b553323ce501210bcaaebda | 
	Fix span ranges. | 
	 <0>:<add>             yield PyDefinition(full_name, start_pos, end_pos)
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass(unsafe_hash=True)
      class PyDefinition:
          @staticmethod
          def from_name(name: classes.BaseName) -> Iterable["PyDefinition"]:
              if (
                  not name.in_builtin_module()
                  and (full_name := name.full_name)
    +             # and (import_module := name.module_name)
    -             and (import_module := name.module_name)
                  and (start_pos := name.get_definition_start_position())
                  and (end_pos := name.get_definition_end_position())
              ):
                  full_name = PyFullName(full_name)
    +             # if not full_name.startswith(import_module):
    -             if not full_name.startswith(import_module):
    +             #     raise ValueError(f"Inconsistent module: {full_name=}, {import_module=}")
    -                 raise ValueError(f"Inconsistent module: {full_name=}, {import_module=}")
    -             yield PyDefinition(full_name, import_module, start_pos, end_pos)
 <0>  
       | 
	===========unchanged ref 0===========
    at: coeditor.ctx_change_encoder
        PyFullName = NewType("PyFullName", str)
    
        PyDefinition(full_name: PyFullName, start_pos: tuple[int, int], end_pos: tuple[int, int])
    
    at: coeditor.ctx_change_encoder.PyDefinition
        full_name: PyFullName
    
        start_pos: tuple[int, int]
    
        end_pos: tuple[int, int]
    
    at: jedi.api.classes
        BaseName(inference_state, name)
    
    at: jedi.api.classes.BaseName
        _mapping = {
                'posixpath': 'os.path',
                'riscospath': 'os.path',
                'ntpath': 'os.path',
                'os2emxpath': 'os.path',
                'macpath': 'os.path',
                'genericpath': 'os.path',
                'posix': 'os',
                '_io': 'io',
                '_functools': 'functools',
                '_collections': 'collections',
                '_socket': 'socket',
                '_sqlite3': 'sqlite3',
            }
    
        _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
                'argparse._ActionsContainer': 'argparse.ArgumentParser',
            }.items())
    
        in_builtin_module()
    
        get_definition_start_position()
    
        get_definition_end_position()
    
    at: typing
        Iterable = _alias(collections.abc.Iterable, 1)
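
A hedged usage sketch for `PyDefinition.from_name`: jedi's public `Script.infer` yields `classes.Name` objects exposing exactly the accessors used above (the code string and position are illustrative):

    import jedi

    script = jedi.Script(code="import os\np = os.path.join('a', 'b')\n")
    for name in script.infer(line=2, column=12):  # resolve `join`
        for d in PyDefinition.from_name(name):
            print(d.full_name, d.start_pos, d.end_pos)
    # e.g. os.path.join, with the start/end position of its definition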
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass(unsafe_hash=True)
    class PyDefinition:
        """Note that the module and positions can be referring to either the import
        statement or the actual definition."""
    
        full_name: PyFullName
  -     import_module: ModuleName
        start_pos: tuple[int, int]
        end_pos: tuple[int, int]
    
===========changed ref 1===========
    # module: coeditor.code_change
    TProb = TypeVar("TProb", covariant=True)
  + TEnc = TypeVar("TEnc", covariant=True)
    
===========changed ref 2===========
    # module: coeditor.code_change
  + _non_scope_stmt_types = {
  +     "decorated",
  +     "async_stmt",
  +     ptree.Class.type,
  +     ptree.Function.type,
  + }
  - _non_scope_stmt_types = {"decorated", "async_stmt"}
    
===========changed ref 3===========
    # module: coeditor.code_change
    def _is_scope_statement(stmt: PyNode) -> bool:
  -     match stmt:
  -         case ptree.PythonNode(type=node_type) if node_type not in _non_scope_stmt_types:
  -             return stmt.children[0].type not in ptree._IMPORTS
  -         case ptree.Flow():
  -             return True
  -         case _:
  +     """Will only return False for functions, classes, and import statments"""
  +     if stmt.type in _non_scope_stmt_types:
  +         return False
  -             return False
  +     if stmt.type == "simple_stmt" and stmt.children[0].type in ptree._IMPORTS:
  +         return False
  +     return True
    
===========changed ref 4===========
    # module: coeditor.code_change
    @dataclass
    class StatementSpan:
        def __post_init__(self):
            assert self.statements
  -         # remove leading newlines
  -         n_leading_newlines = 0
  -         stmts = self.statements
  -         for s in stmts:
  -             if s.type == ptree.Newline.type:
  -                 n_leading_newlines += 1
  -             else:
  -                 break
  -         if n_leading_newlines:
  -             self.statements = stmts[n_leading_newlines:]
  - 
            origin_code = "".join(s.get_code() for s in self.statements)
  +         code, e_left, e_right = _strip_empty_lines(origin_code)
  -         code, _, e_right = _strip_empty_lines(origin_code)
  +         start = not_none(self.statements[0].get_start_pos_of_prefix())[0] + e_left
  -         start = self.statements[0].start_pos[0]
            end = self.statements[-1].end_pos[0] + 1 - e_right
    
            self.code: str = code + "\n"
            try:
                self.line_range: LineRange = line_range(start, end)
            except ValueError:
  +             print_err(f"{e_right=}, {start=}, {end=}")
  -             print_err(f"{origin_code=}, {e_right=}, {start=}, {end=}")
  +             print_err("Origin code:")
  +             print_err(origin_code)
  +             print_err("Stmts:")
  +             for s in self.statements:
  +                 print_err(s)
                raise
    
===========changed ref 5===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        def __post_init__(self):
            # compute header
            if isinstance(self.tree, ptree.Module):
                header_code = f"# module: {self.path.module}"
                header_line_range = line_range(0, 0, can_be_empty=True)
            else:
                h_start, h_end = 0, 0
                tree = self.tree
                to_visit = list[NodeOrLeaf]()
                parent = not_none(tree.parent)
                while parent.type in ("decorated", "async_funcdef"):
                    to_visit.insert(0, parent.children[0])
                    parent = not_none(parent.parent)
                to_visit.extend(tree.children)
                visited = list[NodeOrLeaf]()
                for c in to_visit:
                    if c.type == "suite":
                        break
                    visited.append(c)
                header_code = "".join(cast(str, c.get_code()) for c in visited)
  +             header_code, e_left, e_right = _strip_empty_lines(header_code)
  -             header_code, _, e_right = _strip_empty_lines(header_code)
  +             h_start = not_none(visited[0].get_start_pos_of_prefix())[0] + e_left
  -             h_start = visited[0].start_pos[0]
                h_end = visited[-1].end_pos[0] + 1 - e_right
              assert_eq(count_lines(header_code), h_end - h_start)
                header_line_range = line_range(h_start, h_end)
                if self.spans and h_end > self.spans[0].line_range[0]:
                    raise ValueError(
                        f"Header covers the fisrt span: {self.path=}, {h_start=}, {h_end=} "
                        f"{self.spans[0].line_range=}"
                    )
    
            self.header_code</s>
===========changed ref 6===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        def __post_init__(self):
    # offset: 1
    <s>end=} "
                        f"{self.spans[0].line_range=}"
                    )
    
            self.header_code: str = header_code + "\n"
            self.header_line_range: LineRange = header_line_range
     | 
| 
	coeditor.ctx_change_encoder/CtxCodeChangeProblemGenerator.process_change | 
	Modified | 
	temp-1 | 
	bfd57146137ab2ed5c1a9cd99939b8c49797f0e4 | 
	Record src_info in edits. | 
	 <0>:<add>                 processed_cspans.append(span)
 | 
	      # module: coeditor.ctx_change_encoder
      class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
          def process_change(
              self,
              pchange: JProjectChange,
              mod2usages: Mapping[ModuleName, LineUsageAnalysis],
              module_order: Sequence[ModuleName],
          ) -> Iterable[CtxCodeChangeProblem]:
      <s>            for l in all_lines:
                      for pydef in line_usages.line2usages.get(l, set()):
                          if (
                              pydef.full_name.startswith(path.module)
                              and pydef.start_pos[0] in all_lines
                          ):
                              # skip self references
                              continue
                          used_defs.add(pydef)
      
                  # only keep unique changed spans
                  seen = set[tuple[ModuleName, LineRange]]()
                  for cspan in other_changes:
                      seen.add((cspan.path.module, cspan.line_range))
                  result = list[ChangedSpan]()
                  for used in used_defs:
                      for cspan in get_def_spans(used):
                          key = (cspan.path.module, cspan.line_range)
                          if key not in seen:
                              result.append(cspan)
                              seen.add(key)
                  return result
      
    +         processed_cspans = list[ChangedSpan]()
    -         sorted_cspans = list[ChangedSpan]()
              for m in module_order:
                  if (mchange := pchange.changed.get(m)) is None:
                      continue
                  for span in mchange.changed.values():
                      if span.change.as_char() == Modified.as_char():
    +                     relevant_changes = processed_cspans.copy()
    -                     relevant_changes = sorted_cspans.copy()
                          yield CtxCodeChangeProblem(
                              span,
                              relevant_changes=relevant_changes,
                              relevant_unchanged=get_relevant_unchanged(
                                  span, relevant_changes
                              ),
    +                         src_info={"commit": pchange.commit_info},
                          )
    -                 sorted_cspans.append(span)
 <0>  
       | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: -1
    <s>")
                    if len(stmts) > 1:
                        ellipsis = "    " * (len(ancestors) - 1) + "# ...\n"
                        body_code = ellipsis + body_code
                    h_end = f_scope.header_line_range[1]
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        line_range(h_end, h_end + len(body_code)),
                    )
                    cspans.append(cspan)
    
                # add statement spans
                for stmt_span in stmt_spans:
                    ancestors = stmt_span.scope.ancestors()
                    body_code = stmt_span.code
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        stmt_span.line_range,
                    )
                    cspans.append(cspan)
    
                cspan_cache[used] = cspans
                return cspans
    
            def get_relevant_unchanged(
                this_change: ChangedSpan, other_changes: Sequence[ChangedSpan]
            ):
                if isinstance(this_change.change, Added):
                    # nothing to analyze
                    return []
                path = this_change.path
                line_usages = mod2usages[path.module]
                used_defs = set[PyDefinition]()
                all_lines = set(range(*this_change.line_range))
                all_lines.update(range(*this_change.header_line_range))
                for l in all_lines:
                    for pydef in line_usages.line2usages.get(l, set()</s>
===========above chunk 1===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: -2
    <s>) -> list[ChangedSpan]:
                "Get the (pre-edit) spans for the given definition."
                if used.full_name in cspan_cache:
                    return cspan_cache[used.full_name]
                path = mod_hier.resolve_path(used.full_name.split("."))
                cspans = list[ChangedSpan]()
                if path is None:
                    cspan_cache[used] = cspans
                    return cspans
                jmod = before_mod_map[path.module]
                scope = jmod.as_scope
                elem = scope._search(path.path, used.start_pos[0])
                func_scopes = list[ChangeScope]()
                stmt_spans = list[StatementSpan]()
                match elem:
                    case ChangeScope(tree=ptree.Function()):
                        func_scopes.append(elem)
                    case ChangeScope(tree=ptree.Class()):
                        # add all attrs and methods
                        stmt_spans.extend(elem.spans)
                        func_scopes.extend(
                            s
                            for s in elem.subscopes.values()
                            if isinstance(s.tree, ptree.Function)
                        )
                    case StatementSpan():
                        stmt_spans.append(elem)
    
                # add collapsed functions
                for f_scope in func_scopes:
                    ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-1].get_code().strip("\n")
                    if len(stmts) > 1:
                        ellipsis = "    " * (len(ancestors) - 1) + "#</s>
===========above chunk 2===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: -3
            before_mod_map = {m.mname: m for m in pchange.all_modules.before}
            mod_hier = ModuleHierarchy.from_modules(before_mod_map)
            cspan_cache = dict[PyDefinition, list[ChangedSpan]]()
    
            def get_def_spans(used:</s>
===========unchanged ref 0===========
    at: coeditor.code_change
        LineRange = NewType("LineRange", tuple[int, int])
    
        line_range(start: int, end: int, can_be_empty: bool=False) -> LineRange
    
        ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None")
    
        StatementSpan(nth_in_parent: int, statements: Sequence[PyNode], scope: ChangeScope)
    
        ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange)
    
    at: coeditor.code_change.ChangeScope
        path: ProjectPath
    
        tree: ScopeTree
    
        spans: Sequence["StatementSpan"]
    
        subscopes: Mapping[str, Self]
    
        parent_scope: "ChangeScope | None"
    
        ancestors() -> list[Self]
    
    at: coeditor.code_change.ChangeScope.__post_init__
        self.header_line_range: LineRange = header_line_range
    
    at: coeditor.code_change.ChangedSpan
        change: Change[str]
    
        parent_scopes: Sequence[Change[ChangeScope]]
    
        line_range: LineRange
    
    at: coeditor.code_change.StatementSpan
        nth_in_parent: int
    
        statements: Sequence[PyNode]
    
        scope: ChangeScope
    
    at: coeditor.code_change.StatementSpan.__post_init__
        self.code: str = code + "\n"
    
        self.line_range: LineRange = line_range(start, end)
    
    at: coeditor.ctx_change_encoder
        CtxCodeChangeProblem(span: ChangedSpan, relevant_changes: list[ChangedSpan], relevant_unchanged: list[ChangedSpan], src_info: dict[str, Any])
    
        PyDefinition(full_name: PyFullName, start_pos: tuple[int, int], end_pos: tuple[int, int])
    
        LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
    
     | 
| 
	coeditor.ctx_change_encoder/TkCtxCodeChangeProblem.meta_data_lines | 
	Modified | 
	temp-1 | 
	bfd57146137ab2ed5c1a9cd99939b8c49797f0e4 | 
	Record src_info in edits. | 
	 <0>:<add>             f"src_info: {self.src_info}",
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class TkCtxCodeChangeProblem(TokenizedEdit):
          def meta_data_lines(self) -> list[str]:
              return [
                  f"path: {self.path}",
                  f"n_references: {len(self.references)}",
                  f"total_reference_tks: {sum(len(ref) for ref in self.references)}",
 <0>          ]
      
       | 
	===========unchanged ref 0===========
    at: coeditor.common
        TokenSeq = list[Token]
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeProblem
        input_tks: TokenSeq
    
        output_tks: TokenSeq
    
        path: ProjectPath
    
        change_type: Change[None]
    
        named_references: Sequence[tuple[str, TokenSeq]]
    
        src_info: dict[str, Any]
    
    at: coeditor.encoding.TokenizedEdit
        input_tks: TokenSeq
    
        output_tks: TokenSeq
    
        main_tks: TokenSeq
    
        path: ProjectPath
    
        change_type: Change[None]
    
        BAD_DELETE = encode_single_line("((bad delete))")
    
        all_ctxs(self) -> dict[str, TokenSeq]
    
        meta_data_lines(self) -> list[str]
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeProblem(TokenizedEdit):
        input_tks: TokenSeq
        output_tks: TokenSeq
        path: ProjectPath
        change_type: Change[None]
        # most relevant to least relevant
        named_references: Sequence[tuple[str, TokenSeq]]
  +     src_info: dict[str, Any]
    
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    # jedi.cache.clear_time_caches = lambda: None
    
    
    @dataclass
    class CtxCodeChangeProblem:
        span: ChangedSpan
        # most relevant to least relevant
        relevant_changes: list[ChangedSpan]
        # most relevant to least relevant
        relevant_unchanged: list[ChangedSpan]
  +     # some optional information about how the problem was generated
  +     src_info: dict[str, Any]
    
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
            before_mod_map = {m.mname: m for m in pchange.all_modules.before}
            mod_hier = ModuleHierarchy.from_modules(before_mod_map)
            cspan_cache = dict[PyDefinition, list[ChangedSpan]]()
    
            def get_def_spans(used: PyDefinition) -> list[ChangedSpan]:
                "Get the (pre-edit) spans for the given definition."
                if used.full_name in cspan_cache:
                    return cspan_cache[used.full_name]
                path = mod_hier.resolve_path(used.full_name.split("."))
                cspans = list[ChangedSpan]()
                if path is None:
                    cspan_cache[used] = cspans
                    return cspans
                jmod = before_mod_map[path.module]
                scope = jmod.as_scope
                elem = scope._search(path.path, used.start_pos[0])
                func_scopes = list[ChangeScope]()
                stmt_spans = list[StatementSpan]()
                match elem:
                    case ChangeScope(tree=ptree.Function()):
                        func_scopes.append(elem)
                    case ChangeScope(tree=ptree.Class()):
                        # add all attrs and methods
                        stmt_spans.extend(elem.spans)
                        func_scopes.extend(
                            s
                            for s in elem.subscopes.values()
                            if isinstance(s.tree, ptree.Function)
                        )
                    case StatementSpan():
                        stmt_spans.append(elem)
    
                # add collapsed functions
                for f_scope in func_scopes:
                    ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-</s>
===========changed ref 3===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: 1
    <s> ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-1].get_code().strip("\n")
                    if len(stmts) > 1:
                        ellipsis = "    " * (len(ancestors) - 1) + "# ...\n"
                        body_code = ellipsis + body_code
                    h_end = f_scope.header_line_range[1]
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        line_range(h_end, h_end + len(body_code)),
                    )
                    cspans.append(cspan)
    
                # add statement spans
                for stmt_span in stmt_spans:
                    ancestors = stmt_span.scope.ancestors()
                    body_code = stmt_span.code
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        stmt_span.line_range,
                    )
                    cspans.append(cspan)
    
                cspan_cache[used] = cspans
                return cspans
    
            def get_relevant_unchanged(
                this_change: ChangedSpan, other_changes: Sequence[ChangedSpan]
            ):
                if isinstance(this_change.change, Added):
                    # nothing to analyze
                    return []
                path = this_change.path
                line_usages = mod2usages[path.module]
                used_defs = set[PyDefinition]</s>
===========changed ref 4===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: 2
    <s>            all_lines = set(range(*this_change.line_range))
                all_lines.update(range(*this_change.header_line_range))
                for l in all_lines:
                    for pydef in line_usages.line2usages.get(l, set()):
                        if (
                            pydef.full_name.startswith(path.module)
                            and pydef.start_pos[0] in all_lines
                        ):
                            # skip self references
                            continue
                        used_defs.add(pydef)
    
                # only keep unique changed spans
                seen = set[tuple[ModuleName, LineRange]]()
                for cspan in other_changes:
                    seen.add((cspan.path.module, cspan.line_range))
                result = list[ChangedSpan]()
                for used in used_defs:
                    for cspan in get_def_spans(used):
                        key = (cspan.path.module, cspan.line_range)
                        if key not in seen:
                            result.append(cspan)
                            seen.add(key)
                return result
    
  +         processed_cspans = list[ChangedSpan]()
  -         sorted_cspans = list[ChangedSpan]()
            for m in module_order:
                if (mchange := pchange.changed.get(m)) is None:
                    continue
                for span in mchange.changed.values():
                    if span.change.as_char() == Modified.as_char():
  +                     relevant_changes = processed_cspans.copy()
  -                     relevant_changes = sorted_cspans.copy()
                        yield CtxCodeChange</s> | 
| 
	coeditor.ctx_change_encoder/TkCtxCodeChangeEncoder.encode_problem | 
	Modified | 
	temp-1 | 
	bfd57146137ab2ed5c1a9cd99939b8c49797f0e4 | 
	Record src_info in edits. | 
	 <0>:<add>                 src_info=problem.src_info,
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class TkCtxCodeChangeEncoder:
          def encode_problem(
              self,
              problem: CtxCodeChangeProblem,
          ) -> Iterable[TkCtxCodeChangeProblem]:
      <s>
                  else:
                      below_chunks = break_into_chunks(
                          below_tks,
                          lambda i: self._encode_parent_scopes(span.parent_scopes, i + 1),
                          chunk_size=self.max_ref_tks,
                          overlap=self.ref_chunk_overlap,
                      )
                  above_chunks = [
                      (f"above chunk {i}", chunk) for i, chunk in enumerate(above_chunks)
                  ]
                  below_chunks = [
                      (f"below chunk {i}", chunk) for i, chunk in enumerate(below_chunks)
                  ]
                  return TkCtxCodeChangeProblem(
                      scope_tks + chunk_input,
                      chunk_output,
                      path=span.parent_scopes[-1].earlier().path,
                      change_type=span.change.map(lambda _: None),
                      named_references=above_chunks + below_chunks + named_references,
 <0>              )
      
              for l in range(len(tk_delta.deltas) + 1):
                  finished = l == len(tk_delta.deltas)
                  input_growth = len(origin_lines[l]) + 2 if l < len(origin_lines) else 1
                  if (
                      finished
                      or chunk_lines >= self.max_lines_to_edit
                      or len(chunk_input) + input_growth > input_limit
                  ):
                      if has_change(chunk_output):
                          yield get_problem(chunk_input, chunk_output)
      
                      if finished:
                          break
      
                      chunk_main_input = join_list(origin_lines[chunk_start_l:l], Newline_id)
                      chunk_main_delta = tk_delta.for_input_range((chunk_start_l, l))
                      chunk_main_change = chunk_main_delta.to_change_tks(chunk_main</s> | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeEncoder:
        def encode_problem(
            self,
            problem: CtxCodeChangeProblem,
        ) -> Iterable[TkCtxCodeChangeProblem]:
    # offset: -1
    <s>to_tk_delta()
            chunk_id = 0
            chunk_start_l = 0
            scope_tks = self._encode_parent_scopes(span.parent_scopes, 0)
            chunk_input = TokenSeq()
            input_limit = self.max_query_tks - len(scope_tks)
            chunk_lines = 0
            chunk_output = TokenSeq()
            prev_change_tks = TokenSeq()
    
            def get_problem(chunk_input, chunk_output):
                # try move some prev_change_tks into the input
                above_tks = prev_change_tks
                below_tks = join_list(origin_lines[l:], Newline_id)
                chunk_input, above_tks, below_tks = self._inline_some_context(
                    chunk_input, above_tks, below_tks, input_limit
                )
    
                # limit the input size if it's too long (can happen for later chunks)
                chunk_input = truncate_section(chunk_input, TruncateAt.Right, input_limit)
                chunk_output = truncate_output_tks(chunk_input, chunk_output)
                chunk_output = truncate_section(
                    chunk_output, TruncateAt.Right, self.max_output_tks, add_bos=False
                )
    
                above_chunks = break_into_chunks(
                    above_tks,
                    lambda i: self._encode_parent_scopes(span.parent_scopes, -1 - i),
                    chunk_size=self.max_ref_tks,
                    overlap=self.ref_chunk_overlap,
                    right_to_left=True,
                )
                if finished:
                    below_chunks = []
                else:
                    below_chunks = break_into_chunks(
                        below_tks,
                        lambda i: self._</s>
===========above chunk 1===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeEncoder:
        def encode_problem(
            self,
            problem: CtxCodeChangeProblem,
        ) -> Iterable[TkCtxCodeChangeProblem]:
    # offset: -2
            span = problem.span
            named_references = list[tuple[str, TokenSeq]]()
            # compute the references that are relevant to this span
            relevant_chunks = self._group_encode_changed_refs(problem.relevant_changes)
            for i, chunk in enumerate(relevant_chunks):
                named_references.append((f"changed ref {i}", chunk))
            relevant_chunks = self._group_encode_unchanged_refs(problem.relevant_unchanged)
            for i, chunk in enumerate(relevant_chunks):
                named_references.append((f"unchanged ref {i}", chunk))
    
            diffs = change_to_line_diffs(span.change)
            original, delta = line_diffs_to_original_delta(diffs)
            origin_lines = split_list(encode_basic(original), Newline_id)
            tk_delta = delta.to_tk_delta()
            chunk_id = 0
            chunk_start_l = 0
            scope_tks = self</s>
===========below chunk 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeEncoder:
        def encode_problem(
            self,
            problem: CtxCodeChangeProblem,
        ) -> Iterable[TkCtxCodeChangeProblem]:
    # offset: 1
    <s>start_l, l))
                    chunk_main_change = chunk_main_delta.to_change_tks(chunk_main_input)
                    prev_change_tks.extend(chunk_main_change)
                    prev_change_tks.append(Newline_id)
                    chunk_id += 1
                    chunk_input = TokenSeq()
                    chunk_lines = 0
                    chunk_output = TokenSeq()
                    chunk_start_l = l
    
                chunk_input.append(get_extra_id(chunk_lines))
                if l < len(origin_lines):
                    chunk_input.extend(origin_lines[l])
                    chunk_input.append(Newline_id)
                line_change = join_list(tk_delta.deltas[l], Newline_id)
                chunk_output.append(get_extra_id(chunk_lines))
                chunk_output.extend(line_change)
                if line_change and line_change[-1] != Del_id:
                    chunk_output.append(Newline_id)
                chunk_lines += 1
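
The `get_extra_id(chunk_lines)` calls mark each editable line with a sentinel token, in the spirit of T5's `<extra_id_k>` infilling scheme. A hypothetical mapping consistent with the standard T5 vocabulary layout (the project's actual implementation is not shown in this dump):

    def get_extra_id(i: int, vocab_size: int = 32100, n_extras: int = 100) -> int:
        # In the T5 tokenizer, <extra_id_0> is the last id of the vocabulary
        # and <extra_id_k> counts downward from it.
        assert 0 <= i < n_extras
        return vocab_size - 1 - i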
    
    
===========unchanged ref 0===========
    at: cachetools
        FIFOCache(maxsize, getsizeof=None)
    
    at: coeditor.code_change.ChangedSpan
        change: Change[str]
    
        parent_scopes: Sequence[Change[ChangeScope]]
    
    at: coeditor.common
        TokenSeq = list[Token]
    
        split_list(lst: list[T1], sep: T1) -> list[list[T1]]
    
        join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
    
    at: coeditor.ctx_change_encoder
        CtxCodeChangeProblem(span: ChangedSpan, relevant_changes: list[ChangedSpan], relevant_unchanged: list[ChangedSpan], src_info: dict[str, Any])
    
        TkCtxCodeChangeProblem(input_tks: TokenSeq, output_tks: TokenSeq, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TokenSeq]], src_info: dict[str, Any])
    
        _ObjId = NewType("_ObjId", int)
    
    at: coeditor.ctx_change_encoder.CtxCodeChangeProblem
        span: ChangedSpan
    
        relevant_changes: list[ChangedSpan]
    
        relevant_unchanged: list[ChangedSpan]
    
        src_info: dict[str, Any]
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder
        VERSION = "0.0"
    
        max_ref_tks: int = 512
    
        max_query_tks: int = 512
    
        max_output_tks: int = 256
    
        max_scope_tks: int = 128
    
        max_lines_to_edit: int = 20
    
        ref_chunk_overlap: int = 32
    
        max_chunks_per_ref: int = 4
    
        max_lines_per_function: int = 500
    
        skip_unchanged_problems: bool = True
    
        _encode_parent_scopes(scope_changes: Sequence[Change[ChangeScope]], offset: int) -> TokenSeq
    
     | 
| 
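The chunking loop in the record above interleaves one <extra_id_k> sentinel per original line into the model input and pairs the same sentinel with that line's edit in the output. Below is a minimal, string-level sketch of that alignment; `encode_chunk` and its plain-string tokens are illustrative stand-ins, not the project's token-level API (`get_extra_id`, `TokenSeq`):

    # String-level sketch of the per-line sentinel alignment (assumed names).
    def encode_chunk(origin_lines: list[str], deltas: list[list[str]]):
        chunk_input, chunk_output = [], []
        for k, line in enumerate(origin_lines):
            chunk_input.append(f"<extra_id_{k}>")   # sentinel marks the line
            chunk_input.append(line)
            chunk_output.append(f"<extra_id_{k}>")  # same sentinel keys the edit
            chunk_output.extend(deltas[k] if k < len(deltas) else [])
        return chunk_input, chunk_output

    inp, out = encode_chunk(["x = 1", "y = 2"], [["<add> x = 2"], []])
    assert inp[0] == out[0] == "<extra_id_0>"

In the real encoder the sentinel index resets per chunk, and both streams are token sequences rather than strings.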
	coeditor.ctx_change_encoder/CtxCodeChangeProblemGenerator.process_change | 
	Modified | 
	temp-1 | 
	265b47b3f37487794989decf11d99183abf08228 | 
	Add parent scopes as relevant unchanged refs. | 
	 <0>:<add>                     relevant_changes = list(reversed(processed_cspans))
 | 
	      # module: coeditor.ctx_change_encoder
      class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
          def process_change(
              self,
              pchange: JProjectChange,
              mod2usages: Mapping[ModuleName, LineUsageAnalysis],
              module_order: Sequence[ModuleName],
          ) -> Iterable[CtxCodeChangeProblem]:
      <s>startswith(path.module)
                              and pydef.start_pos[0] in all_lines
                          ):
                              # skip self references
                              continue
    +                     if pydef not in used_defs:
    +                         used_defs.add(pydef)
    -                     used_defs.add(pydef)
    +                         sorted_defs.append(pydef)
      
    +             # return unique cspans
    -             # only keep unique changed spans
                  seen = set[tuple[ModuleName, LineRange]]()
    +             # consider other changes as seen
                  for cspan in other_changes:
                      seen.add((cspan.path.module, cspan.line_range))
                  result = list[ChangedSpan]()
    +             for used in sorted_defs:
    -             for used in used_defs:
                      for cspan in get_def_spans(used):
                          key = (cspan.path.module, cspan.line_range)
                          if key not in seen:
                              result.append(cspan)
                              seen.add(key)
                  return result
      
              processed_cspans = list[ChangedSpan]()
              for m in module_order:
                  if (mchange := pchange.changed.get(m)) is None:
                      continue
                  for span in mchange.changed.values():
                      if span.change.as_char() == Modified.as_char():
    +                     # latest changes are more relevant
    -                     relevant_changes = processed_cspans.copy()
 <0>                      yield CtxCodeChangeProblem(
                              span,
                              relevant_changes=relevant_changes,
                              relevant_unchanged=get_relevant_unchanged(
                                  span, relevant_changes
                              ),
                              src_info={"commit": pchange.commit_info},
                          )
                      processed_cspans.append(span)
      
       | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: -1
    <s>(cspan)
    
                # add statement spans
                for stmt_span in stmt_spans:
                    ancestors = stmt_span.scope.ancestors()
                    body_code = stmt_span.code
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        stmt_span.line_range,
                    )
                    cspans.append(cspan)
    
                cspan_cache[used] = cspans
                return cspans
    
            def get_relevant_unchanged(
                this_change: ChangedSpan, other_changes: Sequence[ChangedSpan]
            ):
                if isinstance(this_change.change, Added):
                    # nothing to analyze
                    return []
                path = this_change.path
                line_usages = mod2usages[path.module]
  +             # parent defs are also considered as used
  +             parent_defs = [
  +                 PyDefinition.from_scope(c.earlier()) for c in this_change.parent_scopes
  +             ]
  +             # immediate parents are more relevant
  +             sorted_defs = list(reversed(parent_defs))
  +             used_defs = set(sorted_defs)
  -             used_defs = set[PyDefinition]()
                all_lines = set(range(*this_change.line_range))
                all_lines.update(range(*this_change.header_line_range))
                for l in all_lines:
                    for pydef in line_usages.line2usages.get(l, set()):
                        if (
                            pydef.full_name.startswith(path.module)
                            and pydef.start_pos[0] in all_lines
                        ):
                            #</s>
===========above chunk 1===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: -2
    <s>as_scope
                elem = scope._search(path.path, used.start_pos[0])
                func_scopes = list[ChangeScope]()
                stmt_spans = list[StatementSpan]()
                match elem:
                    case ChangeScope(tree=ptree.Function()):
                        func_scopes.append(elem)
                    case ChangeScope(tree=ptree.Class()):
                        # add all attrs and methods
                        stmt_spans.extend(elem.spans)
                        func_scopes.extend(
                            s
                            for s in elem.subscopes.values()
                            if isinstance(s.tree, ptree.Function)
                        )
                    case StatementSpan():
                        stmt_spans.append(elem)
    
                # add collapsed functions
                for f_scope in func_scopes:
                    ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-1].get_code().strip("\n")
                    if len(stmts) > 1:
                        ellipsis = "    " * (len(ancestors) - 1) + "# ...\n"
                        body_code = ellipsis + body_code
                    h_end = f_scope.header_line_range[1]
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        line_range(h_end, h_end + len(body_code)),
                    )
                    cspans.append(cspan)
    
                # add statement spans
                for stmt_span in stmt_spans:
                    ancestors = stmt_span.</s>
===========above chunk 2===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: -3
            before_mod_map = {m.mname: m for m in pchange.all_modules.before}
            mod_hier = ModuleHierarchy.from_modules(before_mod_map)
            cspan_cache = dict[PyDefinition, list[ChangedSpan]]()
    
            def get_def_spans(used: PyDefinition) -> list[ChangedSpan]:
                "Get the (pre-edit) spans for the given definition."
                if used.full_name in cspan_cache:
                    return cspan_cache[used.full_name]
                path = mod_hier.resolve_path(used.full_name.split("."))
                cspans = list[ChangedSpan]()
                if path is None:
                    cspan_cache[used] = cspans
                    return cspans
                jmod = before_mod_map[path.module]
                scope = j</s>
===========unchanged ref 0===========
    at: coeditor.code_change
        LineRange = NewType("LineRange", tuple[int, int])
    
        line_range(start: int, end: int, can_be_empty: bool=False) -> LineRange
    
        ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None")
    
        StatementSpan(nth_in_parent: int, statements: Sequence[PyNode], scope: ChangeScope)
    
        ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange)
    
    at: coeditor.code_change.ChangeScope
        path: ProjectPath
    
        tree: ScopeTree
    
        spans: Sequence["StatementSpan"]
    
        subscopes: Mapping[str, Self]
    
        parent_scope: "ChangeScope | None"
    
        ancestors() -> list[Self]
    
    at: coeditor.code_change.ChangeScope.__post_init__
        self.header_line_range: LineRange = header_line_range
    
    at: coeditor.code_change.ChangedSpan
        change: Change[str]
    
        parent_scopes: Sequence[Change[ChangeScope]]
    
        line_range: LineRange
    
    at: coeditor.code_change.StatementSpan
        nth_in_parent: int
    
        statements: Sequence[PyNode]
    
        scope: ChangeScope
    
    at: coeditor.code_change.StatementSpan.__post_init__
        self.code: str = code + "\n"
    
        self.line_range: LineRange = line_range(start, end)
    
    at: coeditor.ctx_change_encoder
        CtxCodeChangeProblem(span: ChangedSpan, relevant_changes: list[ChangedSpan], relevant_unchanged: list[ChangedSpan], src_info: dict[str, Any])
    
        PyDefinition(full_name: PyFullName, start_pos: tuple[int, int], end_pos: tuple[int, int])
    
        LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
    
     | 
| 
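The `used_defs` set plus `sorted_defs` list in the record above is an ordered-deduplication pattern: the set gives O(1) membership checks while the list preserves discovery order, which here encodes relevance (immediate parents first, then usages). A standalone illustration with a hypothetical `ordered_unique` name:

    def ordered_unique(items):
        seen = set()      # fast membership
        result = []       # preserves first-seen order
        for x in items:
            if x not in seen:
                seen.add(x)
                result.append(x)
        return result

    assert ordered_unique([3, 1, 3, 2, 1]) == [3, 1, 2]

A plain set would lose the relevance ordering that the downstream reference ranking depends on.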
	coeditor.ctx_change_encoder/TkCtxCodeChangeEncoder._group_encode_changed_refs | 
	Modified | 
	temp-1 | 
	265b47b3f37487794989decf11d99183abf08228 | 
	Add parent scopes as relevant unchanged refs. | 
	 <0>:<add>                 file_tks.append(Newline_id)
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class TkCtxCodeChangeEncoder:
          def _group_encode_changed_refs(
              self, changes: Sequence[ChangedSpan]
          ) -> Sequence[TokenSeq]:
              module2changes = groupby(changes, lambda c: c.path.module)
              all_chunks = list[TokenSeq]()
              for change_group in module2changes.values():
                  change_group.sort(key=lambda c: c.line_range[0])
                  file_tks = TokenSeq()
                  # we'll add module as the chunk header, so we start within the module
                  last_scope = change_group[0].parent_scopes[:1]
                  for c in change_group:
                      scope_diff = []
                      for i, s in enumerate(c.parent_scopes):
                          if (
                              i >= len(last_scope)
                              or s.earlier().path != last_scope[i].earlier().path
                          ):
                              scope_diff.append(s)
                      if scope_diff:
                          header_tks = self._encode_parent_scopes(scope_diff, 0)
                          file_tks.extend(header_tks)
                      body_tks = self._encode_change(c.change)
                      file_tks.extend(body_tks)
                      file_tks.append(Newline_id)
 <0>                  last_scope = c.parent_scopes
      
                  mod_change = change_group[0].parent_scopes[:1]
                  mod_chunks = break_into_chunks(
                      file_tks,
                      lambda i: self._encode_parent_scopes(mod_change, i),
                      self.max_ref_tks,
                      overlap=self.ref_chunk_overlap,
                      max_return_chunks=self.max_chunks_per_ref,
                  )
                  all_chunks.extend(mod_chunks)
              return all_chunks
      
       | 
	===========unchanged ref 0===========
    at: coeditor._utils
        groupby(iterable: Iterable[T1], keyfunc: Callable[[T1], T2]) -> dict[T2, list[T1]]
    
    at: coeditor.code_change
        ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange)
    
    at: coeditor.code_change.ChangedSpan
        parent_scopes: Sequence[Change[ChangeScope]]
    
    at: coeditor.common
        TokenSeq = list[Token]
    
    at: coeditor.ctx_change_encoder
        _ObjId = NewType("_ObjId", int)
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder
        VERSION = "0.0"
    
        max_ref_tks: int = 512
    
        max_query_tks: int = 512
    
        max_output_tks: int = 256
    
        max_scope_tks: int = 128
    
        max_lines_to_edit: int = 20
    
        ref_chunk_overlap: int = 32
    
        max_chunks_per_ref: int = 4
    
        max_lines_per_function: int = 500
    
        skip_unchanged_problems: bool = True
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder.__post_init__
        self._id_cache = FIFOCache[_ObjId, TokenSeq](maxsize=1000)
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder._inline_some_context
        truncated_above, truncated_below = truncate_sections(
                        extra_space,
                        (above_ctx, TruncateAt.Left),
                        (below_ctx, TruncateAt.Right),
                        add_bos=True,
                    )
    
        truncated_above, truncated_below = truncate_sections(
                        extra_space,
                        (above_ctx, TruncateAt.Left),
                        (below_ctx, TruncateAt.Right),
                        add_bos=True,
                    )
    
    
===========unchanged ref 1===========
        above_ctx = truncate_section(
                            above_ctx, TruncateAt.Right, above_left + self.ref_chunk_overlap
                        )
        above_ctx = TokenSeq()
    
    at: coeditor.encoding
        change_to_tokens(change: Change[str]) -> TokenSeq
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass(unsafe_hash=True)
    class PyDefinition:
  +     @staticmethod
  +     def from_scope(scope: ChangeScope) -> "PyDefinition":
  +         path = scope.path
  +         full_name = PyFullName(f"{path.module}.{path.path}")
  +         start_pos = scope.header_line_range[0], 0
  +         end_pos = scope.header_line_range[1], 0
  +         return PyDefinition(full_name, start_pos, end_pos)
  + 
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
            before_mod_map = {m.mname: m for m in pchange.all_modules.before}
            mod_hier = ModuleHierarchy.from_modules(before_mod_map)
            cspan_cache = dict[PyDefinition, list[ChangedSpan]]()
    
            def get_def_spans(used: PyDefinition) -> list[ChangedSpan]:
                "Get the (pre-edit) spans for the given definition."
                if used.full_name in cspan_cache:
                    return cspan_cache[used.full_name]
                path = mod_hier.resolve_path(used.full_name.split("."))
                cspans = list[ChangedSpan]()
                if path is None:
                    cspan_cache[used] = cspans
                    return cspans
                jmod = before_mod_map[path.module]
                scope = jmod.as_scope
                elem = scope._search(path.path, used.start_pos[0])
                func_scopes = list[ChangeScope]()
                stmt_spans = list[StatementSpan]()
                match elem:
                    case ChangeScope(tree=ptree.Function()):
                        func_scopes.append(elem)
                    case ChangeScope(tree=ptree.Class()):
                        # add all attrs and methods
                        stmt_spans.extend(elem.spans)
                        func_scopes.extend(
                            s
                            for s in elem.subscopes.values()
                            if isinstance(s.tree, ptree.Function)
                        )
                    case StatementSpan():
                        stmt_spans.append(elem)
    
                # add collapsed functions
                for f_scope in func_scopes:
                    ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-</s>
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: 1
    <s> ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-1].get_code().strip("\n")
                    if len(stmts) > 1:
                        ellipsis = "    " * (len(ancestors) - 1) + "# ...\n"
                        body_code = ellipsis + body_code
                    h_end = f_scope.header_line_range[1]
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        line_range(h_end, h_end + len(body_code)),
                    )
                    cspans.append(cspan)
    
                # add statement spans
                for stmt_span in stmt_spans:
                    ancestors = stmt_span.scope.ancestors()
                    body_code = stmt_span.code
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        stmt_span.line_range,
                    )
                    cspans.append(cspan)
    
                cspan_cache[used] = cspans
                return cspans
    
            def get_relevant_unchanged(
                this_change: ChangedSpan, other_changes: Sequence[ChangedSpan]
            ):
                if isinstance(this_change.change, Added):
                    # nothing to analyze
                    return []
                path = this_change.path
                line_usages = mod2usages[path.module]
  +             # parent defs are also considered as used</s> | 
| 
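`_group_encode_changed_refs` above re-emits scope headers only when a span's parent scopes differ from the previous span's, so consecutive changes inside the same class or function share one header. A simplified version of that diffing, using plain strings where the real code compares `s.earlier().path` values:

    def scope_diff(last_scope: list[str], cur_scope: list[str]) -> list[str]:
        # emit only the headers that differ from (or extend past) the last span's
        return [
            s for i, s in enumerate(cur_scope)
            if i >= len(last_scope) or s != last_scope[i]
        ]

    assert scope_diff(["mod", "class A"], ["mod", "class A", "def f"]) == ["def f"]
    assert scope_diff(["mod", "class A"], ["mod", "class B"]) == ["class B"]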
	coeditor.ctx_change_encoder/CtxCodeChangeProblemGenerator.process_change | 
	Modified | 
	temp-1 | 
	f9c5006c3d426a199c472d33cc31b910793b0357 | 
	- Fix header trailing newline encoding. - Fix change hiding. | 
	 <0>:<add>             for cspan in (this_change, *other_changes):
 | 
	      # module: coeditor.ctx_change_encoder
      class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
          def process_change(
              self,
              pchange: JProjectChange,
              mod2usages: Mapping[ModuleName, LineUsageAnalysis],
              module_order: Sequence[ModuleName],
          ) -> Iterable[CtxCodeChangeProblem]:
      <s>(range(*this_change.line_range))
                  all_lines.update(range(*this_change.header_line_range))
                  for l in all_lines:
                      for pydef in line_usages.line2usages.get(l, set()):
                          if (
                              pydef.full_name.startswith(path.module)
                              and pydef.start_pos[0] in all_lines
                          ):
                              # skip self references
                              continue
                          if pydef not in used_defs:
                              used_defs.add(pydef)
                              sorted_defs.append(pydef)
      
                  # return unique cspans
                  seen = set[tuple[ModuleName, LineRange]]()
    +             # we don't need to show the changed parts again
    -             # consider other changes as seen
    -             for cspan in other_changes:
 <0>                  seen.add((cspan.path.module, cspan.line_range))
                  result = list[ChangedSpan]()
                  for used in sorted_defs:
                      for cspan in get_def_spans(used):
                          key = (cspan.path.module, cspan.line_range)
                          if key not in seen:
                              result.append(cspan)
                              seen.add(key)
                  return result
      
              processed_cspans = list[ChangedSpan]()
              for m in module_order:
                  if (mchange := pchange.changed.get(m)) is None:
                      continue
                  for span in mchange.changed.values():
                      if span.change.as_char() == Modified.as_char():
                          # latest changes are more relevant
                          relevant_changes = list(reversed(processed_cspans))
                          yield CtxCodeChangeProblem(
                              span,
                              relevant_changes=relevant_changes,</s> | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: -1
    <s>                    body_code = ellipsis + body_code
                    h_end = f_scope.header_line_range[1]
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        line_range(h_end, h_end + len(body_code)),
                    )
                    cspans.append(cspan)
    
                # add statement spans
                for stmt_span in stmt_spans:
                    ancestors = stmt_span.scope.ancestors()
                    body_code = stmt_span.code
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        stmt_span.line_range,
                    )
                    cspans.append(cspan)
    
                cspan_cache[used] = cspans
                return cspans
    
            def get_relevant_unchanged(
                this_change: ChangedSpan, other_changes: Sequence[ChangedSpan]
            ):
                if isinstance(this_change.change, Added):
                    # nothing to analyze
                    return []
                path = this_change.path
                line_usages = mod2usages[path.module]
                # parent defs are also considered as used
                parent_defs = [
                    PyDefinition.from_scope(c.earlier()) for c in this_change.parent_scopes
                ]
                # immediate parents are more relevant
                sorted_defs = list(reversed(parent_defs))
                used_defs = set(sorted_defs)
                all_lines = set(range(*this_change.line_range))
                all_lines.update(range(*this_change.header_line_range</s>
===========above chunk 1===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: -2
    <s>cache:
                    return cspan_cache[used.full_name]
                path = mod_hier.resolve_path(used.full_name.split("."))
                cspans = list[ChangedSpan]()
                if path is None:
                    cspan_cache[used] = cspans
                    return cspans
                jmod = before_mod_map[path.module]
                scope = jmod.as_scope
                elem = scope._search(path.path, used.start_pos[0])
                func_scopes = list[ChangeScope]()
                stmt_spans = list[StatementSpan]()
                match elem:
                    case ChangeScope(tree=ptree.Function()):
                        func_scopes.append(elem)
                    case ChangeScope(tree=ptree.Class()):
                        # add all attrs and methods
                        stmt_spans.extend(elem.spans)
                        func_scopes.extend(
                            s
                            for s in elem.subscopes.values()
                            if isinstance(s.tree, ptree.Function)
                        )
                    case StatementSpan():
                        stmt_spans.append(elem)
    
                # add collapsed functions
                for f_scope in func_scopes:
                    ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-1].get_code().strip("\n")
                    if len(stmts) > 1:
                        ellipsis = "    " * (len(ancestors) - 1) + "# ...\n"
                        body_code = ellipsis + body_code
                    h_end = f_scope.header_line_range[1]
                   </s>
===========above chunk 2===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: -3
            before_mod_map = {m.mname: m for m in pchange.all_modules.before}
            mod_hier = ModuleHierarchy.from_modules(before_mod_map)
            cspan_cache = dict[PyDefinition, list[ChangedSpan]]()
    
            def get_def_spans(used: PyDefinition) -> list[ChangedSpan]:
                "Get the (pre-edit) spans for the given definition."
                if used.full_name in c</s>
===========below chunk 0===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: 1
    <s>reversed(processed_cspans))
                        yield CtxCodeChangeProblem(
                            span,
                            relevant_changes=relevant_changes,
                            relevant_unchanged=get_relevant_unchanged(
                                span, relevant_changes
                            ),
                            src_info={"commit": pchange.commit_info},
                        )
                    processed_cspans.append(span)
    
    
===========unchanged ref 0===========
    at: coeditor.code_change
        LineRange = NewType("LineRange", tuple[int, int])
    
        line_range(start: int, end: int, can_be_empty: bool=False) -> LineRange
    
        ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None")
    
        StatementSpan(nth_in_parent: int, statements: Sequence[PyNode], scope: ChangeScope)
    
        ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange)
    
    at: coeditor.code_change.ChangeScope
        path: ProjectPath
    
        tree: ScopeTree
    
        spans: Sequence["StatementSpan"]
    
        subscopes: Mapping[str, Self]
    
        parent_scope: "ChangeScope | None"
    
        ancestors() -> list[Self]
    
    at: coeditor.code_change.ChangeScope.__post_init__
        self.header_line_range: LineRange = header_line_range
    
    at: coeditor.code_change.ChangedSpan
        change: Change[str]
    
        parent_scopes: Sequence[Change[ChangeScope]]
    
        line_range: LineRange
    
    at: coeditor.code_change.StatementSpan
        nth_in_parent: int
    
        statements: Sequence[PyNode]
    
        scope: ChangeScope
    
    at: coeditor.code_change.StatementSpan.__post_init__
        self.code: str = code + "\n"
    
        self.line_range: LineRange = line_range(start, end)
    
    at: coeditor.ctx_change_encoder
        CtxCodeChangeProblem(span: ChangedSpan, relevant_changes: list[ChangedSpan], relevant_unchanged: list[ChangedSpan], src_info: dict[str, Any])
    
        PyDefinition(full_name: PyFullName, start_pos: tuple[int, int], end_pos: tuple[int, int])
    
        LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
    
     | 
| 
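The fix in the record above seeds the `seen` set with the query span itself, keyed by (module, line_range), so the span being edited is never returned as one of its own unchanged references. A hedged miniature of that keying, with dicts and tuple ranges standing in for ChangedSpan:

    def unique_spans(this_span, other_spans, candidates):
        # anything already shown as a change is excluded from the references
        seen = {(s["module"], s["lines"]) for s in (this_span, *other_spans)}
        result = []
        for c in candidates:
            key = (c["module"], c["lines"])
            if key not in seen:
                seen.add(key)
                result.append(c)
        return result

    q = {"module": "m", "lines": (3, 7)}
    assert unique_spans(q, [], [dict(q), {"module": "m", "lines": (10, 12)}]) == [
        {"module": "m", "lines": (10, 12)}
    ]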
	coeditor.ctx_change_encoder/TkCtxCodeChangeEncoder._encode_scope_change | 
	Modified | 
	temp-1 | 
	f9c5006c3d426a199c472d33cc31b910793b0357 | 
	- Fix header trailing newline encoding. - Fix change hiding. | 
	 <0>:<add>         hchange = c.map(lambda s: s.header_code.strip("\n"))
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class TkCtxCodeChangeEncoder:
          def _encode_scope_change(self, c: Change[ChangeScope]) -> TokenSeq:
              if (key := _ObjId(id(c))) in self._scope_cache:
                  return self._scope_cache[key]
    -         hchange = c.map(lambda s: s.header_code)
 <0>          tks = truncate_section(
                  change_to_tokens(hchange), TruncateAt.Left, self.max_scope_tks
              )
              self._scope_cache[key] = tks
              return tks
      
       | 
	===========unchanged ref 0===========
    at: coeditor.code_change
        ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None")
    
    at: coeditor.common
        TokenSeq = list[Token]
    
    at: coeditor.ctx_change_encoder
        _ObjId = NewType("_ObjId", int)
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder
        VERSION = "0.0"
    
        max_ref_tks: int = 512
    
        max_query_tks: int = 512
    
        max_output_tks: int = 256
    
        max_scope_tks: int = 128
    
        max_lines_to_edit: int = 20
    
        ref_chunk_overlap: int = 32
    
        max_chunks_per_ref: int = 4
    
        max_lines_per_function: int = 500
    
        skip_unchanged_problems: bool = True
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder.__post_init__
        self._scope_cache = FIFOCache[_ObjId, TokenSeq](maxsize=1000)
    
    at: coeditor.encoding
        change_to_tokens(change: Change[str]) -> TokenSeq
    
        TruncateAt()
    
        truncate_section(sec: TokenSeq, direction: TruncateAt.Value, limit: int, add_bos: bool=True, inplace: bool=False) -> TokenSeq
    
    at: coeditor.encoding.TruncateAt
        Value = int
    
        Left = 0
    
        Right = 1
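
    The truncate_section contract listed here can be read as: keep at most
    `limit` tokens, dropping from the side named by `direction`. A simplified
    sketch (the real function additionally handles the add_bos and inplace
    options, which this version omits):

        def truncate_section(sec: list[int], direction: str, limit: int) -> list[int]:
            # "left" drops leading tokens (keeps the right end); "right" the reverse
            if len(sec) <= limit:
                return sec
            return sec[len(sec) - limit:] if direction == "left" else sec[:limit]

        assert truncate_section([1, 2, 3, 4], "left", 2) == [3, 4]
        assert truncate_section([1, 2, 3, 4], "right", 2) == [1, 2]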
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
            before_mod_map = {m.mname: m for m in pchange.all_modules.before}
            mod_hier = ModuleHierarchy.from_modules(before_mod_map)
            cspan_cache = dict[PyDefinition, list[ChangedSpan]]()
    
            def get_def_spans(used: PyDefinition) -> list[ChangedSpan]:
                "Get the (pre-edit) spans for the given definition."
                if used.full_name in cspan_cache:
                    return cspan_cache[used.full_name]
                path = mod_hier.resolve_path(used.full_name.split("."))
                cspans = list[ChangedSpan]()
                if path is None:
                    cspan_cache[used] = cspans
                    return cspans
                jmod = before_mod_map[path.module]
                scope = jmod.as_scope
                elem = scope._search(path.path, used.start_pos[0])
                func_scopes = list[ChangeScope]()
                stmt_spans = list[StatementSpan]()
                match elem:
                    case ChangeScope(tree=ptree.Function()):
                        func_scopes.append(elem)
                    case ChangeScope(tree=ptree.Class()):
                        # add all attrs and methods
                        stmt_spans.extend(elem.spans)
                        func_scopes.extend(
                            s
                            for s in elem.subscopes.values()
                            if isinstance(s.tree, ptree.Function)
                        )
                    case StatementSpan():
                        stmt_spans.append(elem)
    
                # add collapsed functions
                for f_scope in func_scopes:
                    ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-</s>
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: 1
    <s> ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-1].get_code().strip("\n")
                    if len(stmts) > 1:
                        ellipsis = "    " * (len(ancestors) - 1) + "# ...\n"
                        body_code = ellipsis + body_code
                    h_end = f_scope.header_line_range[1]
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        line_range(h_end, h_end + len(body_code)),
                    )
                    cspans.append(cspan)
    
                # add statement spans
                for stmt_span in stmt_spans:
                    ancestors = stmt_span.scope.ancestors()
                    body_code = stmt_span.code
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        stmt_span.line_range,
                    )
                    cspans.append(cspan)
    
                cspan_cache[used] = cspans
                return cspans
    
            def get_relevant_unchanged(
                this_change: ChangedSpan, other_changes: Sequence[ChangedSpan]
            ):
                if isinstance(this_change.change, Added):
                    # nothing to analyze
                    return []
                path = this_change.path
                line_usages = mod2usages[path.module]
                # parent defs are also considered as used
    </s>
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: 2
    <s>_defs = [
                    PyDefinition.from_scope(c.earlier()) for c in this_change.parent_scopes
                ]
                # immediate parents are more relevant
                sorted_defs = list(reversed(parent_defs))
                used_defs = set(sorted_defs)
                all_lines = set(range(*this_change.line_range))
                all_lines.update(range(*this_change.header_line_range))
                for l in all_lines:
                    for pydef in line_usages.line2usages.get(l, set()):
                        if (
                            pydef.full_name.startswith(path.module)
                            and pydef.start_pos[0] in all_lines
                        ):
                            # skip self references
                            continue
                        if pydef not in used_defs:
                            used_defs.add(pydef)
                            sorted_defs.append(pydef)
    
                # return unique cspans
                seen = set[tuple[ModuleName, LineRange]]()
  +             # we don't need to show the changed parts again
  -             # consider other changes as seen
  +             for cspan in (this_change, *other_changes):
  -             for cspan in other_changes:
                    seen.add((cspan.path.module, cspan.line_range))
                result = list[ChangedSpan]()
                for used in sorted_defs:
                    for cspan in get_def_spans(used):
                        key = (cspan.path.module, cspan.line_range)
                        if key not in seen:
                            result.append(cspan)
                            seen.add(key</s> | 
| 
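`_encode_scope_change` above memoizes on `_ObjId(id(c))`: object identity is cheap to hash, and the same Change objects are passed around by reference during encoding. A standalone sketch of that caching style using the same cachetools.FIFOCache named in the context (the tokenization body is a stand-in):

    from cachetools import FIFOCache

    _cache = FIFOCache(maxsize=1000)

    def encode_header(header: str) -> list[str]:
        key = id(header)                  # identity, not value, as the cache key
        if key in _cache:
            return _cache[key]
        tks = header.strip("\n").split()  # stand-in for the real tokenizer
        _cache[key] = tks
        return tks

One caveat of id()-keyed caches: CPython may reuse an address once an object is collected, so the pattern is only safe while the cached objects stay alive for the duration of their use, as the encoder's spans do here.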
	coeditor.ctx_change_encoder/TkCtxCodeChangeEncoder._encode_parent_scopes | 
	Modified | 
	temp-1 | 
	f9c5006c3d426a199c472d33cc31b910793b0357 | 
	- Fix header trailing newline encoding. - Fix change hiding. | 
	 <0>:<add>             scope_tks.append(Newline_id)
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class TkCtxCodeChangeEncoder:
          def _encode_parent_scopes(
              self, scope_changes: Sequence[Change[ChangeScope]], offset: int
          ) -> TokenSeq:
    +         scope_tks = join_list(
    +             (self._encode_scope_change(c) for c in scope_changes), Newline_id
    -         scope_tks = join_list((self._encode_scope_change(c) for c in scope_changes))
    +         )
              if offset != 0:
    +             scope_tks.extend(encode_basic(f"\n# offset: {offset}\n"))
    -             scope_tks.extend(encode_basic(f"# offset: {offset}\n"))
    +         else:
 <0>          scope_tks = truncate_section(scope_tks, TruncateAt.Left, self.max_scope_tks)
              return scope_tks
      
       | 
	===========unchanged ref 0===========
    at: coeditor.code_change
        ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None")
    
    at: coeditor.common
        TokenSeq = list[Token]
    
        join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder
        _encode_scope_change(self, c: Change[ChangeScope]) -> TokenSeq
        _encode_scope_change(c: Change[ChangeScope]) -> TokenSeq
    
    at: coeditor.encoding
        Newline_id = get_tk_id("\n")
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
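
    join_list, as declared above, flattens segments while optionally
    interleaving a separator element between them. A reimplementation
    consistent with that signature (hedged; the project's actual body
    may differ):

        def join_list(segs, sep=None):
            out = []
            for i, seg in enumerate(segs):
                if i > 0 and sep is not None:
                    out.append(sep)   # separator goes between segments only
                out.extend(seg)
            return out

        assert join_list([[1], [2, 3]], 0) == [1, 0, 2, 3]
        assert join_list([[1], [2, 3]]) == [1, 2, 3]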
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeEncoder:
        def _encode_scope_change(self, c: Change[ChangeScope]) -> TokenSeq:
            if (key := _ObjId(id(c))) in self._scope_cache:
                return self._scope_cache[key]
  +         hchange = c.map(lambda s: s.header_code.strip("\n"))
  -         hchange = c.map(lambda s: s.header_code)
            tks = truncate_section(
                change_to_tokens(hchange), TruncateAt.Left, self.max_scope_tks
            )
            self._scope_cache[key] = tks
            return tks
    
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
            before_mod_map = {m.mname: m for m in pchange.all_modules.before}
            mod_hier = ModuleHierarchy.from_modules(before_mod_map)
            cspan_cache = dict[PyDefinition, list[ChangedSpan]]()
    
            def get_def_spans(used: PyDefinition) -> list[ChangedSpan]:
                "Get the (pre-edit) spans for the given definition."
                if used.full_name in cspan_cache:
                    return cspan_cache[used.full_name]
                path = mod_hier.resolve_path(used.full_name.split("."))
                cspans = list[ChangedSpan]()
                if path is None:
                    cspan_cache[used] = cspans
                    return cspans
                jmod = before_mod_map[path.module]
                scope = jmod.as_scope
                elem = scope._search(path.path, used.start_pos[0])
                func_scopes = list[ChangeScope]()
                stmt_spans = list[StatementSpan]()
                match elem:
                    case ChangeScope(tree=ptree.Function()):
                        func_scopes.append(elem)
                    case ChangeScope(tree=ptree.Class()):
                        # add all attrs and methods
                        stmt_spans.extend(elem.spans)
                        func_scopes.extend(
                            s
                            for s in elem.subscopes.values()
                            if isinstance(s.tree, ptree.Function)
                        )
                    case StatementSpan():
                        stmt_spans.append(elem)
    
                # add collapsed functions
                for f_scope in func_scopes:
                    ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-</s>
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: 1
    <s> ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-1].get_code().strip("\n")
                    if len(stmts) > 1:
                        ellipsis = "    " * (len(ancestors) - 1) + "# ...\n"
                        body_code = ellipsis + body_code
                    h_end = f_scope.header_line_range[1]
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        line_range(h_end, h_end + len(body_code)),
                    )
                    cspans.append(cspan)
    
                # add statement spans
                for stmt_span in stmt_spans:
                    ancestors = stmt_span.scope.ancestors()
                    body_code = stmt_span.code
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        stmt_span.line_range,
                    )
                    cspans.append(cspan)
    
                cspan_cache[used] = cspans
                return cspans
    
            def get_relevant_unchanged(
                this_change: ChangedSpan, other_changes: Sequence[ChangedSpan]
            ):
                if isinstance(this_change.change, Added):
                    # nothing to analyze
                    return []
                path = this_change.path
                line_usages = mod2usages[path.module]
                # parent defs are also considered as used
    </s>
===========changed ref 3===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: 2
    <s>_defs = [
                    PyDefinition.from_scope(c.earlier()) for c in this_change.parent_scopes
                ]
                # immediate parents are more relevant
                sorted_defs = list(reversed(parent_defs))
                used_defs = set(sorted_defs)
                all_lines = set(range(*this_change.line_range))
                all_lines.update(range(*this_change.header_line_range))
                for l in all_lines:
                    for pydef in line_usages.line2usages.get(l, set()):
                        if (
                            pydef.full_name.startswith(path.module)
                            and pydef.start_pos[0] in all_lines
                        ):
                            # skip self references
                            continue
                        if pydef not in used_defs:
                            used_defs.add(pydef)
                            sorted_defs.append(pydef)
    
                # return unique cspans
                seen = set[tuple[ModuleName, LineRange]]()
  +             # we don't need to show the changed parts again
  -             # consider other changes as seen
  +             for cspan in (this_change, *other_changes):
  -             for cspan in other_changes:
                    seen.add((cspan.path.module, cspan.line_range))
                result = list[ChangedSpan]()
                for used in sorted_defs:
                    for cspan in get_def_spans(used):
                        key = (cspan.path.module, cspan.line_range)
                        if key not in seen:
                            result.append(cspan)
                            seen.add(key</s> | 
| 
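After the fix above, `_encode_parent_scopes` always terminates the header block with a newline: either the "# offset: n" comment (itself newline-wrapped) or a bare Newline_id, so the chunk body can no longer run into the last header line. A plain-string rendering of that layout (`format_parent_scopes` is an illustrative name; the left-truncation step is omitted):

    def format_parent_scopes(headers: list[str], offset: int) -> str:
        text = "\n".join(h.strip("\n") for h in headers)
        if offset != 0:
            text += f"\n# offset: {offset}\n"  # offset marker ends the block
        else:
            text += "\n"                       # otherwise a bare newline does
        return text

    assert format_parent_scopes(["class A:", "    def f():"], 0).endswith("def f():\n")
    assert "# offset: -1" in format_parent_scopes(["class A:"], -1)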
	coeditor.ctx_change_encoder/TkCtxCodeChangeEncoder._encode_change | 
	Modified | 
	temp-1 | 
	f9c5006c3d426a199c472d33cc31b910793b0357 | 
	- Fix header trailing newline encoding. - Fix change hiding. | 
	 <0>:<add>         change = change.map(lambda s: s.strip("\n"))
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class TkCtxCodeChangeEncoder:
          def _encode_change(self, change: Change[str]) -> TokenSeq:
              if (key := _ObjId(id(change))) in self._id_cache:
                  return self._id_cache[key]
 <0>          change_tks = change_to_tokens(change)
              self._id_cache[key] = change_tks
              return change_tks
      
       | 
	===========unchanged ref 0===========
    at: coeditor.common
        TokenSeq = list[Token]
    
    at: coeditor.ctx_change_encoder
        _ObjId = NewType("_ObjId", int)
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder.__post_init__
        self._id_cache = FIFOCache[_ObjId, TokenSeq](maxsize=1000)
    
    at: coeditor.ctx_change_encoder.TkCtxCodeChangeEncoder._inline_some_context
        truncated_above, truncated_below = truncate_sections(
                        extra_space,
                        (above_ctx, TruncateAt.Left),
                        (below_ctx, TruncateAt.Right),
                        add_bos=True,
                    )
    
        truncated_above, truncated_below = truncate_sections(
                        extra_space,
                        (above_ctx, TruncateAt.Left),
                        (below_ctx, TruncateAt.Right),
                        add_bos=True,
                    )
    
        above_ctx = truncate_section(
                            above_ctx, TruncateAt.Right, above_left + self.ref_chunk_overlap
                        )
        above_ctx = TokenSeq()
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeEncoder:
        def _encode_scope_change(self, c: Change[ChangeScope]) -> TokenSeq:
            if (key := _ObjId(id(c))) in self._scope_cache:
                return self._scope_cache[key]
  +         hchange = c.map(lambda s: s.header_code.strip("\n"))
  -         hchange = c.map(lambda s: s.header_code)
            tks = truncate_section(
                change_to_tokens(hchange), TruncateAt.Left, self.max_scope_tks
            )
            self._scope_cache[key] = tks
            return tks
    
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class TkCtxCodeChangeEncoder:
        def _encode_parent_scopes(
            self, scope_changes: Sequence[Change[ChangeScope]], offset: int
        ) -> TokenSeq:
  +         scope_tks = join_list(
  +             (self._encode_scope_change(c) for c in scope_changes), Newline_id
  -         scope_tks = join_list((self._encode_scope_change(c) for c in scope_changes))
  +         )
            if offset != 0:
  +             scope_tks.extend(encode_basic(f"\n# offset: {offset}\n"))
  -             scope_tks.extend(encode_basic(f"# offset: {offset}\n"))
  +         else:
  +             scope_tks.append(Newline_id)
            scope_tks = truncate_section(scope_tks, TruncateAt.Left, self.max_scope_tks)
            return scope_tks
    
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
            before_mod_map = {m.mname: m for m in pchange.all_modules.before}
            mod_hier = ModuleHierarchy.from_modules(before_mod_map)
            cspan_cache = dict[PyDefinition, list[ChangedSpan]]()
    
            def get_def_spans(used: PyDefinition) -> list[ChangedSpan]:
                "Get the (pre-edit) spans for the given definition."
                if used.full_name in cspan_cache:
                    return cspan_cache[used.full_name]
                path = mod_hier.resolve_path(used.full_name.split("."))
                cspans = list[ChangedSpan]()
                if path is None:
                    cspan_cache[used] = cspans
                    return cspans
                jmod = before_mod_map[path.module]
                scope = jmod.as_scope
                elem = scope._search(path.path, used.start_pos[0])
                func_scopes = list[ChangeScope]()
                stmt_spans = list[StatementSpan]()
                match elem:
                    case ChangeScope(tree=ptree.Function()):
                        func_scopes.append(elem)
                    case ChangeScope(tree=ptree.Class()):
                        # add all attrs and methods
                        stmt_spans.extend(elem.spans)
                        func_scopes.extend(
                            s
                            for s in elem.subscopes.values()
                            if isinstance(s.tree, ptree.Function)
                        )
                    case StatementSpan():
                        stmt_spans.append(elem)
    
                # add collapsed functions
                for f_scope in func_scopes:
                    ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-</s>
===========changed ref 3===========
    # module: coeditor.ctx_change_encoder
    class CtxCodeChangeProblemGenerator(ProjectChangeProcessor[CtxCodeChangeProblem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Iterable[CtxCodeChangeProblem]:
    # offset: 1
    <s> ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-1].get_code().strip("\n")
                    if len(stmts) > 1:
                        ellipsis = "    " * (len(ancestors) - 1) + "# ...\n"
                        body_code = ellipsis + body_code
                    h_end = f_scope.header_line_range[1]
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        line_range(h_end, h_end + len(body_code)),
                    )
                    cspans.append(cspan)
    
                # add statement spans
                for stmt_span in stmt_spans:
                    ancestors = stmt_span.scope.ancestors()
                    body_code = stmt_span.code
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        stmt_span.line_range,
                    )
                    cspans.append(cspan)
    
                cspan_cache[used] = cspans
                return cspans
    
            def get_relevant_unchanged(
                this_change: ChangedSpan, other_changes: Sequence[ChangedSpan]
            ):
                if isinstance(this_change.change, Added):
                    # nothing to analyze
                    return []
                path = this_change.path
                line_usages = mod2usages[path.module]
                # parent defs are also considered as used
    </s> | 
| 
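The one-line fix in the record above normalizes both sides of a change with strip("\n") before tokenization, so stray leading or trailing blank lines no longer surface as spurious edit tokens. The normalization touches only the surrounding newlines:

    def normalize_change(code: str) -> str:
        # trim surrounding blank lines; interior newlines are untouched
        return code.strip("\n")

    assert normalize_change("\ndef f():\n    pass\n\n") == "def f():\n    pass"

Because the mapping is applied through Change.map, both the before and after versions are normalized the same way, and only the phantom newline edits disappear from the diff.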
	coeditor.dataset/_process_commits | 
	Modified | 
	temp-1 | 
	f9e7d3bbf0646c97dce2c651fb83fba9c8cfcca8 | 
	Switch data preparation to new format. | 
	 <0>:<add>         jedi.settings.cache_directory = old_cache
 | 
	      # module: coeditor.dataset
      def _process_commits(
          root: Path,
    +     workdir: Path,
          commits: Sequence[CommitInfo],
    -     training: bool,
    +     encoder: C3EditEncoder,
    -     encoder: EditEncoder[T1],
    -     drop_comments: bool,
    - ) -> list[T1]:
    + ) -> Sequence[TkC3Problem]:
    +     # use process-specific parso cache
    +     old_cache = jedi.settings.cache_directory
    +     jedi.settings.cache_directory = workdir / "jedi_cache"
          try:
    +         # cannot return here since subprocess will be killed after returning
    +         return edits_from_commit_history(
    +             root,
    +             commits,
    +             tempdir=workdir / "code",
    +             change_processor=encoder.change_processor,
    +             edit_encoder=encoder.edit_tokenizer.tokenize_problem,
    +             silent=True,
    -         edits = list(
    -             edits_from_commit_history(root, commits, drop_comments=drop_comments)
              )
          except UnicodeDecodeError as e:
              # this might happen in rare cases
              warnings.warn(f"Unable to process project: {root}\nError: {e}")
              return []
    -     tk_edits = list()
    -     if isinstance(encoder, AnalysisBasedEditEncoder) or isinstance(
    -         encoder, QueryRefEditEncoder
    -     ):
    -         tk_edits.extend(encoder.encode_pedits(edits, training))
    -     else:
    -         for pe in edits:
    -             tk_edits.extend(encoder.encode_pedit(pe, training))
    -     return tk_edits
    +     finally:
 <0>  
       | 
	===========unchanged ref 0===========
    at: _warnings
        warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
        warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
    
    at: coeditor._utils
        DefaultWorkers: int = multiprocessing.cpu_count() // 2
        global DefaultWorkers
    
    at: coeditor.dataset
        C3EditEncoder(change_processor: ProjectChangeProcessor[C3Problem]=field(
                default_factory=C3ProblemGenerator
            ), edit_tokenizer: C3ProblemTokenizer=field(default_factory=C3ProblemTokenizer))
    
    at: coeditor.dataset.C3EditEncoder
        change_processor: ProjectChangeProcessor[C3Problem] = field(
                default_factory=C3ProblemGenerator
            )
    
        edit_tokenizer: C3ProblemTokenizer = field(default_factory=C3ProblemTokenizer)
    
    at: coeditor.dataset._process_commits
        old_cache = jedi.settings.cache_directory
    
    at: jedi.settings
        cache_directory = os.path.expanduser(_cache_directory)
    
    at: pathlib
        Path()
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: coeditor.dataset
  + @dataclass
  + class C3EditEncoder:
  +     change_processor: ProjectChangeProcessor[C3Problem] = field(
  +         default_factory=C3ProblemGenerator
  +     )
  +     edit_tokenizer: C3ProblemTokenizer = field(default_factory=C3ProblemTokenizer)
  + 
===========changed ref 1===========
    # module: coeditor.encoding
    @dataclass
    class TkDelta:
        def to_change_tks(self, input: TokenSeq) -> TokenSeq:
            lines = split_list(input, Newline_id)
  +         if len(lines) > len(self.deltas):
  -         assert len(lines) <= len(self.deltas)
  +             print_err(f"{self.deltas}")
  +             print_err(f"{input}")
  +             raise ValueError(
  +                 f"Delta is longer than input: {len(lines)=} > {len(self.deltas)=}"
  +             )
            new_lines = list[TokenSeq]()
            for line, delta in zip(lines, self.deltas):
                deleted = False
                if delta:
                    for action in delta:
                        if action[0] == Add_id:
                            new_lines.append(action)
                        elif action[0] == Del_id:
                            deleted = True
                if deleted:
                    new_lines.append([Del_id] + line)
                else:
                    new_lines.append(line)
            if len(self.deltas) == len(lines) + 1:
                delta = self.deltas[-1]
                for action in delta:
                    if action[0] == Add_id:
                        new_lines.append(action)
            return join_list(new_lines, Newline_id)
     | 
| 
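The `_process_commits` record above works around jedi's module-global cache setting by pointing it at a per-process directory and restoring it in a `finally` block. A minimal sketch of that pattern; `with_process_cache` is a hypothetical helper name, not part of the codebase:

    import jedi
    from pathlib import Path

    def with_process_cache(workdir: Path, action):
        # jedi.settings.cache_directory is module-global, so parallel
        # workers must each use their own directory to avoid contention.
        old_cache = jedi.settings.cache_directory
        jedi.settings.cache_directory = str(workdir / "jedi_cache")
        try:
            return action()
        finally:
            # restore the global setting even if `action` raises
            jedi.settings.cache_directory = old_cache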
	coeditor.code_change/NoProcessing.process_change | 
	Modified | 
	temp-1 | 
	f9e7d3bbf0646c97dce2c651fb83fba9c8cfcca8 | 
	Switch data preparation to new format. | 
	 <0>:<add>         return [pchange]
 | 
	      # module: coeditor.code_change
      class NoProcessing(ProjectChangeProcessor[JProjectChange]):
          def process_change(
              self,
              pchange: JProjectChange,
              pre_analysis,
              post_analysis,
    +     ) -> Sequence[JProjectChange]:
    -     ) -> Iterable[JProjectChange]:
    -         yield pchange
 <0>  
       | 
	===========unchanged ref 0===========
    at: coeditor.code_change
        JProjectChange(changed: Mapping[ModuleName, JModuleChange], all_modules: Modified[Collection[JModule]], commit_info: "CommitInfo | None")
    
        ProjectChangeProcessor()
    
    at: coeditor.code_change.ProjectChangeProcessor
        VERSION = "1.0"
    
        process_change(pchange: "JProjectChange", pre_analysis: Any, post_analysis: Any) -> Sequence[TProb]
        process_change(self, pchange: "JProjectChange", pre_analysis: Any, post_analysis: Any) -> Sequence[TProb]
    
    
===========changed ref 0===========
    # module: coeditor.dataset
  + @dataclass
  + class C3EditEncoder:
  +     change_processor: ProjectChangeProcessor[C3Problem] = field(
  +         default_factory=C3ProblemGenerator
  +     )
  +     edit_tokenizer: C3ProblemTokenizer = field(default_factory=C3ProblemTokenizer)
  + 
===========changed ref 1===========
    # module: coeditor.dataset
    def datasets_from_repos(
        repos_root: Path,
  +     encoder: C3EditEncoder,
  -     encoder: EditEncoder[TEdit],
  -     drop_comments: bool,
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
  + ) -> dict[str, TokenizedEditDataset[TkC3Problem]]:
  - ) -> dict[str, TokenizedEditDataset[TEdit]]:
        splits = ["test", "valid", "train"]
        projects = dict[str, list[Path]]()
        split_is_training = dict[str, list[bool]]()
        for split in splits:
            if not (repos_root / split).exists():
                warnings.warn(f"Split {split} not found at {repos_root / split}.")
                continue
            ps = [p for p in (repos_root / split).iterdir() if p.is_dir]
            projects[split] = ps
            training = split == "train"
            split_is_training[split] = [training] * len(ps)
            if not ps:
                warnings.warn(f"No projects found in {split} split")
    
        dataset = dataset_from_projects(
            join_list(projects.values()),
            encoder=encoder,
  -         drop_comments=drop_comments,
            repo_training=join_list(split_is_training.values()),
            max_history_per_repo=max_history_per_repo,
            workers=workers,
        )
        return {k: dataset.subset(v) for k, v in projects.items()}
    
===========changed ref 2===========
    # module: coeditor.dataset
  + def make_or_load_datasets(
  +     dataset_name: str,
  +     encoder: C3EditEncoder,
  +     recreate_data: bool = False,
  +     workers: int = DefaultWorkers,
  + ) -> dict[str, TokenizedEditDataset[TkC3Problem]]:
  +     config_str = (
  +         repr_modified_args(encoder.change_processor)
  +         + "-"
  +         + repr_modified_args(encoder.edit_tokenizer)
  +     )
  +     save_dir = get_dataset_dir(dataset_name) / config_str
  + 
  +     if recreate_data or not save_dir.exists():
  +         if dataset_name == "SPOT":
  +             datasets = {
  +                 "test": dataset_from_projects(
  +                     [proj_root()], encoder, [False], workers=workers
  +                 )
  +             }
  +         else:
  +             datasets = datasets_from_repos(
  +                 get_dataset_dir(dataset_name) / "repos",
  +                 encoder,
  +                 workers=workers,
  +             )
  +         with timed_action("Saving datasets to disk"):
  +             save_datasets(datasets, save_dir)
  +         print("Tokenized dataset saved to:", save_dir)
  +         print("Dataset stats:")
  +         for group, dataset in datasets.items():
  +             print("=" * 20, group, "=" * 20)
  +             pretty_print_dict(dataset.overall_stats())
  +     else:
  +         with timed_action("Loading datasets from disk"):
  +             datasets = load_datasets(save_dir)
  + 
  +     return datasets
  + 
===========changed ref 3===========
    # module: coeditor.encoding
    @dataclass
    class TkDelta:
        def to_change_tks(self, input: TokenSeq) -> TokenSeq:
            lines = split_list(input, Newline_id)
  +         if len(lines) > len(self.deltas):
  -         assert len(lines) <= len(self.deltas)
  +             print_err(f"{self.deltas}")
  +             print_err(f"{input}")
  +             raise ValueError(
  +                 f"Delta is longer than input: {len(lines)=} > {len(self.deltas)=}"
  +             )
            new_lines = list[TokenSeq]()
            for line, delta in zip(lines, self.deltas):
                deleted = False
                if delta:
                    for action in delta:
                        if action[0] == Add_id:
                            new_lines.append(action)
                        elif action[0] == Del_id:
                            deleted = True
                if deleted:
                    new_lines.append([Del_id] + line)
                else:
                    new_lines.append(line)
            if len(self.deltas) == len(lines) + 1:
                delta = self.deltas[-1]
                for action in delta:
                    if action[0] == Add_id:
                        new_lines.append(action)
            return join_list(new_lines, Newline_id)
    
===========changed ref 4===========
    # module: coeditor.dataset
    def _process_commits(
        root: Path,
  +     workdir: Path,
        commits: Sequence[CommitInfo],
  -     training: bool,
  +     encoder: C3EditEncoder,
  -     encoder: EditEncoder[T1],
  -     drop_comments: bool,
  - ) -> list[T1]:
  + ) -> Sequence[TkC3Problem]:
  +     # use process-specific parso cache
  +     old_cache = jedi.settings.cache_directory
  +     jedi.settings.cache_directory = workdir / "jedi_cache"
        try:
  +         # cannot return here since subprocess will be killed after returning
  +         return edits_from_commit_history(
  +             root,
  +             commits,
  +             tempdir=workdir / "code",
  +             change_processor=encoder.change_processor,
  +             edit_encoder=encoder.edit_tokenizer.tokenize_problem,
  +             silent=True,
  -         edits = list(
  -             edits_from_commit_history(root, commits, drop_comments=drop_comments)
            )
        except UnicodeDecodeError as e:
            # this might happen in rare cases
            warnings.warn(f"Unable to process project: {root}\nError: {e}")
            return []
  -     tk_edits = list()
  -     if isinstance(encoder, AnalysisBasedEditEncoder) or isinstance(
  -         encoder, QueryRefEditEncoder
  -     ):
  -         tk_edits.extend(encoder.encode_pedits(edits, training))
  -     else:
  -         for pe in edits:
  -             tk_edits.extend(encoder.encode_pedit(pe, training))
  -     return tk_edits
  +     finally:
  +         jedi.settings.cache_directory = old_cache
     | 
| 
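The `NoProcessing` change above replaces a generator (`yield pchange`) with a materialized list. The distinction matters here because a generator's body only runs when iterated, which is too late if the producing subprocess has already exited. A small illustration, with a hypothetical `compute` standing in for real work:

    from typing import Iterable, Sequence

    def compute() -> int:  # hypothetical placeholder for real work
        return 42

    def lazy_results() -> Iterable[int]:
        yield compute()  # body runs only when the caller iterates

    def eager_results() -> Sequence[int]:
        return [compute()]  # body runs immediately; safe across process exits

    assert list(eager_results()) == [42]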
	coeditor.code_change/edits_from_commit_history | 
	Modified | 
	temp-1 | 
	f9e7d3bbf0646c97dce2c651fb83fba9c8cfcca8 | 
	Switch data preparation to new format. | 
	 <0>:<add>         shutil.rmtree(tempdir)
 | 
	      # module: coeditor.code_change
      def edits_from_commit_history(
          project_dir: Path,
          history: Sequence[CommitInfo],
          tempdir: Path,
          change_processor: ProjectChangeProcessor[TProb] = NoProcessing(),
          edit_encoder: Callable[[TProb], Iterable[TEnc]] = lambda x: [x],
          ignore_dirs=DefaultIgnoreDirs,
          silent: bool = False,
      ) -> Sequence[TEnc]:
          """Incrementally compute the edits to a project from the git history.
          Note that this will change the file states in the project directory, so
          you should make a copy of the project before calling this function.
          """
          tempdir = tempdir.resolve()
          if tempdir.exists():
              raise FileExistsError(f"Workdir '{tempdir}' already exists.")
    +     use_fast_parser = jedi.settings.fast_parser
          tempdir.mkdir(parents=True, exist_ok=False)
    -     use_fast_parser = jedi.settings.fast_parser
          try:
              run_command(
                  ["cp", "-r", str(project_dir / ".git"), str(tempdir)],
                  cwd=project_dir.parent,
              )
      
              return _edits_from_commit_history(
                  tempdir, history, change_processor, edit_encoder, ignore_dirs, silent
              )
          finally:
    -         run_command(["rm", "-rf", str(tempdir)], cwd=tempdir.parent)
 <0>          jedi.settings.fast_parser = use_fast_parser
      
       | 
	===========unchanged ref 0===========
    at: coeditor.code_change
        DefaultIgnoreDirs = {".venv", ".mypy_cache", ".git", "venv", "build"}
    
        JProjectChange(changed: Mapping[ModuleName, JModuleChange], all_modules: Modified[Collection[JModule]], commit_info: "CommitInfo | None")
    
        TProb = TypeVar("TProb", covariant=True)
    
        TEnc = TypeVar("TEnc", covariant=True)
    
        ProjectChangeProcessor()
    
        NoProcessing()
    
        _edits_from_commit_history(project: Path, history: Sequence[CommitInfo], change_processor: ProjectChangeProcessor[TProb], edit_encoder: Callable[[TProb], Iterable[TEnc]], ignore_dirs: set[str], silent: bool) -> Sequence[TEnc]
    
    at: coeditor.common
        run_command(args: Sequence[str], cwd: str | Path) -> str
    
    at: jedi.settings
        fast_parser = True
    
    at: pathlib
        Path()
    
    at: pathlib.Path
        __slots__ = ()
    
        resolve(strict: bool=...) -> _P
    
        mkdir(mode: int=..., parents: bool=..., exist_ok: bool=...) -> None
    
        exists() -> bool
    
    at: pathlib.PurePath
        __slots__ = (
                '_drv', '_root', '_parts',
                '_str', '_hash', '_pparts', '_cached_cparts',
            )
    
        drive = property(attrgetter('_drv'),
                             doc="""The drive prefix (letter or UNC path), if any.""")
    
        root = property(attrgetter('_root'),
                            doc="""The root of the path, if any.""")
    
    at: typing
        Iterable = _alias(collections.abc.Iterable, 1)
    
        Callable = _CallableType(collections.abc.Callable, 2)
    
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: coeditor.code_change
    class NoProcessing(ProjectChangeProcessor[JProjectChange]):
        def process_change(
            self,
            pchange: JProjectChange,
            pre_analysis,
            post_analysis,
  +     ) -> Sequence[JProjectChange]:
  -     ) -> Iterable[JProjectChange]:
  +         return [pchange]
  -         yield pchange
    
===========changed ref 1===========
    # module: coeditor.dataset
  + @dataclass
  + class C3EditEncoder:
  +     change_processor: ProjectChangeProcessor[C3Problem] = field(
  +         default_factory=C3ProblemGenerator
  +     )
  +     edit_tokenizer: C3ProblemTokenizer = field(default_factory=C3ProblemTokenizer)
  + 
===========changed ref 2===========
    # module: coeditor.dataset
    def datasets_from_repos(
        repos_root: Path,
  +     encoder: C3EditEncoder,
  -     encoder: EditEncoder[TEdit],
  -     drop_comments: bool,
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
  + ) -> dict[str, TokenizedEditDataset[TkC3Problem]]:
  - ) -> dict[str, TokenizedEditDataset[TEdit]]:
        splits = ["test", "valid", "train"]
        projects = dict[str, list[Path]]()
        split_is_training = dict[str, list[bool]]()
        for split in splits:
            if not (repos_root / split).exists():
                warnings.warn(f"Split {split} not found at {repos_root / split}.")
                continue
            ps = [p for p in (repos_root / split).iterdir() if p.is_dir]
            projects[split] = ps
            training = split == "train"
            split_is_training[split] = [training] * len(ps)
            if not ps:
                warnings.warn(f"No projects found in {split} split")
    
        dataset = dataset_from_projects(
            join_list(projects.values()),
            encoder=encoder,
  -         drop_comments=drop_comments,
            repo_training=join_list(split_is_training.values()),
            max_history_per_repo=max_history_per_repo,
            workers=workers,
        )
        return {k: dataset.subset(v) for k, v in projects.items()}
    
===========changed ref 3===========
    # module: coeditor.dataset
  + def make_or_load_datasets(
  +     dataset_name: str,
  +     encoder: C3EditEncoder,
  +     recreate_data: bool = False,
  +     workers: int = DefaultWorkers,
  + ) -> dict[str, TokenizedEditDataset[TkC3Problem]]:
  +     config_str = (
  +         repr_modified_args(encoder.change_processor)
  +         + "-"
  +         + repr_modified_args(encoder.edit_tokenizer)
  +     )
  +     save_dir = get_dataset_dir(dataset_name) / config_str
  + 
  +     if recreate_data or not save_dir.exists():
  +         if dataset_name == "SPOT":
  +             datasets = {
  +                 "test": dataset_from_projects(
  +                     [proj_root()], encoder, [False], workers=workers
  +                 )
  +             }
  +         else:
  +             datasets = datasets_from_repos(
  +                 get_dataset_dir(dataset_name) / "repos",
  +                 encoder,
  +                 workers=workers,
  +             )
  +         with timed_action("Saving datasets to disk"):
  +             save_datasets(datasets, save_dir)
  +         print("Tokenized dataset saved to:", save_dir)
  +         print("Dataset stats:")
  +         for group, dataset in datasets.items():
  +             print("=" * 20, group, "=" * 20)
  +             pretty_print_dict(dataset.overall_stats())
  +     else:
  +         with timed_action("Loading datasets from disk"):
  +             datasets = load_datasets(save_dir)
  + 
  +     return datasets
  + 
===========changed ref 4===========
    # module: coeditor.encoding
    @dataclass
    class TkDelta:
        def to_change_tks(self, input: TokenSeq) -> TokenSeq:
            lines = split_list(input, Newline_id)
  +         if len(lines) > len(self.deltas):
  -         assert len(lines) <= len(self.deltas)
  +             print_err(f"{self.deltas}")
  +             print_err(f"{input}")
  +             raise ValueError(
  +                 f"Delta is longer than input: {len(lines)=} > {len(self.deltas)=}"
  +             )
            new_lines = list[TokenSeq]()
            for line, delta in zip(lines, self.deltas):
                deleted = False
                if delta:
                    for action in delta:
                        if action[0] == Add_id:
                            new_lines.append(action)
                        elif action[0] == Del_id:
                            deleted = True
                if deleted:
                    new_lines.append([Del_id] + line)
                else:
                    new_lines.append(line)
            if len(self.deltas) == len(lines) + 1:
                delta = self.deltas[-1]
                for action in delta:
                    if action[0] == Add_id:
                        new_lines.append(action)
            return join_list(new_lines, Newline_id)
     | 
| 
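`edits_from_commit_history` above copies only the `.git` directory into a scratch directory and replays commits there, leaving the original checkout untouched. A sketch of that core trick under the same assumption; `materialize_commit` is a hypothetical name:

    import subprocess
    from pathlib import Path

    def materialize_commit(project_dir: Path, tempdir: Path, commit_hash: str) -> None:
        # Copying .git is enough: a forced checkout then rebuilds the
        # working tree for the requested commit inside the temp directory.
        tempdir.mkdir(parents=True, exist_ok=False)
        subprocess.run(["cp", "-r", str(project_dir / ".git"), str(tempdir)], check=True)
        subprocess.run(
            ["git", "checkout", "-f", commit_hash],
            cwd=tempdir, capture_output=True, check=True,
        )

Cleanup then uses `shutil.rmtree(tempdir)`, matching the diff's switch away from shelling out to `rm -rf`.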
	coeditor.code_change/_edits_from_commit_history | 
	Modified | 
	temp-1 | 
	f9e7d3bbf0646c97dce2c651fb83fba9c8cfcca8 | 
	Switch data preparation to new format. | 
	 <0>:<add>                 pchange, pre_analysis, post_analysis
 | 
	      # module: coeditor.code_change
      def _edits_from_commit_history(
          project: Path,
          history: Sequence[CommitInfo],
          change_processor: ProjectChangeProcessor[TProb],
          edit_encoder: Callable[[TProb], Iterable[TEnc]],
          ignore_dirs: set[str],
          silent: bool,
      ) -> Sequence[TEnc]:
      <s>)
                          changed[mod.mname] = JModuleChange.from_modules(Deleted(mod))
                      case Modified(path1, path2):
                          assert path1 == path2
                          mod_old = new_path2module[rel_path]
                          new_path2module[rel_path] = mod_new = parse_module(path)
                          changed[mod_new.mname] = JModuleChange.from_modules(
                              Modified(mod_old, mod_new)
                          )
      
              with _tlogger.timed("post_edit_analysis"):
                  post_analysis = change_processor.post_edit_analysis(
                      pstate,
                      new_path2module,
                      changed,
                  )
      
              # now go backwards in time to perform pre-edit analysis
              checkout_commit(commit_now.hash)
              with _tlogger.timed("pre_edit_analysis"):
                  pre_analysis = change_processor.pre_edit_analysis(
                      pstate,
                      path2module,
                      changed,
                  )
              checkout_commit(commit_next.hash)
      
              modules_mod = Modified(path2module.values(), new_path2module.values())
              pchange = JProjectChange(changed, modules_mod, commit_next)
      
              with _tlogger.timed("process_change"):
    +             processed = change_processor.process_change(
    -             processed = list(
    -                 change_processor.process_change(pchange, pre_analysis, post_analysis)
 <0>              )
              with _tlogger.timed("change_encoder"):
                  for change in processed:
                      results.extend(edit_encoder(change))
              commit_now = commit_next
              path2module = new_path2module
          return results
      
       | 
	===========above chunk 0===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_processor: ProjectChangeProcessor[TProb],
        edit_encoder: Callable[[TProb], Iterable[TEnc]],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Sequence[TEnc]:
    # offset: -1
    <s>"):
                        path_changes.append(Added(path))
                    elif tag.endswith("D"):
                        path_changes.append(Deleted(path))
                    if tag.endswith("M"):
                        path_changes.append(Modified(path, path))
                elif len(segs) == 3:
                    tag, path1, path2 = segs
                    assert tag.startswith("R")
                    if not is_src(path1) or not is_src(path2):
                        continue
                    path_changes.append(Deleted(path1))
                    path_changes.append(Added(path2))
    
            # make deep copies of changed modules
            to_copy = {
                to_rel_path(Path(path_change.before))
                for path_change in path_changes
                if not isinstance(path_change, Added)
            }
            _deep_copy_subset_(path2module, to_copy)
    
            checkout_commit(commit_next.hash)
    
            new_path2module = path2module.copy()
            changed = dict[ModuleName, JModuleChange]()
            for path_change in path_changes:
                path = project / path_change.earlier()
                rel_path = to_rel_path(path.relative_to(project))
                match path_change:
                    case Added():
                        mod = parse_module(path)
                        new_path2module[rel_path] = mod
                        changed[mod.mname] = JModuleChange.from_modules(Added(mod))
                    case Deleted():
                        mod = new_path2module.pop(rel_path)
                        changed[mod.mname] = JModuleChange.from_modules(Deleted(mod))
                    case Modified(path1</s>
===========above chunk 1===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_processor: ProjectChangeProcessor[TProb],
        edit_encoder: Callable[[TProb], Iterable[TEnc]],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Sequence[TEnc]:
    # offset: -2
    <s>project, added_sys_path=[project / "src"])
        pstate = ProjectState(proj, scripts)
    
    # now we can get the first project state, although this is not needed for now
        # but we'll use it later for pre-edit analysis
        path2module = {
            f: parse_module(project / f)
            for f in tqdm(
                get_python_files(project), desc="building initial project", disable=silent
            )
        }
    
        def is_src(path_s: str) -> bool:
            path = Path(path_s)
            return path.suffix == ".py" and all(p not in ignore_dirs for p in path.parts)
    
        future_commits = list(reversed(history[:-1]))
        results = list[TEnc]()
        for commit_next in tqdm(
            future_commits, smoothing=0, desc="processing commits", disable=silent
        ):
            # get changed files
            changed_files = run_command(
                ["git", "diff", commit_now.hash, commit_next.hash, "--name-status"],
                cwd=project,
            ).splitlines()
    
            path_changes = list[Change[str]]()
    
            for line in changed_files:
                segs = line.split("\t")
                if len(segs) == 2:
                    tag, path = segs
                    if not is_src(path):
                        continue
                    if tag.endswith("A"):
                        path_changes.append(Added(path))
                    elif tag.endswith("D"):
                        path_changes</s>
===========above chunk 2===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_processor: ProjectChangeProcessor[TProb],
        edit_encoder: Callable[[TProb], Iterable[TEnc]],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Sequence[TEnc]:
    # offset: -3
    <s>.relative_to(proj._path))] = s
                mcontext = s._get_module_context()
                assert isinstance(mcontext, ModuleContext)
                mname = cast(str, mcontext.py__name__())
                if mname.startswith("src."):
                    e = ValueError(f"Bad module name: {mname}")
                    files = list(project.iterdir())
                    print_err(f"project: {proj}", file=sys.stderr)
                    print_err(f"files in root: {files}", file=sys.stderr)
                    raise e
                m = s._module_node
                assert isinstance(m, ptree.Module)
                # mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
                # m = parso.parse(path.read_text())
                return JModule(mname, m)
    
        def checkout_commit(commit_hash: str):
            with _tlogger.timed("checkout"):
                subprocess.run(
                    ["git", "checkout", "-f", commit_hash],
                    cwd=project,
                    capture_output=True,
                    check=True,
                )
    
    # to ensure we are not accidentally overriding real code changes
        if list(project.iterdir()) != [project / ".git"]:
            raise FileExistsError(f"Directory '{project}' should contain only '.git'.")
    
        # checkout to the first commit
        commit_now = history[-1]
        checkout_commit(commit_now.hash)
        proj = jedi.Project(</s>
===========above chunk 3===========
    # module: coeditor.code_change
    def _edits_from_commit_history(
        project: Path,
        history: Sequence[CommitInfo],
        change_processor: ProjectChangeProcessor[TProb],
        edit_encoder: Callable[[TProb], Iterable[TEnc]],
        ignore_dirs: set[str],
        silent: bool,
    ) -> Sequence[TEnc]:
    # offset: -4
        scripts = dict[RelPath, jedi.Script]()
    
        def parse_module(path: Path):
            with _tlogger.timed("parse_module"):
  +             assert path.is_absolute(), f"Path is not absolute: {path=}"
                s = jedi.Script(path=path, project=proj)
                scripts[to_rel_path</s> | 
| 
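`_edits_from_commit_history` above classifies changed files by parsing `git diff --name-status` output. A minimal parser mirroring its tag handling (the function and tuple labels here are hypothetical illustrations, not codebase names):

    def parse_name_status(lines: list[str]) -> list[tuple[str, str]]:
        changes = []
        for line in lines:
            segs = line.split("\t")
            if len(segs) == 2:  # A / D / M entries
                tag, path = segs
                if tag.endswith("A"):
                    changes.append(("added", path))
                elif tag.endswith("D"):
                    changes.append(("deleted", path))
                elif tag.endswith("M"):
                    changes.append(("modified", path))
            elif len(segs) == 3:  # R<score>: a rename becomes delete + add
                _, path1, path2 = segs
                changes.append(("deleted", path1))
                changes.append(("added", path2))
        return changes

    assert parse_name_status(["M\tsrc/a.py", "R100\tsrc/b.py\tsrc/c.py"]) == [
        ("modified", "src/a.py"), ("deleted", "src/b.py"), ("added", "src/c.py"),
    ]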
	scripts.coeditor.train_retrieval_model/train_model | 
	Modified | 
	temp-1 | 
	f9e7d3bbf0646c97dce2c651fb83fba9c8cfcca8 | 
	Switch data preparation to new format. | 
	 <0>:<add>     datasets = make_or_load_datasets(dataset_name, encoder, recreate_data=recreate_data)
 | 
	      <s>model(
          dataset_name="medium",
          model_variant="-sig-analysis-post_usees",
    +     encoder: C3EditEncoder = C3EditEncoder(),
    -     encoder: QueryRefEditEncoder = QueryRefEditEncoder(),
    -     drop_comments: bool = True,
          batch_args=BatchArgs.train_default(),
          test_batch_args=BatchArgs.eval_default(),
          train_args=TrainingArgs(),
          recreate_data: bool = False,
          eval_only: bool = False,
      ):
          # model_variant = "-file"
          model_name = f"coeditor-{dataset_name}"
          model_name += model_variant
      
          dec_args = DecodingArgs()
          if train_args.quicktest:
              model_name = "quicktest-" + model_name
      
          if not eval_only:
              check_save_dir(model_name)
      
    -     datasets = make_or_load_datasets(
    -         dataset_name, encoder, drop_comments=drop_comments, recreate_data=recreate_data
    -     )
 <0>  
          config_dict = {
              k: get_modified_args(v)
              for k, v in {
                  "data_args": batch_args,
                  "train_args": train_args,
                  "dec_args": dec_args,
              }.items()
          }
      
          project = "Coeditor" if not train_args.quicktest else "Coeditor-quicktest"
          wandb.init(dir="..", project=project, name=model_name, config=config_dict)
      
          if train_args.quicktest:
              print("Using fewer data for quick test.")
              n_quick_exs = 20
              for name, dataset in datasets.items():
                  datasets[name] = TokenizedEditDataset.from_edits(
                      dataset.all_edits()[:n_quick_exs]
                  )
      
          if not eval_only:
              model = RetrievalEditorModel.from_code_t5(
                  "base", reuse_embed=True, reinit_weights=train_args.re</s> | 
	===========below chunk 0===========
    <s>_name="medium",
        model_variant="-sig-analysis-post_usees",
  +     encoder: C3EditEncoder = C3EditEncoder(),
  -     encoder: QueryRefEditEncoder = QueryRefEditEncoder(),
  -     drop_comments: bool = True,
        batch_args=BatchArgs.train_default(),
        test_batch_args=BatchArgs.eval_default(),
        train_args=TrainingArgs(),
        recreate_data: bool = False,
        eval_only: bool = False,
    ):
    # offset: 1
    <s>Model.from_code_t5(
                "base", reuse_embed=True, reinit_weights=train_args.reinit_weights
            )
        else:
            model = RetrievalEditorModel.load(get_model_dir() / model_name)
    
        if os.getenv("CUDA_VISIBLE_DEVICES") is None:
            warnings.warn(
                "CUDA_VISIBLE_DEVICES not set, using 0. Note that "
                "the Huggingface Trainer will use all visible GPUs for training."
            )
            os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    
        if not eval_only:
            with timed_action("Warm-up Training"):
                warmup_bargs = copy.deepcopy(batch_args)
                warmup_bargs.max_total_ref_tks //= 4
                warmup_bargs.min_queires *= 4
                warmup_bargs.max_queries *= 2
    
                warmup_targs = copy.deepcopy(train_args)
                warmup_targs.learning_rate *= 4
                warmup_targs.max_train_epochs = 1
                all_edits = datasets["train"].all_edits()
                warmup_edits = random_subset(all_edits, len(all_edits) // 4)
                model.train_on_data(
                    model_name,
                    TokenizedEditDataset.from_edits(warmup_edits),
                    datasets["valid"],
                    warmup_targs,
                    batch_args=warmup_b</s>
===========below chunk 1===========
    <s>_name="medium",
        model_variant="-sig-analysis-post_usees",
  +     encoder: C3EditEncoder = C3EditEncoder(),
  -     encoder: QueryRefEditEncoder = QueryRefEditEncoder(),
  -     drop_comments: bool = True,
        batch_args=BatchArgs.train_default(),
        test_batch_args=BatchArgs.eval_default(),
        train_args=TrainingArgs(),
        recreate_data: bool = False,
        eval_only: bool = False,
    ):
    # offset: 2
    <s>up_edits),
                    datasets["valid"],
                    warmup_targs,
                    batch_args=warmup_bargs,
                    eval_batch_args=test_batch_args,
                )
            with timed_action("Fine-tune Training"):
                model.train_on_data(
                    model_name,
                    datasets["train"],
                    datasets["valid"],
                    train_args,
                    batch_args=batch_args,
                    eval_batch_args=test_batch_args,
                )
    
        model.to("cuda")
        with timed_action("Loss Evaluation"):
            eval_result = model.eval_loss_on_data(datasets["test"], test_batch_args)
            eval_dict = {f"test/{k}": v.average() for k, v in eval_result.items()}
            wandb.log(eval_dict)
    
        max_saved_samples = 300
    
        with timed_action("Accuracy Evaluation"):
            dec_result = model.predict_on_data(datasets["test"], test_batch_args, dec_args)
            pickle_dump(get_model_dir() / model_name / "dec_result.pkl", dec_result)
            exact_acc, exact_correct_map = dec_result.exact_match_accuracy()
            wandb.log({"test/exact-acc": exact_acc.average()})
    
            out_dir = get_</s>
===========below chunk 2===========
    <s>_name="medium",
        model_variant="-sig-analysis-post_usees",
  +     encoder: C3EditEncoder = C3EditEncoder(),
  -     encoder: QueryRefEditEncoder = QueryRefEditEncoder(),
  -     drop_comments: bool = True,
        batch_args=BatchArgs.train_default(),
        test_batch_args=BatchArgs.eval_default(),
        train_args=TrainingArgs(),
        recreate_data: bool = False,
        eval_only: bool = False,
    ):
    # offset: 3
    <s>dir() / model_name / "exact_match_samples"
            dec_result.save_examples_to_dir(
                out_dir, random_subset(exact_correct_map, max_saved_samples)
            )
            print("Exact-match samples saved to:", out_dir)
    
        return model
    
    
===========unchanged ref 0===========
    at: _warnings
        warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
        warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
    
    at: coeditor._utils
        timed_action(name: str, silent: bool=False)
    
        pickle_dump(file: Path, obj: Any)
    
        get_modified_args(instance, flatten: bool=False) -> dict[str, Any] | None
    
    at: coeditor.common
        get_model_dir(trained=True) -> Path
    
        random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2]
        random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1]
    
    at: coeditor.dataset
        TokenizedEditDataset(_edits: list[TEdit])
    
    at: coeditor.dataset.TokenizedEditDataset
        _edits: list[TEdit]
    
        from_edits(edits: Iterable[TEdit]) -> "TokenizedEditDataset[TEdit]"
    
    at: coeditor.model
        DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1)
    
    at: copy
        deepcopy(x: _T, memo: Optional[Dict[int, Any]]=..., _nil: Any=...) -> _T
    
    at: os
        environ = _createenviron()
    
        getenv(key: str, default: _T) -> Union[str, _T]
        getenv(key: str) -> Optional[str]
    
    at: train_model
        check_save_dir(model_name: str) -> None
    
    at: wandb
        init = wandb_sdk.init
    
    
===========unchanged ref 1===========
        log = _preinit.PreInitCallable("wandb.log", wandb_sdk.wandb_run.Run.log)
    
    
===========changed ref 0===========
    # module: coeditor.code_change
    class NoProcessing(ProjectChangeProcessor[JProjectChange]):
        def process_change(
            self,
            pchange: JProjectChange,
            pre_analysis,
            post_analysis,
  +     ) -> Sequence[JProjectChange]:
  -     ) -> Iterable[JProjectChange]:
  +         return [pchange]
  -         yield pchange
    
===========changed ref 1===========
    # module: coeditor.dataset
  + @dataclass
  + class C3EditEncoder:
  +     change_processor: ProjectChangeProcessor[C3Problem] = field(
  +         default_factory=C3ProblemGenerator
  +     )
  +     edit_tokenizer: C3ProblemTokenizer = field(default_factory=C3ProblemTokenizer)
  +  | 
| 
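The warm-up phase in `train_model` above derives its hyperparameters by deep-copying the fine-tuning settings and mutating only the copy. A toy illustration; this `BatchArgs` is a hypothetical stand-in for the real class, with made-up defaults:

    import copy
    from dataclasses import dataclass

    @dataclass
    class BatchArgs:  # hypothetical stand-in for the real BatchArgs
        max_total_ref_tks: int = 4096
        max_queries: int = 8

    batch_args = BatchArgs()
    warmup_bargs = copy.deepcopy(batch_args)  # independent copy
    warmup_bargs.max_total_ref_tks //= 4      # cheaper steps during warm-up
    warmup_bargs.max_queries *= 2

    assert batch_args.max_total_ref_tks == 4096   # original is untouched
    assert warmup_bargs.max_total_ref_tks == 1024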
	coeditor.dataset/dataset_from_projects | 
	Modified | 
	temp-1 | 
	c2672194b1a2abfcf00b08e9e73e6851407c3d95 | 
	Fix jedi cache error. Print processor errors. | 
	 <0>:<add>     pretty_print_dict(error_counts)
 | 
	      # module: coeditor.dataset
      def dataset_from_projects(
          project_roots: Sequence[Path],
          encoder: C3EditEncoder,
          repo_training: Sequence[bool],
          max_history_per_repo: int = 1000,
          workers: int = DefaultWorkers,
      ) -> "TokenizedEditDataset[TkC3Problem]":
      <s>, repo_training):
              history_chunk_size = max(50, math.ceil(len(h) / 4))
              for i in range(0, len(h), history_chunk_size):
                  roots.append(root)
                  chunk_training.append(train)
                  # note that we need 1 extra overlapping commit to get all diffs
                  chunked_histories.append(h[i : i + history_chunk_size + 1])
          workdirs = [workdir / f"chunk-{i}" for i in range(len(roots))]
          try:
    +         presults = pmap(
    -         tk_edits = pmap(
                  _process_commits,
                  roots,
                  workdirs,
                  chunked_histories,
                  key_args={"encoder": encoder},
                  desc="Create tokenized edits",
                  max_workers=workers,
                  tqdm_args={"unit": "chunk"},
              )
          finally:
              if workdir.exists():
                  shutil.rmtree(workdir)
                  print("Workdir removed:", workdir)
          project2edits = dict[Path, list[TkC3Problem]]()
    + 
    +     error_counts = dict[str, int]()
    +     for root, pr in zip(roots, presults):
    -     for root, edits in zip(roots, tk_edits):
    +         project2edits.setdefault(root, []).extend(pr.edits)
    -         project2edits.setdefault(root, []).extend(edits)
    +         for k, v in pr.processor_errors.items():
    +             error_counts[k] = error_counts.get(k, 0) + v
    + 
    +     print("Processor Errors:")
 <0>  
          return TokenizedEditDataset(project2edits)
      
       | 
	===========above chunk 0===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
        encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
    ) -> "TokenizedEditDataset[TkC3Problem]":
    # offset: -1
        """
        Create a TokenizedEditDataset from a list of project roots and a given encoder.
        Args:
            - max_history_per_repo (int, optional): When the repo history is longer than
            this value, only the oldest portion is going to be used. Defaults to 1000.
        """
        workdir = Path(tempfile.gettempdir()) / "dataset_from_projects"
        histories = pmap(
            get_commit_history,
            project_roots,
            max_workers=workers,
            desc="Getting commit histories",
            tqdm_args={"unit": "repo"},
        )
        # keep the oldest portion of the history
        histories = [commits[-max_history_per_repo:] for commits in histories]
        # break long commit sequences into chunks for parallelization
        roots = list[Path]()
        chunk_training = list[bool]()
        chunked_histories = list[list[CommitInfo]]()
        for root, h, train in zip(project_roots, histories, repo_training):
            history_chunk_size = max(50, math.ceil(len(h) / 4))
    </s>
===========unchanged ref 0===========
    at: coeditor._utils
        DefaultWorkers: int = multiprocessing.cpu_count() // 2
        global DefaultWorkers
    
        pmap(f: Callable[..., T1], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], /, *iterables: Iterable[Any], desc: str | None=None, key_args: Mapping[str, Any] | None=None, max_workers: int | None=None, chunksize: int | None=None, tqdm_args: Mapping[str, Any] | None=None) -> list[T1]
    
    at: coeditor.dataset
        C3EditEncoder(change_processor: ProjectChangeProcessor[C3Problem]=field(
                default_factory=C3ProblemGenerator
            ), edit_tokenizer: C3ProblemTokenizer=field(default_factory=C3ProblemTokenizer))
    
        _ProcessingResult(edits: Sequence[TkC3Problem], processor_errors: dict[str, int])
    
        _process_commits(root: Path, workdir: Path, commits: Sequence[CommitInfo], encoder: C3EditEncoder) -> _ProcessingResult
    
    at: coeditor.dataset.C3EditEncoder
        change_processor: ProjectChangeProcessor[C3Problem] = field(
                default_factory=C3ProblemGenerator
            )
    
    at: coeditor.dataset._process_commits
        edits = edits_from_commit_history(
                    root,
                    commits,
                    tempdir=workdir / "code",
                    change_processor=encoder.change_processor,
                    edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                    silent=True,
                )
        edits = []
    
    at: math
        ceil(x: SupportsFloat, /) -> int
    
    at: pathlib
        Path()
    
    at: pathlib.Path
        __slots__ = ()
    
        exists() -> bool
    
    at: shutil
        rmtree(path: Union[bytes, StrPath], ignore_errors: bool=..., onerror: Optional[Callable[[Any, Any, Any], Any]]=...) -> None
    
    at: tempfile
        gettempdir() -> str
    
    
===========unchanged ref 1===========
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: coeditor.dataset
  + @dataclass
  + class _ProcessingResult:
  +     edits: Sequence[TkC3Problem]
  +     processor_errors: dict[str, int]
  + 
===========changed ref 1===========
    # module: coeditor.dataset
    def _process_commits(
        root: Path,
        workdir: Path,
        commits: Sequence[CommitInfo],
        encoder: C3EditEncoder,
  + ) -> _ProcessingResult:
  - ) -> Sequence[TkC3Problem]:
        # use process-specific parso cache
  -     old_cache = jedi.settings.cache_directory
  -     jedi.settings.cache_directory = workdir / "jedi_cache"
  +     _fix_jedi_cache(workdir)
        try:
            # cannot return here since subprocess will be killed after returning
  +         edits = edits_from_commit_history(
  -         return edits_from_commit_history(
                root,
                commits,
                tempdir=workdir / "code",
                change_processor=encoder.change_processor,
                edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                silent=True,
            )
        except UnicodeDecodeError as e:
            # this might happen in rare cases
            warnings.warn(f"Unable to process project: {root}\nError: {e}")
  +         edits = []
  +     return _ProcessingResult(
  +         edits,
  +         encoder.change_processor.get_errors(),
  +     )
  -         return []
  -     finally:
  -         jedi.settings.cache_directory = old_cache
     | 
| 
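`dataset_from_projects` above merges the per-chunk error counters from `_ProcessingResult.processor_errors` into one summary dict before printing it. A small sketch of that aggregation (the function name is hypothetical):

    def merge_error_counts(per_chunk: list[dict[str, int]]) -> dict[str, int]:
        error_counts: dict[str, int] = {}
        for errors in per_chunk:
            for k, v in errors.items():
                error_counts[k] = error_counts.get(k, 0) + v
        return error_counts

    assert merge_error_counts([{"a": 1}, {"a": 2, "b": 1}]) == {"a": 3, "b": 1}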
	coeditor.ctx_change_encoder/C3ProblemGenerator.__init__ | 
	Modified | 
	temp-1 | 
	c2672194b1a2abfcf00b08e9e73e6851407c3d95 | 
	Fix jedi cache error. Print processor errors. | 
	 <0>:<add>         self.analyzer = analyzer
 | 
	      # module: coeditor.ctx_change_encoder
      class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
    +     def __init__(self, analyzer: "JediUsageAnalyzer | None" = None):
    -     def __init__(self, analysis: "JediUsageAnalyzer | None" = None):
    +         if analyzer is None:
    -         if analysis is None:
    +             analyzer = JediUsageAnalyzer()
    -             analysis = JediUsageAnalyzer()
    -         self.analysis = analysis
 <0>  
       | 
	===========unchanged ref 0===========
    at: coeditor.ctx_change_encoder
        JediUsageAnalyzer()
    
    at: coeditor.ctx_change_encoder.C3ProblemGenerator
        VERSION = "1.0"
    
    
===========changed ref 0===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  +     def clear_errors(self):
  +         return None
  + 
===========changed ref 1===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  +     def get_errors(self) -> dict[str, int]:
  +         return dict()
  + 
===========changed ref 2===========
    # module: coeditor.dataset
  + @dataclass
  + class _ProcessingResult:
  +     edits: Sequence[TkC3Problem]
  +     processor_errors: dict[str, int]
  + 
===========changed ref 3===========
    # module: coeditor.dataset
    def _process_commits(
        root: Path,
        workdir: Path,
        commits: Sequence[CommitInfo],
        encoder: C3EditEncoder,
  + ) -> _ProcessingResult:
  - ) -> Sequence[TkC3Problem]:
        # use process-specific parso cache
  -     old_cache = jedi.settings.cache_directory
  -     jedi.settings.cache_directory = workdir / "jedi_cache"
  +     _fix_jedi_cache(workdir)
        try:
            # cannot return here since subprocess will be killed after returning
  +         edits = edits_from_commit_history(
  -         return edits_from_commit_history(
                root,
                commits,
                tempdir=workdir / "code",
                change_processor=encoder.change_processor,
                edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                silent=True,
            )
        except UnicodeDecodeError as e:
            # this might happen in rare cases
            warnings.warn(f"Unable to process project: {root}\nError: {e}")
  +         edits = []
  +     return _ProcessingResult(
  +         edits,
  +         encoder.change_processor.get_errors(),
  +     )
  -         return []
  -     finally:
  -         jedi.settings.cache_directory = old_cache
    
===========changed ref 4===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
        encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
    ) -> "TokenizedEditDataset[TkC3Problem]":
        """
        Create a TokenizedEditDataset from a list of project roots and a given encoder.
        Args:
            - max_history_per_repo (int, optional): When the repo history is longer than
            this value, only the oldest portion is going to be used. Defaults to 1000.
        """
        workdir = Path(tempfile.gettempdir()) / "dataset_from_projects"
        histories = pmap(
            get_commit_history,
            project_roots,
            max_workers=workers,
            desc="Getting commit histories",
            tqdm_args={"unit": "repo"},
        )
        # keep the oldest portion of the history
        histories = [commits[-max_history_per_repo:] for commits in histories]
        # break long commit sequences into chunks for parallelization
        roots = list[Path]()
        chunk_training = list[bool]()
        chunked_histories = list[list[CommitInfo]]()
        for root, h, train in zip(project_roots, histories, repo_training):
            history_chunk_size = max(50, math.ceil(len(h) / 4))
            for i in range(0, len(h), history_chunk_size):
                roots.append(root)
                chunk_training.append(train)
                # note that we need 1 extra overlapping commit to get all diffs
                chunked_histories.append(h[i : i + history_chunk_size + 1])
        workdirs = [workdir / f"chunk-{i}" for i in range(len(roots))]
        try:
  +         presults = pmap(
  -         tk_edits = pmap(
                _process_commits,
                roots,
                workdirs,
                chunked_histories,
                key_args={"encoder": encoder</s>
===========changed ref 5===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
        encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
    ) -> "TokenizedEditDataset[TkC3Problem]":
    # offset: 1
    <s>process_commits,
                roots,
                workdirs,
                chunked_histories,
                key_args={"encoder": encoder},
                desc="Create tokenized edits",
                max_workers=workers,
                tqdm_args={"unit": "chunk"},
            )
        finally:
            if workdir.exists():
                shutil.rmtree(workdir)
                print("Workdir removed:", workdir)
        project2edits = dict[Path, list[TkC3Problem]]()
  + 
  +     error_counts = dict[str, int]()
  +     for root, pr in zip(roots, presults):
  -     for root, edits in zip(roots, tk_edits):
  +         project2edits.setdefault(root, []).extend(pr.edits)
  -         project2edits.setdefault(root, []).extend(edits)
  +         for k, v in pr.processor_errors.items():
  +             error_counts[k] = error_counts.get(k, 0) + v
  + 
  +     print("Processor Errors:")
  +     pretty_print_dict(error_counts)
    
        return TokenizedEditDataset(project2edits)
     | 
| 
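The `C3ProblemGenerator` records above add error-reporting hooks: the base processor exposes no-op defaults, and the generator overrides them to forward to its analyzer's counter. A condensed sketch of that shape, with a simplified stand-in for `JediUsageAnalyzer`:

    class JediUsageAnalyzer:  # simplified stand-in for the real analyzer
        def __init__(self) -> None:
            self.error_counts: dict[str, int] = {}

    class ProjectChangeProcessor:
        def get_errors(self) -> dict[str, int]:
            return dict()  # default: no errors tracked

        def clear_errors(self) -> None:
            return None

    class C3ProblemGenerator(ProjectChangeProcessor):
        def __init__(self, analyzer: JediUsageAnalyzer | None = None) -> None:
            self.analyzer = analyzer if analyzer is not None else JediUsageAnalyzer()

        def get_errors(self) -> dict[str, int]:
            return self.analyzer.error_counts

        def clear_errors(self) -> None:
            self.analyzer.error_counts.clear()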
	coeditor.ctx_change_encoder/C3ProblemGenerator.pre_edit_analysis | 
	Modified | 
	temp-1 | 
	c2672194b1a2abfcf00b08e9e73e6851407c3d95 | 
	Fix jedi cache error. Print processor errors. | 
	 <0>:<add>             line_usages = self.analyzer.get_line_usages(
 | 
	      # module: coeditor.ctx_change_encoder
      class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
          def pre_edit_analysis(
              self,
              pstate: ProjectState,
              modules: Mapping[RelPath, JModule],
              changes: Mapping[ModuleName, JModuleChange],
          ) -> Mapping[ModuleName, LineUsageAnalysis]:
              "Return the definition usages of each line."
              project = pstate.project
              result = dict[ModuleName, LineUsageAnalysis]()
      
              src_map = {m.mname: f for f, m in modules.items()}
              for mname, mchange in changes.items():
                  if not isinstance(mchange.module_change, Modified):
                      continue
      
                  lines_to_analyze = set[int]()
                  for span in mchange.changed.values():
                      if span.change is Added:
                          continue
                      lines_to_analyze.update(range(*span.line_range))
                      lines_to_analyze.update(range(*span.header_line_range))
      
                  mod_path = src_map[mname]
                  script = pstate.scripts[mod_path]
    -             line_usages = self.analysis.get_line_usages(
 <0>                  script, project.path, lines_to_analyze, silent=True
                  )
                  result[mname] = line_usages
              return result
      
       | 
	===========unchanged ref 0===========
    at: coeditor.code_change.ChangedSpan
        change: Change[str]
    
        parent_scopes: Sequence[Change[ChangeScope]]
    
        line_range: LineRange
    
    at: coeditor.code_change.JModule
        mname: ModuleName
    
        tree: ptree.Module
    
    at: coeditor.code_change.JModuleChange
        module_change: Change[JModule]
    
        changed: Mapping[ProjectPath, ChangedSpan]
    
    at: coeditor.common
        RelPath = NewType("RelPath", Path)
    
    at: coeditor.ctx_change_encoder
        LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
    
    at: coeditor.ctx_change_encoder.C3ProblemGenerator.__init__
        analyzer = JediUsageAnalyzer()
        self.analyzer = analyzer
    
    at: coeditor.ctx_change_encoder.JediUsageAnalyzer.__post_init__
        self.error_counts = dict[str, int]()
    
    at: spot.static_analysis
        ModuleName = str
    
    at: typing
        Mapping = _alias(collections.abc.Mapping, 2)
    
    at: typing.Mapping
        items() -> AbstractSet[Tuple[_KT, _VT_co]]
    
        values() -> ValuesView[_VT_co]
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def clear_errors(self):
  +         return self.analyzer.error_counts.clear()
  + 
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def get_errors(self) -> dict[str, int]:
  +         return self.analyzer.error_counts
  + 
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def __init__(self, analyzer: "JediUsageAnalyzer | None" = None):
  -     def __init__(self, analysis: "JediUsageAnalyzer | None" = None):
  +         if analyzer is None:
  -         if analysis is None:
  +             analyzer = JediUsageAnalyzer()
  -             analysis = JediUsageAnalyzer()
  +         self.analyzer = analyzer
  -         self.analysis = analysis
    
===========changed ref 3===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  +     def clear_errors(self):
  +         return None
  + 
===========changed ref 4===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  +     def get_errors(self) -> dict[str, int]:
  +         return dict()
  + 
===========changed ref 5===========
    # module: coeditor.dataset
  + @dataclass
  + class _ProcessingResult:
  +     edits: Sequence[TkC3Problem]
  +     processor_errors: dict[str, int]
  + 
===========changed ref 6===========
    # module: coeditor.dataset
    def _process_commits(
        root: Path,
        workdir: Path,
        commits: Sequence[CommitInfo],
        encoder: C3EditEncoder,
  + ) -> _ProcessingResult:
  - ) -> Sequence[TkC3Problem]:
        # use process-specific parso cache
  -     old_cache = jedi.settings.cache_directory
  -     jedi.settings.cache_directory = workdir / "jedi_cache"
  +     _fix_jedi_cache(workdir)
        try:
            # cannot return here since subprocess will be killed after returning
  +         edits = edits_from_commit_history(
  -         return edits_from_commit_history(
                root,
                commits,
                tempdir=workdir / "code",
                change_processor=encoder.change_processor,
                edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                silent=True,
            )
        except UnicodeDecodeError as e:
            # this might happen in rare cases
            warnings.warn(f"Unable to process project: {root}\nError: {e}")
  +         edits = []
  +     return _ProcessingResult(
  +         edits,
  +         encoder.change_processor.get_errors(),
  +     )
  -         return []
  -     finally:
  -         jedi.settings.cache_directory = old_cache
    
===========changed ref 7===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
        encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
    ) -> "TokenizedEditDataset[TkC3Problem]":
        """
        Create a TokenizedEditDataset from a list of project roots and a given encoder.
        Args:
            - max_history_per_repo (int, optional): When the repo history is longer than
            this value, only the oldest portion is going to be used. Defaults to 1000.
        """
        workdir = Path(tempfile.gettempdir()) / "dataset_from_projects"
        histories = pmap(
            get_commit_history,
            project_roots,
            max_workers=workers,
            desc="Getting commit histories",
            tqdm_args={"unit": "repo"},
        )
        # keep the oldest portion of the history
        histories = [commits[-max_history_per_repo:] for commits in histories]
        # break long commit sequences into chunks for parallelization
        roots = list[Path]()
        chunk_training = list[bool]()
        chunked_histories = list[list[CommitInfo]]()
        for root, h, train in zip(project_roots, histories, repo_training):
            history_chunk_size = max(50, math.ceil(len(h) / 4))
            for i in range(0, len(h), history_chunk_size):
                roots.append(root)
                chunk_training.append(train)
                # note that we need 1 extra overlapping commit to get all diffs
                chunked_histories.append(h[i : i + history_chunk_size + 1])
        workdirs = [workdir / f"chunk-{i}" for i in range(len(roots))]
        try:
  +         presults = pmap(
  -         tk_edits = pmap(
                _process_commits,
                roots,
                workdirs,
                chunked_histories,
                key_args={"encoder": encoder</s>
===========changed ref 8===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
        encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
    ) -> "TokenizedEditDataset[TkC3Problem]":
    # offset: 1
    <s>process_commits,
                roots,
                workdirs,
                chunked_histories,
                key_args={"encoder": encoder},
                desc="Create tokenized edits",
                max_workers=workers,
                tqdm_args={"unit": "chunk"},
            )
        finally:
            if workdir.exists():
                shutil.rmtree(workdir)
                print("Workdir removed:", workdir)
        project2edits = dict[Path, list[TkC3Problem]]()
  + 
  +     error_counts = dict[str, int]()
  +     for root, pr in zip(roots, presults):
  -     for root, edits in zip(roots, tk_edits):
  +         project2edits.setdefault(root, []).extend(pr.edits)
  -         project2edits.setdefault(root, []).extend(edits)
  +         for k, v in pr.processor_errors.items():
  +             error_counts[k] = error_counts.get(k, 0) + v
  + 
  +     print("Processor Errors:")
  +     pretty_print_dict(error_counts)
    
        return TokenizedEditDataset(project2edits)
     | 
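===========illustrative sketch===========
    The changed refs above merge per-chunk `processor_errors` dicts with a
    `dict.get` accumulation loop; a minimal equivalent sketch using
    `collections.Counter` (the error names and counts here are made up):

    from collections import Counter

    # one error dict per processed chunk, as carried by _ProcessingResult
    chunk_errors = [
        {"timeout": 2, "parse": 1},
        {"parse": 3},
    ]
    total = Counter()
    for errs in chunk_errors:
        # same effect as: error_counts[k] = error_counts.get(k, 0) + v
        total.update(errs)
    assert total == {"timeout": 2, "parse": 4}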
| 
	coeditor.ctx_change_encoder/C3ProblemGenerator.post_edit_analysis | 
	Modified | 
	temp-1 | 
	c2672194b1a2abfcf00b08e9e73e6851407c3d95 | 
	Fix jedi cache error. Print processor errors. | 
	 <0>:<add>                 for source in _fast_goto(
 | 
	      # module: coeditor.ctx_change_encoder
      class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
          def post_edit_analysis(
              self,
              pstate: ProjectState,
              modules: Mapping[RelPath, JModule],
              changes: Mapping[ModuleName, JModuleChange],
          ) -> list[ModuleName]:
              "Return the topological order among the modules."
              # sort modules topologically
              module_deps = dict[ModuleName, set[ModuleName]]()
              for rel_path, module in modules.items():
                  names = {n for n in module.imported_names}
                  script = pstate.scripts[rel_path]
                  deps = module_deps.setdefault(module.mname, set())
                  for n in names:
    -                 for source in fast_goto(
 <0>                      script, n, follow_imports=True, follow_builtin_imports=False
                      ):
                          deps.add(source.module_name)
              module_order = sort_modules_by_imports(module_deps)
              return module_order
      
       | 
	===========unchanged ref 0===========
    at: coeditor.code_change.JModule
        mname: ModuleName
    
    at: coeditor.common
        RelPath = NewType("RelPath", Path)
    
    at: coeditor.ctx_change_encoder.C3ProblemGenerator.__init__
        analyzer = JediUsageAnalyzer()
        self.analyzer = analyzer
    
    at: coeditor.ctx_change_encoder.C3ProblemGenerator.pre_edit_analysis
        project = pstate.project
    
        lines_to_analyze = set[int]()
    
        script = pstate.scripts[mod_path]
    
    at: coeditor.ctx_change_encoder.JediUsageAnalyzer
        get_line_usages(self, script: jedi.Script, proj_root: Path, lines_to_analyze: Collection[int], silent: bool=False)
        get_line_usages(script: jedi.Script, proj_root: Path, lines_to_analyze: Collection[int], silent: bool=False)
    
    at: spot.static_analysis
        ModuleName = str
    
    at: typing
        Mapping = _alias(collections.abc.Mapping, 2)
    
    at: typing.Mapping
        items() -> AbstractSet[Tuple[_KT, _VT_co]]
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def clear_errors(self):
  +         return self.analyzer.error_counts.clear()
  + 
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def get_errors(self) -> dict[str, int]:
  +         return self.analyzer.error_counts
  + 
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def __init__(self, analyzer: "JediUsageAnalyzer | None" = None):
  -     def __init__(self, analysis: "JediUsageAnalyzer | None" = None):
  +         if analyzer is None:
  -         if analysis is None:
  +             analyzer = JediUsageAnalyzer()
  -             analysis = JediUsageAnalyzer()
  +         self.analyzer = analyzer
  -         self.analysis = analysis
    
===========changed ref 3===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
        def pre_edit_analysis(
            self,
            pstate: ProjectState,
            modules: Mapping[RelPath, JModule],
            changes: Mapping[ModuleName, JModuleChange],
        ) -> Mapping[ModuleName, LineUsageAnalysis]:
            "Return the definition usages of each line."
            project = pstate.project
            result = dict[ModuleName, LineUsageAnalysis]()
    
            src_map = {m.mname: f for f, m in modules.items()}
            for mname, mchange in changes.items():
                if not isinstance(mchange.module_change, Modified):
                    continue
    
                lines_to_analyze = set[int]()
                for span in mchange.changed.values():
                    if span.change is Added:
                        continue
                    lines_to_analyze.update(range(*span.line_range))
                    lines_to_analyze.update(range(*span.header_line_range))
    
                mod_path = src_map[mname]
                script = pstate.scripts[mod_path]
  +             line_usages = self.analyzer.get_line_usages(
  -             line_usages = self.analysis.get_line_usages(
                    script, project.path, lines_to_analyze, silent=True
                )
                result[mname] = line_usages
            return result
    
===========changed ref 4===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  +     def clear_errors(self):
  +         return None
  + 
===========changed ref 5===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  +     def get_errors(self) -> dict[str, int]:
  +         return dict()
  + 
===========changed ref 6===========
    # module: coeditor.dataset
  + @dataclass
  + class _ProcessingResult:
  +     edits: Sequence[TkC3Problem]
  +     processor_errors: dict[str, int]
  + 
===========changed ref 7===========
    # module: coeditor.dataset
    def _process_commits(
        root: Path,
        workdir: Path,
        commits: Sequence[CommitInfo],
        encoder: C3EditEncoder,
  + ) -> _ProcessingResult:
  - ) -> Sequence[TkC3Problem]:
        # use process-specific parso cache
  -     old_cache = jedi.settings.cache_directory
  -     jedi.settings.cache_directory = workdir / "jedi_cache"
  +     _fix_jedi_cache(workdir)
        try:
            # cannot return here since subprocess will be killed after returning
  +         edits = edits_from_commit_history(
  -         return edits_from_commit_history(
                root,
                commits,
                tempdir=workdir / "code",
                change_processor=encoder.change_processor,
                edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                silent=True,
            )
        except UnicodeDecodeError as e:
            # this might happen in rare cases
            warnings.warn(f"Unable to process project: {root}\nError: {e}")
  +         edits = []
  +     return _ProcessingResult(
  +         edits,
  +         encoder.change_processor.get_errors(),
  +     )
  -         return []
  -     finally:
  -         jedi.settings.cache_directory = old_cache
    
===========changed ref 8===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
        encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
    ) -> "TokenizedEditDataset[TkC3Problem]":
        """
        Create a TokenizedEditDataset from a list of project roots and a given encoder.
        Args:
            - max_history_per_repo (int, optional): When the repo history is longer than
            this value, only the oldest portion is going to be used. Defaults to 1000.
        """
        workdir = Path(tempfile.gettempdir()) / "dataset_from_projects"
        histories = pmap(
            get_commit_history,
            project_roots,
            max_workers=workers,
            desc="Getting commit histories",
            tqdm_args={"unit": "repo"},
        )
        # keep the oldest portion of the history
        histories = [commits[-max_history_per_repo:] for commits in histories]
        # break long commit sequences into chunks for parallelization
        roots = list[Path]()
        chunk_training = list[bool]()
        chunked_histories = list[list[CommitInfo]]()
        for root, h, train in zip(project_roots, histories, repo_training):
            history_chunk_size = max(50, math.ceil(len(h) / 4))
            for i in range(0, len(h), history_chunk_size):
                roots.append(root)
                chunk_training.append(train)
                # note that we need 1 extra overlapping commit to get all diffs
                chunked_histories.append(h[i : i + history_chunk_size + 1])
        workdirs = [workdir / f"chunk-{i}" for i in range(len(roots))]
        try:
  +         presults = pmap(
  -         tk_edits = pmap(
                _process_commits,
                roots,
                workdirs,
                chunked_histories,
                key_args={"encoder": encoder</s> | 
| 
	coeditor.ctx_change_encoder/JediUsageAnalyzer.get_line_usages | 
	Modified | 
	temp-1 | 
	c2672194b1a2abfcf00b08e9e73e6851407c3d95 | 
	Fix jedi cache error. Print processor errors. | 
	 <0>:<add>                 errors[err_text] = errors.get(err_text, 0) + 1
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class JediUsageAnalyzer:
          def get_line_usages(
              self,
              script: jedi.Script,
              proj_root: Path,
              lines_to_analyze: Collection[int],
              silent: bool = False,
          ):
      <s>names.sort(key=lambda x: x.start_pos)
              errors = self.error_counts
              unexpected = dict[str, int]()
              for name in tqdm(all_names, f"Analyzing {script.path}", disable=silent):
                  name: tree.Name
                  line = name.start_pos[0]
                  if line not in lines_to_analyze:
                      continue
                  usages = line2usages.setdefault(line, set())
                  try:
    +                 defs = _fast_goto(
    -                 defs = fast_goto(
                          script,
                          name,
                          follow_imports=True,
                          follow_builtin_imports=False,
                      )
                      for d in defs:
                          usages.update(PyDefinition.from_name(d))
      
    +             except KeyError:
    +                 # for debugging
    +                 raise
                  except Exception as e:
    -                 # if the message is "not enough values to unpack"
                      err_text = repr(e)
                      is_known = any(err in err_text for err in _KnownJediErrors)
    -                 errors[err_text] = errors.setdefault(err_text, 0) + 1
 <0>                  if not is_known:
                          unexpected[err_text] = unexpected.get(err_text, 0) + 1
              if unexpected:
                  project_name = proj_root.name
                  if script.path:
                      file_path = script.path.relative_to(proj_root)
                  else:
                      file_path = "<unknown>"
                  for err, count in unexpected.items():
                      logging.warn(
                          f"Unexpected error when analyzing '{project_name}/{file_path}' ({count=}): {err}"
                      )
              return LineUsageAnalysis(line2usages)
      
       | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class JediUsageAnalyzer:
        def get_line_usages(
            self,
            script: jedi.Script,
            proj_root: Path,
            lines_to_analyze: Collection[int],
            silent: bool = False,
        ):
    # offset: -1
            jmod: tree.Module = script._module_node
            line2usages = dict[int, set[PyDefinition]]()
            all_names = [
                name for k, names in jmod.get_used_names()._dict.items() for name in names
            ]
            all_names.sort(key=lambda x: x.start_pos)
            errors = self.error_counts
            unexpected = dict[</s>
===========unchanged ref 0===========
    at: coeditor._utils
        TimeLogger(times: dict[str, list[float]]=field(default_factory=dict))
    
    at: coeditor.ctx_change_encoder
        PyDefinition(full_name: PyFullName, start_pos: tuple[int, int], end_pos: tuple[int, int])
    
        _KnownJediErrors = {
            "not enough values to unpack (expected 2",
            "'Newline' object has no attribute 'children'",
            "trailer_op is actually <AssertStmt:",
            "There's a scope that was not managed: <Module",
            "maximum recursion depth exceeded",
        }
    
        _fast_goto(script: jedi.Script, tree_name: tree.Name, *, follow_imports=False, follow_builtin_imports=False, only_stubs=False, prefer_stubs=False) -> set[classes.Name]
    
    at: coeditor.ctx_change_encoder.PyDefinition
        full_name: PyFullName
    
        start_pos: tuple[int, int]
    
        end_pos: tuple[int, int]
    
        from_name(name: classes.BaseName) -> Iterable["PyDefinition"]
    
    at: dataclasses
        dataclass(_cls: Type[_T]) -> Type[_T]
        dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
        dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
    
    at: jedi.api
        Script(code=None, *, path=None, environment=None, project=None)
    
    at: jedi.api.Script.__init__
        self.path = path.absolute() if path else None
    
    
===========unchanged ref 1===========
        self._module_node, code = self._inference_state.parse_and_get_code(
                    code=code,
                    path=self.path,
                    use_latest_grammar=path and path.suffix == '.pyi',
                    cache=False,  # No disk cache, because the current script often changes.
                    diff_cache=settings.fast_parser,
                    cache_path=settings.cache_directory,
                )
    
    at: parso.python.tree
        Name()
    
        Module(children)
    
    at: parso.python.tree.Module
        __slots__ = ('_used_names',)
    
        type = 'file_input'
    
        get_used_names()
    
    at: parso.python.tree.UsedNamesMapping.__init__
        self._dict = dct
    
    at: parso.tree.Leaf.__init__
        self.start_pos = start_pos
    
    at: pathlib
        Path()
    
    at: pathlib.PurePath
        __slots__ = (
                '_drv', '_root', '_parts',
                '_str', '_hash', '_pparts', '_cached_cparts',
            )
    
        drive = property(attrgetter('_drv'),
                             doc="""The drive prefix (letter or UNC path), if any.""")
    
        root = property(attrgetter('_root'),
                            doc="""The root of the path, if any.""")
    
    at: tqdm.std
        tqdm(iterable=None, desc=None, total=None, leave=True, file=None, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None, unit_divisor=1000, write_bytes=False, lock_args=None, nrows=None, colour=None, delay=0, gui=False, **kwargs)
    
    at: typing
        Collection = _alias(collections.abc.Collection, 1)
    
    
===========unchanged ref 2===========
    at: typing.Mapping
        get(key: _KT) -> Optional[_VT_co]
        get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def clear_errors(self):
  +         return self.analyzer.error_counts.clear()
  + 
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def get_errors(self) -> dict[str, int]:
  +         return self.analyzer.error_counts
  + 
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def __init__(self, analyzer: "JediUsageAnalyzer | None" = None):
  -     def __init__(self, analysis: "JediUsageAnalyzer | None" = None):
  +         if analyzer is None:
  -         if analysis is None:
  +             analyzer = JediUsageAnalyzer()
  -             analysis = JediUsageAnalyzer()
  +         self.analyzer = analyzer
  -         self.analysis = analysis
    
===========changed ref 3===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
        def post_edit_analysis(
            self,
            pstate: ProjectState,
            modules: Mapping[RelPath, JModule],
            changes: Mapping[ModuleName, JModuleChange],
        ) -> list[ModuleName]:
            "Return the topological order among the modules."
            # sort modules topologically
            module_deps = dict[ModuleName, set[ModuleName]]()
            for rel_path, module in modules.items():
                names = {n for n in module.imported_names}
                script = pstate.scripts[rel_path]
                deps = module_deps.setdefault(module.mname, set())
                for n in names:
  +                 for source in _fast_goto(
  -                 for source in fast_goto(
                        script, n, follow_imports=True, follow_builtin_imports=False
                    ):
                        deps.add(source.module_name)
            module_order = sort_modules_by_imports(module_deps)
            return module_order
     | 
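===========illustrative sketch===========
    get_line_usages tallies every analyzer error (after this commit, via
    `errors.get(err_text, 0) + 1`) but only warns about errors that match
    no known-error substring. A self-contained re-trace of that bookkeeping:

    _KnownJediErrors = {"not enough values to unpack (expected 2"}

    errors: dict[str, int] = {}
    unexpected: dict[str, int] = {}
    err_text = repr(ValueError("not enough values to unpack (expected 2, got 1)"))
    errors[err_text] = errors.get(err_text, 0) + 1
    if not any(known in err_text for known in _KnownJediErrors):
        unexpected[err_text] = unexpected.get(err_text, 0) + 1
    assert errors and not unexpected  # known errors are counted, not reported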
| 
	coeditor.encoding/TkDelta.to_change_tks | 
	Modified | 
	temp-1 | 
	ba5ac8335a7bc3fa47400469334a769b1be553e4 | 
	Fix TkDelta for empty inputs. | 
	 <0>:<add>                 f"Input is longer than delta: {len(lines)=} > {len(self.deltas)=}"
 | 
	      # module: coeditor.encoding
      @dataclass
      class TkDelta:
          def to_change_tks(self, input: TokenSeq) -> TokenSeq:
    +         lines = split_list(input, Newline_id) if input else []
    -         lines = split_list(input, Newline_id)
              if len(lines) > len(self.deltas):
    +             print_err(f"{self.deltas=}")
    -             print_err(f"{self.deltas}")
    +             print_err(f"{input=}")
    -             print_err(f"{input}")
                  raise ValueError(
    -                 f"Delta is longer than input: {len(lines)=} > {len(self.deltas)=}"
 <0>              )
              new_lines = list[TokenSeq]()
              for line, delta in zip(lines, self.deltas):
                  deleted = False
                  if delta:
                      for action in delta:
                          if action[0] == Add_id:
                              new_lines.append(action)
                          elif action[0] == Del_id:
                              deleted = True
                  if deleted:
                      new_lines.append([Del_id] + line)
                  else:
                      new_lines.append(line)
              if len(self.deltas) == len(lines) + 1:
                  delta = self.deltas[-1]
                  for action in delta:
                      if action[0] == Add_id:
                          new_lines.append(action)
              return join_list(new_lines, Newline_id)
      
       | 
	===========unchanged ref 0===========
    at: coeditor.common
        TokenSeq = list[Token]
    
        split_list(lst: list[T1], sep: T1) -> list[list[T1]]
    
        join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
    
        print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
    
    at: coeditor.encoding
        Add_id = get_tk_id(Add)
    
        Del_id = get_tk_id(Del)
    
        Newline_id = get_tk_id("\n")
    
    at: coeditor.encoding.TkDelta
        deltas: list[tuple[TokenSeq, ...]]
    
     | 
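===========illustrative sketch===========
    Why the `if input else []` guard in to_change_tks matters: splitting an
    empty token sequence still yields one empty segment, which would no
    longer line up with a zero-line delta. A stand-in for split_list (the
    real one lives in coeditor.common) demonstrating the edge case:

    def split_list(lst, sep):
        out, cur = [], []
        for x in lst:
            if x == sep:
                out.append(cur)
                cur = []
            else:
                cur.append(x)
        out.append(cur)
        return out

    assert split_list([5, 0, 7], 0) == [[5], [7]]
    assert split_list([], 0) == [[]]           # one empty "line", not zero lines
    tokens: list[int] = []
    lines = split_list(tokens, 0) if tokens else []
    assert lines == []                         # the guard restores zero lines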
| 
	spot.static_analysis/ProjectPath.pop | 
	Modified | 
	temp-1 | 
	2282c6c0bcb9fea8d0b3c73335181e13605c6c00 | 
	Optimize `split_dots`. | 
	 <0>:<add>         p1 = ".".join(split_dots(self.path)[:-1])
 | 
	      # module: spot.static_analysis
      class ProjectPath(NamedTuple):
          def pop(self) -> "ProjectPath":
    -         p1 = ".".join(self.path.split(".")[:-1])
 <0>          return ProjectPath(self.module, p1)
      
       | 
	===========unchanged ref 0===========
    at: spot.static_analysis
        ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
    
    at: spot.static_analysis.ProjectPath
        module: ModuleName
    
        path: ElemPath
    
    at: spot.utils
        _lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
    
    
===========changed ref 0===========
    # module: spot.utils
  + @cache
  + def split_dots(path: str) -> tuple[str, ...]:
  +     return tuple(path.split("."))
  +  | 
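===========illustrative sketch===========
    The point of this commit: memoize the dot-splitting with
    functools.cache. Caching requires the result to be safe to share across
    callers, hence the immutable tuple return; callers that need to mutate
    (such as to_abs_import_path) now copy with `list(...)` first:

    from functools import cache

    @cache
    def split_dots(path: str) -> tuple[str, ...]:
        return tuple(path.split("."))

    assert split_dots("a.b.c") is split_dots("a.b.c")  # second call hits the cache
    segs = list(split_dots("a.b.c")[:-1])              # mutable copy when needed
    segs.append("d")
    assert ".".join(segs) == "a.b.d"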
| 
	spot.static_analysis/PythonFunction.is_test_func | 
	Modified | 
	temp-1 | 
	2282c6c0bcb9fea8d0b3c73335181e13605c6c00 | 
	Optimize `split_dots`. | 
	 <0>:<add>             (file := split_dots(self.path.module)[-1]).startswith("test_")
 | 
	      # module: spot.static_analysis
      @dataclass
      class PythonFunction:
          @cached_property
          def is_test_func(self) -> bool:
              # follow the pytest rules (but ignore the method requirements):
              # https://docs.pytest.org/en/7.1.x/explanation/goodpractices.html#conventions-for-python-test-discovery
              return self.name.startswith("test") and (
    -             (file := self.path.module.split(".")[-1]).startswith("test_")
 <0>              or file.endswith("_test")
              )
      
       | 
	===========unchanged ref 0===========
    at: functools
        cached_property(func: Callable[[Any], _T])
    
    at: spot.static_analysis.ProjectPath
        module: ModuleName
    
    at: spot.static_analysis.PythonFunction
        name: str
    
        path: ProjectPath
    
        parent_class: ProjectPath | None
    
        tree: cst.FunctionDef
    
    at: spot.utils
        _lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
    
    
===========changed ref 0===========
    # module: spot.utils
  + @cache
  + def split_dots(path: str) -> tuple[str, ...]:
  +     return tuple(path.split("."))
  + 
===========changed ref 1===========
    # module: spot.static_analysis
    class ProjectPath(NamedTuple):
        def pop(self) -> "ProjectPath":
  +         p1 = ".".join(split_dots(self.path)[:-1])
  -         p1 = ".".join(self.path.split(".")[:-1])
            return ProjectPath(self.module, p1)
     | 
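===========illustrative sketch===========
    A worked example of the discovery rule encoded by is_test_func (helper
    re-implemented here for illustration): a function counts as a test when
    its name starts with "test" and its file is named test_*.py or *_test.py:

    def looks_like_test(func_name: str, module: str) -> bool:
        file = module.split(".")[-1]
        return func_name.startswith("test") and (
            file.startswith("test_") or file.endswith("_test")
        )

    assert looks_like_test("test_add", "pkg.test_math")
    assert looks_like_test("test_add", "pkg.math_test")
    assert not looks_like_test("test_add", "pkg.math")     # wrong file name
    assert not looks_like_test("helper", "pkg.test_math")  # wrong function name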
| 
	spot.static_analysis/to_abs_import_path | 
	Modified | 
	temp-1 | 
	2282c6c0bcb9fea8d0b3c73335181e13605c6c00 | 
	Optimize `split_dots`. | 
	 <0>:<add>     result_segs = list(mod_segs[:-dots])
 | 
	      # module: spot.static_analysis
      def to_abs_import_path(
          current_mod: ModuleName,
          path: str,
          allow_implicit: bool = True,
      ) -> Generator[ModuleName, None, None]:
          """Given the current module and an import path, return the list of modules
        (in absolute path) that the import could potentially be referring to."""
          dots = 0
          while dots < len(path) and path[dots] == ".":
              dots += 1
          if dots == 0:
              yield path
              if allow_implicit:
                  yield path_join(path_up(current_mod), path)
              return
    +     mod_segs = split_dots(current_mod)
    -     mod_segs = split_import_path(current_mod)
          assert len(mod_segs) >= dots, "Cannot go up more levels."
    -     result_segs = mod_segs[:-dots]
 <0>      rest = path[dots:]
          if rest:
              result_segs.append(rest)
          yield ".".join(result_segs)
      
       | 
	===========unchanged ref 0===========
    at: spot.static_analysis
        ModuleName = str
    
        _lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
    
        _lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
    
    at: spot.utils
        _lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
    
    at: typing
        Generator = _alias(collections.abc.Generator, 3)
    
    
===========changed ref 0===========
    # module: spot.utils
  + @cache
  + def split_dots(path: str) -> tuple[str, ...]:
  +     return tuple(path.split("."))
  + 
===========changed ref 1===========
    # module: spot.static_analysis
    @dataclass
    class PythonFunction:
        @cached_property
        def is_test_func(self) -> bool:
            # follow the pytest rules (but ignore the method requirements):
            # https://docs.pytest.org/en/7.1.x/explanation/goodpractices.html#conventions-for-python-test-discovery
            return self.name.startswith("test") and (
  +             (file := split_dots(self.path.module)[-1]).startswith("test_")
  -             (file := self.path.module.split(".")[-1]).startswith("test_")
                or file.endswith("_test")
            )
    
===========changed ref 2===========
    # module: spot.static_analysis
    class ProjectPath(NamedTuple):
        def pop(self) -> "ProjectPath":
  +         p1 = ".".join(split_dots(self.path)[:-1])
  -         p1 = ".".join(self.path.split(".")[:-1])
            return ProjectPath(self.module, p1)
     | 
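===========illustrative sketch===========
    A self-contained re-trace of the relative branch of to_abs_import_path
    (the absolute branch additionally yields an implicit sibling module via
    path_up/path_join): each leading dot drops one segment from the current
    module before the remainder is appended:

    def resolve_relative(current_mod: str, path: str) -> str:
        dots = 0
        while dots < len(path) and path[dots] == ".":
            dots += 1
        assert dots > 0, "relative imports only"
        result_segs = list(current_mod.split(".")[:-dots])
        rest = path[dots:]
        if rest:
            result_segs.append(rest)
        return ".".join(result_segs)

    assert resolve_relative("pkg.sub.mod", ".helper") == "pkg.sub.helper"
    assert resolve_relative("pkg.sub.mod", "..util") == "pkg.util"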
| 
	spot.static_analysis/path_up | 
	Modified | 
	temp-1 | 
	2282c6c0bcb9fea8d0b3c73335181e13605c6c00 | 
	Optimize `split_dots`. | 
	 <0>:<add>     segs = split_dots(path)
 | 
	      # module: spot.static_analysis
      @cache
      def path_up(path: str) -> str:
    -     segs = split_import_path(path)
 <0>      return ".".join(segs[:-1])
      
       | 
	===========unchanged ref 0===========
    at: functools
        cache(user_function: Callable[..., _T], /) -> _lru_cache_wrapper[_T]
    
    
===========changed ref 0===========
    # module: spot.static_analysis
  - @cache
  - def split_import_path(path: str):
  -     return path.split(".")
  - 
===========changed ref 1===========
    # module: spot.static_analysis
    def to_abs_import_path(
        current_mod: ModuleName,
        path: str,
        allow_implicit: bool = True,
    ) -> Generator[ModuleName, None, None]:
        """Given the current module and an import path, return the list of modules
        (in absolute path) that the import could potentially be referring to."""
        dots = 0
        while dots < len(path) and path[dots] == ".":
            dots += 1
        if dots == 0:
            yield path
            if allow_implicit:
                yield path_join(path_up(current_mod), path)
            return
  +     mod_segs = split_dots(current_mod)
  -     mod_segs = split_import_path(current_mod)
        assert len(mod_segs) >= dots, "Cannot go up more levels."
  +     result_segs = list(mod_segs[:-dots])
  -     result_segs = mod_segs[:-dots]
        rest = path[dots:]
        if rest:
            result_segs.append(rest)
        yield ".".join(result_segs)
    
===========changed ref 2===========
    # module: spot.static_analysis
    @dataclass
    class PythonFunction:
        @cached_property
        def is_test_func(self) -> bool:
            # follow the pytest rules (but ignore the method requirements):
            # https://docs.pytest.org/en/7.1.x/explanation/goodpractices.html#conventions-for-python-test-discovery
            return self.name.startswith("test") and (
  +             (file := split_dots(self.path.module)[-1]).startswith("test_")
  -             (file := self.path.module.split(".")[-1]).startswith("test_")
                or file.endswith("_test")
            )
    
===========changed ref 3===========
    # module: spot.static_analysis
    class ProjectPath(NamedTuple):
        def pop(self) -> "ProjectPath":
  +         p1 = ".".join(split_dots(self.path)[:-1])
  -         p1 = ".".join(self.path.split(".")[:-1])
            return ProjectPath(self.module, p1)
    
===========changed ref 4===========
    # module: spot.utils
  + @cache
  + def split_dots(path: str) -> tuple[str, ...]:
  +     return tuple(path.split("."))
  +  | 
| 
	spot.static_analysis/ModuleHierarchy.from_modules | 
	Modified | 
	temp-1 | 
	2282c6c0bcb9fea8d0b3c73335181e13605c6c00 | 
	Optimize `split_dots`. | 
	 <0>:<add>             root.add_module(split_dots(m))
 | 
	      # module: spot.static_analysis
      class ModuleHierarchy:
          @staticmethod
          def from_modules(modules: Iterable[str]) -> "ModuleHierarchy":
              root = ModuleHierarchy()
              for m in modules:
    -             root.add_module(split_import_path(m))
 <0>          return root
      
       | 
	===========unchanged ref 0===========
    at: spot.static_analysis
        ModuleName = str
    
    at: spot.static_analysis.ModuleHierarchy.from_modules
        root = ModuleHierarchy()
    
    at: typing
        Mapping = _alias(collections.abc.Mapping, 2)
    
    
===========changed ref 0===========
    # module: spot.static_analysis
  - @cache
  - def split_import_path(path: str):
  -     return path.split(".")
  - 
===========changed ref 1===========
    # module: spot.static_analysis
    @cache
    def path_up(path: str) -> str:
  +     segs = split_dots(path)
  -     segs = split_import_path(path)
        return ".".join(segs[:-1])
    
===========changed ref 2===========
    # module: spot.static_analysis
    def to_abs_import_path(
        current_mod: ModuleName,
        path: str,
        allow_implicit: bool = True,
    ) -> Generator[ModuleName, None, None]:
        """Given the current module and an import path, return the list of modules
        (in absolute path) that the import could potentially be referring to."""
        dots = 0
        while dots < len(path) and path[dots] == ".":
            dots += 1
        if dots == 0:
            yield path
            if allow_implicit:
                yield path_join(path_up(current_mod), path)
            return
  +     mod_segs = split_dots(current_mod)
  -     mod_segs = split_import_path(current_mod)
        assert len(mod_segs) >= dots, "Cannot go up more levels."
  +     result_segs = list(mod_segs[:-dots])
  -     result_segs = mod_segs[:-dots]
        rest = path[dots:]
        if rest:
            result_segs.append(rest)
        yield ".".join(result_segs)
    
===========changed ref 3===========
    # module: spot.static_analysis
    @dataclass
    class PythonFunction:
        @cached_property
        def is_test_func(self) -> bool:
            # follow the pytest rules (but ignore the method requirements):
            # https://docs.pytest.org/en/7.1.x/explanation/goodpractices.html#conventions-for-python-test-discovery
            return self.name.startswith("test") and (
  +             (file := split_dots(self.path.module)[-1]).startswith("test_")
  -             (file := self.path.module.split(".")[-1]).startswith("test_")
                or file.endswith("_test")
            )
    
===========changed ref 4===========
    # module: spot.static_analysis
    class ProjectPath(NamedTuple):
        def pop(self) -> "ProjectPath":
  +         p1 = ".".join(split_dots(self.path)[:-1])
  -         p1 = ".".join(self.path.split(".")[:-1])
            return ProjectPath(self.module, p1)
    
===========changed ref 5===========
    # module: spot.utils
  + @cache
  + def split_dots(path: str) -> tuple[str, ...]:
  +     return tuple(path.split("."))
  +  | 
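===========illustrative sketch===========
    An illustrative stand-in for the hierarchy that from_modules builds
    (the real add_module/resolve_path are defined in spot.static_analysis;
    splitting at the longest known module prefix is an assumption here):

    class ModTrie:
        def __init__(self) -> None:
            self.children: dict[str, "ModTrie"] = {}
            self.is_module = False

        def add_module(self, segs: tuple[str, ...]) -> None:
            node = self
            for s in segs:
                node = node.children.setdefault(s, ModTrie())
            node.is_module = True

        def resolve_path(self, segs: tuple[str, ...]) -> tuple[str, str] | None:
            node, split = self, None
            for i, s in enumerate(segs):
                if s not in node.children:
                    break
                node = node.children[s]
                if node.is_module:
                    split = i + 1
            if split is None or split == len(segs):
                return None
            return ".".join(segs[:split]), ".".join(segs[split:])

    trie = ModTrie()
    trie.add_module(("pkg", "mod"))
    assert trie.resolve_path(("pkg", "mod", "Cls", "method")) == ("pkg.mod", "Cls.method")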
| 
	spot.static_analysis/UsageAnalysis.find_class | 
	Modified | 
	temp-1 | 
	2282c6c0bcb9fea8d0b3c73335181e13605c6c00 | 
	Optimize `split_dots`. | 
	 <0>:<add>                     segs = split_dots(abs_p)
 | 
	      # module: spot.static_analysis
      class UsageAnalysis:
          def find_class(self, mname: ModuleName, qname: QualifiedName) -> PythonClass | None:
              cls_path = None
              match qname.source:
                  case QualifiedNameSource.IMPORT:
                      for abs_p in to_abs_import_path(
                          mname, qname.name, self.add_implicit_rel_imports
                      ):
    -                     segs = split_import_path(abs_p)
 <0>                      if len(segs) >= 2:
                              cls_path = self.ns_hier.resolve_path(segs)
                              break
                  case QualifiedNameSource.LOCAL:
                      cls_path = ProjectPath(mname, qname.name)
      
              if cls_path in self.path2class:
                  return self.path2class[cls_path]
              return None
      
       | 
	===========unchanged ref 0===========
    at: spot.static_analysis
        ModuleName = str
    
        ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
    
    at: spot.static_analysis.ModuleHierarchy
        resolve_path(segs: Sequence[str]) -> ProjectPath | None
    
    at: spot.static_analysis.UsageAnalysis
        all_usages: list[ProjectUsage]
    
        path2elem: dict[ProjectPath, PythonElem]
    
        user2used: dict[ProjectPath, list[ProjectUsage]]
    
        used2user: dict[ProjectPath, list[ProjectUsage]]
    
        TLogger = TimeLogger()
    
    at: spot.static_analysis.UsageAnalysis.__init__
        self.add_implicit_rel_imports = add_implicit_rel_imports
    
        self.ns_hier = ModuleHierarchy.from_modules(project.modules.keys())
    
        self.path2class = {
                    cls.path: cls
                    for mod in project.modules.values()
                    for cls in mod.all_classes()
                }
    
    at: spot.utils
        _lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
    
    
===========changed ref 0===========
    # module: spot.utils
  + @cache
  + def split_dots(path: str) -> tuple[str, ...]:
  +     return tuple(path.split("."))
  + 
===========changed ref 1===========
    # module: spot.static_analysis
    class ModuleHierarchy:
        @staticmethod
        def from_modules(modules: Iterable[str]) -> "ModuleHierarchy":
            root = ModuleHierarchy()
            for m in modules:
  +             root.add_module(split_dots(m))
  -             root.add_module(split_import_path(m))
            return root
    
===========changed ref 2===========
    # module: spot.static_analysis
  - @cache
  - def split_import_path(path: str):
  -     return path.split(".")
  - 
===========changed ref 3===========
    # module: spot.static_analysis
    @cache
    def path_up(path: str) -> str:
  +     segs = split_dots(path)
  -     segs = split_import_path(path)
        return ".".join(segs[:-1])
    
===========changed ref 4===========
    # module: spot.static_analysis
    def to_abs_import_path(
        current_mod: ModuleName,
        path: str,
        allow_implicit: bool = True,
    ) -> Generator[ModuleName, None, None]:
        """Given the current module and an import path, return the list of modules
        (in absolute path) that the import could potentially be referring to."""
        dots = 0
        while dots < len(path) and path[dots] == ".":
            dots += 1
        if dots == 0:
            yield path
            if allow_implicit:
                yield path_join(path_up(current_mod), path)
            return
  +     mod_segs = split_dots(current_mod)
  -     mod_segs = split_import_path(current_mod)
        assert len(mod_segs) >= dots, "Cannot go up more levels."
  +     result_segs = list(mod_segs[:-dots])
  -     result_segs = mod_segs[:-dots]
        rest = path[dots:]
        if rest:
            result_segs.append(rest)
        yield ".".join(result_segs)
    
===========changed ref 5===========
    # module: spot.static_analysis
    @dataclass
    class PythonFunction:
        @cached_property
        def is_test_func(self) -> bool:
            # follow the pytest rules (but ignore the method requirements):
            # https://docs.pytest.org/en/7.1.x/explanation/goodpractices.html#conventions-for-python-test-discovery
            return self.name.startswith("test") and (
  +             (file := split_dots(self.path.module)[-1]).startswith("test_")
  -             (file := self.path.module.split(".")[-1]).startswith("test_")
                or file.endswith("_test")
            )
    
===========changed ref 6===========
    # module: spot.static_analysis
    class ProjectPath(NamedTuple):
        def pop(self) -> "ProjectPath":
  +         p1 = ".".join(split_dots(self.path)[:-1])
  -         p1 = ".".join(self.path.split(".")[:-1])
            return ProjectPath(self.module, p1)
     | 
| 
	spot.static_analysis/UsageAnalysis.generate_usages | 
	Modified | 
	temp-1 | 
	2282c6c0bcb9fea8d0b3c73335181e13605c6c00 | 
	Optimize `split_dots`. | 
	 <0>:<add>                     segs = split_dots(abs_p)
 | 
	      # module: spot.static_analysis
      class UsageAnalysis:
          def generate_usages(
              self,
              mname: ModuleName,
              caller: ProjectPath,
              qname: QualifiedName,
              parent_call: cst.Call | None,
          ) -> Iterable[ProjectUsage]:
      <s>fixture>", m]:
                              yield from gen_fixture_usages(m)
                              break
                          case [*prefix, cls, _, "<locals>", "self", m]:
                              segs = [*prefix, cls, m]
      
                      callee = ProjectPath(mname, ".".join(segs))
                      if callee in self.path2class:
                          if self.record_type_usages:
                              yield usage(callee, is_certain=True)
                          yield from gen_constructor_usages(self.path2class[callee])
                      elif callee in self.path2elem:
                          yield usage(self.path2elem[callee].path, is_certain=True)
                      elif len(segs) >= 2 and segs[-2] != "<locals>":
                          # method fuzzy match case 3
                          yield from gen_class_usages(segs[-1])
                          segs.pop()
                          continue
                      break
      
              match qname.source:
                  case QualifiedNameSource.IMPORT:
                      for abs_p in to_abs_import_path(
                          mname, qname.name, self.add_implicit_rel_imports
                      ):
    -                     segs = split_import_path(abs_p)
 <0>                      callee = self.ns_hier.resolve_path(segs)
                          if callee is None:
                              continue
                          if callee in self.path2class:
                              if self.record_type_usages:
                                  yield usage(callee, is_certain=True)
                              yield from gen_constructor_usages(self.path2class[callee])
                              break
                          elif callee in self.path2elem:
                              yield usage(self.path2elem[callee].path, is_certain=True)
                              break
                  case QualifiedNameSource.LOCAL:
                      yield from resolve_local_usages(qname.name)
      
       | 
	===========above chunk 0===========
    # module: spot.static_analysis
    class UsageAnalysis:
        def generate_usages(
            self,
            mname: ModuleName,
            caller: ProjectPath,
            qname: QualifiedName,
            parent_call: cst.Call | None,
        ) -> Iterable[ProjectUsage]:
    # offset: -1
    <s> return
                for vf in visible_fixtures(fix_name):
                    if vf in candidates:
                        yield usage(vf, is_certain=True)
                        return
    
            def gen_constructor_usages(cls: PythonClass):
                if not parent_call:
                    return
                cpath = cls.path
                used_elems = list[tuple[ProjectPath, bool]]()
                cons_path = cpath.append("__init__")
                if cons_path in self.path2elem:
                    used_elems.append((cons_path, True))
                else:
                    for v in cls.attributes.values():
                        used_elems.append((v.path, True))
                    # members of the parent class may also be used
                    for el in self.cls2members[cpath].values():
                        if isinstance(el, PythonVariable):
                            used_elems.append((el.path, False))
                for u, certain in used_elems:
                    yield usage(self.path2elem[u].path, is_certain=certain)
    
            def resolve_local_usages(name: str):
                segs = name.split(".")
            # try to resolve all usages in the access chain until a certain usage is found
                # For example, if we have `a.b.c.d` and we can resolve `a.b` to a certain element,
                # but not `a.b.c`, we will then also generate class usages for `.c` and `.d`.
                while segs:
                    match segs:
                        case ["<attr>", m]:
                            # method fuzzy match case 1
                            yield from gen_class_usages(m)
                            break
                        case ["<fixture>", m]:
                            yield from gen_fixture_usages(m)
                            break
                        case [*prefix,</s>
===========above chunk 1===========
    # module: spot.static_analysis
    class UsageAnalysis:
        def generate_usages(
            self,
            mname: ModuleName,
            caller: ProjectPath,
            qname: QualifiedName,
            parent_call: cst.Call | None,
        ) -> Iterable[ProjectUsage]:
    # offset: -2
            def usage(used: ProjectPath, is_certain: bool):
                return ProjectUsage(caller, used, is_certain, parent_call)
    
            def gen_class_usages(member_name: str):
                if member_name.startswith("__") and member_name.endswith("__"):
                    # skip common methods like __init__
                    return
                for e in self.name2class_member.get(member_name, []):
                    yield usage(e.path, is_certain=False)
    
            def visible_fixtures(fix_name: str) -> Generator[ProjectPath, None, None]:
                psegs = caller.path.split(".")
                # try finding in the local module
                while psegs:
                    psegs.pop()
                    psegs.append(fix_name)
                    yield ProjectPath(caller.module, ".".join(psegs))
                    psegs.pop()
                # try finding in conftest files
                msegs = mname.split(".")
                while msegs:
                    msegs.pop()
                    msegs.append("conftest")
                    yield ProjectPath(".".join(msegs), fix_name)
                    msegs.pop()
    
            def gen_fixture_usages(fix_name: str):
                candidates = self.name2fixtures.get(fix_name, None)
                if not candidates:
                    return
                for vf in visible_fixtures(fix_name):
                    if vf in candidates:
                        yield usage(vf,</s>
===========unchanged ref 0===========
    at: spot.static_analysis
        ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
    
        PythonVariable(name: str, path: ProjectPath, parent_class: ProjectPath | None, tree: cst.Name | cst.Attribute, assignments: list[
                cst.Assign | cst.AnnAssign
            ], wrapped_assignments: list[
                cst.Assign | cst.AnnAssign
            ])
    
        PythonClass(name: str, path: ProjectPath, attributes: dict[str, PythonVariable], methods: dict[str, PythonFunction], inner_classes: dict[str, "PythonClass"], tree: cst.ClassDef, parent_class: ProjectPath | None)
    
        to_abs_import_path(current_mod: ModuleName, path: str, allow_implicit: bool=True) -> Generator[ModuleName, None, None]
    
        ProjectUsage(user: ProjectPath, used: ProjectPath, is_certain: bool, callsite: cst.Call | None)
    
    at: spot.static_analysis.ModuleHierarchy
        resolve_path(segs: Sequence[str]) -> ProjectPath | None
    
    at: spot.static_analysis.ProjectPath
        module: ModuleName
    
        path: ElemPath
    
        append(path: ElemPath) -> "ProjectPath"
    
        from_str(s: str) -> "ProjectPath"
    
    at: spot.static_analysis.PythonClass
        name: str
    
        path: ProjectPath
    
        attributes: dict[str, PythonVariable]
    
        methods: dict[str, PythonFunction]
    
        inner_classes: dict[str, "PythonClass"]
    
        tree: cst.ClassDef
    
        parent_class: ProjectPath | None
    
    at: spot.static_analysis.PythonFunction
        path: ProjectPath
    
    at: spot.static_analysis.PythonVariable
        name: str
    
        path: ProjectPath
    
        parent_class: ProjectPath | None
    
        tree: cst.Name | cst.Attribute
    
    
===========unchanged ref 1===========
        assignments: list[
                cst.Assign | cst.AnnAssign
            ]  # only record assignments outside of functions
    
        wrapped_assignments: list[
                cst.Assign | cst.AnnAssign
            ]  # assignments inside functions
    
    at: spot.static_analysis.UsageAnalysis.__init__
        self.add_implicit_rel_imports = add_implicit_rel_imports
    
        self.record_type_usages = record_type_usages
    
        self.ns_hier = ModuleHierarchy.from_modules(project.modules.keys())
    
        self.path2elem = {v.path: v for v in project.all_elems()}
    
        self.path2class = {
                    cls.path: cls
                    for mod in project.modules.values()
                    for cls in mod.all_classes()
                }
    
        self.cls2members = cls2members = dict[ProjectPath, dict[str, PythonElem]]()
    
        self.name2class_member = groupby(
                    [self.path2elem[p] for p in all_class_members], lambda e: e.name
                )
    
        self.name2fixtures = {
                    n: {f.path for f in fs} for n, fs in name2fixtures.items()
                }
    
    at: spot.utils
        _lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
    
    at: typing
        Iterable = _alias(collections.abc.Iterable, 1)
    
        Generator = _alias(collections.abc.Generator, 3)
    
    at: typing.Mapping
        get(key: _KT) -> Optional[_VT_co]
        get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
    
     | 
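===========illustrative sketch===========
    A self-contained re-trace of the fixture lookup order used by
    generate_usages: candidates walk outward through the caller's enclosing
    scopes, then up the package tree through conftest modules:

    def visible_fixtures(module: str, elem_path: str, fix_name: str):
        psegs = elem_path.split(".")
        while psegs:  # enclosing scopes of the caller
            psegs.pop()
            yield module, ".".join(psegs + [fix_name])
        msegs = module.split(".")
        while msegs:  # conftest files up the package tree
            msegs.pop()
            yield ".".join(msegs + ["conftest"]), fix_name

    cands = list(visible_fixtures("pkg.sub.test_a", "TestX.test_y", "db"))
    assert cands[0] == ("pkg.sub.test_a", "TestX.db")
    assert cands[1] == ("pkg.sub.test_a", "db")
    assert cands[2] == ("pkg.sub.conftest", "db")
    assert cands[-1] == ("conftest", "db")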
| 
	coeditor.history/_select_ast_calls | 
	Modified | 
	temp-1 | 
	2282c6c0bcb9fea8d0b3c73335181e13605c6c00 | 
	Optimize `split_dots`. | 
	 <0>:<add>     segs = split_dots(path.path)
 | 
	      # module: coeditor.history
      def _select_ast_calls(
          node: ast.AST, path: ProjectPath
      ) -> Generator[ast.Call, None, None]:
          """Return all call nodes with the mathcing function name in the AST."""
    -     segs = path.path.split(".")
 <0>      if segs[-1] == "__init__":
              f_name = segs[-2]
          else:
              f_name = segs[-1]
          for n in ast.walk(node):
              if isinstance(n, ast.Call) and isinstance(n.func, ast.Name):
                  if n.func.id == f_name:
                      yield n
      
       | 
	===========unchanged ref 0===========
    at: _ast
        AST(*args: Any, **kwargs: Any)
    
        Call(*args: Any, **kwargs: Any)
    
        Name(*args: Any, **kwargs: Any)
    
    at: ast
        walk(node: AST) -> Iterator[AST]
    
    at: coeditor._utils
        _lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
    
    at: spot.static_analysis
        ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
    
    at: spot.static_analysis.ProjectPath
        module: ModuleName
    
        path: ElemPath
    
    at: typing
        Generator = _alias(collections.abc.Generator, 3)
    
    
===========changed ref 0===========
    # module: spot.static_analysis
  - @cache
  - def split_import_path(path: str):
  -     return path.split(".")
  - 
===========changed ref 1===========
    # module: spot.utils
  + @cache
  + def split_dots(path: str) -> tuple[str, ...]:
  +     return tuple(path.split("."))
  + 
===========changed ref 2===========
    # module: spot.static_analysis
    @cache
    def path_up(path: str) -> str:
  +     segs = split_dots(path)
  -     segs = split_import_path(path)
        return ".".join(segs[:-1])
    
===========changed ref 3===========
    # module: spot.static_analysis
    class ModuleHierarchy:
        @staticmethod
        def from_modules(modules: Iterable[str]) -> "ModuleHierarchy":
            root = ModuleHierarchy()
            for m in modules:
  +             root.add_module(split_dots(m))
  -             root.add_module(split_import_path(m))
            return root
    
===========changed ref 4===========
    # module: spot.static_analysis
    class ProjectPath(NamedTuple):
        def pop(self) -> "ProjectPath":
  +         p1 = ".".join(split_dots(self.path)[:-1])
  -         p1 = ".".join(self.path.split(".")[:-1])
            return ProjectPath(self.module, p1)
    
===========changed ref 5===========
    # module: spot.static_analysis
    @dataclass
    class PythonFunction:
        @cached_property
        def is_test_func(self) -> bool:
            # follow the pytest rules (but ignore the method requirements):
            # https://docs.pytest.org/en/7.1.x/explanation/goodpractices.html#conventions-for-python-test-discovery
            return self.name.startswith("test") and (
  +             (file := split_dots(self.path.module)[-1]).startswith("test_")
  -             (file := self.path.module.split(".")[-1]).startswith("test_")
                or file.endswith("_test")
            )
    
===========changed ref 6===========
    # module: spot.static_analysis
    class UsageAnalysis:
        def find_class(self, mname: ModuleName, qname: QualifiedName) -> PythonClass | None:
            cls_path = None
            match qname.source:
                case QualifiedNameSource.IMPORT:
                    for abs_p in to_abs_import_path(
                        mname, qname.name, self.add_implicit_rel_imports
                    ):
  +                     segs = split_dots(abs_p)
  -                     segs = split_import_path(abs_p)
                        if len(segs) >= 2:
                            cls_path = self.ns_hier.resolve_path(segs)
                            break
                case QualifiedNameSource.LOCAL:
                    cls_path = ProjectPath(mname, qname.name)
    
            if cls_path in self.path2class:
                return self.path2class[cls_path]
            return None
    
===========changed ref 7===========
    # module: spot.static_analysis
    def to_abs_import_path(
        current_mod: ModuleName,
        path: str,
        allow_implicit: bool = True,
    ) -> Generator[ModuleName, None, None]:
        """Given the current module and an import path, return the list of modules
        (in absolute path) that the import could potentially be referring to."""
        dots = 0
        while dots < len(path) and path[dots] == ".":
            dots += 1
        if dots == 0:
            yield path
            if allow_implicit:
                yield path_join(path_up(current_mod), path)
            return
  +     mod_segs = split_dots(current_mod)
  -     mod_segs = split_import_path(current_mod)
        assert len(mod_segs) >= dots, "Cannot go up more levels."
  +     result_segs = list(mod_segs[:-dots])
  -     result_segs = mod_segs[:-dots]
        rest = path[dots:]
        if rest:
            result_segs.append(rest)
        yield ".".join(result_segs)
    
===========changed ref 8===========
    # module: spot.static_analysis
    class UsageAnalysis:
        def generate_usages(
            self,
            mname: ModuleName,
            caller: ProjectPath,
            qname: QualifiedName,
            parent_call: cst.Call | None,
        ) -> Iterable[ProjectUsage]:
            def usage(used: ProjectPath, is_certain: bool):
                return ProjectUsage(caller, used, is_certain, parent_call)
    
            def gen_class_usages(member_name: str):
                if member_name.startswith("__") and member_name.endswith("__"):
                    # skip common methods like __init__
                    return
                for e in self.name2class_member.get(member_name, []):
                    yield usage(e.path, is_certain=False)
    
            def visible_fixtures(fix_name: str) -> Generator[ProjectPath, None, None]:
                psegs = caller.path.split(".")
                # try finding in the local module
                while psegs:
                    psegs.pop()
                    psegs.append(fix_name)
                    yield ProjectPath(caller.module, ".".join(psegs))
                    psegs.pop()
                # try finding in conftest files
                msegs = mname.split(".")
                while msegs:
                    msegs.pop()
                    msegs.append("conftest")
                    yield ProjectPath(".".join(msegs), fix_name)
                    msegs.pop()
    
            def gen_fixture_usages(fix_name: str):
                candidates = self.name2fixtures.get(fix_name, None)
                if not candidates:
                    return
                for vf in visible_fixtures(fix_name):
                    if vf in candidates:
                        yield usage(vf, is_certain=True)
                        return
    
            def gen_constructor_usages(cls: PythonClass):
                if not parent_call:
                    return
                cpath = cls.path
                used_elems = list[tuple[ProjectPath, bool]]()
                cons_path = cpath.</s> | 
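A note on the `is_test_func` ref above: it combines the pytest naming convention with the new memoized `split_dots`. Below is a minimal self-contained sketch of both (the helper and predicate here are re-derived stand-ins, not imports from the project). Returning a tuple keeps the cached value immutable, which is also why `to_abs_import_path` above now copies with `list(mod_segs[:-dots])` before appending:

    from functools import cache

    @cache
    def split_dots(path: str) -> tuple[str, ...]:
        # tuple, not list: cached values are shared, so they must be immutable
        return tuple(path.split("."))

    def is_test_func(func_name: str, module_path: str) -> bool:
        file = split_dots(module_path)[-1]
        return func_name.startswith("test") and (
            file.startswith("test_") or file.endswith("_test")
        )

    assert is_test_func("test_foo", "pkg.test_bar")
    assert not is_test_func("helper", "pkg.test_bar")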
| 
	coeditor.code_change/ChangeScope._search_scope | 
	Modified | 
	temp-1 | 
	2282c6c0bcb9fea8d0b3c73335181e13605c6c00 | 
	Optimize `split_dots`. | 
	 <0>:<add>         segs = split_dots(path)
 | 
	      # module: coeditor.code_change
      @dataclass
      class ChangeScope:
          def _search_scope(self, path: ElemPath) -> Self:
              """Find the scope that can potentially contain the given path. Follow the
              path segments until no more subscopes are found."""
    -         segs = path.split(".")
 <0>          scope = self
              for s in segs:
                  if s in scope.subscopes:
                      scope = scope.subscopes[s]
                  else:
                      break
              return scope
      
       | 
	===========unchanged ref 0===========
    at: coeditor._utils
        _lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
    
    at: coeditor.code_change.ChangeScope
        path: ProjectPath
    
        tree: ScopeTree
    
        spans: Sequence["StatementSpan"]
    
        subscopes: Mapping[str, Self]
    
        parent_scope: "ChangeScope | None"
    
    at: spot.static_analysis
        ElemPath = str
    
    
===========changed ref 0===========
    # module: spot.static_analysis
  - @cache
  - def split_import_path(path: str):
  -     return path.split(".")
  - 
===========changed ref 1===========
    # module: spot.utils
  + @cache
  + def split_dots(path: str) -> tuple[str, ...]:
  +     return tuple(path.split("."))
  + 
===========changed ref 2===========
    # module: spot.static_analysis
    @cache
    def path_up(path: str) -> str:
  +     segs = split_dots(path)
  -     segs = split_import_path(path)
        return ".".join(segs[:-1])
    
===========changed ref 3===========
    # module: spot.static_analysis
    class ModuleHierarchy:
        @staticmethod
        def from_modules(modules: Iterable[str]) -> "ModuleHierarchy":
            root = ModuleHierarchy()
            for m in modules:
  +             root.add_module(split_dots(m))
  -             root.add_module(split_import_path(m))
            return root
    
===========changed ref 4===========
    # module: spot.static_analysis
    class ProjectPath(NamedTuple):
        def pop(self) -> "ProjectPath":
  +         p1 = ".".join(split_dots(self.path)[:-1])
  -         p1 = ".".join(self.path.split(".")[:-1])
            return ProjectPath(self.module, p1)
    
===========changed ref 5===========
    # module: coeditor.history
    def _select_ast_calls(
        node: ast.AST, path: ProjectPath
    ) -> Generator[ast.Call, None, None]:
        """Return all call nodes with the mathcing function name in the AST."""
  +     segs = split_dots(path.path)
  -     segs = path.path.split(".")
        if segs[-1] == "__init__":
            f_name = segs[-2]
        else:
            f_name = segs[-1]
        for n in ast.walk(node):
            if isinstance(n, ast.Call) and isinstance(n.func, ast.Name):
                if n.func.id == f_name:
                    yield n
    
===========changed ref 6===========
    # module: spot.static_analysis
    @dataclass
    class PythonFunction:
        @cached_property
        def is_test_func(self) -> bool:
            # follow the pytest rules (but ignore the method requirements):
            # https://docs.pytest.org/en/7.1.x/explanation/goodpractices.html#conventions-for-python-test-discovery
            return self.name.startswith("test") and (
  +             (file := split_dots(self.path.module)[-1]).startswith("test_")
  -             (file := self.path.module.split(".")[-1]).startswith("test_")
                or file.endswith("_test")
            )
    
===========changed ref 7===========
    # module: spot.static_analysis
    class UsageAnalysis:
        def find_class(self, mname: ModuleName, qname: QualifiedName) -> PythonClass | None:
            cls_path = None
            match qname.source:
                case QualifiedNameSource.IMPORT:
                    for abs_p in to_abs_import_path(
                        mname, qname.name, self.add_implicit_rel_imports
                    ):
  +                     segs = split_dots(abs_p)
  -                     segs = split_import_path(abs_p)
                        if len(segs) >= 2:
                            cls_path = self.ns_hier.resolve_path(segs)
                            break
                case QualifiedNameSource.LOCAL:
                    cls_path = ProjectPath(mname, qname.name)
    
            if cls_path in self.path2class:
                return self.path2class[cls_path]
            return None
    
===========changed ref 8===========
    # module: spot.static_analysis
    def to_abs_import_path(
        current_mod: ModuleName,
        path: str,
        allow_implicit: bool = True,
    ) -> Generator[ModuleName, None, None]:
        """Given the current module and an import path, return the list of modules
    (in absolute path) that the import could potentially be referring to."""
        dots = 0
        while dots < len(path) and path[dots] == ".":
            dots += 1
        if dots == 0:
            yield path
            if allow_implicit:
                yield path_join(path_up(current_mod), path)
            return
  +     mod_segs = split_dots(current_mod)
  -     mod_segs = split_import_path(current_mod)
        assert len(mod_segs) >= dots, "Cannot go up more levels."
  +     result_segs = list(mod_segs[:-dots])
  -     result_segs = mod_segs[:-dots]
        rest = path[dots:]
        if rest:
            result_segs.append(rest)
        yield ".".join(result_segs)
    
===========changed ref 9===========
    # module: spot.static_analysis
    class UsageAnalysis:
        def generate_usages(
            self,
            mname: ModuleName,
            caller: ProjectPath,
            qname: QualifiedName,
            parent_call: cst.Call | None,
        ) -> Iterable[ProjectUsage]:
            def usage(used: ProjectPath, is_certain: bool):
                return ProjectUsage(caller, used, is_certain, parent_call)
    
            def gen_class_usages(member_name: str):
                if member_name.startswith("__") and member_name.endswith("__"):
                    # skip common methods like __init__
                    return
                for e in self.name2class_member.get(member_name, []):
                    yield usage(e.path, is_certain=False)
    
            def visible_fixtures(fix_name: str) -> Generator[ProjectPath, None, None]:
                psegs = caller.path.split(".")
                # try finding in the local module
                while psegs:
                    psegs.pop()
                    psegs.append(fix_name)
                    yield ProjectPath(caller.module, ".".join(psegs))
                    psegs.pop()
                # try finding in conftest files
                msegs = mname.split(".")
                while msegs:
                    msegs.pop()
                    msegs.append("conftest")
                    yield ProjectPath(".".join(msegs), fix_name)
                    msegs.pop()
    
            def gen_fixture_usages(fix_name: str):
                candidates = self.name2fixtures.get(fix_name, None)
                if not candidates:
                    return
                for vf in visible_fixtures(fix_name):
                    if vf in candidates:
                        yield usage(vf, is_certain=True)
                        return
    
            def gen_constructor_usages(cls: PythonClass):
                if not parent_call:
                    return
                cpath = cls.path
                used_elems = list[tuple[ProjectPath, bool]]()
                cons_path = cpath.</s> | 
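A minimal sketch of the `_search_scope` traversal in this row, using nested dicts as stand-ins for the real `ChangeScope` objects: the walk follows the pre-split path segments and stops at the deepest scope that exists, rather than failing on a missing segment:

    def search_scope(root: dict, segs: tuple[str, ...]) -> dict:
        scope = root
        for s in segs:
            subscopes = scope["subscopes"]
            if s in subscopes:
                scope = subscopes[s]
            else:
                break  # deepest existing scope wins; no KeyError on partial paths
        return scope

    tree = {"name": "", "subscopes": {
        "A": {"name": "A", "subscopes": {
            "f": {"name": "A.f", "subscopes": {}}}}}}
    assert search_scope(tree, ("A", "f"))["name"] == "A.f"
    assert search_scope(tree, ("A", "g"))["name"] == "A"  # stops at the closest ancestor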
| 
	spot.utils/get_pushover_config | 
	Modified | 
	temp-1 | 
	6d7e1f96fce615c099ceff3e3fc430818d70a407 | 
	Switch to frozen classes. | 
	 <0>:<add>         warnings.warn(
 | 
	      # module: spot.utils
      def get_pushover_config() -> dict[str, str] | None:
          config_file = proj_root() / "config/pushover.json"
          if config_file.exists():
              match json.loads(config_file.read_text()):
                  case {"user": user, "token": token}:
                      return {"user": user, "token": token}
          if not _pushover_warned[0]:
    -         logging.warning(
 <0>              f"No pushover config file found at {config_file}. Not able to push message."
              )
              _pushover_warned[0] = True
          return None
      
       | 
	===========unchanged ref 0===========
    at: _warnings
        warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
        warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
    
    at: json
        loads(s: Union[str, bytes], *, cls: Optional[Type[JSONDecoder]]=..., object_hook: Optional[Callable[[Dict[Any, Any]], Any]]=..., parse_float: Optional[Callable[[str], Any]]=..., parse_int: Optional[Callable[[str], Any]]=..., parse_constant: Optional[Callable[[str], Any]]=..., object_pairs_hook: Optional[Callable[[List[Tuple[Any, Any]]], Any]]=..., **kwds: Any) -> Any
    
    at: pathlib.Path
        __slots__ = ()
    
        read_text(encoding: Optional[str]=..., errors: Optional[str]=...) -> str
    
        exists() -> bool
    
    at: spot.utils
        proj_root() -> Path
    
        _pushover_warned = [False]
    
     | 
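The row above swaps `logging.warning` for `warnings.warn` while keeping the module-level `_pushover_warned` flag. A small sketch of that warn-once pattern (names are illustrative): a one-element list lets the function mutate the flag without `global`, and `warnings.warn` has the added benefit that callers and tests can filter or escalate it:

    import warnings

    _warned = [False]  # one-element list: mutable from inside the function without `global`

    def warn_once(msg: str) -> None:
        if not _warned[0]:
            warnings.warn(msg)
            _warned[0] = True

    warn_once("no pushover config found")  # emits a UserWarning
    warn_once("no pushover config found")  # silent: flag already set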
| 
	spot.utils/PickleCache.remove | 
	Modified | 
	temp-1 | 
	6d7e1f96fce615c099ceff3e3fc430818d70a407 | 
	Switch to frozen classes. | 
	 <0>:<add>             warnings.warn(f"[PickleCache] File not found: '{path}'")
 | 
	      # module: spot.utils
      class PickleCache:
          def remove(self, rel_path: Path | str):
              path = self.cache_dir / rel_path
              if path.exists():
                  path.unlink()
              else:
    -             logging.warning(f"[PickleCache] File not found: '{path}'")
 <0>  
       | 
	===========unchanged ref 0===========
    at: pathlib
        Path()
    
    at: pathlib.Path
        unlink(missing_ok: bool=...) -> None
    
        exists() -> bool
    
    at: spot.utils.PickleCache.__init__
        self.cache_dir = cache_dir
    
    
===========changed ref 0===========
    # module: spot.utils
    def get_pushover_config() -> dict[str, str] | None:
        config_file = proj_root() / "config/pushover.json"
        if config_file.exists():
            match json.loads(config_file.read_text()):
                case {"user": user, "token": token}:
                    return {"user": user, "token": token}
        if not _pushover_warned[0]:
  +         warnings.warn(
  -         logging.warning(
                f"No pushover config file found at {config_file}. Not able to push message."
            )
            _pushover_warned[0] = True
        return None
     | 
| 
	spot.utils/PickleCache.clear | 
	Modified | 
	temp-1 | 
	6d7e1f96fce615c099ceff3e3fc430818d70a407 | 
	Switch to frozen classes. | 
	 <0>:<add>             warnings.warn(f"No cache found at: {self.cache_dir}, skip clearing.")
 | 
	      # module: spot.utils
      class PickleCache:
          def clear(self):
              if self.cache_dir.exists():
                  logging.info(f"Clearing cache: at: {self.cache_dir}")
                  shutil.rmtree(self.cache_dir)
              else:
    -             logging.warning(f"No cache found at: {self.cache_dir}, skip clearing.")
 <0>  
       | 
	===========unchanged ref 0===========
    at: logging
        info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
    
    at: pathlib.Path
        exists() -> bool
    
    at: shutil
        rmtree(path: Union[bytes, StrPath], ignore_errors: bool=..., onerror: Optional[Callable[[Any, Any, Any], Any]]=...) -> None
    
    at: spot.utils.PickleCache.__init__
        self.cache_dir = cache_dir
    
    
===========changed ref 0===========
    # module: spot.utils
    class PickleCache:
        def remove(self, rel_path: Path | str):
            path = self.cache_dir / rel_path
            if path.exists():
                path.unlink()
            else:
  +             warnings.warn(f"[PickleCache] File not found: '{path}'")
  -             logging.warning(f"[PickleCache] File not found: '{path}'")
    
===========changed ref 1===========
    # module: spot.utils
    def get_pushover_config() -> dict[str, str] | None:
        config_file = proj_root() / "config/pushover.json"
        if config_file.exists():
            match json.loads(config_file.read_text()):
                case {"user": user, "token": token}:
                    return {"user": user, "token": token}
        if not _pushover_warned[0]:
  +         warnings.warn(
  -         logging.warning(
                f"No pushover config file found at {config_file}. Not able to push message."
            )
            _pushover_warned[0] = True
        return None
     | 
| 
	coeditor.retrieval_model/RetrievalEditorModel._reorder_cache | 
	Modified | 
	temp-1 | 
	6d7e1f96fce615c099ceff3e3fc430818d70a407 | 
	Switch to frozen classes. | 
	 <0>:<add>             warnings.warn(
 | 
	      # module: coeditor.retrieval_model
      class RetrievalEditorModel(T5PreTrainedModel):
          def _reorder_cache(self, past, beam_idx):
              if past is None:
    -             logging.warning(
 <0>                  "You might want to consider setting `use_cache=True` to speed up decoding"
                  )
                  return past
      
              reordered_decoder_past = ()
              for layer_past_states in past:
                  # get the correct batch idx from layer past batch dim
                  # batch dim of `past` is at 2nd position
                  reordered_layer_past_states = ()
                  for layer_past_state in layer_past_states:
                      # need to set correct `past` for each of the four key / value states
                      reordered_layer_past_states = reordered_layer_past_states + (
                          layer_past_state.index_select(
                              0, beam_idx.to(layer_past_state.device)
                          ),
                      )
      
                  # assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
                  assert len(reordered_layer_past_states) == len(layer_past_states)
      
                  reordered_decoder_past = reordered_decoder_past + (
                      reordered_layer_past_states,
                  )
              return reordered_decoder_past
      
       | 
	===========unchanged ref 0===========
    at: _warnings
        warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
        warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
    
    at: transformers.generation.utils.GenerationMixin
        _reorder_cache(self, past_key_values, beam_idx)
    
    
===========changed ref 0===========
    # module: coeditor.encoding
  + @dataclass(frozen=True)
  - @dataclass
    class TkDelta:
        """The Tokenized version of :class:`StrDelta`."""
    
  +     deltas: Sequence[tuple[TokenSeq, ...]]
  -     deltas: list[tuple[TokenSeq, ...]]
    
===========changed ref 1===========
    # module: coeditor.encoding
  + @dataclass(frozen=True)
  - @dataclass
    class StrDelta:
        """Stores the line deltas for each line. A line delta is a list of added lines
        (starting with a '+') followed by optionally a `-` line
        (for deleting the current line)."""
    
  +     deltas: Sequence[tuple[str, ...]]
  -     deltas: list[tuple[str, ...]]
    
===========changed ref 2===========
    # module: spot.utils
    class PickleCache:
        def remove(self, rel_path: Path | str):
            path = self.cache_dir / rel_path
            if path.exists():
                path.unlink()
            else:
  +             warnings.warn(f"[PickleCache] File not found: '{path}'")
  -             logging.warning(f"[PickleCache] File not found: '{path}'")
    
===========changed ref 3===========
    # module: spot.utils
    class PickleCache:
        def clear(self):
            if self.cache_dir.exists():
                logging.info(f"Clearing cache: at: {self.cache_dir}")
                shutil.rmtree(self.cache_dir)
            else:
  +             warnings.warn(f"No cache found at: {self.cache_dir}, skip clearing.")
  -             logging.warning(f"No cache found at: {self.cache_dir}, skip clearing.")
    
===========changed ref 4===========
    # module: spot.utils
    def get_pushover_config() -> dict[str, str] | None:
        config_file = proj_root() / "config/pushover.json"
        if config_file.exists():
            match json.loads(config_file.read_text()):
                case {"user": user, "token": token}:
                    return {"user": user, "token": token}
        if not _pushover_warned[0]:
  +         warnings.warn(
  -         logging.warning(
                f"No pushover config file found at {config_file}. Not able to push message."
            )
            _pushover_warned[0] = True
        return None
     | 
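The `StrDelta`/`TkDelta` refs above pair `@dataclass(frozen=True)` with fields widened from `list[...]` to `Sequence[...]`. A minimal sketch of why the two go together (re-derived here for illustration, not imported from the project): `frozen` blocks rebinding the attribute, and the `Sequence` annotation plus a tuple value keeps the contents effectively read-only too:

    from dataclasses import dataclass
    from typing import Sequence

    @dataclass(frozen=True)
    class StrDelta:
        deltas: Sequence[tuple[str, ...]]  # Sequence + tuple values: effectively immutable

    d = StrDelta(deltas=(("+x",), ()))
    try:
        d.deltas = ()  # frozen: raises dataclasses.FrozenInstanceError
    except AttributeError:  # FrozenInstanceError subclasses AttributeError
        pass
    hash(d)  # frozen + eq generates __hash__, handy for caching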
| 
	coeditor.common/normalize_code_by_ast | 
	Modified | 
	temp-1 | 
	c7532373c41c6d26f05afc40e4d1377108596556 | 
	Remove module docstrings when normalizing code. | 
	 <0>:<add>             node = as_any(self.generic_visit(node))
 | 
	      # module: coeditor.common
      def normalize_code_by_ast(
          code: str, sort_keyargs: bool = True, remove_doc_string: bool = True
      ) -> str:
          """Normalize the code by parsing and unparsing it using the AST module.
          If parsing fails, return the original code."""
      
          class KeyargSorter(ast.NodeTransformer):
              def visit_Call(self, node: ast.Call):
                  if node.keywords:
                      node.keywords.sort(key=lambda x: x.arg or "None")
                  return node
      
          class DocStringremover(ast.NodeTransformer):
              def visit_FunctionDef(self, node: ast.FunctionDef):
    +             return self._visit_def(node)
    + 
    +         def visit_Module(self, node: ast.Module) -> Any:
    +             return self._visit_def(node)
    -             match node.body:
    -                 case [ast.Expr(value=ast.Constant(value=str())), *body]:
    -                     node.body = body
    -             return node
      
              def visit_ClassDef(self, node: ast.ClassDef):
    +             return self._visit_def(node)
    + 
    +         def _visit_def(self, node):
    -             node = cast(ast.ClassDef, self.generic_visit(node))
 <0>              match node.body:
                      case [ast.Expr(value=ast.Constant(value=str())), *body]:
                          node.body = body
                  return node
      
          try:
              tree = ast.parse(dedent(code))
              if remove_doc_string:
                  tree = DocStringremover().visit(tree)
              if sort_keyargs:
                  tree = KeyargSorter().visit(tree)
              return ast.unparse(tree)
          except SyntaxError:
              return code
      
       | 
	===========unchanged ref 0===========
    at: _ast
        Module(*args: Any, **kwargs: Any)
    
        FunctionDef(*args: Any, **kwargs: Any)
    
        ClassDef(*args: Any, **kwargs: Any)
    
        Expr(*args: Any, **kwargs: Any)
    
        Call(*args: Any, **kwargs: Any)
    
        Constant(*args: Any, **kwargs: Any)
    
    at: _ast.Call
        func: expr
    
        args: typing.List[expr]
    
        keywords: typing.List[keyword]
    
    at: ast
        parse(source: Union[str, bytes], filename: Union[str, bytes]=..., mode: Literal["exec"]=..., *, type_comments: bool=..., feature_version: Union[None, int, _typing.Tuple[int, int]]=...) -> Module
        parse(source: Union[str, bytes], filename: Union[str, bytes]=..., mode: str=..., *, type_comments: bool=..., feature_version: Union[None, int, _typing.Tuple[int, int]]=...) -> AST
    
        NodeTransformer()
    
    at: ast.NodeTransformer
        generic_visit(node: AST) -> AST
    
    at: ast.NodeVisitor
        visit(node: AST) -> Any
    
        visit_Module(self, node: Module) -> Any
    
        visit_FunctionDef(self, node: FunctionDef) -> Any
    
        visit_ClassDef(self, node: ClassDef) -> Any
    
        visit_Call(self, node: Call) -> Any
    
    at: spot.utils
        as_any(x) -> Any
    
    at: textwrap
        dedent(text: str) -> str
    
     | 
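The `_visit_def` refactor above adds the previously missing `generic_visit` recursion, so docstrings of nested definitions are stripped as well. A trimmed, self-contained sketch of the same idea (functions only, plus an extra guard, not present above, that leaves a docstring-only body alone since an empty body would not unparse to valid Python):

    import ast

    class DocStripper(ast.NodeTransformer):
        def visit_FunctionDef(self, node: ast.FunctionDef):
            node = self.generic_visit(node)  # recurse first, as in _visit_def above
            match node.body:
                case [ast.Expr(value=ast.Constant(value=str())), *rest] if rest:
                    node.body = rest  # drop the leading docstring expression
            return node

    tree = DocStripper().visit(ast.parse('def f():\n    "doc"\n    return 1'))
    print(ast.unparse(tree))  # -> def f():\n    return 1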
| 
	coeditor.code_change/_deep_copy_subset_ | 
	Modified | 
	temp-1 | 
	68df1a188774d0da509adfb2b256212eec6e146e | 
	Fix dataset creation. | 
	 <0>:<add>     keys = {k for k in keys if k in dict}
 | 
	      # module: coeditor.code_change
      def _deep_copy_subset_(dict: dict[T1, T2], keys: Collection[T1]) -> dict[T1, T2]:
          "This is more efficient than deepcopying each value individually if they share common data."
 <0>      to_copy = {k: dict[k] for k in keys}
          copies = copy.deepcopy(to_copy)
          for k in keys:
              dict[k] = copies[k]
          return dict
      
       | 
	===========unchanged ref 0===========
    at: coeditor.common
        T1 = TypeVar("T1")
    
        T2 = TypeVar("T2")
    
    at: copy
        deepcopy(x: _T, memo: Optional[Dict[int, Any]]=..., _nil: Any=...) -> _T
    
    at: typing
        Collection = _alias(collections.abc.Collection, 1)
    
    
===========changed ref 0===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     def get_errors(self) -> dict[str, int]:
  -         return dict()
  - 
===========changed ref 1===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     def get_errors(self) -> dict[str, int]:
  -         return dict()
  - 
===========changed ref 2===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     def clear_errors(self):
  -         return None
  - 
===========changed ref 3===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     def clear_errors(self):
  -         return None
  - 
===========changed ref 4===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     VERSION = "1.0"
  - 
===========changed ref 5===========
    # module: coeditor.dataset
    @dataclass
    class _ProcessingResult:
        edits: Sequence[TkC3Problem]
  +     stats: dict[str, dict | Any]
  -     processor_errors: dict[str, int]
    
===========changed ref 6===========
    # module: spot.utils
    @dataclass
    class TimeLogger:
  +     @staticmethod
  +     def times_to_dataframe(times: dict[str, list[float]]):
  +         names = list(times.keys())
  +         total_times = [sum(ts) for ts in times.values()]
  +         counts = [len(ts) for ts in times.values()]
  +         avg_times = [sum(ts) / len(ts) for ts in times.values()]
  + 
  +         df = pd.DataFrame(
  +             {
  +                 "name": names,
  +                 "count": counts,
  +                 "avg_time": avg_times,
  +                 "total_time": total_times,
  +             }
  +         )
  +         df.sort_values(by="total_time", ascending=False, inplace=True)
  +         return df
  + 
===========changed ref 7===========
    # module: spot.utils
    @dataclass
    class TimeLogger:
        def as_dataframe(self):
  -         names = list(self.times.keys())
  -         total_times = [sum(ts) for ts in self.times.values()]
  -         counts = [len(ts) for ts in self.times.values()]
  -         avg_times = [sum(ts) / len(ts) for ts in self.times.values()]
  +         return self.times_to_dataframe(self.times)
    
  -         df = pd.DataFrame(
  -             {
  -                 "name": names,
  -                 "count": counts,
  -                 "avg_time": avg_times,
  -                 "total_time": total_times,
  -             }
  -         )
  -         df.sort_values(by="total_time", ascending=False, inplace=True)
  -         return df
  - 
===========changed ref 8===========
    # module: coeditor.dataset
    def _process_commits(
        root: Path,
        workdir: Path,
        commits: Sequence[CommitInfo],
        encoder: C3EditEncoder,
    ) -> _ProcessingResult:
        # use process-specific parso cache
        _fix_jedi_cache(workdir)
  +     coeditor.code_change._tlogger.clear()
        try:
            # cannot return here since subprocess will be killed after returning
            edits = edits_from_commit_history(
                root,
                commits,
                tempdir=workdir / "code",
                change_processor=encoder.change_processor,
                edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                silent=True,
            )
        except UnicodeDecodeError as e:
            # this might happen in rare cases
            warnings.warn(f"Unable to process project: {root}\nError: {e}")
            edits = []
  +     stats = dict()
  +     encoder.change_processor.append_stats(stats)
  +     rec_add_dict_to(stats, {"tlogger": coeditor.code_change._tlogger.times})
  +     return _ProcessingResult(edits, stats)
  -     return _ProcessingResult(
  -         edits,
  -         encoder.change_processor.get_errors(),
  -     )
    
===========changed ref 9===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
        encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
    ) -> "TokenizedEditDataset[TkC3Problem]":
        """
        Create a TokenizedEditDataset from a list of project roots and a given encoder.
        Args:
            - max_history_per_repo (int, optional): When the repo history is longer than
            this value, only the oldest portion is going to be used. Defaults to 1000.
        """
        workdir = Path(tempfile.gettempdir()) / "dataset_from_projects"
        histories = pmap(
            get_commit_history,
            project_roots,
            max_workers=workers,
            desc="Getting commit histories",
            tqdm_args={"unit": "repo"},
        )
        # keep the oldest portion of the history
        histories = [commits[-max_history_per_repo:] for commits in histories]
        # break long commit sequences into chunks for parallelization
        roots = list[Path]()
        chunk_training = list[bool]()
        chunked_histories = list[list[CommitInfo]]()
        for root, h, train in zip(project_roots, histories, repo_training):
            history_chunk_size = max(50, math.ceil(len(h) / 4))
            for i in range(0, len(h), history_chunk_size):
                roots.append(root)
                chunk_training.append(train)
                # note that we need 1 extra overlapping commit to get all diffs
                chunked_histories.append(h[i : i + history_chunk_size + 1])
        workdirs = [workdir / f"chunk-{i}" for i in range(len(roots))]
        try:
            presults = pmap(
                _process_commits,
                roots,
                workdirs,
                chunked_histories,
                key_args={"encoder": encoder},
                desc="Create tokenized edits",
                max</s> | 
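The `keys = {k for k in keys if k in dict}` guard in this row's ground truth drops keys absent from the dict, so the comprehension below it cannot raise `KeyError`. The efficiency claim in the docstring comes from `copy.deepcopy` using one memo table per call: copying the values together preserves structure they share, while copying them one by one duplicates it. A small demonstration:

    import copy

    shared = [1, 2, 3]
    d = {"a": ("x", shared), "b": ("y", shared)}

    joint = copy.deepcopy({k: d[k] for k in ("a", "b")})
    assert joint["a"][1] is joint["b"][1]  # one memo: sharing preserved

    separate = {k: copy.deepcopy(d[k]) for k in ("a", "b")}
    assert separate["a"][1] is not separate["b"][1]  # sharing lost, memory doubled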
| 
	coeditor.ctx_change_encoder/C3ProblemGenerator.post_edit_analysis | 
	Modified | 
	temp-1 | 
	68df1a188774d0da509adfb2b256212eec6e146e | 
	Fix dataset creation. | 
	 <0>:<add>                 for source in srcs:
 | 
	      # module: coeditor.ctx_change_encoder
      class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
          def post_edit_analysis(
              self,
              pstate: ProjectState,
              modules: Mapping[RelPath, JModule],
              changes: Mapping[ModuleName, JModuleChange],
          ) -> list[ModuleName]:
              "Return the topological order among the modules."
              # sort modules topologically
              module_deps = dict[ModuleName, set[ModuleName]]()
              for rel_path, module in modules.items():
                  names = {n for n in module.imported_names}
                  script = pstate.scripts[rel_path]
                  deps = module_deps.setdefault(module.mname, set())
                  for n in names:
    +                 try:
    +                     srcs = _fast_goto(
    -                 for source in _fast_goto(
    +                         script, n, follow_imports=True, follow_builtin_imports=False
    -                     script, n, follow_imports=True, follow_builtin_imports=False
    +                     )
    -                 ):
    +                 except Exception as e:
    +                     if "There's a scope that was not managed:" in str(e):
    +                         self.analyzer.add_error(str(e))
    +                         continue
    +                     else:
    +                         raise
 <0>                      deps.add(source.module_name)
              module_order = sort_modules_by_imports(module_deps)
              return module_order
      
       | 
	===========unchanged ref 0===========
    at: coeditor.ctx_change_encoder
        _fast_goto(script: jedi.Script, tree_name: tree.Name, *, follow_imports=False, follow_builtin_imports=False, only_stubs=False, prefer_stubs=False) -> set[classes.Name]
    
    at: coeditor.ctx_change_encoder.C3ProblemGenerator.__init__
        self.analyzer = analyzer
    
    at: coeditor.ctx_change_encoder.JediUsageAnalyzer
        _KnownJediErrors = {
                "not enough values to unpack (expected 2",
                "'Newline' object has no attribute 'children'",
                "trailer_op is actually ",
                "There's a scope that was not managed: <Module",
                "maximum recursion depth exceeded",
                "'NoneType' object has no attribute 'type'",
            }
    
        add_error(err_text: str)
        add_error(self, err_text: str)
    
    at: spot.static_analysis
        ModuleName = str
    
    at: typing
        Mapping = _alias(collections.abc.Mapping, 2)
    
    at: typing.Mapping
        items() -> AbstractSet[Tuple[_KT, _VT_co]]
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  -     def clear_errors(self):
  -         return self.analyzer.error_counts.clear()
  - 
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  -     def get_errors(self) -> dict[str, int]:
  -         return self.analyzer.error_counts
  - 
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def clear_stats(self) -> None:
  +         return self.analyzer.error_counts.clear()
  + 
===========changed ref 3===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def __repr__(self) -> str:
  +         return repr_modified_args(self)
  + 
===========changed ref 4===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     VERSION = "1.1"
  -     VERSION = "1.0"
    
===========changed ref 5===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def append_stats(self, stats: dict[str, Any]) -> None:
  +         rec_add_dict_to(stats, {"analyzer_errors": self.analyzer.error_counts})
  + 
===========changed ref 6===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     def clear_errors(self):
  -         return None
  - 
===========changed ref 7===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     def clear_errors(self):
  -         return None
  - 
===========changed ref 8===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     def get_errors(self) -> dict[str, int]:
  -         return dict()
  - 
===========changed ref 9===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     def get_errors(self) -> dict[str, int]:
  -         return dict()
  - 
===========changed ref 10===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     VERSION = "1.0"
  - 
===========changed ref 11===========
    # module: coeditor.dataset
    @dataclass
    class _ProcessingResult:
        edits: Sequence[TkC3Problem]
  +     stats: dict[str, dict | Any]
  -     processor_errors: dict[str, int]
    
===========changed ref 12===========
    # module: coeditor.code_change
    def _deep_copy_subset_(dict: dict[T1, T2], keys: Collection[T1]) -> dict[T1, T2]:
        "This is more efficient than deepcopying each value individually if they share common data."
  +     keys = {k for k in keys if k in dict}
        to_copy = {k: dict[k] for k in keys}
        copies = copy.deepcopy(to_copy)
        for k in keys:
            dict[k] = copies[k]
        return dict
    
===========changed ref 13===========
    # module: spot.utils
    @dataclass
    class TimeLogger:
  +     @staticmethod
  +     def times_to_dataframe(times: dict[str, list[float]]):
  +         names = list(times.keys())
  +         total_times = [sum(ts) for ts in times.values()]
  +         counts = [len(ts) for ts in times.values()]
  +         avg_times = [sum(ts) / len(ts) for ts in times.values()]
  + 
  +         df = pd.DataFrame(
  +             {
  +                 "name": names,
  +                 "count": counts,
  +                 "avg_time": avg_times,
  +                 "total_time": total_times,
  +             }
  +         )
  +         df.sort_values(by="total_time", ascending=False, inplace=True)
  +         return df
  + 
===========changed ref 14===========
    # module: spot.utils
    @dataclass
    class TimeLogger:
        def as_dataframe(self):
  -         names = list(self.times.keys())
  -         total_times = [sum(ts) for ts in self.times.values()]
  -         counts = [len(ts) for ts in self.times.values()]
  -         avg_times = [sum(ts) / len(ts) for ts in self.times.values()]
  +         return self.times_to_dataframe(self.times)
    
  -         df = pd.DataFrame(
  -             {
  -                 "name": names,
  -                 "count": counts,
  -                 "avg_time": avg_times,
  -                 "total_time": total_times,
  -             }
  -         )
  -         df.sort_values(by="total_time", ascending=False, inplace=True)
  -         return df
  - 
===========changed ref 15===========
    # module: coeditor.dataset
    def _process_commits(
        root: Path,
        workdir: Path,
        commits: Sequence[CommitInfo],
        encoder: C3EditEncoder,
    ) -> _ProcessingResult:
        # use process-specific parso cache
        _fix_jedi_cache(workdir)
  +     coeditor.code_change._tlogger.clear()
        try:
            # cannot return here since subprocess will be killed after returning
            edits = edits_from_commit_history(
                root,
                commits,
                tempdir=workdir / "code",
                change_processor=encoder.change_processor,
                edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                silent=True,
            )
        except UnicodeDecodeError as e:
            # this might happen in rare cases
            warnings.warn(f"Unable to process project: {root}\nError: {e}")
            edits = []
  +     stats = dict()
  +     encoder.change_processor.append_stats(stats)
  +     rec_add_dict_to(stats, {"tlogger": coeditor.code_change._tlogger.times})
  +     return _ProcessingResult(edits, stats)
  -     return _ProcessingResult(
  -         edits,
  -         encoder.change_processor.get_errors(),
  -     )
     | 
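`sort_modules_by_imports` is not shown in this row. As a sketch of what the `module_deps` map feeds into, the standard library's `graphlib` (a stand-in, not the project's implementation, and unlike it this raises `CycleError` on import cycles) produces the same dependencies-first ordering:

    from graphlib import TopologicalSorter

    # module -> modules it depends on, mirroring `module_deps` above
    deps = {"pkg.a": set(), "pkg.b": {"pkg.a"}, "pkg.c": {"pkg.a", "pkg.b"}}

    order = list(TopologicalSorter(deps).static_order())
    assert order.index("pkg.a") < order.index("pkg.b") < order.index("pkg.c")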
| 
	coeditor.ctx_change_encoder/JediUsageAnalyzer.get_line_usages | 
	Modified | 
	temp-1 | 
	68df1a188774d0da509adfb2b256212eec6e146e | 
	Fix dataset creation. | 
	 <0>:<add>                 self.add_error(err_text)
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class JediUsageAnalyzer:
          def get_line_usages(
              self,
              script: jedi.Script,
              proj_root: Path,
              lines_to_analyze: Collection[int],
              silent: bool = False,
          ):
      <s> unexpected = dict[str, int]()
              for name in tqdm(all_names, f"Analyzing {script.path}", disable=silent):
                  name: tree.Name
                  line = name.start_pos[0]
                  if line not in lines_to_analyze:
                      continue
                  usages = line2usages.setdefault(line, set())
                  try:
                      defs = _fast_goto(
                          script,
                          name,
                          follow_imports=True,
                          follow_builtin_imports=False,
                      )
                      for d in defs:
                          usages.update(PyDefinition.from_name(d))
      
    -             except KeyError:
    -                 # for debugging
    -                 raise
                  except Exception as e:
                      err_text = repr(e)
    -                 is_known = any(err in err_text for err in _KnownJediErrors)
    -                 errors[err_text] = errors.get(err_text, 0) + 1
    -                 if not is_known:
    -                     unexpected[err_text] = unexpected.get(err_text, 0) + 1
    -         if unexpected:
    -             project_name = proj_root.name
    -             if script.path:
    -                 file_path = script.path.relative_to(proj_root)
    -             else:
    -                 file_path = "<unknown>"
    -             for err, count in unexpected.items():
    -                 logging.warn(
    -                     f"Unexpected error when analyzing '{project_name}/{file_path}' ({count=}): {err}"
    -                 )
    +                 str_limit = 40
    +                 if len(err_text) > str_limit:
    +                     err_text = err_text[:str_limit] + "..."
 <0>          return LineUsageAnalysis(line2usages)
      
       | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class JediUsageAnalyzer:
        def get_line_usages(
            self,
            script: jedi.Script,
            proj_root: Path,
            lines_to_analyze: Collection[int],
            silent: bool = False,
        ):
    # offset: -1
            jmod: tree.Module = script._module_node
            line2usages = dict[int, set[PyDefinition]]()
            all_names = [
                name for k, names in jmod.get_used_names()._dict.items() for name in names
            ]
            all_names.sort(key=lambda x: x.start_pos)
  -         errors = self.error_counts
  -         unexpected = dict[str, int]()
            for name in tqdm(all_names, f"Analyzing {script.path}"</s>
===========unchanged ref 0===========
    at: coeditor._utils
        TimeLogger(times: dict[str, list[float]]=field(default_factory=dict))
    
    at: coeditor.ctx_change_encoder
        PyDefinition(full_name: PyFullName, start_pos: tuple[int, int], end_pos: tuple[int, int])
    
        LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
    
        _fast_goto(script: jedi.Script, tree_name: tree.Name, *, follow_imports=False, follow_builtin_imports=False, only_stubs=False, prefer_stubs=False) -> set[classes.Name]
    
    at: coeditor.ctx_change_encoder.PyDefinition
        from_name(name: classes.BaseName) -> Iterable["PyDefinition"]
    
    at: jedi.api
        Script(code=None, *, path=None, environment=None, project=None)
    
    at: jedi.api.Script.__init__
        self.path = path.absolute() if path else None
    
        self._module_node, code = self._inference_state.parse_and_get_code(
                    code=code,
                    path=self.path,
                    use_latest_grammar=path and path.suffix == '.pyi',
                    cache=False,  # No disk cache, because the current script often changes.
                    diff_cache=settings.fast_parser,
                    cache_path=settings.cache_directory,
                )
    
    at: parso.python.tree
        Name()
    
        Module(children)
    
    at: parso.python.tree.Module
        __slots__ = ('_used_names',)
    
        type = 'file_input'
    
        get_used_names()
    
    at: parso.python.tree.UsedNamesMapping.__init__
        self._dict = dct
    
    at: parso.tree.Leaf.__init__
        self.start_pos = start_pos
    
    at: pathlib
        Path()
    
    
===========unchanged ref 1===========
    at: tqdm.std
        tqdm(iterable=None, desc=None, total=None, leave=True, file=None, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None, unit_divisor=1000, write_bytes=False, lock_args=None, nrows=None, colour=None, delay=0, gui=False, **kwargs)
    
    at: typing
        Collection = _alias(collections.abc.Collection, 1)
    
    at: typing.Mapping
        get(key: _KT) -> Optional[_VT_co]
        get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    _ObjId = NewType("_ObjId", int)
  - 
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  -     def clear_errors(self):
  -         return self.analyzer.error_counts.clear()
  - 
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  -     def get_errors(self) -> dict[str, int]:
  -         return self.analyzer.error_counts
  - 
===========changed ref 3===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def clear_stats(self) -> None:
  +         return self.analyzer.error_counts.clear()
  + 
===========changed ref 4===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def __repr__(self) -> str:
  +         return repr_modified_args(self)
  + 
===========changed ref 5===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     VERSION = "1.1"
  -     VERSION = "1.0"
    
===========changed ref 6===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     def append_stats(self, stats: dict[str, Any]) -> None:
  +         rec_add_dict_to(stats, {"analyzer_errors": self.analyzer.error_counts})
  + 
===========changed ref 7===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
        def post_edit_analysis(
            self,
            pstate: ProjectState,
            modules: Mapping[RelPath, JModule],
            changes: Mapping[ModuleName, JModuleChange],
        ) -> list[ModuleName]:
            "Return the topological order among the modules."
            # sort modules topologically
            module_deps = dict[ModuleName, set[ModuleName]]()
            for rel_path, module in modules.items():
                names = {n for n in module.imported_names}
                script = pstate.scripts[rel_path]
                deps = module_deps.setdefault(module.mname, set())
                for n in names:
  +                 try:
  +                     srcs = _fast_goto(
  -                 for source in _fast_goto(
  +                         script, n, follow_imports=True, follow_builtin_imports=False
  -                     script, n, follow_imports=True, follow_builtin_imports=False
  +                     )
  -                 ):
  +                 except Exception as e:
  +                     if "There's a scope that was not managed:" in str(e):
  +                         self.analyzer.add_error(str(e))
  +                         continue
  +                     else:
  +                         raise
  +                 for source in srcs:
                        deps.add(source.module_name)
            module_order = sort_modules_by_imports(module_deps)
            return module_order
    
===========changed ref 8===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     def clear_errors(self):
  -         return None
  - 
===========changed ref 9===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     def clear_errors(self):
  -         return None
  - 
===========changed ref 10===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     def get_errors(self) -> dict[str, int]:
  -         return dict()
  - 
===========changed ref 11===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     def get_errors(self) -> dict[str, int]:
  -         return dict()
  - 
===========changed ref 12===========
    # module: coeditor.code_change
    @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  -     VERSION = "1.0"
  - 
===========changed ref 13===========
    # module: coeditor.dataset
    @dataclass
    class _ProcessingResult:
        edits: Sequence[TkC3Problem]
  +     stats: dict[str, dict | Any]
  -     processor_errors: dict[str, int]
     | 
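The `str_limit` truncation added above keeps one counter per error shape instead of one per unique message. A hypothetical stand-in for the `add_error`/`error_counts` pair (the real method lives on `JediUsageAnalyzer` and is not reproduced here):

    error_counts: dict[str, int] = {}

    def add_error(err_text: str, str_limit: int = 40) -> None:
        if len(err_text) > str_limit:
            err_text = err_text[:str_limit] + "..."  # long reprs collapse to one key
        error_counts[err_text] = error_counts.get(err_text, 0) + 1

    add_error("AttributeError(\"'Newline' object has no attribute 'children'\")")
    add_error("AttributeError(\"'Newline' object has no attribute 'children'\")")
    assert len(error_counts) == 1 and sum(error_counts.values()) == 2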
| 
	spot.utils/compute_line_diffs | 
	Modified | 
	temp-1 | 
	ab374914700355dc76b7f09d9c8121b7a6855e67 | 
	- Only editing non-function spans during training. - Add time limit to data creation. - Fix fast diff. - Handle value error in `normalize_code_by_ast`. - Add max_total_ref_tks to C3ProblemTokenizer. | 
	 <0>:<add>         return compute_line_diffs_fast(before, after)
 | 
	      # module: spot.utils
      def compute_line_diffs(
          before: Sequence[str], after: Sequence[str], keep_explain_lines: bool = False
      ):
          SizeLimit = 8000
          if (
              sum(len(x) for x in before) > SizeLimit
              or sum(len(x) for x in after) > SizeLimit
          ):
    -         compute_line_diffs_fast(before, after)
 <0>      differ = difflib.Differ()
          result = []
          for line in differ.compare(before, after):
              assert len(line) >= 2
              tag = line[0]
              if keep_explain_lines or tag != "?":
                  result.append(tag + line[2:])
          return result
      
       | 
	===========unchanged ref 0===========
    at: difflib
        Differ(linejunk: Optional[_JunkCallback]=..., charjunk: Optional[_JunkCallback]=...)
    
    at: difflib.Differ
        compare(a: Sequence[_StrType], b: Sequence[_StrType]) -> Iterator[_StrType]
    
    at: spot.utils
        compute_line_diffs_fast(before: Sequence[str], after: Sequence[str])
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
    
     | 
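The fix in this row is a classic dropped-`return`: the fast path computed the diff for oversized inputs and then discarded it, silently falling through to the slow `Differ` path. For reference, the slow path boils down to the following (expected output shown; the fast path may format slightly differently):

    import difflib

    before, after = ["a", "b", "c"], ["a", "x", "c"]
    result = []
    for line in difflib.Differ().compare(before, after):
        tag = line[0]
        if tag != "?":  # "?" lines are intraline hints, dropped by default
            result.append(tag + line[2:])
    print(result)  # [' a', '-b', '+x', ' c']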
| 
	coeditor.common/normalize_code_by_ast | 
	Modified | 
	temp-1 | 
	ab374914700355dc76b7f09d9c8121b7a6855e67 | 
	- Only editing non-function spans during training. - Add time limit to data creation. - Fix fast diff. - Handle value error in `normalize_code_by_ast`. - Add max_total_ref_tks to C3ProblemTokenizer. | 
	 <0>:<add>     except (SyntaxError, ValueError):
 | 
	      # module: coeditor.common
      def normalize_code_by_ast(
          code: str, sort_keyargs: bool = True, remove_doc_string: bool = True
      ) -> str:
          """Normalize the code by parsing and unparsing it using the AST module.
          If parsing fails, return the original code."""
      
          class KeyargSorter(ast.NodeTransformer):
              def visit_Call(self, node: ast.Call):
                  if node.keywords:
                      node.keywords.sort(key=lambda x: x.arg or "None")
                  return node
      
          class DocStringremover(ast.NodeTransformer):
              def visit_FunctionDef(self, node: ast.FunctionDef):
                  return self._visit_def(node)
      
              def visit_Module(self, node: ast.Module) -> Any:
                  return self._visit_def(node)
      
              def visit_ClassDef(self, node: ast.ClassDef):
                  return self._visit_def(node)
      
              def _visit_def(self, node):
                  node = as_any(self.generic_visit(node))
                  match node.body:
                      case [ast.Expr(value=ast.Constant(value=str())), *body]:
                          node.body = body
                  return node
      
          try:
              tree = ast.parse(dedent(code))
              if remove_doc_string:
                  tree = DocStringremover().visit(tree)
              if sort_keyargs:
                  tree = KeyargSorter().visit(tree)
              return ast.unparse(tree)
    -     except SyntaxError:
 <0>          return code
      
       | 
	===========unchanged ref 0===========
    at: _ast
        Module(*args: Any, **kwargs: Any)
    
        FunctionDef(*args: Any, **kwargs: Any)
    
        ClassDef(*args: Any, **kwargs: Any)
    
        Expr(*args: Any, **kwargs: Any)
    
        Call(*args: Any, **kwargs: Any)
    
        Constant(*args: Any, **kwargs: Any)
    
    at: _ast.Call
        func: expr
    
        args: typing.List[expr]
    
        keywords: typing.List[keyword]
    
    at: ast
        parse(source: Union[str, bytes], filename: Union[str, bytes]=..., mode: Literal["exec"]=..., *, type_comments: bool=..., feature_version: Union[None, int, _typing.Tuple[int, int]]=...) -> Module
        parse(source: Union[str, bytes], filename: Union[str, bytes]=..., mode: str=..., *, type_comments: bool=..., feature_version: Union[None, int, _typing.Tuple[int, int]]=...) -> AST
    
        NodeTransformer()
    
        unparse(ast_obj: AST) -> str
    
    at: ast.NodeTransformer
        generic_visit(node: AST) -> AST
    
    at: ast.NodeVisitor
        visit(node: AST) -> Any
    
        visit_Module(self, node: Module) -> Any
    
        visit_FunctionDef(self, node: FunctionDef) -> Any
    
        visit_ClassDef(self, node: ClassDef) -> Any
    
        visit_Call(self, node: Call) -> Any
    
    at: spot.utils
        as_any(x) -> Any
    
    at: textwrap
        dedent(text: str) -> str
    
    
===========changed ref 0===========
    # module: spot.utils
    def compute_line_diffs(
        before: Sequence[str], after: Sequence[str], keep_explain_lines: bool = False
    ):
        SizeLimit = 8000
        if (
            sum(len(x) for x in before) > SizeLimit
            or sum(len(x) for x in after) > SizeLimit
        ):
  +         return compute_line_diffs_fast(before, after)
  -         compute_line_diffs_fast(before, after)
        differ = difflib.Differ()
        result = []
        for line in differ.compare(before, after):
            assert len(line) >= 2
            tag = line[0]
            if keep_explain_lines or tag != "?":
                result.append(tag + line[2:])
        return result
     | 
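Widening the handler to `(SyntaxError, ValueError)` matters because `ast.parse` (via `compile`) raises `ValueError` rather than `SyntaxError` for some malformed inputs, most notably null bytes, at least on the Python versions this code targets (3.12+ may report null bytes as a `SyntaxError` instead):

    import ast

    try:
        ast.parse("x = 1\x00")  # null byte in the source
    except ValueError:
        pass  # "source code string cannot contain null bytes" (not a SyntaxError here)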
| 
	coeditor.dataset/_process_commits | 
	Modified | 
	temp-1 | 
	ab374914700355dc76b7f09d9c8121b7a6855e67 | 
	- Only editing non-function spans during training. - Add time limit to data creation. - Fix fast diff. - Handle value error in `normalize_code_by_ast`. - Add max_total_ref_tks to C3ProblemTokenizer. | 
	 <0>:<add>         warnings.warn(f"Failed to process project: {root}\nError: {e}")
 | 
	      # module: coeditor.dataset
      def _process_commits(
          root: Path,
          workdir: Path,
          commits: Sequence[CommitInfo],
    +     is_training: bool,
          encoder: C3EditEncoder,
      ) -> _ProcessingResult:
          # use process-specific parso cache
          _fix_jedi_cache(workdir)
          coeditor.code_change._tlogger.clear()
    +     encoder.change_processor.clear_stats()
    +     encoder.change_processor.set_training(is_training)
          try:
              # cannot return here since subprocess will be killed after returning
              edits = edits_from_commit_history(
                  root,
                  commits,
                  tempdir=workdir / "code",
                  change_processor=encoder.change_processor,
                  edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                  silent=True,
    +             time_limit=time_limit_per_commit * (len(commits) + 10),
              )
    +     except Exception as e:
    +         if isinstance(e, KeyboardInterrupt):
    +             raise
    -     except UnicodeDecodeError as e:
    -         # this might happen in rare cases
    -         warnings.warn(f"Unable to process project: {root}\nError: {e}")
 <0>          edits = []
          stats = dict()
          encoder.change_processor.append_stats(stats)
          rec_add_dict_to(stats, {"tlogger": coeditor.code_change._tlogger.times})
          return _ProcessingResult(edits, stats)
      
       | 
	===========unchanged ref 0===========
    at: coeditor.dataset
        C3EditEncoder(change_processor: ProjectChangeProcessor[C3Problem]=field(
                default_factory=C3ProblemGenerator
            ), edit_tokenizer: C3ProblemTokenizer=field(default_factory=C3ProblemTokenizer))
    
        _ProcessingResult(edits: Sequence[TkC3Problem], stats: dict[str, dict | Any])
    
        time_limit_per_commit = 5.0
    
    at: coeditor.dataset.C3EditEncoder
        change_processor: ProjectChangeProcessor[C3Problem] = field(
                default_factory=C3ProblemGenerator
            )
    
        edit_tokenizer: C3ProblemTokenizer = field(default_factory=C3ProblemTokenizer)
    
    at: pathlib
        Path()
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: spot.utils
    def compute_line_diffs(
        before: Sequence[str], after: Sequence[str], keep_explain_lines: bool = False
    ):
        SizeLimit = 8000
        if (
            sum(len(x) for x in before) > SizeLimit
            or sum(len(x) for x in after) > SizeLimit
        ):
  +         return compute_line_diffs_fast(before, after)
  -         compute_line_diffs_fast(before, after)
        differ = difflib.Differ()
        result = []
        for line in differ.compare(before, after):
            assert len(line) >= 2
            tag = line[0]
            if keep_explain_lines or tag != "?":
                result.append(tag + line[2:])
        return result
    
===========changed ref 1===========
    # module: coeditor.common
    def normalize_code_by_ast(
        code: str, sort_keyargs: bool = True, remove_doc_string: bool = True
    ) -> str:
        """Normalize the code by parsing and unparsing it using the AST module.
        If parsing fails, return the original code."""
    
        class KeyargSorter(ast.NodeTransformer):
            def visit_Call(self, node: ast.Call):
                if node.keywords:
                    node.keywords.sort(key=lambda x: x.arg or "None")
                return node
    
        class DocStringremover(ast.NodeTransformer):
            def visit_FunctionDef(self, node: ast.FunctionDef):
                return self._visit_def(node)
    
            def visit_Module(self, node: ast.Module) -> Any:
                return self._visit_def(node)
    
            def visit_ClassDef(self, node: ast.ClassDef):
                return self._visit_def(node)
    
            def _visit_def(self, node):
                node = as_any(self.generic_visit(node))
                match node.body:
                    case [ast.Expr(value=ast.Constant(value=str())), *body]:
                        node.body = body
                return node
    
        try:
            tree = ast.parse(dedent(code))
            if remove_doc_string:
                tree = DocStringremover().visit(tree)
            if sort_keyargs:
                tree = KeyargSorter().visit(tree)
            return ast.unparse(tree)
  +     except (SyntaxError, ValueError):
  -     except SyntaxError:
            return code
     | 
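The fix above widens the per-project error handling from `UnicodeDecodeError` to any `Exception`, so a single bad repo cannot abort the whole dataset build. A minimal sketch of the pattern (the `work` callable and the function name are illustrative, not from the repo); note that `KeyboardInterrupt` derives from `BaseException`, so `except Exception` never catches it and the explicit `isinstance` guard is defensive rather than strictly necessary:

    import warnings

    def process_one_project(root: str, work) -> list:
        # Catch any per-project failure, warn, and return an empty result
        # so the surrounding parallel run can continue with other repos.
        try:
            return work(root)
        except Exception as e:
            if isinstance(e, KeyboardInterrupt):
                raise
            warnings.warn(f"Failed to process project: {root}\nError: {e}")
            return []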
| 
	coeditor.dataset/dataset_from_projects | 
	Modified | 
	temp-1 | 
	ab374914700355dc76b7f09d9c8121b7a6855e67 | 
	- Only editing non-function spans during training. - Add time limit to data creation. - Fix fast diff. - Handle value error in `normalize_code_by_ast`. - Add max_total_ref_tks to C3ProblemTokenizer. | 
	 <0>:<add>                 for k in sorted(errors.keys(), key=lambda k: errors[k], reverse=True):
 | 
	      # module: coeditor.dataset
      def dataset_from_projects(
          project_roots: Sequence[Path],
          encoder: C3EditEncoder,
          repo_training: Sequence[bool],
          max_history_per_repo: int = 1000,
          workers: int = DefaultWorkers,
      ) -> "TokenizedEditDataset[TkC3Problem]":
      <s>        presults = pmap(
                  _process_commits,
                  roots,
                  workdirs,
                  chunked_histories,
    +             chunk_training,
                  key_args={"encoder": encoder},
                  desc="Create tokenized edits",
                  max_workers=workers,
                  tqdm_args={"unit": "chunk"},
              )
          finally:
              if workdir.exists():
                  shutil.rmtree(workdir)
                  print("Workdir removed:", workdir)
      
          project2edits = dict[Path, list[TkC3Problem]]()
      
          try:
              stats = dict[str, Any]()
              for root, pr in zip(roots, presults):
                  project2edits.setdefault(root, []).extend(pr.edits)
                  rec_add_dict_to(stats, pr.stats)
      
              if "tlogger" in stats:
                  df = TimeLogger.times_to_dataframe(stats.pop("tlogger"))
                  print("Time stats:")
                  display(df)
              if "analyzer_errors" in list(stats.keys()):
                  errors: dict = stats.pop("analyzer_errors")
                  for k in list(errors.keys()):
                      if JediUsageAnalyzer.is_known_error(k):
                          errors.pop(k)
                  if errors:
                      print("Analyzer errors:")
    -                 for k in sorted(errors.keys(), reverse=True):
 <0>                      print(f"{k}:\t{errors[k]}")
              if stats:
                  print("Other Stats:")
                  pretty_print_dict(stats)
          except Exception as e:
              if not isinstance(e, KeyboardInterrupt):
                  print("Error while printing stats:", e)
      
          return TokenizedEditDataset(project2edits)
      
       | 
	===========above chunk 0===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
        encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
    ) -> "TokenizedEditDataset[TkC3Problem]":
    # offset: -1
        """
        Create a TokenizedEditDataset from a list of project roots and a given encoder.
        Args:
            - max_history_per_repo (int, optional): When the repo history is longer than
            this value, only the oldest portion is going to be used. Defaults to 1000.
        """
        workdir = Path(tempfile.gettempdir()) / "dataset_from_projects"
        histories = pmap(
            get_commit_history,
            project_roots,
            max_workers=workers,
            desc="Getting commit histories",
            tqdm_args={"unit": "repo"},
        )
        # keep the oldest portion of the history
        histories = [commits[-max_history_per_repo:] for commits in histories]
        # break long commit sequences into chunks for parallelization
        roots = list[Path]()
        chunk_training = list[bool]()
        chunked_histories = list[list[CommitInfo]]()
        for root, h, train in zip(project_roots, histories, repo_training):
            history_chunk_size = max(50, math.ceil(len(h) / 4))
            for i in range(0, len(h), history_chunk_size):
                roots.append(root)
                chunk_training.append(train)
                # note that we need 1 extra overlapping commit to get all diffs
                chunked_histories.append(h[i : i + history_chunk_size + 1])
        workdirs = [workdir / f"chunk-{i}" for i in range(len(roots))]
        try:
            presults = pmap(
                _process_commits,
                roots,
                workdirs,
                chunked_histories,</s>
===========unchanged ref 0===========
    at: IPython.core.display_functions
        display(*, include=None, exclude=None, metadata=None, transient=None, display_id=None, raw=False, clear=False, source=_sentinel, **kwargs)
    
    at: coeditor._utils
        DefaultWorkers: int = multiprocessing.cpu_count() // 2
        global DefaultWorkers
    
        pmap(f: Callable[..., T1], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], /, *iterables: Iterable[Any], desc: str | None=None, key_args: Mapping[str, Any] | None=None, max_workers: int | None=None, chunksize: int | None=None, tqdm_args: Mapping[str, Any] | None=None) -> list[T1]
    
        TimeLogger(times: dict[str, list[float]]=field(default_factory=dict))
    
    at: coeditor._utils.TimeLogger
        times: dict[str, list[float]] = field(default_factory=dict)
    
        times_to_dataframe(times: dict[str, list[float]])
    
    at: coeditor.common
        rec_add_dict_to(target: dict[str, Any], value: Mapping[str, Any], value_merger: Callable[[Any, Any], Any]=lambda x, y: x + y)
    
    at: coeditor.dataset
        C3EditEncoder(change_processor: ProjectChangeProcessor[C3Problem]=field(
                default_factory=C3ProblemGenerator
            ), edit_tokenizer: C3ProblemTokenizer=field(default_factory=C3ProblemTokenizer))
    
        _ProcessingResult(edits: Sequence[TkC3Problem], stats: dict[str, dict | Any])
    
        _process_commits(root: Path, workdir: Path, commits: Sequence[CommitInfo], is_training: bool, encoder: C3EditEncoder) -> _ProcessingResult
    
    at: coeditor.dataset.C3EditEncoder
        change_processor: ProjectChangeProcessor[C3Problem] = field(
                default_factory=C3ProblemGenerator
            )
    
    
===========unchanged ref 1===========
    at: coeditor.dataset._ProcessingResult
        edits: Sequence[TkC3Problem]
    
        stats: dict[str, dict | Any]
    
    at: coeditor.dataset._process_commits
        edits = edits_from_commit_history(
                    root,
                    commits,
                    tempdir=workdir / "code",
                    change_processor=encoder.change_processor,
                    edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                    silent=True,
                    time_limit=time_limit_per_commit * (len(commits) + 10),
                )
        edits = []
    
    at: math
        ceil(x: SupportsFloat, /) -> int
    
    at: pathlib
        Path()
    
    at: pathlib.Path
        __slots__ = ()
    
        exists() -> bool
    
    at: shutil
        rmtree(path: Union[bytes, StrPath], ignore_errors: bool=..., onerror: Optional[Callable[[Any, Any, Any], Any]]=...) -> None
    
    at: tempfile
        gettempdir() -> str
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
    
    at: typing.MutableMapping
        pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
        pop(key: _KT) -> _VT
    
    
===========changed ref 0===========
    # module: coeditor.dataset
    def _process_commits(
        root: Path,
        workdir: Path,
        commits: Sequence[CommitInfo],
  +     is_training: bool,
        encoder: C3EditEncoder,
    ) -> _ProcessingResult:
        # use process-specific parso cache
        _fix_jedi_cache(workdir)
        coeditor.code_change._tlogger.clear()
  +     encoder.change_processor.clear_stats()
  +     encoder.change_processor.set_training(is_training)
        try:
            # cannot return here since subprocess will be killed after returning
            edits = edits_from_commit_history(
                root,
                commits,
                tempdir=workdir / "code",
                change_processor=encoder.change_processor,
                edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                silent=True,
  +             time_limit=time_limit_per_commit * (len(commits) + 10),
            )
  +     except Exception as e:
  +         if isinstance(e, KeyboardInterrupt):
  +             raise
  -     except UnicodeDecodeError as e:
  -         # this might happen in rare cases
  +         warnings.warn(f"Failed to process project: {root}\nError: {e}")
  -         warnings.warn(f"Unable to process project: {root}\nError: {e}")
            edits = []
        stats = dict()
        encoder.change_processor.append_stats(stats)
        rec_add_dict_to(stats, {"tlogger": coeditor.code_change._tlogger.times})
        return _ProcessingResult(edits, stats)
    
===========changed ref 1===========
    # module: spot.utils
    def compute_line_diffs(
        before: Sequence[str], after: Sequence[str], keep_explain_lines: bool = False
    ):
        SizeLimit = 8000
        if (
            sum(len(x) for x in before) > SizeLimit
            or sum(len(x) for x in after) > SizeLimit
        ):
  +         return compute_line_diffs_fast(before, after)
  -         compute_line_diffs_fast(before, after)
        differ = difflib.Differ()
        result = []
        for line in differ.compare(before, after):
            assert len(line) >= 2
            tag = line[0]
            if keep_explain_lines or tag != "?":
                result.append(tag + line[2:])
        return result
     | 
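`dataset_from_projects` above breaks each repo's history into chunks so that `pmap` can process them in parallel, keeping one extra overlapping commit per chunk because a chunk of n+1 commits yields n adjacent-commit diffs. A self-contained sketch of that scheme (the function name and the toy assertion are mine):

    import math

    def chunk_history(commits: list, n_chunks: int = 4, min_size: int = 50) -> list[list]:
        # Split into roughly n_chunks pieces of at least min_size commits,
        # with 1 overlapping commit so no adjacent diff is lost at a seam.
        size = max(min_size, math.ceil(len(commits) / n_chunks))
        return [commits[i : i + size + 1] for i in range(0, len(commits), size)]

    # 120 commits -> chunk size 50 (+1 overlap), last chunk takes the rest:
    assert [len(c) for c in chunk_history(list(range(120)))] == [51, 51, 20]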
| 
	coeditor.code_change/edits_from_commit_history | 
	Modified | 
	temp-1 | 
	ab374914700355dc76b7f09d9c8121b7a6855e67 | 
	- Only editing non-function spans during training. - Add time limit to data creation. - Fix fast diff. - Handle value error in `normalize_code_by_ast`. - Add max_total_ref_tks to C3ProblemTokenizer. | 
	 <0>:<add>             time_limit=time_limit,
 | 
	      # module: coeditor.code_change
      def edits_from_commit_history(
          project_dir: Path,
          history: Sequence[CommitInfo],
          tempdir: Path,
          change_processor: ProjectChangeProcessor[TProb] = NoProcessing(),
          edit_encoder: Callable[[TProb], Iterable[TEnc]] = lambda x: [x],
          ignore_dirs=DefaultIgnoreDirs,
          silent: bool = False,
    +     time_limit: float | None = None,
      ) -> Sequence[TEnc]:
          """Incrementally compute the edits to a project from the git history.
          Note that this will change the file states in the project directory, so
          you should make a copy of the project before calling this function.
          """
          tempdir = tempdir.resolve()
          if tempdir.exists():
              raise FileExistsError(f"Workdir '{tempdir}' already exists.")
          use_fast_parser = jedi.settings.fast_parser
          tempdir.mkdir(parents=True, exist_ok=False)
          try:
              run_command(
                  ["cp", "-r", str(project_dir / ".git"), str(tempdir)],
                  cwd=project_dir.parent,
              )
      
              return _edits_from_commit_history(
    +             tempdir,
    +             history,
    +             change_processor,
    +             edit_encoder,
    +             ignore_dirs,
    +             silent,
    -             tempdir, history, change_processor, edit_encoder, ignore_dirs, silent
 <0>          )
          finally:
              shutil.rmtree(tempdir)
              jedi.settings.fast_parser = use_fast_parser
      
       | 
	===========unchanged ref 0===========
    at: coeditor.code_change
        DefaultIgnoreDirs = {".venv", ".mypy_cache", ".git", "venv", "build"}
    
        JProjectChange(changed: Mapping[ModuleName, JModuleChange], all_modules: Modified[Collection[JModule]], commit_info: "CommitInfo | None")
    
        TProb = TypeVar("TProb", covariant=True)
    
        TEnc = TypeVar("TEnc", covariant=True)
    
        ProjectChangeProcessor()
    
        NoProcessing()
    
    at: coeditor.common
        run_command(args: Sequence[str], cwd: str | Path) -> str
    
    at: jedi.settings
        fast_parser = True
    
    at: pathlib
        Path()
    
    at: pathlib.Path
        __slots__ = ()
    
        resolve(strict: bool=...) -> _P
    
        mkdir(mode: int=..., parents: bool=..., exist_ok: bool=...) -> None
    
        exists() -> bool
    
    at: pathlib.PurePath
        __slots__ = (
                '_drv', '_root', '_parts',
                '_str', '_hash', '_pparts', '_cached_cparts',
            )
    
        drive = property(attrgetter('_drv'),
                             doc="""The drive prefix (letter or UNC path), if any.""")
    
        root = property(attrgetter('_root'),
                            doc="""The root of the path, if any.""")
    
    at: typing
        Iterable = _alias(collections.abc.Iterable, 1)
    
        Callable = _CallableType(collections.abc.Callable, 2)
    
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: coeditor.code_change
  - @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  +     def set_training(self, is_training: bool) -> None:
  +         return None
  + 
===========changed ref 1===========
    # module: coeditor.code_change
    @dataclass(frozen=True)
    class ChangedSpan:
  +     def _is_func_body(self) -> bool:
  +         return self.parent_scopes[-1].earlier().tree.type == ptree.Function.type
  + 
===========changed ref 2===========
    # module: coeditor.dataset
  + time_limit_per_commit = 5.0
    
    
===========changed ref 3===========
    # module: spot.utils
    def compute_line_diffs(
        before: Sequence[str], after: Sequence[str], keep_explain_lines: bool = False
    ):
        SizeLimit = 8000
        if (
            sum(len(x) for x in before) > SizeLimit
            or sum(len(x) for x in after) > SizeLimit
        ):
  +         return compute_line_diffs_fast(before, after)
  -         compute_line_diffs_fast(before, after)
        differ = difflib.Differ()
        result = []
        for line in differ.compare(before, after):
            assert len(line) >= 2
            tag = line[0]
            if keep_explain_lines or tag != "?":
                result.append(tag + line[2:])
        return result
    
===========changed ref 4===========
    # module: coeditor.dataset
    def _process_commits(
        root: Path,
        workdir: Path,
        commits: Sequence[CommitInfo],
  +     is_training: bool,
        encoder: C3EditEncoder,
    ) -> _ProcessingResult:
        # use process-specific parso cache
        _fix_jedi_cache(workdir)
        coeditor.code_change._tlogger.clear()
  +     encoder.change_processor.clear_stats()
  +     encoder.change_processor.set_training(is_training)
        try:
            # cannot return here since subprocess will be killed after returning
            edits = edits_from_commit_history(
                root,
                commits,
                tempdir=workdir / "code",
                change_processor=encoder.change_processor,
                edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                silent=True,
  +             time_limit=time_limit_per_commit * (len(commits) + 10),
            )
  +     except Exception as e:
  +         if isinstance(e, KeyboardInterrupt):
  +             raise
  -     except UnicodeDecodeError as e:
  -         # this might happen in rare cases
  +         warnings.warn(f"Failed to process project: {root}\nError: {e}")
  -         warnings.warn(f"Unable to process project: {root}\nError: {e}")
            edits = []
        stats = dict()
        encoder.change_processor.append_stats(stats)
        rec_add_dict_to(stats, {"tlogger": coeditor.code_change._tlogger.times})
        return _ProcessingResult(edits, stats)
    
===========changed ref 5===========
    # module: coeditor.common
    def normalize_code_by_ast(
        code: str, sort_keyargs: bool = True, remove_doc_string: bool = True
    ) -> str:
        """Normalize the code by parsing and unparsing it using the AST module.
        If parsing fails, return the original code."""
    
        class KeyargSorter(ast.NodeTransformer):
            def visit_Call(self, node: ast.Call):
                if node.keywords:
                    node.keywords.sort(key=lambda x: x.arg or "None")
                return node
    
        class DocStringremover(ast.NodeTransformer):
            def visit_FunctionDef(self, node: ast.FunctionDef):
                return self._visit_def(node)
    
            def visit_Module(self, node: ast.Module) -> Any:
                return self._visit_def(node)
    
            def visit_ClassDef(self, node: ast.ClassDef):
                return self._visit_def(node)
    
            def _visit_def(self, node):
                node = as_any(self.generic_visit(node))
                match node.body:
                    case [ast.Expr(value=ast.Constant(value=str())), *body]:
                        node.body = body
                return node
    
        try:
            tree = ast.parse(dedent(code))
            if remove_doc_string:
                tree = DocStringremover().visit(tree)
            if sort_keyargs:
                tree = KeyargSorter().visit(tree)
            return ast.unparse(tree)
  +     except (SyntaxError, ValueError):
  -     except SyntaxError:
            return code
     | 
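Among the changed refs above, the `compute_line_diffs` fix adds the `return` that was missing on the large-input fast path; before the fix, oversized inputs computed the fast diff, discarded it, and fell through to the slow `difflib` path anyway. A standalone sketch of that slow path for reference:

    import difflib

    def line_diffs(before: list[str], after: list[str]) -> list[str]:
        # Keep difflib's tagged lines but drop the '?' intraline hint rows
        # and the separator space, mirroring the helper's default output.
        result = []
        for line in difflib.Differ().compare(before, after):
            if line[0] != "?":
                result.append(line[0] + line[2:])
        return result

    print(line_diffs(["a", "b"], ["a", "c"]))  # [' a', '-b', '+c']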
| 
	coeditor.ctx_change_encoder/C3ProblemGenerator.__init__ | 
	Modified | 
	temp-1 | 
	ab374914700355dc76b7f09d9c8121b7a6855e67 | 
	- Only editing non-function spans during training. - Add time limit to data creation. - Fix fast diff. - Handle value error in `normalize_code_by_ast`. - Add max_total_ref_tks to C3ProblemTokenizer. | 
	 <0>:<add>         self._is_training: bool = False
 | 
	      # module: coeditor.ctx_change_encoder
      class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
          def __init__(self, analyzer: "JediUsageAnalyzer | None" = None):
              if analyzer is None:
                  analyzer = JediUsageAnalyzer()
    + 
              self.analyzer = analyzer
    +         # whether to only generate problems for editing functions
 <0>  
       | 
	===========unchanged ref 0===========
    at: coeditor.ctx_change_encoder
        JediUsageAnalyzer()
    
    at: coeditor.ctx_change_encoder.C3ProblemGenerator
        VERSION = "1.1"
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     VERSION = "1.2"
  -     VERSION = "1.1"
    
===========changed ref 1===========
    # module: coeditor.code_change
  - @dataclass
    class ProjectChangeProcessor(Generic[TProb], ABC):
  +     def set_training(self, is_training: bool) -> None:
  +         return None
  + 
===========changed ref 2===========
    # module: coeditor.code_change
  + _Second = float
    
    
===========changed ref 3===========
    # module: coeditor.dataset
  + time_limit_per_commit = 5.0
    
    
===========changed ref 4===========
    # module: coeditor.code_change
    @dataclass(frozen=True)
    class ChangedSpan:
  +     def _is_func_body(self) -> bool:
  +         return self.parent_scopes[-1].earlier().tree.type == ptree.Function.type
  + 
===========changed ref 5===========
    # module: spot.utils
    def compute_line_diffs(
        before: Sequence[str], after: Sequence[str], keep_explain_lines: bool = False
    ):
        SizeLimit = 8000
        if (
            sum(len(x) for x in before) > SizeLimit
            or sum(len(x) for x in after) > SizeLimit
        ):
  +         return compute_line_diffs_fast(before, after)
  -         compute_line_diffs_fast(before, after)
        differ = difflib.Differ()
        result = []
        for line in differ.compare(before, after):
            assert len(line) >= 2
            tag = line[0]
            if keep_explain_lines or tag != "?":
                result.append(tag + line[2:])
        return result
    
===========changed ref 6===========
    # module: coeditor.code_change
    def edits_from_commit_history(
        project_dir: Path,
        history: Sequence[CommitInfo],
        tempdir: Path,
        change_processor: ProjectChangeProcessor[TProb] = NoProcessing(),
        edit_encoder: Callable[[TProb], Iterable[TEnc]] = lambda x: [x],
        ignore_dirs=DefaultIgnoreDirs,
        silent: bool = False,
  +     time_limit: float | None = None,
    ) -> Sequence[TEnc]:
        """Incrementally compute the edits to a project from the git history.
        Note that this will change the file states in the project directory, so
        you should make a copy of the project before calling this function.
        """
        tempdir = tempdir.resolve()
        if tempdir.exists():
            raise FileExistsError(f"Workdir '{tempdir}' already exists.")
        use_fast_parser = jedi.settings.fast_parser
        tempdir.mkdir(parents=True, exist_ok=False)
        try:
            run_command(
                ["cp", "-r", str(project_dir / ".git"), str(tempdir)],
                cwd=project_dir.parent,
            )
    
            return _edits_from_commit_history(
  +             tempdir,
  +             history,
  +             change_processor,
  +             edit_encoder,
  +             ignore_dirs,
  +             silent,
  +             time_limit=time_limit,
  -             tempdir, history, change_processor, edit_encoder, ignore_dirs, silent
            )
        finally:
            shutil.rmtree(tempdir)
            jedi.settings.fast_parser = use_fast_parser
    
===========changed ref 7===========
    # module: coeditor.dataset
    def _process_commits(
        root: Path,
        workdir: Path,
        commits: Sequence[CommitInfo],
  +     is_training: bool,
        encoder: C3EditEncoder,
    ) -> _ProcessingResult:
        # use process-specific parso cache
        _fix_jedi_cache(workdir)
        coeditor.code_change._tlogger.clear()
  +     encoder.change_processor.clear_stats()
  +     encoder.change_processor.set_training(is_training)
        try:
            # cannot return here since subprocess will be killed after returning
            edits = edits_from_commit_history(
                root,
                commits,
                tempdir=workdir / "code",
                change_processor=encoder.change_processor,
                edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                silent=True,
  +             time_limit=time_limit_per_commit * (len(commits) + 10),
            )
  +     except Exception as e:
  +         if isinstance(e, KeyboardInterrupt):
  +             raise
  -     except UnicodeDecodeError as e:
  -         # this might happen in rare cases
  +         warnings.warn(f"Failed to process project: {root}\nError: {e}")
  -         warnings.warn(f"Unable to process project: {root}\nError: {e}")
            edits = []
        stats = dict()
        encoder.change_processor.append_stats(stats)
        rec_add_dict_to(stats, {"tlogger": coeditor.code_change._tlogger.times})
        return _ProcessingResult(edits, stats)
    
===========changed ref 8===========
    # module: coeditor.common
    def normalize_code_by_ast(
        code: str, sort_keyargs: bool = True, remove_doc_string: bool = True
    ) -> str:
        """Normalize the code by parsing and unparsing it using the AST module.
        If parsing fails, return the original code."""
    
        class KeyargSorter(ast.NodeTransformer):
            def visit_Call(self, node: ast.Call):
                if node.keywords:
                    node.keywords.sort(key=lambda x: x.arg or "None")
                return node
    
        class DocStringremover(ast.NodeTransformer):
            def visit_FunctionDef(self, node: ast.FunctionDef):
                return self._visit_def(node)
    
            def visit_Module(self, node: ast.Module) -> Any:
                return self._visit_def(node)
    
            def visit_ClassDef(self, node: ast.ClassDef):
                return self._visit_def(node)
    
            def _visit_def(self, node):
                node = as_any(self.generic_visit(node))
                match node.body:
                    case [ast.Expr(value=ast.Constant(value=str())), *body]:
                        node.body = body
                return node
    
        try:
            tree = ast.parse(dedent(code))
            if remove_doc_string:
                tree = DocStringremover().visit(tree)
            if sort_keyargs:
                tree = KeyargSorter().visit(tree)
            return ast.unparse(tree)
  +     except (SyntaxError, ValueError):
  -     except SyntaxError:
            return code
     | 
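The `_is_training` flag initialized above pairs with the new `ProjectChangeProcessor.set_training` hook: generators default to evaluation behavior and `_process_commits` flips the mode per worker. A minimal sketch of the toggle and the predicate it feeds (class and method names here are illustrative):

    class GeneratorModeSketch:
        def __init__(self) -> None:
            self._is_training: bool = False  # evaluation mode by default

        def set_training(self, is_training: bool) -> None:
            self._is_training = is_training

        def should_make_problem(self, is_modified: bool, is_func_body: bool) -> bool:
            # During training any modified span qualifies; at test time
            # only function-body edits become problems.
            return is_modified and (self._is_training or is_func_body)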
| 
	coeditor.ctx_change_encoder/C3ProblemGenerator.process_change | 
	Modified | 
	temp-1 | 
	ab374914700355dc76b7f09d9c8121b7a6855e67 | 
	- Only editing non-function spans during training. - Add time limit to data creation. - Fix fast diff. - Handle value error in `normalize_code_by_ast`. - Add max_total_ref_tks to C3ProblemTokenizer. | 
	 <0>:<add>                 if should_mk_problem:
 | 
	      # module: coeditor.ctx_change_encoder
      class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
          def process_change(
              self,
              pchange: JProjectChange,
              mod2usages: Mapping[ModuleName, LineUsageAnalysis],
              module_order: Sequence[ModuleName],
          ) -> Sequence[C3Problem]:
      <s>
                              sorted_defs.append(pydef)
      
                  # return unique cspans
                  seen = set[tuple[ModuleName, LineRange]]()
                  # we don't need to show the changed parts again
                  for cspan in (this_change, *other_changes):
                      seen.add((cspan.path.module, cspan.line_range))
                  result = list[ChangedSpan]()
                  for used in sorted_defs:
                      for cspan in get_def_spans(used):
                          key = (cspan.path.module, cspan.line_range)
                          if key not in seen:
                              result.append(cspan)
                              seen.add(key)
                  return result
      
              processed_cspans = list[ChangedSpan]()
              problems = list[C3Problem]()
              for m in module_order:
                  if (mchange := pchange.changed.get(m)) is None:
                      continue
                  for span in mchange.changed.values():
    +                 should_mk_problem = (span.change.as_char() == Modified.as_char()) and (
    -                 if span.change.as_char() == Modified.as_char():
    +                     # only consider function edits at test time
    +                     self._is_training
    +                     or span._is_func_body()
    +                 )
 <0>                      # latest changes are more relevant
                          relevant_changes = list(reversed(processed_cspans))
                          prob = C3Problem(
                              span,
                              relevant_changes=relevant_changes,
                              relevant_unchanged=get_relevant_unchanged(
                                  span, relevant_changes
                              ),
                              src_info={"commit": pchange.commit_info},
                          )
                          problems.append(prob)
                      processed_cspans.append(span)
              return problems
      
       | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Sequence[C3Problem]:
    # offset: -1
    <s>
                    body_code = stmt_span.code
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
                        stmt_span.line_range,
                    )
                    cspans.append(cspan)
    
                cspan_cache[used] = cspans
                return cspans
    
            def get_relevant_unchanged(
                this_change: ChangedSpan, other_changes: Sequence[ChangedSpan]
            ):
                if isinstance(this_change.change, Added):
                    # nothing to analyze
                    return []
                path = this_change.path
                line_usages = mod2usages[path.module]
                # parent defs are also considered as used
                parent_defs = [
                    PyDefinition.from_scope(c.earlier()) for c in this_change.parent_scopes
                ]
                # immediate parents are more relevant
                sorted_defs = list(reversed(parent_defs))
                used_defs = set(sorted_defs)
                all_lines = set(range(*this_change.line_range))
                all_lines.update(range(*this_change.header_line_range))
                for l in all_lines:
                    for pydef in line_usages.line2usages.get(l, set()):
                        if (
                            pydef.full_name.startswith(path.module)
                            and pydef.start_pos[0] in all_lines
                        ):
                            # skip self references
                            continue
                        if pydef not in used_defs:
                            used_defs.add(pydef)
                            sorted_defs.append(pydef)
    
                # return unique cspans
                seen = set[tuple[ModuleName, Line</s>
===========above chunk 1===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Sequence[C3Problem]:
    # offset: -2
    <s> match elem:
                    case ChangeScope(tree=ptree.Function()):
                        func_scopes.append(elem)
                    case ChangeScope(tree=ptree.Class()):
                        # add all attrs and methods
                        stmt_spans.extend(elem.spans)
                        func_scopes.extend(
                            s
                            for s in elem.subscopes.values()
                            if isinstance(s.tree, ptree.Function)
                        )
                    case StatementSpan():
                        stmt_spans.append(elem)
    
                # add collapsed functions
                for f_scope in func_scopes:
                    ancestors = f_scope.ancestors()
                    stmts = f_scope.spans[-1].statements
                    body_code = stmts[-1].get_code().strip("\n")
                    if len(stmts) > 1:
                        ellipsis = "    " * (len(ancestors) - 1) + "# ...\n"
                        body_code = ellipsis + body_code
  -                 h_end = f_scope.header_line_range[1]
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_code),
                        [Modified.from_unchanged(s) for s in ancestors],
  +                     f_scope.spans[-1].line_range,
  -                     line_range(h_end, h_end + len(body_code)),
                    )
                    cspans.append(cspan)
    
                # add statement spans
                for stmt_span in stmt_spans:
                    ancestors = stmt_span.scope.ancestors()
                    body_code = stmt_span.code
                    cspan = ChangedSpan(
                        Modified.from_unchanged(body_</s>
===========above chunk 2===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
        def process_change(
            self,
            pchange: JProjectChange,
            mod2usages: Mapping[ModuleName, LineUsageAnalysis],
            module_order: Sequence[ModuleName],
        ) -> Sequence[C3Problem]:
    # offset: -3
            before_mod_map = {m.mname: m for m in pchange.all_modules.before}
            mod_hier = ModuleHierarchy.from_modules(before_mod_map)
            cspan_cache = dict[PyDefinition, list[ChangedSpan]]()
    
            def get_def_spans(used: PyDefinition) -> list[ChangedSpan]:
                "Get the (pre-edit) spans for the given definition."
                if used.full_name in cspan_cache:
                    return cspan_cache[used.full_name]
                path = mod_hier.resolve_path(split_dots(used.full_name))
                cspans = list[ChangedSpan]()
                if path is None or (jmod := before_mod_map.get(path.module)) is None:
                    cspan_cache[used] = cspans
                    return cspans
                scope = jmod.as_scope
                elem = scope._search(path.path, used.start_pos[0])
                func_scopes = list[ChangeScope]()
                stmt_spans = list[StatementSpan]()</s>
===========unchanged ref 0===========
    at: coeditor._utils
        split_dots(path: str) -> tuple[str, ...]
    
    at: coeditor.code_change
        LineRange = NewType("LineRange", tuple[int, int])
    
        line_range(start: int, end: int, can_be_empty: bool=False) -> LineRange
    
        ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None")
    
        StatementSpan(nth_in_parent: int, statements: Sequence[PyNode], scope: ChangeScope)
    
        ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange)
    
        JProjectChange(changed: Mapping[ModuleName, JModuleChange], all_modules: Modified[Collection[JModule]], commit_info: "CommitInfo | None")
    
    at: coeditor.code_change.ChangeScope
        path: ProjectPath
    
        tree: ScopeTree
    
        spans: Sequence["StatementSpan"]
    
        subscopes: Mapping[str, Self]
    
        parent_scope: "ChangeScope | None"
    
        ancestors() -> list[Self]
    
    at: coeditor.code_change.ChangeScope.__post_init__
        self.header_line_range: LineRange = header_line_range
    
    at: coeditor.code_change.ChangedSpan
        change: Change[str]
    
        parent_scopes: Sequence[Change[ChangeScope]]
    
        line_range: LineRange
    
    at: coeditor.code_change.JModuleChange
        module_change: Change[JModule]
    
        changed: Mapping[ProjectPath, ChangedSpan]
    
    at: coeditor.code_change.JProjectChange
        changed: Mapping[ModuleName, JModuleChange]
    
        all_modules: Modified[Collection[JModule]]
    
        commit_info: "CommitInfo | None"
    
    at: coeditor.code_change.StatementSpan
        nth_in_parent: int
    
        statements: Sequence[PyNode]
    
        scope: ChangeScope
    
     | 
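`process_change` above de-duplicates the relevant unchanged context by seeding a `seen` set with the `(module, line_range)` keys of spans that are already shown as changes. A small sketch of that filter using plain tuples in place of `ChangedSpan` (the data shapes are hypothetical):

    def unique_spans(changed: list[tuple], candidates: list[tuple]) -> list[tuple]:
        # Spans are keyed by (module, line_range); anything already shown
        # as a change is never repeated in the unchanged context.
        seen = {(mod, rng) for mod, rng, _ in changed}
        result = []
        for mod, rng, code in candidates:
            if (mod, rng) not in seen:
                result.append((mod, rng, code))
                seen.add((mod, rng))
        return result

    print(unique_spans([("m", (1, 3), "x = 1")],
                       [("m", (1, 3), "x = 1"), ("m", (5, 8), "def f(): ...")]))
    # [('m', (5, 8), 'def f(): ...')]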
| 
	coeditor.dataset/_process_commits | 
	Modified | 
	temp-1 | 
	ec882028fee3333b952c7e20e8051e7a58402a6d | 
	Switch to TkC3Problem in retrieval model code. | 
	 <0>:<add>         traceback.print_exception(e, limit=-6)
 | 
	      # module: coeditor.dataset
      def _process_commits(
          root: Path,
          workdir: Path,
          commits: Sequence[CommitInfo],
          is_training: bool,
          encoder: C3EditEncoder,
      ) -> _ProcessingResult:
          # use process-specific parso cache
          _fix_jedi_cache(workdir)
          coeditor.code_change._tlogger.clear()
          encoder.change_processor.clear_stats()
          encoder.change_processor.set_training(is_training)
          try:
              # cannot return here since subprocess will be killed after returning
              edits = edits_from_commit_history(
                  root,
                  commits,
                  tempdir=workdir / "code",
                  change_processor=encoder.change_processor,
                  edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                  silent=True,
                  time_limit=time_limit_per_commit * (len(commits) + 10),
              )
          except Exception as e:
              if isinstance(e, KeyboardInterrupt):
                  raise
              warnings.warn(f"Failed to process project: {root}\nError: {e}")
 <0>          edits = []
          stats = dict()
          encoder.change_processor.append_stats(stats)
          rec_add_dict_to(stats, {"tlogger": coeditor.code_change._tlogger.times})
          return _ProcessingResult(edits, stats)
      
       | 
	===========unchanged ref 0===========
    at: _warnings
        warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
        warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
    
    at: coeditor.dataset
        C3EditEncoder(change_processor: ProjectChangeProcessor[C3Problem]=field(
                default_factory=C3ProblemGenerator
            ), edit_tokenizer: C3ProblemTokenizer=field(default_factory=C3ProblemTokenizer))
    
        _ProcessingResult(edits: Sequence[TkC3Problem], stats: dict[str, dict | Any])
    
    at: coeditor.dataset.C3EditEncoder
        change_processor: ProjectChangeProcessor[C3Problem] = field(
                default_factory=C3ProblemGenerator
            )
    
        edit_tokenizer: C3ProblemTokenizer = field(default_factory=C3ProblemTokenizer)
    
    at: pathlib
        Path()
    
    at: traceback
        print_exception(etype: Optional[Type[BaseException]], value: Optional[BaseException], tb: Optional[TracebackType], limit: Optional[int]=..., file: Optional[IO[str]]=..., chain: bool=...) -> None
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: coeditor.dataset
  + time_limit_per_commit = 10.0
  - time_limit_per_commit = 5.0
     | 
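The added `traceback.print_exception(e, limit=-6)` leans on two stdlib details: since Python 3.10 the function accepts a bare exception instance, and a negative `limit` prints only the last `abs(limit)` stack entries, i.e. the frames closest to the failure. A quick illustration:

    import traceback

    def deep(n: int) -> None:
        if n == 0:
            raise ValueError("boom")
        deep(n - 1)

    try:
        deep(10)
    except ValueError as e:
        # Prints only the innermost 3 frames of the recursive traceback.
        traceback.print_exception(e, limit=-3)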
| 
	coeditor.dataset/dataset_from_projects | 
	Modified | 
	temp-1 | 
	ec882028fee3333b952c7e20e8051e7a58402a6d | 
	Switch to TkC3Problem in retrieval model code. | 
	 <0>:<add>         history_chunk_size = max(50, math.ceil(len(h) / 10))
 | 
	      # module: coeditor.dataset
      def dataset_from_projects(
          project_roots: Sequence[Path],
          encoder: C3EditEncoder,
          repo_training: Sequence[bool],
          max_history_per_repo: int = 1000,
          workers: int = DefaultWorkers,
      ) -> "TokenizedEditDataset[TkC3Problem]":
      <s>
          workdir = Path(tempfile.gettempdir()) / "dataset_from_projects"
          histories = pmap(
              get_commit_history,
              project_roots,
              max_workers=workers,
              desc="Getting commit histories",
              tqdm_args={"unit": "repo"},
          )
          # keep the oldest portion of the history
          histories = [commits[-max_history_per_repo:] for commits in histories]
          # break long commit sequences into chunks for parallelization
          roots = list[Path]()
          chunk_training = list[bool]()
          chunked_histories = list[list[CommitInfo]]()
          for root, h, train in zip(project_roots, histories, repo_training):
    -         history_chunk_size = max(50, math.ceil(len(h) / 4))
 <0>          for i in range(0, len(h), history_chunk_size):
                  roots.append(root)
                  chunk_training.append(train)
                  # note that we need 1 extra overlapping commit to get all diffs
                  chunked_histories.append(h[i : i + history_chunk_size + 1])
          workdirs = [workdir / f"chunk-{i}" for i in range(len(roots))]
          try:
              presults = pmap(
                  _process_commits,
                  roots,
                  workdirs,
                  chunked_histories,
                  chunk_training,
                  key_args={"encoder": encoder},
                  desc="Create tokenized edits",
                  max_workers=workers,
                  tqdm_args={"unit": "chunk"},
              )
          finally:
              if workdir.exists():
                  shutil.rmtree(workdir)
                  print("Workdir removed:", workdir)
      
          project2edits</s> | 
	===========above chunk 0===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
        encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
    ) -> "TokenizedEditDataset[TkC3Problem]":
    # offset: -1
        """
        Create a TokenizedEditDataset from a list of project roots and a given encoder.
        Args:
            - max_history_per_repo (int, optional): When the repo history is longer than
            this value, only the oldest portion is going to be used. Defaults to 1000.
        """
        workdir = Path(tempfile.gettempdir()) / "dataset_from_projects"
        histories = pmap(
    </s>
===========below chunk 0===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
        encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
    ) -> "TokenizedEditDataset[TkC3Problem]":
    # offset: 1
    <s>exists():
                shutil.rmtree(workdir)
                print("Workdir removed:", workdir)
    
        project2edits = dict[Path, list[TkC3Problem]]()
    
        try:
            stats = dict[str, Any]()
            for root, pr in zip(roots, presults):
                project2edits.setdefault(root, []).extend(pr.edits)
                rec_add_dict_to(stats, pr.stats)
    
            if "tlogger" in stats:
                df = TimeLogger.times_to_dataframe(stats.pop("tlogger"))
                print("Time stats:")
                display(df)
            if "analyzer_errors" in list(stats.keys()):
                errors: dict = stats.pop("analyzer_errors")
                for k in list(errors.keys()):
                    if JediUsageAnalyzer.is_known_error(k):
                        errors.pop(k)
                if errors:
                    print("Analyzer errors:")
                    for k in sorted(errors.keys(), key=lambda k: errors[k], reverse=True):
                        print(f"{k}:\t{errors[k]}")
            if stats:
                print("Other Stats:")
                pretty_print_dict(stats)
        except Exception as e:
            if not isinstance(e, KeyboardInterrupt):
                print("Error while printing stats:", e)
    
        return TokenizedEditDataset(project2edits)
    
    
===========unchanged ref 0===========
    at: IPython.core.display_functions
        display(*, include=None, exclude=None, metadata=None, transient=None, display_id=None, raw=False, clear=False, source=_sentinel, **kwargs)
    
    at: coeditor._utils
        DefaultWorkers: int = multiprocessing.cpu_count() // 2
        global DefaultWorkers
    
        pmap(f: Callable[..., T1], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], /, *iterables: Iterable[Any], desc: str | None=None, key_args: Mapping[str, Any] | None=None, max_workers: int | None=None, chunksize: int | None=None, tqdm_args: Mapping[str, Any] | None=None) -> list[T1]
    
        TimeLogger(times: dict[str, list[float]]=field(default_factory=dict))
    
    at: coeditor._utils.TimeLogger
        times: dict[str, list[float]] = field(default_factory=dict)
    
        times_to_dataframe(times: dict[str, list[float]])
    
    at: coeditor.common
        rec_add_dict_to(target: dict[str, Any], value: Mapping[str, Any], value_merger: Callable[[Any, Any], Any]=lambda x, y: x + y)
    
    at: coeditor.dataset
        C3EditEncoder(change_processor: ProjectChangeProcessor[C3Problem]=field(
                default_factory=C3ProblemGenerator
            ), edit_tokenizer: C3ProblemTokenizer=field(default_factory=C3ProblemTokenizer))
    
        _ProcessingResult(edits: Sequence[TkC3Problem], stats: dict[str, dict | Any])
    
        _process_commits(root: Path, workdir: Path, commits: Sequence[CommitInfo], is_training: bool, encoder: C3EditEncoder) -> _ProcessingResult
    
    at: coeditor.dataset._ProcessingResult
        edits: Sequence[TkC3Problem]
    
        stats: dict[str, dict | Any]
    
    
===========unchanged ref 1===========
    at: coeditor.dataset._process_commits
        edits = edits_from_commit_history(
                    root,
                    commits,
                    tempdir=workdir / "code",
                    change_processor=encoder.change_processor,
                    edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                    silent=True,
                    time_limit=time_limit_per_commit * (len(commits) + 10),
                )
        edits = []
    
        stats = dict()
    
    at: math
        ceil(x: SupportsFloat, /) -> int
    
    at: pathlib
        Path()
    
    at: pathlib.Path
        __slots__ = ()
    
        exists() -> bool
    
    at: shutil
        rmtree(path: Union[bytes, StrPath], ignore_errors: bool=..., onerror: Optional[Callable[[Any, Any, Any], Any]]=...) -> None
    
    at: spot.utils
        pretty_print_dict(d: dict, level: int=0, max_show_level: int=1000, float_precision: int=5)
    
    at: tempfile
        gettempdir() -> str
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
    
    at: typing.MutableMapping
        pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
        pop(key: _KT) -> _VT
    
    
===========changed ref 0===========
    # module: coeditor.dataset
    def _process_commits(
        root: Path,
        workdir: Path,
        commits: Sequence[CommitInfo],
        is_training: bool,
        encoder: C3EditEncoder,
    ) -> _ProcessingResult:
        # use process-specific parso cache
        _fix_jedi_cache(workdir)
        coeditor.code_change._tlogger.clear()
        encoder.change_processor.clear_stats()
        encoder.change_processor.set_training(is_training)
        try:
            # cannot return here since subprocess will be killed after returning
            edits = edits_from_commit_history(
                root,
                commits,
                tempdir=workdir / "code",
                change_processor=encoder.change_processor,
                edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                silent=True,
                time_limit=time_limit_per_commit * (len(commits) + 10),
            )
        except Exception as e:
            if isinstance(e, KeyboardInterrupt):
                raise
            warnings.warn(f"Failed to process project: {root}\nError: {e}")
  +         traceback.print_exception(e, limit=-6)
            edits = []
        stats = dict()
        encoder.change_processor.append_stats(stats)
        rec_add_dict_to(stats, {"tlogger": coeditor.code_change._tlogger.times})
        return _ProcessingResult(edits, stats)
    
===========changed ref 1===========
    # module: coeditor.dataset
  + time_limit_per_commit = 10.0
  - time_limit_per_commit = 5.0
     | 
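The `sorted(...)` fix above changes the analyzer-error report from reverse-alphabetical order over keys to most-frequent-first, by sorting the keys on their counts. With toy counts:

    errors = {"timeout": 3, "parse-error": 12, "io-error": 1}  # toy data
    for k in sorted(errors.keys(), key=lambda k: errors[k], reverse=True):
        print(f"{k}:\t{errors[k]}")
    # parse-error first (12), then timeout (3), then io-error (1)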
| 
	coeditor.retrieval_model/_BatchSampler.estimate_n_batches | 
	Modified | 
	temp-1 | 
	ec882028fee3333b952c7e20e8051e7a58402a6d | 
	Switch to TkC3Problem in retrieval model code. | 
	 <0>:<add>         batches = tk_edits_to_batches(self.all_edits, self.batch_args, silent=True)
 | 
	      # module: coeditor.retrieval_model
      @dataclass
      class _BatchSampler:
          def estimate_n_batches(self) -> int:
    -         batches = query_edits_to_batches(self.all_edits, self.batch_args, silent=True)
 <0>          return len(batches)
      
       | 
	===========unchanged ref 0===========
    at: coeditor.retrieval_model.pad_token_seqs
        max_len = max(len(ref) for ref in seqs)
    
        pad_id = PAD_id
    
    
===========changed ref 0===========
    # module: coeditor.retrieval_model
    @dataclass
    class _BatchSampler:
  +     all_edits: list[TkC3Problem]
  -     all_edits: list[BasicTkQueryEdit]
        batch_args: BatchArgs
        shuffle: bool
        desc: str
        tqdm_args: dict | None = None
    
===========changed ref 1===========
    # module: coeditor.retrieval_model
    @dataclass
    class BatchArgs:
        @classmethod
        def eval_default(cls) -> Self:
            return BatchArgs(
                max_total_ref_tks=512 * 32,
                max_queries=32,
  -             max_ref_dropout=0.0,
                shuffle_extra_ids=False,
            )
    
===========changed ref 2===========
    # module: coeditor.retrieval_model
    @dataclass
    class BatchArgs:
        max_output_tks: int = 256
        max_query_tks: int = 512
        min_queires: int = 1
        max_queries: int = 8
        max_ref_tks: int = 512
        max_total_ref_tks: int = 512 * 16
  -     max_ref_dropout: float = 1.0
        shuffle_extra_ids: bool = True
  -     use_only_modified: bool = True
    
===========changed ref 3===========
    # module: coeditor.dataset
  + time_limit_per_commit = 10.0
  - time_limit_per_commit = 5.0
    
===========changed ref 4===========
    # module: coeditor.retrieval_model
  + def tk_edits_to_batches(
  +     query_edits: Sequence[TkC3Problem],
  +     args: BatchArgs,
  +     silent: bool = False,
  + ) -> list[dict]:
  +     def process_edit(e: TkC3Problem):
  +         labels = e.output_tks
  + 
  +         labels = wrap_bos(labels)
  + 
  +         if len(labels) > args.max_output_tks:
  +             labels = labels[: args.max_output_tks]
  + 
  +         input_ids = e.input_tks
  + 
  +         if args.shuffle_extra_ids and random.random() < 0.5:
  +             id_map = random_extra_id_map()
  +             input_ids = [id_map.get(tk, tk) for tk in input_ids]
  +             labels = [id_map.get(tk, tk) for tk in labels]
  + 
  +         return input_ids, labels
  + 
  +     cost_limit = args.cost_limit()
  +     warned_batch_size = False
  + 
  +     def edits_to_batches(
  +         edits: Sequence[TkC3Problem],
  +     ) -> Iterable[dict]:
  +         def pack_batch(rows: list[dict]):
  +             assert rows, "empty batch found"
  +             input_ids = [x["input_tks"] for x in rows]
  +             labels = [x["output_tks"] for x in rows]
  +             refs = [x["references"] for x in rows]
  +             id2ref = {id(ref): ref for row in refs for ref in row}
  +             references = [id2ref[x] for x in id2ref]
  +             id2order = {x: i for i, x in enumerate(id2ref)}
  +             query_ref_list = [[id2order[id(ref)] for ref in row] for row in refs]
  +             return {
  +                 "input_ids": input_ids,
  +                 "references": references,
  +                 "query_</s>
===========changed ref 5===========
    # module: coeditor.retrieval_model
  + def tk_edits_to_batches(
  +     query_edits: Sequence[TkC3Problem],
  +     args: BatchArgs,
  +     silent: bool = False,
  + ) -> list[dict]:
    # offset: 1
    <s> <add>             return {
  +                 "input_ids": input_ids,
  +                 "references": references,
  +                 "query_ref_list": query_ref_list,
  +                 "labels": labels,
  +             }
  + 
  +         # sample references for each query
  +         current_batch = []
  +         current_cost = 0
  +         for edit in tqdm(edits, desc="edits_to_batches", disable=silent):
  +             all_refs = [x[1] for x in edit.named_references]
  +             ref_size_sum = 0
  +             ref_selected = list[TokenSeq]()
  +             for ref in all_refs:
  +                 if ref_size_sum + len(ref) <= args.max_total_ref_tks:
  +                     ref_selected.append(ref)
  +                     ref_size_sum += len(ref)
  +             input_tks, output_tks = process_edit(edit)
  +             cost = retrieval_cost_model(
  +                 ref_size=sum(len(x) for x in ref_selected),
  +                 query_size=len(input_tks),
  +                 output_size=len(output_tks),
  +             )
  +             row = {
  +                 "input_tks": input_tks,
  +                 "output_tks": output_tks,
  +                 "references": ref_selected,
  +             }
  +             nonlocal warned_batch_size
  +             if cost > cost_limit and not warned_batch_size:
  +                 warned_batch_size = True
  +                 warnings.warn("Batch cost limit is too small.")
  +             if (not current_batch) or (
  +                 cost +</s>
===========changed ref 6===========
    # module: coeditor.retrieval_model
  + def tk_edits_to_batches(
  +     query_edits: Sequence[TkC3Problem],
  +     args: BatchArgs,
  +     silent: bool = False,
  + ) -> list[dict]:
    # offset: 2
    <s>cost <= cost_limit
  +                 and len(current_batch) < args.max_queries
  +             ):
  +                 current_batch.append(row)
  +                 current_cost += cost
  +             else:
  +                 yield pack_batch(current_batch)
  +                 current_batch = [row]
  +                 current_cost = cost
  +         if current_batch:
  +             yield pack_batch(current_batch)
  + 
  +     batches = list[dict]()
  +     bsizes = list[int]()
  +     for batch in edits_to_batches(query_edits):
  +         batches.append(batch)
  +         bsizes.append(len(batch["input_ids"]))
  + 
  +     batch_stats = {k: f"{v:.1f}" for k, v in scalar_stats(bsizes).items()}
  +     if not silent:
  +         cprint("blue", f"num batches: {len(batches)},", f"batch stats: {batch_stats}")
  + 
  +     return batches
  + 
===========changed ref 7===========
    # module: coeditor.retrieval_model
    class RetrievalEditorModel(T5PreTrainedModel):
        def eval_loss_on_data(
            self, data: TokenizedEditDataset, batch_args: "BatchArgs"
        ) -> dict[str, WeightedSum]:
            batch_args = copy.deepcopy(batch_args)
  -         batch_args.max_ref_dropout = 0.0
            batch_args.shuffle_extra_ids = False
            eval_loader = edits_to_dataloader(
                data.all_edits(),
                args=batch_args,
                shuffle=False,
                desc="Eval Epoch",
            )
            return self.eval_loss_on_loader(eval_loader)
     | 
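`tk_edits_to_batches` above selects references greedily per edit: it walks them in order and keeps each one that still fits under the `max_total_ref_tks` token budget, skipping any that would overflow. A self-contained sketch of just that selection step (names and the toy assertion are mine):

    def select_refs(refs: list[list[int]], budget: int) -> list[list[int]]:
        # Greedy selection: an oversized reference is skipped, but later
        # smaller ones may still be taken if they fit the remaining budget.
        selected, used = [], 0
        for ref in refs:
            if used + len(ref) <= budget:
                selected.append(ref)
                used += len(ref)
        return selected

    refs = [[0] * 300, [0] * 400, [0] * 200]
    assert [len(r) for r in select_refs(refs, budget=512)] == [300, 200]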
| 
	coeditor.retrieval_model/_BatchSampler.__iter__ | 
	Modified | 
	temp-1 | 
	ec882028fee3333b952c7e20e8051e7a58402a6d | 
	Switch to TkC3Problem in retrieval model code. | 
	 <0>:<add>         batches = tk_edits_to_batches(self.all_edits, self.batch_args)
 | 
	      # module: coeditor.retrieval_model
      @dataclass
      class _BatchSampler:
          def __iter__(self) -> Iterable[Mapping]:
              if self.shuffle:
                  random.shuffle(self.all_edits)
    -         batches = query_edits_to_batches(self.all_edits, self.batch_args)
 <0>          if self.shuffle:
                  random.shuffle(batches)
      
              tqdm_args = self.tqdm_args or {"smoothing": 0.0}
              for b in tqdm(batches, desc=self.desc + f" {self.epochs}", **tqdm_args):
                  input_ids = pad_token_seqs(b["input_ids"])
                  labels = pad_token_seqs(b["labels"], pad_id=-100)
                  yield {
                      "input_ids": input_ids,
                      "references": b["references"],
                      "query_ref_list": b["query_ref_list"],
                      "labels": labels,
                  }
              self.epochs += 1
      
       | 
	===========unchanged ref 0===========
    at: coeditor.retrieval_model.pad_token_seqs
        rows = []
    
    at: math
        ceil(x: SupportsFloat, /) -> int
    
    
===========changed ref 0===========
    # module: coeditor.retrieval_model
    @dataclass
    class _BatchSampler:
        def estimate_n_batches(self) -> int:
  +         batches = tk_edits_to_batches(self.all_edits, self.batch_args, silent=True)
  -         batches = query_edits_to_batches(self.all_edits, self.batch_args, silent=True)
            return len(batches)
    
===========changed ref 1===========
    # module: coeditor.retrieval_model
    @dataclass
    class _BatchSampler:
  +     all_edits: list[TkC3Problem]
  -     all_edits: list[BasicTkQueryEdit]
        batch_args: BatchArgs
        shuffle: bool
        desc: str
        tqdm_args: dict | None = None
    
===========changed ref 2===========
    # module: coeditor.retrieval_model
    @dataclass
    class BatchArgs:
        @classmethod
        def eval_default(cls) -> Self:
            return BatchArgs(
                max_total_ref_tks=512 * 32,
                max_queries=32,
  -             max_ref_dropout=0.0,
                shuffle_extra_ids=False,
            )
    
===========changed ref 3===========
    # module: coeditor.retrieval_model
    @dataclass
    class BatchArgs:
        max_output_tks: int = 256
        max_query_tks: int = 512
        min_queires: int = 1
        max_queries: int = 8
        max_ref_tks: int = 512
        max_total_ref_tks: int = 512 * 16
  -     max_ref_dropout: float = 1.0
        shuffle_extra_ids: bool = True
  -     use_only_modified: bool = True
    
===========changed ref 4===========
    # module: coeditor.dataset
  + time_limit_per_commit = 10.0
  - time_limit_per_commit = 5.0
    
===========changed ref 5===========
    # module: coeditor.retrieval_model
  + def tk_edits_to_batches(
  +     query_edits: Sequence[TkC3Problem],
  +     args: BatchArgs,
  +     silent: bool = False,
  + ) -> list[dict]:
  +     def process_edit(e: TkC3Problem):
  +         labels = e.output_tks
  + 
  +         labels = wrap_bos(labels)
  + 
  +         if len(labels) > args.max_output_tks:
  +             labels = labels[: args.max_output_tks]
  + 
  +         input_ids = e.input_tks
  + 
  +         if args.shuffle_extra_ids and random.random() < 0.5:
  +             id_map = random_extra_id_map()
  +             input_ids = [id_map.get(tk, tk) for tk in input_ids]
  +             labels = [id_map.get(tk, tk) for tk in labels]
  + 
  +         return input_ids, labels
  + 
  +     cost_limit = args.cost_limit()
  +     warned_batch_size = False
  + 
  +     def edits_to_batches(
  +         edits: Sequence[TkC3Problem],
  +     ) -> Iterable[dict]:
  +         def pack_batch(rows: list[dict]):
  +             assert rows, "empty batch found"
  +             input_ids = [x["input_tks"] for x in rows]
  +             labels = [x["output_tks"] for x in rows]
  +             refs = [x["references"] for x in rows]
  +             id2ref = {id(ref): ref for row in refs for ref in row}
  +             references = [id2ref[x] for x in id2ref]
  +             id2order = {x: i for i, x in enumerate(id2ref)}
  +             query_ref_list = [[id2order[id(ref)] for ref in row] for row in refs]
  +             return {
  +                 "input_ids": input_ids,
  +                 "references": references,
  +                 "query_</s>
===========changed ref 6===========
    # module: coeditor.retrieval_model
  + def tk_edits_to_batches(
  +     query_edits: Sequence[TkC3Problem],
  +     args: BatchArgs,
  +     silent: bool = False,
  + ) -> list[dict]:
    # offset: 1
    <s> <add>             return {
  +                 "input_ids": input_ids,
  +                 "references": references,
  +                 "query_ref_list": query_ref_list,
  +                 "labels": labels,
  +             }
  + 
  +         # sample references for each query
  +         current_batch = []
  +         current_cost = 0
  +         for edit in tqdm(edits, desc="edits_to_batches", disable=silent):
  +             all_refs = [x[1] for x in edit.named_references]
  +             ref_size_sum = 0
  +             ref_selected = list[TokenSeq]()
  +             for ref in all_refs:
  +                 if ref_size_sum + len(ref) <= args.max_total_ref_tks:
  +                     ref_selected.append(ref)
  +                     ref_size_sum += len(ref)
  +             input_tks, output_tks = process_edit(edit)
  +             cost = retrieval_cost_model(
  +                 ref_size=sum(len(x) for x in ref_selected),
  +                 query_size=len(input_tks),
  +                 output_size=len(output_tks),
  +             )
  +             row = {
  +                 "input_tks": input_tks,
  +                 "output_tks": output_tks,
  +                 "references": ref_selected,
  +             }
  +             nonlocal warned_batch_size
  +             if cost > cost_limit and not warned_batch_size:
  +                 warned_batch_size = True
  +                 warnings.warn("Batch cost limit is too small.")
  +             if (not current_batch) or (
  +                 cost +</s>
===========changed ref 7===========
    # module: coeditor.retrieval_model
  + def tk_edits_to_batches(
  +     query_edits: Sequence[TkC3Problem],
  +     args: BatchArgs,
  +     silent: bool = False,
  + ) -> list[dict]:
    # offset: 2
    <s>cost <= cost_limit
  +                 and len(current_batch) < args.max_queries
  +             ):
  +                 current_batch.append(row)
  +                 current_cost += cost
  +             else:
  +                 yield pack_batch(current_batch)
  +                 current_batch = [row]
  +                 current_cost = cost
  +         if current_batch:
  +             yield pack_batch(current_batch)
  + 
  +     batches = list[dict]()
  +     bsizes = list[int]()
  +     for batch in edits_to_batches(query_edits):
  +         batches.append(batch)
  +         bsizes.append(len(batch["input_ids"]))
  + 
  +     batch_stats = {k: f"{v:.1f}" for k, v in scalar_stats(bsizes).items()}
  +     if not silent:
  +         cprint("blue", f"num batches: {len(batches)},", f"batch stats: {batch_stats}")
  + 
  +     return batches
  + 
===========changed ref 8===========
    # module: coeditor.retrieval_model
    class RetrievalEditorModel(T5PreTrainedModel):
        def eval_loss_on_data(
            self, data: TokenizedEditDataset, batch_args: "BatchArgs"
        ) -> dict[str, WeightedSum]:
            batch_args = copy.deepcopy(batch_args)
  -         batch_args.max_ref_dropout = 0.0
            batch_args.shuffle_extra_ids = False
            eval_loader = edits_to_dataloader(
                data.all_edits(),
                args=batch_args,
                shuffle=False,
                desc="Eval Epoch",
            )
            return self.eval_loss_on_loader(eval_loader)
     | 
| 
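The sampler above turns each batch's ragged token lists into rectangular arrays via pad_token_seqs, padding labels with -100 so the loss ignores those positions. A minimal sketch of such right-padding (the helper's exact signature in the repo may differ):

    def pad_token_seqs(seqs: list[list[int]], pad_id: int = 0) -> list[list[int]]:
        """Right-pad every sequence to the length of the longest one."""
        max_len = max(len(s) for s in seqs)
        return [s + [pad_id] * (max_len - len(s)) for s in seqs]

    # pad_token_seqs([[5, 6, 7], [8]], pad_id=-100)
    # -> [[5, 6, 7], [8, -100, -100]]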
	coeditor.api/EditPredictionService.suggest_edit | 
	Modified | 
	temp-1 | 
	ec882028fee3333b952c7e20e8051e7a58402a6d | 
	Switch to TkC3Problem in retrieval model code. | 
	 <0>:<add>             batches = tk_edits_to_batches(qedits, self.batch_args)
 | 
	      # module: coeditor.api
      @dataclass
      class EditPredictionService:
          def suggest_edit(
              self,
              file: Path,
              line: int,
              log_dir: Path | None = Path(".coeditor_logs"),
          ) -> ServiceResponse:
      <s> elem_change = Modified(trans_elem, trans_elem)
      
              with timed("encode edits"):
                  respect_lines = (
                      self.compute_offset(now_mod, now_elem, line, drop_comments=True) + 1
                  )
                  print(f"{respect_lines = }")
                  req = EditRequest(elem_change, respect_lines)
                  qedits = list(
                      self.encoder.encode_pedit(
                          pedit,
                          self.stub_cache,
                          queries=[req],
                          training=False,
                      )
                  )
                  if qedits[0].tk_pedit.module_stubs:
                      print("stub files:", qedits[0].tk_pedit.module_stubs.keys())
                  assert len(qedits) == 1
    -             batches = query_edits_to_batches(qedits, self.batch_args)
 <0>              assert len(batches) == 1
                  batch = batches[0]
      
              with timed("run model"), torch.autocast("cuda"):
                  predictions = self.model.predict_on_batch(
                      batch, [req], self.dec_args, self.show_max_solutions
                  )
                  assert_eq(len(predictions), 1)
                  predictions = predictions[0]
                  assert predictions
      
              if log_dir is not None:
                  log_dir.mkdir(exist_ok=True)
                  input_tks = batch["input_ids"][0]
                  references = batch["references"]
                  output_truth = batch["labels"][0]
                  print(f"Writing logs to: {log_dir}")
                  for i, pred in enumerate(predictions):
                      with (log_dir / f"solution-{i}.txt").open("w") as f:
                          pred_tks = pred.out_tks
                         </s> | 
	===========above chunk 0===========
    # module: coeditor.api
    @dataclass
    class EditPredictionService:
        def suggest_edit(
            self,
            file: Path,
            line: int,
            log_dir: Path | None = Path(".coeditor_logs"),
        ) -> ServiceResponse:
    # offset: -1
    <s>
                        cst.parse_module(now_code), mname, drop_comments=False
                    ),
                )
                now_elem = get_elem_by_line(now_mod, line)
                if now_elem is None:
                    raise ValueError(
                        f"No code element found at line {line} in file {file}."
                    )
                if not isinstance(now_elem, PythonFunction):
                    raise ValueError(f"Only functions can be edited by the model.")
    
            with timed("construct project edit"):
                pedit = self.config.get_pedit(
                    project, file, self.prev_cache, self.now_cache
                )
                if mname not in pedit.changes:
                    assert mname in pedit.before.modules
                    assert mname in pedit.after.modules
                    pedit.changes[mname] = ModuleEdit.from_no_change(
                        pedit.before.modules[mname]
                    )
            match [
                c for c in pedit.all_elem_changes() if get_change_path(c) == now_elem.path
            ]:
                case [Modified(PythonFunction(), PythonFunction()) as mf]:
                    elem_change = cast(Modified[PythonFunction], mf)
                case [Added(PythonFunction()) as mf]:
                    elem_change = cast(Added[PythonFunction], mf)
                case _:
                    if self.config.drop_comments:
                        trans_tree = remove_comments(now_elem.tree)
                        trans_elem = PythonFunction(
                            now_elem.name, now_elem.path, now_elem.parent_class, trans_tree
                        )
                    else:
                        trans_elem = now_elem
                    elem_change = Modified(trans_elem, trans_elem)
    
            with timed("encode edits"):
                respect_lines =</s>
===========above chunk 1===========
    # module: coeditor.api
    @dataclass
    class EditPredictionService:
        def suggest_edit(
            self,
            file: Path,
            line: int,
            log_dir: Path | None = Path(".coeditor_logs"),
        ) -> ServiceResponse:
    # offset: -2
            """Make the suggestion in-place at the given location."""
            timed = self.tlogger.timed
            project = self.project
    
            if not file.is_absolute():
                file = project / file
    
            with timed("get target element"):
                mname = PythonProject.rel_path_to_module_name(file.relative_to(project))
                stamp = os.stat(file).st_mtime
                now_code = file.read_text()
                now_mod = self.now_cache.cached(
                    (mname, False),
                    stamp,
                    lambda: PythonModule.from_cst(
                        cst.parse_module(now_code), mname, drop_comments=False
                    ),
                )
               </s>
===========below chunk 0===========
    # module: coeditor.api
    @dataclass
    class EditPredictionService:
        def suggest_edit(
            self,
            file: Path,
            line: int,
            log_dir: Path | None = Path(".coeditor_logs"),
        ) -> ServiceResponse:
    # offset: 1
    <s> f"solution-{i}.txt").open("w") as f:
                        pred_tks = pred.out_tks
                        score = pred.score
                        print(f"{respect_lines = }", file=f)
                        print(f"{len(input_tks) = }", file=f)
                        print(f"{len(references) = }", file=f)
                        print(f"Solution score: {score:.3g}", file=f)
                        print(f"Marginalized samples:", pred.n_samples, file=f)
                        pred = RetrievalModelPrediction(
                            input_ids=input_tks,
                            output_ids=pred_tks,
                            labels=output_truth,
                            references=references,
                        )
                        pred_str = RetrievalDecodingResult.show_prediction(None, pred)
                        print(pred_str, file=f)
    
            now_span = now_mod.location_map[now_elem.tree]
            # old_elem_code = get_span(now_code, now_span)
            old_elem_code = now_elem.code
            respect_lines = (
                self.compute_offset(now_mod, now_elem, line, drop_comments=False) + 1
            )
    
            suggestions = list[EditSuggestion]()
            for pred in predictions:
                suggested_change, preview = self.apply_edit_to_elem(
                    file,
                    now_mod,
                    now_elem,
                    line,
                    pred.out_tks,
                )
                suggestion = EditSuggestion(
                    score=pred.score,
                    change_preview=preview,
                    new_code=suggested_change.after,
                )
                suggestions.append(suggestion)
    
            def as_tuple(x: CodePosition):
                return (x.line</s>
===========below chunk 1===========
    # module: coeditor.api
    @dataclass
    class EditPredictionService:
        def suggest_edit(
            self,
            file: Path,
            line: int,
            log_dir: Path | None = Path(".coeditor_logs"),
        ) -> ServiceResponse:
    # offset: 2
    <s> )
                suggestions.append(suggestion)
    
            def as_tuple(x: CodePosition):
                return (x.line, x.column)
    
            return ServiceResponse(
                target_file=file.as_posix(),
                edit_start=(now_span.start.line, 0),
                edit_end=as_tuple(now_span.end),
                old_code=old_elem_code,
                suggestions=suggestions,
            )
    
    
===========unchanged ref 0===========
    at: coeditor._utils
        assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
    
    at: coeditor.api
        EditSuggestion(score: float, change_preview: str, new_code: str)
    
        ServiceResponse(target_file: str, edit_start: tuple[int, int], edit_end: tuple[int, int], old_code: str, suggestions: list[EditSuggestion])
    
        get_elem_by_line(module: PythonModule, line: int) -> PythonElem | None
    
    at: coeditor.api.ChangeDetectionConfig
        untracked_as_additions: bool = True
    
        ignore_dirs: Collection[str] = field(
                default_factory=lambda: PythonProject.DefaultIgnoreDirs
            )
    
        drop_comments: DropComment = True
    
        get_pedit(project_root: Path, target_file: Path, prev_cache: TimedCache[tuple[ModuleName, DropComment], PythonModule, str], now_cache: TimedCache[tuple[ModuleName, DropComment], PythonModule, float]) -> ProjectEdit
    
    at: coeditor.api.EditPredictionService
        compute_offset(now_mod: PythonModule, now_elem: PythonElem, line: int, drop_comments: bool)
    
        apply_edit_to_elem(self, file: Path, now_mod: PythonModule, now_elem: PythonElem, cursor_line: int, out_tks: TokenSeq) -> tuple[Modified[str], str]
        apply_edit_to_elem(file: Path, now_mod: PythonModule, now_elem: PythonElem, cursor_line: int, out_tks: TokenSeq) -> tuple[Modified[str], str]
    
    at: coeditor.api.EditPredictionService.__init__
        self.project = project
    
        self.model = model
    
        self.batch_args = batch_args
    
        self.encoder = encoder
    
        self.dec_args = dec_args
    
        self.config = config
    
        self.show_max_solutions = 3
    
     | 
| 
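suggest_edit above parses each module at most once per file version by caching on a staleness stamp (the file's mtime for the current tree, a git index stamp for the previous one). A minimal sketch of such a stamp-keyed cache; StampedCache is a hypothetical name standing in for the repo's TimedCache:

    from typing import Callable, Generic, Hashable, TypeVar

    K = TypeVar("K", bound=Hashable)
    V = TypeVar("V")

    class StampedCache(Generic[K, V]):
        """Recompute an entry whenever its stamp changes."""

        def __init__(self) -> None:
            self._store: dict[K, tuple[object, V]] = {}

        def cached(self, key: K, stamp: object, f: Callable[[], V]) -> V:
            hit = self._store.get(key)
            if hit is not None and hit[0] == stamp:
                return hit[1]  # stamp unchanged: reuse the cached value
            value = f()
            self._store[key] = (stamp, value)
            return value

    # usage sketch: cache.cached((mname, False), os.stat(file).st_mtime, parse_module)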
	coeditor.api/EditPredictionService.apply_edit_to_elem | 
	Modified | 
	temp-1 | 
	ec882028fee3333b952c7e20e8051e7a58402a6d | 
	Switch to TkC3Problem in retrieval model code. | 
	 <0>:<add>         new_change = new_change.map(lambda s: s.strip("\n"))
 | 
	      # module: coeditor.api
      @dataclass
      class EditPredictionService:
          def apply_edit_to_elem(
              self,
              file: Path,
              now_mod: PythonModule,
              now_elem: PythonElem,
              cursor_line: int,
              out_tks: TokenSeq,
          ) -> tuple[Modified[str], str]:
      <s>, drop_comments=True)
                      + 1
                  )
                  new_out_tks = TokenSeq()
                  for k, seg in output_ids_as_seqs(out_tks).items():
                      rel_line = extra_id_to_number(k) + lines_no_comment
                      if rel_line not in line_map:
                          messages = []
                          messages.append(
                              f"predicted relative line {rel_line} (extra_id_{extra_id_to_number(k)}) is out of range."
                          )
                          messages.append(
                              f"{n_lines = }, {lines_no_comment = }, {lines_with_comment = }"
                          )
                          messages.append(f"{line_map = }")
                          if isinstance(code_change, Modified):
                              messages.append("Prev element:")
                              messages.append(add_line_numbers(code_change.before))
                          e = ValueError("\n".join(messages))
                          raise e
                      line = line_map[rel_line]
                      k1 = get_extra_id(line - lines_with_comment)
                      new_out_tks.append(k1)
                      new_out_tks.extend(seg)
                  out_tks = new_out_tks
      
              change_tks = change_to_tokens(code_change)
              new_change = apply_output_tks_to_change(change_tks, lines_with_comment, out_tks)
    -         new_change.before = new_change.before.strip("\n")
    -         new_change.after = new_change.after.strip("\n")
 <0>          preview = self.preview_changes(new_change, lines_with_comment)
              return new_change, preview
      
       | 
	===========above chunk 0===========
    # module: coeditor.api
    @dataclass
    class EditPredictionService:
        def apply_edit_to_elem(
            self,
            file: Path,
            now_mod: PythonModule,
            now_elem: PythonElem,
            cursor_line: int,
            out_tks: TokenSeq,
        ) -> tuple[Modified[str], str]:
    # offset: -1
    <s>index_stamp(self.project, path_s),
                lambda: PythonModule.from_cst(
                    cst.parse_module(file_content_from_commit(self.project, "", path_s)),
                    mname,
                    drop_comments=False,
                ),
            )
            now_code = now_elem.code
            prev_elem = prev_mod.elems_dict.get(now_elem.path.path)
            if prev_elem is None:
                code_change = Added(now_code)
            else:
                code_change = Modified(prev_elem.code, now_code)
            lines_with_comment = (
                self.compute_offset(now_mod, now_elem, cursor_line, drop_comments=False) + 1
            )
            logging.info("Now respect lines:", lines_with_comment)
            if self.config.drop_comments:
                # map the changes to the original code locations with comments
                remover = CommentRemover(prev_mod.location_map)
                if prev_elem is not None:
                    elem1 = prev_elem.tree.visit(remover)
                    assert isinstance(elem1, cst.CSTNode)
                    line_map = remover.line_map(elem1)
                else:
                    line_map = {0: 0, 1: 1}
                n_lines = len(line_map)
                line_map[n_lines] = line_map[n_lines - 1] + 1
                lines_no_comment = (
                    self.compute_offset(now_mod, now_elem, cursor_line, drop_comments=True)
                    + 1
                )
                new_out_tks = TokenSeq()
                for k</s>
===========above chunk 1===========
    # module: coeditor.api
    @dataclass
    class EditPredictionService:
        def apply_edit_to_elem(
            self,
            file: Path,
            now_mod: PythonModule,
            now_elem: PythonElem,
            cursor_line: int,
            out_tks: TokenSeq,
        ) -> tuple[Modified[str], str]:
    # offset: -2
            mname = now_elem.path.module
            path_s = file.relative_to(self.project).as_posix()
            prev_mod = self.prev_cache.cached(
                (path_s, False),
                self.config.get_index_stamp(self.project, path_s),
                lambda: PythonModule.from_cst(
                    cst.parse</s>
===========unchanged ref 0===========
    at: coeditor.api.ChangeDetectionConfig
        drop_comments: DropComment = True
    
        get_index_stamp(project_root: Path, path_s: str)
    
    at: coeditor.api.EditPredictionService
        compute_offset(now_mod: PythonModule, now_elem: PythonElem, line: int, drop_comments: bool)
    
        preview_changes(change: Modified[str], respect_lines: int) -> str
    
    at: coeditor.api.EditPredictionService.__init__
        self.project = project
    
        self.config = config
    
        self.prev_cache = TimedCache[
                    tuple[ModuleName, DropComment], PythonModule, str
                ]()
    
    at: coeditor.common
        TokenSeq = list[Token]
    
    at: coeditor.common.TimedCache
        cached(key: T1, stamp: TStamp, f: Callable[[], T2]) -> T2
    
    at: coeditor.encoding
        get_extra_id(i: int) -> Token
    
        extra_id_to_number(tk: Token) -> int
    
        change_to_tokens(change: Change[str]) -> TokenSeq
    
    at: logging
        info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
    
    at: pathlib
        Path()
    
    at: pathlib.PurePath
        as_posix() -> str
    
        relative_to(*other: Union[str, _PathLike]) -> _P
    
    at: spot.data
        output_ids_as_seqs(output_ids: Iterable[Token]) -> dict[Token, TokenSeq]
    
    at: spot.static_analysis
        PythonElem = PythonFunction | PythonVariable
    
    
===========unchanged ref 1===========
        PythonModule(functions: list[PythonFunction], global_vars: list[PythonVariable], classes: list[PythonClass], name: ModuleName, imported_modules: set[ModuleName], defined_symbols: dict[str, ProjectPath], tree: cst.Module, location_map: dict[cst.CSTNode, CodeRange], elem2pos: dict[ElemPath, CodeRange], removed_comments: list[cst.CSTNode])
    
        CommentRemover(src_map: Mapping[cst.CSTNode, CodeRange] | None=None)
    
    at: spot.static_analysis.CommentRemover
        line_map(post_node: cst.CSTNode) -> dict[int, int]
    
    at: spot.static_analysis.ProjectPath
        module: ModuleName
    
        path: ElemPath
    
    at: spot.static_analysis.PythonFunction
        path: ProjectPath
    
    at: spot.static_analysis.PythonModule
        location_map: dict[cst.CSTNode, CodeRange]
    
        from_cst(module: cst.Module, name: str, drop_comments: bool) -> "PythonModule"
    
    at: spot.static_analysis.PythonVariable
        path: ProjectPath
    
    at: spot.utils
        add_line_numbers(code: str)
    
    at: typing.Mapping
        get(key: _KT) -> Optional[_VT_co]
        get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
    
    
===========changed ref 0===========
    # module: coeditor.dataset
  + time_limit_per_commit = 10.0
  - time_limit_per_commit = 5.0
    
===========changed ref 1===========
    # module: coeditor.retrieval_model
    @dataclass
    class BatchArgs:
        @classmethod
        def eval_default(cls) -> Self:
            return BatchArgs(
                max_total_ref_tks=512 * 32,
                max_queries=32,
  -             max_ref_dropout=0.0,
                shuffle_extra_ids=False,
            )
    
===========changed ref 2===========
    # module: coeditor.retrieval_model
    @dataclass
    class _BatchSampler:
  +     all_edits: list[TkC3Problem]
  -     all_edits: list[BasicTkQueryEdit]
        batch_args: BatchArgs
        shuffle: bool
        desc: str
        tqdm_args: dict | None = None
    
===========changed ref 3===========
    # module: coeditor.retrieval_model
    @dataclass
    class _BatchSampler:
        def estimate_n_batches(self) -> int:
  +         batches = tk_edits_to_batches(self.all_edits, self.batch_args, silent=True)
  -         batches = query_edits_to_batches(self.all_edits, self.batch_args, silent=True)
            return len(batches)
    
===========changed ref 4===========
    # module: coeditor.retrieval_model
    @dataclass
    class BatchArgs:
        max_output_tks: int = 256
        max_query_tks: int = 512
        min_queires: int = 1
        max_queries: int = 8
        max_ref_tks: int = 512
        max_total_ref_tks: int = 512 * 16
  -     max_ref_dropout: float = 1.0
        shuffle_extra_ids: bool = True
  -     use_only_modified: bool = True
     | 
| 
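The ground-truth line of this record replaces two field assignments with a single map call, which is what a frozen Modified dataclass requires. A small sketch of that pattern, reduced to only the fields and method used here (not the repo's full class):

    from dataclasses import dataclass
    from typing import Callable, Generic, TypeVar

    T = TypeVar("T")
    S = TypeVar("S")

    @dataclass(frozen=True)
    class Modified(Generic[T]):
        before: T
        after: T

        def map(self, f: Callable[[T], S]) -> "Modified[S]":
            # frozen instances cannot be mutated; map builds a new value
            return Modified(f(self.before), f(self.after))

    # Modified("a\n", "b\n").map(lambda s: s.strip("\n"))
    # -> Modified(before='a', after='b')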
	coeditor.retrieval_model/BatchArgs.eval_default | 
	Modified | 
	temp-1 | 
	18bed1bae904b1d484977a957aa4477ec2128c00 | 
	Training parameter adjustment. | 
	 <0>:<add>             max_total_ref_tks=512 * 24,
 | 
	      # module: coeditor.retrieval_model
      @dataclass
      class BatchArgs:
          @classmethod
          def eval_default(cls) -> Self:
              return BatchArgs(
    -             max_total_ref_tks=512 * 32,
 <0>              max_queries=32,
                  shuffle_extra_ids=False,
              )
      
       | 
	===========unchanged ref 0===========
    at: coeditor.retrieval_model
        BatchArgs(max_output_tks: int=256, max_query_tks: int=512, min_queires: int=1, max_queries: int=8, max_ref_tks: int=512, max_total_ref_tks: int=512 * 12, shuffle_extra_ids: bool=True)
    
    at: coeditor.retrieval_model.BatchArgs
        max_output_tks: int = 256
    
        max_query_tks: int = 512
    
        min_queires: int = 1
    
        max_queries: int = 8
    
        max_ref_tks: int = 512
    
        max_total_ref_tks: int = 512 * 12
    
        shuffle_extra_ids: bool = True
    
    
===========changed ref 0===========
    # module: coeditor.retrieval_model
    @dataclass
    class BatchArgs:
        max_output_tks: int = 256
        max_query_tks: int = 512
        min_queires: int = 1
        max_queries: int = 8
        max_ref_tks: int = 512
  +     max_total_ref_tks: int = 512 * 12
  -     max_total_ref_tks: int = 512 * 16
        shuffle_extra_ids: bool = True
     | 
| 
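BatchArgs keeps training-time values as field defaults and exposes evaluation settings as a classmethod preset that overrides only the fields that differ. A generic sketch of that convention (field values follow the record; the class name is illustrative):

    from dataclasses import dataclass

    @dataclass
    class Args:
        max_total_ref_tks: int = 512 * 12  # training default
        max_queries: int = 8
        shuffle_extra_ids: bool = True

        @classmethod
        def eval_default(cls) -> "Args":
            # evaluation: larger batches, no label randomization
            return cls(
                max_total_ref_tks=512 * 24,
                max_queries=32,
                shuffle_extra_ids=False,
            )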
	scripts.coeditor.train_retrieval_model/train_model | 
	Modified | 
	temp-1 | 
	18bed1bae904b1d484977a957aa4477ec2128c00 | 
	Training parameter adjustment. | 
	 <0>:<add>             warmup_bargs.max_total_ref_tks //= 3
 | 
	      # module: scripts.coeditor.train_retrieval_model
      def train_model(
          dataset_name="medium",
          model_variant="-sig-analysis-post_usees",
          encoder: C3EditEncoder = C3EditEncoder(),
          batch_args=BatchArgs.train_default(),
          test_batch_args=BatchArgs.eval_default(),
          train_args=TrainingArgs(),
          recreate_data: bool = False,
          eval_only: bool = False,
      ):
      <s>, reinit_weights=train_args.reinit_weights
              )
          else:
              model = RetrievalEditorModel.load(get_model_dir() / model_name)
      
          if os.getenv("CUDA_VISIBLE_DEVICES") is None:
              warnings.warn(
                  "CUDA_VISIBLE_DEVICES not set, using 0. Note that "
                  "the Huggingface Trainer will use all visible GPUs for training."
              )
              os.environ["CUDA_VISIBLE_DEVICES"] = "0"
      
          if not eval_only:
              with timed_action("Warm-up Training"):
                  warmup_bargs = copy.deepcopy(batch_args)
    -             warmup_bargs.max_total_ref_tks //= 4
 <0>              warmup_bargs.min_queires *= 4
                  warmup_bargs.max_queries *= 2
      
                  warmup_targs = copy.deepcopy(train_args)
                  warmup_targs.learning_rate *= 4
                  warmup_targs.max_train_epochs = 1
                  all_edits = datasets["train"].all_edits()
                  warmup_edits = random_subset(all_edits, len(all_edits) // 4)
                  model.train_on_data(
                      model_name,
                      TokenizedEditDataset.from_edits(warmup_edits),
                      datasets["valid"],
                      warmup_targs,
                      batch_args=warmup_bargs,
                      eval_batch_args=test_batch_args,
                  )
              with timed_action("F</s> | 
	===========above chunk 0===========
    # module: scripts.coeditor.train_retrieval_model
    def train_model(
        dataset_name="medium",
        model_variant="-sig-analysis-post_usees",
        encoder: C3EditEncoder = C3EditEncoder(),
        batch_args=BatchArgs.train_default(),
        test_batch_args=BatchArgs.eval_default(),
        train_args=TrainingArgs(),
        recreate_data: bool = False,
        eval_only: bool = False,
    ):
    # offset: -1
        # model_variant = "-file"
        model_name = f"coeditor-{dataset_name}"
        model_name += model_variant
    
        dec_args = DecodingArgs()
        if train_args.quicktest:
            model_name = "quicktest-" + model_name
    
        if not eval_only:
            check_save_dir(model_name)
    
        datasets = make_or_load_datasets(dataset_name, encoder, recreate_data=recreate_data)
    
        config_dict = {
            k: get_modified_args(v)
            for k, v in {
                "data_args": batch_args,
                "train_args": train_args,
                "dec_args": dec_args,
            }.items()
        }
    
        project = "Coeditor" if not train_args.quicktest else "Coeditor-quicktest"
        wandb.init(dir="..", project=project, name=model_name, config=config_dict)
    
        if train_args.quicktest:
            print("Using fewer data for quick test.")
            n_quick_exs = 20
            for name, dataset in datasets.items():
                datasets[name] = TokenizedEditDataset.from_edits(
                    dataset.all_edits()[:n_quick_exs]
                )
    
        if not eval_only:
            model = RetrievalEditorModel.from_code_t5(
                "base", reuse_embed=True, reinit_weights=train_args.reinit_weights
            )
        else:
            model = RetrievalEditorModel.</s>
===========below chunk 0===========
    # module: scripts.coeditor.train_retrieval_model
    def train_model(
        dataset_name="medium",
        model_variant="-sig-analysis-post_usees",
        encoder: C3EditEncoder = C3EditEncoder(),
        batch_args=BatchArgs.train_default(),
        test_batch_args=BatchArgs.eval_default(),
        train_args=TrainingArgs(),
        recreate_data: bool = False,
        eval_only: bool = False,
    ):
    # offset: 1
    <s>up_bargs,
                    eval_batch_args=test_batch_args,
                )
            with timed_action("Fine-tune Training"):
                model.train_on_data(
                    model_name,
                    datasets["train"],
                    datasets["valid"],
                    train_args,
                    batch_args=batch_args,
                    eval_batch_args=test_batch_args,
                )
    
        model.to("cuda")
        with timed_action("Loss Evaluation"):
            eval_result = model.eval_loss_on_data(datasets["test"], test_batch_args)
            eval_dict = {f"test/{k}": v.average() for k, v in eval_result.items()}
            wandb.log(eval_dict)
    
        max_saved_samples = 300
    
        with timed_action("Accuracy Evaluation"):
            dec_result = model.predict_on_data(datasets["test"], test_batch_args, dec_args)
            pickle_dump(get_model_dir() / model_name / "dec_result.pkl", dec_result)
            exact_acc, exact_correct_map = dec_result.exact_match_accuracy()
            wandb.log({"test/exact-acc": exact_acc.average()})
    
            out_dir = get_model_dir() / model_name / "exact_match_samples"
            dec_result.save_examples_to_dir(
                out_dir, random_subset(exact_correct_map, max_saved_samples)
            )
            print("Exact-match samples saved</s>
===========below chunk 1===========
    # module: scripts.coeditor.train_retrieval_model
    def train_model(
        dataset_name="medium",
        model_variant="-sig-analysis-post_usees",
        encoder: C3EditEncoder = C3EditEncoder(),
        batch_args=BatchArgs.train_default(),
        test_batch_args=BatchArgs.eval_default(),
        train_args=TrainingArgs(),
        recreate_data: bool = False,
        eval_only: bool = False,
    ):
    # offset: 2
    <s>dir, random_subset(exact_correct_map, max_saved_samples)
            )
            print("Exact-match samples saved to:", out_dir)
    
        return model
    
    
===========unchanged ref 0===========
    at: _warnings
        warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
        warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
    
    at: coeditor._utils
        timed_action(name: str, silent: bool=False)
    
        pickle_dump(file: Path, obj: Any)
    
        get_modified_args(instance, flatten: bool=False) -> dict[str, Any] | None
    
    at: coeditor.common
        get_model_dir(trained=True) -> Path
    
        random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2]
        random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1]
    
    at: coeditor.dataset
        TokenizedEditDataset(_edits: list[TEdit])
    
    at: coeditor.dataset.TokenizedEditDataset
        _edits: list[TEdit]
    
        from_edits(edits: Iterable[TEdit]) -> "TokenizedEditDataset[TEdit]"
    
    at: coeditor.model
        DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1)
    
    at: copy
        deepcopy(x: _T, memo: Optional[Dict[int, Any]]=..., _nil: Any=...) -> _T
    
    at: os
        environ = _createenviron()
    
        getenv(key: str, default: _T) -> Union[str, _T]
        getenv(key: str) -> Optional[str]
    
    at: train_model
        check_save_dir(model_name: str) -> None
    
    at: wandb
        init = wandb_sdk.init
    
    
===========unchanged ref 1===========
        log = _preinit.PreInitCallable("wandb.log", wandb_sdk.wandb_run.Run.log)
    
    
===========changed ref 0===========
  + # module: tests.coeditor.testcases.example
  + @dataclass
  + class Weight:
  +     value: float
  + 
===========changed ref 1===========
  + # module: tests.coeditor.testcases.example
  + @dataclass
  + class Foo:
  +     value: float
  + 
===========changed ref 2===========
  + # module: tests.coeditor.testcases.example
  + @dataclass
  + class Foo:
  +     def bar(self):
  +         return Weight(self.value)
  + 
===========changed ref 3===========
  + # module: tests.coeditor.testcases.example
  + @dataclass
  + class Weight:
  +     def go(self, other):
  +         return self.value + other.value
  + 
===========changed ref 4===========
  + # module: tests.coeditor.testcases.example
  + foo(Foo(1), Foo(2))
  + 
===========changed ref 5===========
  + # module: tests.coeditor.testcases.example
  + def foo(x, y):
  +     return x.bar().go(y.bar())
  +  | 
| 
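train_model above runs a cheap warm-up phase before fine-tuning: it deep-copies the batch and training arguments, shrinks the reference budget, raises the learning rate, caps the epoch count, and trains on a random quarter of the edits. A minimal sketch of deriving such warm-up settings; attribute names follow the record, and the argument objects are assumed to be plain mutable dataclasses:

    import copy
    import random
    from typing import Sequence, TypeVar

    T = TypeVar("T")

    def random_subset(xs: Sequence[T], n: int, seed: int = 42) -> list[T]:
        rng = random.Random(seed)
        return rng.sample(list(xs), min(n, len(xs)))

    def make_warmup_args(train_args, batch_args):
        wb = copy.deepcopy(batch_args)
        wb.max_total_ref_tks //= 3   # fewer reference tokens per batch
        wt = copy.deepcopy(train_args)
        wt.learning_rate *= 4        # larger steps for the short warm-up
        wt.max_train_epochs = 1
        return wt, wb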
	coeditor.ctx_change_encoder/C3ProblemTokenizer.tokenize_problem | 
	Modified | 
	temp-1 | 
	f0a4cab073846109c9b3083f25a99bd2bde88b3a | 
	Make C3Problem more suitable for serialization. | 
	 <0>:<add>                 path=span.headers[-1].path,
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class C3ProblemTokenizer:
          def tokenize_problem(
              self,
              problem: C3Problem,
          ) -> Sequence[TkC3Problem]:
      <s>chunk_overlap,
                      )
                  above_chunks = [
                      (f"above chunk {i}", chunk) for i, chunk in enumerate(above_chunks)
                  ]
                  below_chunks = [
                      (f"below chunk {i}", chunk) for i, chunk in enumerate(below_chunks)
                  ]
                  all_refs = above_chunks + below_chunks + named_references
                  size_sum = 0
                  kept_refs = list[tuple[str, TokenSeq]]()
                  for (name, chunk) in all_refs:
                      if size_sum + len(chunk) <= self.max_total_ref_tks:
                          size_sum += len(chunk)
                          kept_refs.append((name, chunk))
      
                  return TkC3Problem(
                      scope_tks + chunk_input,
                      chunk_output,
    -                 path=span.parent_scopes[-1].earlier().path,
 <0>                  change_type=span.change.map(lambda _: None),
                      named_references=kept_refs,
                      src_info=problem.src_info,
                  )
      
              problems = list[TkC3Problem]()
              for l in range(len(tk_delta.deltas) + 1):
                  finished = l == len(tk_delta.deltas)
                  input_growth = len(origin_lines[l]) + 2 if l < len(origin_lines) else 1
                  if (
                      finished
                      or chunk_lines >= self.max_lines_to_edit
                      or len(chunk_input) + input_growth > input_limit
                  ):
                      if has_change(chunk_output):
                          problems.append(get_problem(chunk_input, chunk_output))
                          if len(problems) >= self.max_chunks_per_elem:
                              break
      
                      if finished:
                          break
      
                      chunk_main_input = join_list</s> | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
        def tokenize_problem(
            self,
            problem: C3Problem,
        ) -> Sequence[TkC3Problem]:
    # offset: -1
    <s> get_problem(chunk_input, chunk_output):
                # try to move some prev_change_tks into the input
                above_tks = prev_change_tks
                below_tks = join_list(origin_lines[l:], Newline_id)
                chunk_input, above_tks, below_tks = self._inline_some_context(
                    chunk_input, above_tks, below_tks, input_limit
                )
    
                # limit the input size if it's too long (can happen for later chunks)
                chunk_input = truncate_section(chunk_input, TruncateAt.Right, input_limit)
                chunk_output = truncate_output_tks(chunk_input, chunk_output)
                chunk_output = truncate_section(
                    chunk_output, TruncateAt.Right, self.max_output_tks, add_bos=False
                )
    
                above_chunks = break_into_chunks(
                    above_tks,
  +                 lambda i: self._encode_headers(span.headers, -1 - i),
  -                 lambda i: self._encode_parent_scopes(span.parent_scopes, -1 - i),
                    chunk_size=self.max_ref_tks,
                    overlap=self.ref_chunk_overlap,
                    right_to_left=True,
                )
                if finished:
                    below_chunks = []
                else:
                    below_chunks = break_into_chunks(
                        below_tks,
  +                     lambda i: self._encode_headers(span.headers, i + 1),
  -                     lambda i: self._encode_parent_scopes(span.parent_scopes, i + 1),
                        chunk_size=self.max_ref_tks,
                        overlap=self.ref_chunk_overlap,
                    )
                above_chunks = [
                    (f"above chunk {i}", chunk) for i,</s>
===========above chunk 1===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
        def tokenize_problem(
            self,
            problem: C3Problem,
        ) -> Sequence[TkC3Problem]:
    # offset: -2
            span = problem.span
            named_references = list[tuple[str, TokenSeq]]()
            # compute the references that are relevant to this span
            relevant_chunks = self._group_encode_changed_refs(problem.relevant_changes)
            for i, chunk in enumerate(relevant_chunks):
                named_references.append((f"changed ref {i}", chunk))
            relevant_chunks = self._group_encode_unchanged_refs(problem.relevant_unchanged)
            for i, chunk in enumerate(relevant_chunks):
                named_references.append((f"unchanged ref {i}", chunk))
    
            diffs = change_to_line_diffs(span.change)
            original, delta = line_diffs_to_original_delta(diffs)
            origin_lines = split_list(encode_basic(original), Newline_id)
            tk_delta = delta.to_tk_delta()
            chunk_id = 0
            chunk_start_l = 0
  +         scope_tks = self._encode_headers(span.headers, 0)
  -         scope_tks = self._encode_parent_scopes(span.parent_scopes, 0)
            chunk_input = TokenSeq()
            input_limit = self.max_query_tks - len(scope_tks)
            chunk_lines = 0
            chunk_output = TokenSeq()
            prev_change_tks = TokenSeq()
    
            def get_problem(chunk_input, chunk_output):
                # try to move some prev_change_tks into the input
                above</s>
===========below chunk 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
        def tokenize_problem(
            self,
            problem: C3Problem,
        ) -> Sequence[TkC3Problem]:
    # offset: 1
    <s>chunks_per_elem:
                            break
    
                    if finished:
                        break
    
                    chunk_main_input = join_list(origin_lines[chunk_start_l:l], Newline_id)
                    chunk_main_delta = tk_delta.for_input_range((chunk_start_l, l))
                    chunk_main_change = chunk_main_delta.to_change_tks(chunk_main_input)
                    prev_change_tks.extend(chunk_main_change)
                    prev_change_tks.append(Newline_id)
                    chunk_id += 1
                    chunk_input = TokenSeq()
                    chunk_lines = 0
                    chunk_output = TokenSeq()
                    chunk_start_l = l
    
                chunk_input.append(get_extra_id(chunk_lines))
                if l < len(origin_lines):
                    chunk_input.extend(origin_lines[l])
                    chunk_input.append(Newline_id)
                line_change = join_list(tk_delta.deltas[l], Newline_id)
                chunk_output.append(get_extra_id(chunk_lines))
                chunk_output.extend(line_change)
                if line_change and line_change[-1] != Del_id:
                    chunk_output.append(Newline_id)
                chunk_lines += 1
            return problems
    
    
===========unchanged ref 0===========
    at: cachetools.lru
        LRUCache(maxsize: int, getsizeof: Optional[Callable[[_VT], int]]=...)
    
    at: coeditor.common
        TokenSeq = list[Token]
    
        split_list(lst: list[T1], sep: T1) -> list[list[T1]]
    
        join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
    
    at: coeditor.ctx_change_encoder
        C3Problem(span: ChangedCodeSpan, relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], src_info: dict[str, Any])
    
        TkC3Problem(input_tks: TokenSeq, output_tks: TokenSeq, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TokenSeq]], src_info: dict[str, Any])
    
    at: coeditor.ctx_change_encoder.C3Problem
        span: ChangedCodeSpan
    
        relevant_changes: Sequence[ChangedCodeSpan]
    
        relevant_unchanged: Sequence[ChangedCodeSpan]
    
        src_info: dict[str, Any]
    
    at: coeditor.ctx_change_encoder.C3ProblemTokenizer
        _encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq
        _encode_headers(self, scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq
    
        _inline_some_context(self, input: TokenSeq, above_ctx: TokenSeq, below_ctx: TokenSeq, size_limit: int) -> tuple[TokenSeq, TokenSeq, TokenSeq]
        _inline_some_context(input: TokenSeq, above_ctx: TokenSeq, below_ctx: TokenSeq, size_limit: int) -> tuple[TokenSeq, TokenSeq, TokenSeq]
    
        _group_encode_unchanged_refs(elems: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq]
    
        _group_encode_changed_refs(changes: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq]
    
     | 
| 
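tokenize_problem above splits long context into fixed-size reference chunks, prepending a per-chunk header (the enclosing scope headers plus an offset marker) and overlapping consecutive chunks so nothing is lost at a boundary. A left-to-right sketch of that idea; the repo's break_into_chunks also supports a right-to-left mode, omitted here, and the sketch assumes each chunk body is longer than the overlap:

    from typing import Callable

    def break_into_chunks(
        tks: list[int],
        header: Callable[[int], list[int]],
        chunk_size: int,
        overlap: int,
    ) -> list[list[int]]:
        chunks: list[list[int]] = []
        i, start = 0, 0
        while start < len(tks) or not chunks:
            h = header(i)                       # e.g. scope tokens + offset note
            body = tks[start : start + chunk_size - len(h)]
            chunks.append(h + body)
            if start + len(body) >= len(tks):
                break
            start += len(body) - overlap        # step back to share context
            i += 1
        return chunks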
	coeditor.ctx_change_encoder/C3ProblemTokenizer._encode_change | 
	Modified | 
	temp-1 | 
	f0a4cab073846109c9b3083f25a99bd2bde88b3a | 
	Make C3Problem more suitable for serialization. | 
	 <0>:<add>             self._change_cache[change] = change_tks
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class C3ProblemTokenizer:
          def _encode_change(self, change: Change[str]) -> TokenSeq:
    -         if (key := _ObjId(id(change))) in self._id_cache:
    -             return self._id_cache[key]
    -         change = change.map(lambda s: s.strip("\n"))
    +         if (change_tks := self._change_cache.get(change)) is None:
    +             change_tks = change_to_tokens(change)
    -         change_tks = change_to_tokens(change)
    -         self._id_cache[key] = change_tks
 <0>          return change_tks
      
       | 
	===========unchanged ref 0===========
    at: coeditor.common
        TokenSeq = list[Token]
    
    at: coeditor.ctx_change_encoder
        ChangedHeader(change: Change[str], type: str, line_range: LineRange, path: ProjectPath)
    
    at: coeditor.ctx_change_encoder.C3ProblemTokenizer
        max_scope_tks: int = 128
    
    at: coeditor.ctx_change_encoder.C3ProblemTokenizer._encode_header_change
        hchange = ch.change.map(lambda s: s.strip("\n"))
    
        tks = truncate_section(
                    change_to_tokens(hchange), TruncateAt.Left, self.max_scope_tks
                )
    
    at: coeditor.encoding
        change_to_tokens(change: Change[str]) -> TokenSeq
    
        TruncateAt()
    
    at: coeditor.encoding.TruncateAt
        Left = 0
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
  + @dataclass(frozen=True)
  + class ChangedHeader:
  +     """Represents the changes made to a header.
  +     This format does not store parent syntax nodes and is more suitable for serialization.
  +     """
  + 
  +     change: Change[str]
  +     # below are pre-edit attributes
  +     type: str
  +     line_range: LineRange
  +     path: ProjectPath
  + 
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
  +     def _encode_header_change(self, ch: ChangedHeader) -> TokenSeq:
  +         hchange = ch.change.map(lambda s: s.strip("\n"))
  +         tks = truncate_section(
  +             change_to_tokens(hchange), TruncateAt.Left, self.max_scope_tks
  +         )
  +         return tks
  + 
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
  -     def _encode_scope_change(self, c: Change[ChangeScope]) -> TokenSeq:
  -         if (key := _ObjId(id(c))) in self._scope_cache:
  -             return self._scope_cache[key]
  -         hchange = c.map(lambda s: s.header_code.strip("\n"))
  -         tks = truncate_section(
  -             change_to_tokens(hchange), TruncateAt.Left, self.max_scope_tks
  -         )
  -         self._scope_cache[key] = tks
  -         return tks
  - 
===========changed ref 3===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
  -     def _encode_parent_scopes(
  -         self, scope_changes: Sequence[Change[ChangeScope]], offset: int
  -     ) -> TokenSeq:
  -         scope_tks = join_list(
  -             (self._encode_scope_change(c) for c in scope_changes), Newline_id
  -         )
  -         if offset != 0:
  -             scope_tks.extend(encode_basic(f"\n# offset: {offset}\n"))
  -         else:
  -             scope_tks.append(Newline_id)
  -         scope_tks = truncate_section(scope_tks, TruncateAt.Left, self.max_scope_tks)
  -         return scope_tks
  - 
===========changed ref 4===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
        def __post_init__(self):
  -         self._id_cache = FIFOCache[_ObjId, TokenSeq](maxsize=1000)
  +         self._change_cache = LRUCache[Change[str], TokenSeq](maxsize=5000)
  -         self._scope_cache = FIFOCache[_ObjId, TokenSeq](maxsize=1000)
  -         self._value_cache = FIFOCache[Any, Sequence[TokenSeq]](maxsize=1000)
    
===========changed ref 5===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     VERSION = "2.0"
  -     VERSION = "1.2"
  +     # change spans with more than this many lines will be ignored
  +     max_span_lines: int = 500
    
===========changed ref 6===========
    # module: coeditor.ctx_change_encoder
  + @dataclass(frozen=True)
  + class ChangedHeader:
  +     def __repr__(self) -> str:
  +         return (
  +             f"ChangedHeader(path={self.path}, range={self.line_range}, "
  +             f"change={self.change})"
  +         )
  + 
===========changed ref 7===========
    # module: coeditor.ctx_change_encoder
    @dataclass(frozen=True)
    class PyDefinition:
  -     @staticmethod
  -     def from_scope(scope: ChangeScope) -> "PyDefinition":
  -         path = scope.path
  -         full_name = PyFullName(f"{path.module}.{path.path}")
  -         start_pos = scope.header_line_range[0], 0
  -         end_pos = scope.header_line_range[1], 0
  -         return PyDefinition(full_name, start_pos, end_pos)
  - 
===========changed ref 8===========
    # module: coeditor.ctx_change_encoder
  + @dataclass(frozen=True)
  + class ChangedCodeSpan:
  +     """Represents the changes made to a span of code.
  +     This format does not store parent syntax nodes and is more suitable for serialization.
  +     """
  + 
  +     headers: Sequence[ChangedHeader]
  +     change: Change[str]
  +     # below are pre-edit attributes
  +     line_range: LineRange
  +     module: ModuleName
  + 
===========changed ref 9===========
    # module: coeditor.ctx_change_encoder
  + @dataclass(frozen=True)
  - @dataclass
    class C3Problem:
        "Contextual code change prediction problem."
  +     span: ChangedCodeSpan
  -     span: ChangedSpan
        # most relevant to least relevant
  +     relevant_changes: Sequence[ChangedCodeSpan]
  -     relevant_changes: list[ChangedSpan]
        # most relevant to least relevant
  +     relevant_unchanged: Sequence[ChangedCodeSpan]
  -     relevant_unchanged: list[ChangedSpan]
        # some optional information about how the problem was generated
        src_info: dict[str, Any]
    
===========changed ref 10===========
    # module: coeditor.history
    @dataclass(frozen=True)
    class Modified(_ChangeBase[E1]):
  +     def __repr__(self):
  +         if self.before == self.after:
  +             return f"Modified(before=after={repr(self.before)})"
  +         else:
  +             return f"Modified(before={repr(self.before)}, after={repr(self.after)})"
  + 
===========changed ref 11===========
    <s>.code_change
    def edits_from_commit_history(
        project_dir: Path,
        history: Sequence[CommitInfo],
        tempdir: Path,
        change_processor: ProjectChangeProcessor[TProb] = NoProcessing(),
  -     edit_encoder: Callable[[TProb], Iterable[TEnc]] = lambda x: [x],
        ignore_dirs=DefaultIgnoreDirs,
        silent: bool = False,
        time_limit: float | None = None,
  + ) -> Sequence[TProb]:
  - ) -> Sequence[TEnc]:
        """Incrementally compute the edits to a project from the git history.
        Note that this will change the file states in the project directory, so
        you should make a copy of the project before calling this function.
        """
        tempdir = tempdir.resolve()
        if tempdir.exists():
            raise FileExistsError(f"Workdir '{tempdir}' already exists.")
        use_fast_parser = jedi.settings.fast_parser
        tempdir.mkdir(parents=True, exist_ok=False)
        try:
            run_command(
                ["cp", "-r", str(project_dir / ".git"), str(tempdir)],
                cwd=project_dir.parent,
            )
    
            return _edits_from_commit_history(
                tempdir,
                history,
                change_processor,
  -             edit_encoder,
                ignore_dirs,
                silent,
                time_limit=time_limit,
            )
        finally:
            shutil.rmtree(tempdir)
            jedi.settings.fast_parser = use_fast_parser
     | 
| 
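This record's diff swaps an id()-keyed FIFO cache for an LRUCache keyed by the change value itself, which works once Change is a frozen (hashable) dataclass and lets structurally equal changes from different objects share one encoding. A minimal sketch of the same get-or-compute pattern using cachetools; the tuple key and the stand-in tokenizer are illustrative only:

    from cachetools import LRUCache

    class ChangeEncoder:
        def __init__(self) -> None:
            # keyed by value, so equal changes hit the cache
            self._cache: LRUCache = LRUCache(maxsize=5000)

        def encode(self, change: tuple[str, str]) -> list[str]:
            tks = self._cache.get(change)
            if tks is None:
                tks = [f"-{change[0]}", f"+{change[1]}"]  # stand-in tokenizer
                self._cache[change] = tks
            return tks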
	coeditor.ctx_change_encoder/C3ProblemTokenizer._group_encode_changed_refs | 
	Modified | 
	temp-1 | 
	f0a4cab073846109c9b3083f25a99bd2bde88b3a | 
	Make C3Problem more suitable for serialization. | 
	 <0>:<add>                 lambda i: self._encode_headers(mod_change, i),
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class C3ProblemTokenizer:
          def _group_encode_changed_refs(
    +         self, changes: Sequence[ChangedCodeSpan]
    -         self, changes: Sequence[ChangedSpan]
          ) -> Sequence[TokenSeq]:
      <s> header_diff = list[ChangedHeader]()
    -                 scope_diff = []
    +                 for i, h in enumerate(c.headers):
    -                 for i, s in enumerate(c.parent_scopes):
    -                     if (
    -                         i >= len(last_scope)
    -                         or s.earlier().path != last_scope[i].earlier().path
    -                     ):
    +                     if i >= len(last_scope) or h.path != last_scope[i].path:
    +                         header_diff.append(h)
    -                         scope_diff.append(s)
    +                 if header_diff:
    -                 if scope_diff:
    +                     header_tks = self._encode_headers(header_diff, 0)
    -                     header_tks = self._encode_parent_scopes(scope_diff, 0)
                          file_tks.extend(header_tks)
    +                 body_tks = self._encode_change(c.change.map(lambda c: c.strip("\n")))
    -                 body_tks = self._encode_change(c.change)
                      file_tks.extend(body_tks)
                      file_tks.append(Newline_id)
                      file_tks.append(Newline_id)
    +                 last_scope = c.headers
    -                 last_scope = c.parent_scopes
      
    +             mod_change = change_group[0].headers[:1]
    -             mod_change = change_group[0].parent_scopes[:1]
                  mod_chunks = break_into_chunks(
                      file_tks,
    -                 lambda i: self._encode_parent_scopes(mod_change, i),
 <0>                  self.max_ref_tks,
                      overlap=self.ref_chunk_overlap,
                  )
                  all_chunks.extend(mod_chunks)
              return all_chunks
      
       | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
        def _group_encode_changed_refs(
  +         self, changes: Sequence[ChangedCodeSpan]
  -         self, changes: Sequence[ChangedSpan]
        ) -> Sequence[TokenSeq]:
    # offset: -1
  +         module2changes = groupby(changes, lambda c: c.module)
  -         module2changes = groupby(changes, lambda c: c.path.module)
            all_chunks = list[TokenSeq]()
            for change_group in module2changes.values():
                change_group.sort(key=lambda c: c.line_range[0])
                file_tks = TokenSeq()
                # we'll add module as the chunk header, so we start within the module
  +             last_scope = change_group[0].headers[:1]
  -             last_scope = change_group[0].parent_scopes[:1]
                for c in change_group:
  +                 header_diff = list[ChangedHeader]()
  -                 scope_diff = []
  +                 for i, h in enumerate(c.</s>
===========unchanged ref 0===========
    at: coeditor.common
        TokenSeq = list[Token]
    
    at: coeditor.ctx_change_encoder.C3ProblemTokenizer
        max_scope_tks: int = 128
    
        ref_chunk_overlap: int = 32
    
    at: coeditor.encoding
        Newline_id = get_tk_id("\n")
    
        TruncateAt()
    
        truncate_section(sec: TokenSeq, direction: TruncateAt.Value, limit: int, add_bos: bool=True, inplace: bool=False) -> TokenSeq
    
        truncate_sections(total_limit: int, *sections: tuple[TokenSeq, TruncateAt.Value], add_bos: bool, inplace: bool=False) -> tuple[TokenSeq, ...]
    
    at: coeditor.encoding.TruncateAt
        Left = 0
    
        Right = 1
    
    
===========changed ref 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
  +     def _encode_header_change(self, ch: ChangedHeader) -> TokenSeq:
  +         hchange = ch.change.map(lambda s: s.strip("\n"))
  +         tks = truncate_section(
  +             change_to_tokens(hchange), TruncateAt.Left, self.max_scope_tks
  +         )
  +         return tks
  + 
===========changed ref 1===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
  +     def _encode_headers(
  +         self, scope_changes: Sequence[ChangedHeader], offset: int
  +     ) -> TokenSeq:
  +         scope_tks = join_list(
  +             (self._encode_header_change(c) for c in scope_changes), Newline_id
  +         )
  +         if offset != 0:
  +             scope_tks.extend(encode_basic(f"\n# offset: {offset}\n"))
  +         else:
  +             scope_tks.append(Newline_id)
  +         scope_tks = truncate_section(scope_tks, TruncateAt.Left, self.max_scope_tks)
  +         return scope_tks
  + 
===========changed ref 2===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
        def _encode_change(self, change: Change[str]) -> TokenSeq:
  -         if (key := _ObjId(id(change))) in self._id_cache:
  -             return self._id_cache[key]
  -         change = change.map(lambda s: s.strip("\n"))
  +         if (change_tks := self._change_cache.get(change)) is None:
  +             change_tks = change_to_tokens(change)
  -         change_tks = change_to_tokens(change)
  +             self._change_cache[change] = change_tks
  -         self._id_cache[key] = change_tks
            return change_tks
    
===========changed ref 3===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
  -     def _encode_scope_change(self, c: Change[ChangeScope]) -> TokenSeq:
  -         if (key := _ObjId(id(c))) in self._scope_cache:
  -             return self._scope_cache[key]
  -         hchange = c.map(lambda s: s.header_code.strip("\n"))
  -         tks = truncate_section(
  -             change_to_tokens(hchange), TruncateAt.Left, self.max_scope_tks
  -         )
  -         self._scope_cache[key] = tks
  -         return tks
  - 
===========changed ref 4===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
  -     def _encode_parent_scopes(
  -         self, scope_changes: Sequence[Change[ChangeScope]], offset: int
  -     ) -> TokenSeq:
  -         scope_tks = join_list(
  -             (self._encode_scope_change(c) for c in scope_changes), Newline_id
  -         )
  -         if offset != 0:
  -             scope_tks.extend(encode_basic(f"\n# offset: {offset}\n"))
  -         else:
  -             scope_tks.append(Newline_id)
  -         scope_tks = truncate_section(scope_tks, TruncateAt.Left, self.max_scope_tks)
  -         return scope_tks
  - 
===========changed ref 5===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
        def __post_init__(self):
  -         self._id_cache = FIFOCache[_ObjId, TokenSeq](maxsize=1000)
  +         self._change_cache = LRUCache[Change[str], TokenSeq](maxsize=5000)
  -         self._scope_cache = FIFOCache[_ObjId, TokenSeq](maxsize=1000)
  -         self._value_cache = FIFOCache[Any, Sequence[TokenSeq]](maxsize=1000)
    
===========changed ref 6===========
    # module: coeditor.ctx_change_encoder
    class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
  +     VERSION = "2.0"
  -     VERSION = "1.2"
  +     # change spans with more than this many lines will be ignored
  +     max_span_lines: int = 500
    
===========changed ref 7===========
    # module: coeditor.ctx_change_encoder
  + @dataclass(frozen=True)
  + class ChangedHeader:
  +     def __repr__(self) -> str:
  +         return (
  +             f"ChangedHeader(path={self.path}, range={self.line_range}, "
  +             f"change={self.change})"
  +         )
  + 
===========changed ref 8===========
    # module: coeditor.ctx_change_encoder
    @dataclass(frozen=True)
    class PyDefinition:
  -     @staticmethod
  -     def from_scope(scope: ChangeScope) -> "PyDefinition":
  -         path = scope.path
  -         full_name = PyFullName(f"{path.module}.{path.path}")
  -         start_pos = scope.header_line_range[0], 0
  -         end_pos = scope.header_line_range[1], 0
  -         return PyDefinition(full_name, start_pos, end_pos)
  - 
===========changed ref 9===========
    # module: coeditor.ctx_change_encoder
  + @dataclass(frozen=True)
  + class ChangedCodeSpan:
  +     """Represents the changes made to a span of code.
  +     This format does not store parent syntax nodes and is more suitable for serialization.
  +     """
  + 
  +     headers: Sequence[ChangedHeader]
  +     change: Change[str]
  +     # below are pre-edit attributes
  +     line_range: LineRange
  +     module: ModuleName
  + 
===========changed ref 10===========
    # module: coeditor.ctx_change_encoder
  + @dataclass(frozen=True)
  + class ChangedHeader:
  +     """Represents the changes made to a header.
  +     This format does not store parent syntax nodes and is more suitable for serialization.
  +     """
  + 
  +     change: Change[str]
  +     # below are pre-edit attributes
  +     type: str
  +     line_range: LineRange
  +     path: ProjectPath
  +  | 
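In the `_group_encode_changed_refs` diff above, scope headers are re-emitted only where they diverge from the previous span's scope chain. A standalone sketch of that header-diff rule, with headers simplified to plain strings:

    def header_diff(last_scope: list[str], headers: list[str]) -> list[str]:
        # keep a header only if it is new or differs at the same depth
        return [
            h
            for i, h in enumerate(headers)
            if i >= len(last_scope) or h != last_scope[i]
        ]

    assert header_diff(["mod"], ["mod", "A", "method1"]) == ["A", "method1"]
    assert header_diff(["mod", "A", "method1"], ["mod", "A", "method2"]) == ["method2"]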
| 
	spot.utils/pmap | 
	Modified | 
	temp-1 | 
	2e823493b40c36c3f449ba3571e2fa26454e3493 | 
	Update datasets creation pipeline. | 
	 <0>:<add>         desc = "pmap: " + f.__name__
 | 
	      # module: spot.utils
      def pmap(
          f: Callable[..., T1],
          *f_args: Any,
    +     desc: str | None = None,
    -     desc: str = "parallel map",
          key_args: Mapping[str, Any] | None = None,
          max_workers: int | None = None,
          chunksize: int | None = None,
          tqdm_args: Mapping[str, Any] | None = None,
      ) -> list[T1]:
          """
          Parallel map with progress display.
          """
          n = len(f_args[0])
          assert_eq(n, *(len(xs) for xs in f_args))
      
          tqdm_args = dict(tqdm_args) if tqdm_args else {}
          tqdm_args.setdefault("smoothing", 0.0)
    + 
    +     if desc is None:
 <0>  
          if key_args is None:
              key_args = {}
      
          if max_workers is None:
              max_workers = DefaultWorkers
          if max_workers <= 1:
              outs = list[T1]()
              for i in tqdm(range(n), desc=desc, **tqdm_args):
                  outs.append(f(*(a[i] for a in f_args), **key_args))
              return outs
      
          if chunksize is None:
              chunksize = max(1, n // (50 * max_workers))
      
          tag_f = _TaggedFunc(f, key_args)
          arg_tuples = zip(range(n), *f_args)
      
          with (
              multiprocessing.Pool(max_workers) as pool,
              tqdm(total=n, desc=desc, **tqdm_args) as pbar,
          ):
              results = dict[int, T1]()
              for i, r in pool.imap_unordered(tag_f, arg_tuples, chunksize=chunksize):
                  results[i] = r
                  pbar.update()
          return [results[i] for i in range(n)]
      
       | 
	===========unchanged ref 0===========
    at: multiprocessing
        Pool(processes: Optional[int]=..., initializer: Optional[Callable[..., Any]]=..., initargs: Iterable[Any]=..., maxtasksperchild: Optional[int]=...) -> pool.Pool
    
    at: spot.utils
        T1 = TypeVar("T1")
    
        DefaultWorkers: int = multiprocessing.cpu_count() // 2
        global DefaultWorkers
    
        _TaggedFunc(f: Callable[..., T1], key_args: Mapping[str, Any])
    
        assert_eq(x: T1, *xs: T1, extra_message: Callable[[], str]=lambda: "") -> None
    
    at: tqdm.std
        tqdm(iterable=None, desc=None, total=None, leave=True, file=None, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None, unit_divisor=1000, write_bytes=False, lock_args=None, nrows=None, colour=None, delay=0, gui=False, **kwargs)
    
    at: typing
        Callable = _CallableType(collections.abc.Callable, 2)
    
        Mapping = _alias(collections.abc.Mapping, 2)
    
     | 
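A minimal `pmap` usage sketch; with the change above, omitting `desc` makes the progress bar read "pmap: <function name>":

    # `square` must be a top-level function so multiprocessing can pickle it.
    def square(x: int) -> int:
        return x * x

    if __name__ == "__main__":
        ys = pmap(square, list(range(100)), max_workers=4)  # desc -> "pmap: square"
        assert ys == [x * x for x in range(100)]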
| 
	coeditor.dataset/TokenizedEditDataset.__repr__ | 
	Modified | 
	temp-1 | 
	2e823493b40c36c3f449ba3571e2fa26454e3493 | 
	Update datasets creation pipeline. | 
	 <0>:<add>         return f"TokenizedEditDataset(n_edits={n_edits})"
 | 
	      # module: coeditor.dataset
      @dataclass
      class TokenizedEditDataset(Generic[TEdit]):
          def __repr__(self) -> str:
    -         n_projects = len(self.project2edits)
    -         n_edits = sum(len(edits) for edits in self.project2edits.values())
    +         n_edits = len(self.all_edits())
    -         return f"TokenizedEditDataset(n_projects={n_projects}, n_edits={n_edits})"
 <0>  
       | 
	===========unchanged ref 0===========
    at: coeditor.dataset.TokenizedEditDataset
        _edits: list[TEdit]
    
        all_edits() -> list[TEdit]
        all_edits(self) -> list[TEdit]
    
    
===========changed ref 0===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
  +     _edits: list[TEdit]
  -     project2edits: dict[Path, list[TEdit]]
    
===========changed ref 1===========
    # module: spot.utils
    def pmap(
        f: Callable[..., T1],
        *f_args: Any,
  +     desc: str | None = None,
  -     desc: str = "parallel map",
        key_args: Mapping[str, Any] | None = None,
        max_workers: int | None = None,
        chunksize: int | None = None,
        tqdm_args: Mapping[str, Any] | None = None,
    ) -> list[T1]:
        """
        Parallel map with progress display.
        """
        n = len(f_args[0])
        assert_eq(n, *(len(xs) for xs in f_args))
    
        tqdm_args = dict(tqdm_args) if tqdm_args else {}
        tqdm_args.setdefault("smoothing", 0.0)
  + 
  +     if desc is None:
  +         desc = "pmap: " + f.__name__
    
        if key_args is None:
            key_args = {}
    
        if max_workers is None:
            max_workers = DefaultWorkers
        if max_workers <= 1:
            outs = list[T1]()
            for i in tqdm(range(n), desc=desc, **tqdm_args):
                outs.append(f(*(a[i] for a in f_args), **key_args))
            return outs
    
        if chunksize is None:
            chunksize = max(1, n // (50 * max_workers))
    
        tag_f = _TaggedFunc(f, key_args)
        arg_tuples = zip(range(n), *f_args)
    
        with (
            multiprocessing.Pool(max_workers) as pool,
            tqdm(total=n, desc=desc, **tqdm_args) as pbar,
        ):
            results = dict[int, T1]()
            for i, r in pool.imap_unordered(tag_f, arg_tuples, chunksize=chunksize):
                results[i] = r
                pbar.update()
        return [results[i] for i in range(n)]
     | 
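A quick sketch of the simplified repr: since the container is now a flat list, the per-project count is gone.

    ds = TokenizedEditDataset([])  # `_edits` is now a plain list
    assert repr(ds) == "TokenizedEditDataset(n_edits=0)"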
| 
	coeditor.dataset/TokenizedEditDataset.all_edits | 
	Modified | 
	temp-1 | 
	2e823493b40c36c3f449ba3571e2fa26454e3493 | 
	Update datasets creation pipeline. | 
	 <0>:<add>         return self._edits
 | 
	      # module: coeditor.dataset
      @dataclass
      class TokenizedEditDataset(Generic[TEdit]):
          def all_edits(self) -> list[TEdit]:
    -         return join_list(self.project2edits.values())
 <0>  
       | 
	===========unchanged ref 0===========
    at: dataclasses
        field(*, init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> Any
        field(*, default_factory: Callable[[], _T], init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> _T
        field(*, default: _T, init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> _T
    
    
===========changed ref 0===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
  -     def subset(self, repos: Iterable[Path]) -> "TokenizedEditDataset":
  -         return TokenizedEditDataset({repo: self.project2edits[repo] for repo in repos})
  - 
===========changed ref 1===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
  +     _edits: list[TEdit]
  -     project2edits: dict[Path, list[TEdit]]
    
===========changed ref 2===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
  -     def map(self, f: Callable[[TEdit], TEdit]) -> "TokenizedEditDataset[TEdit]":
  -         repos = tqdm(self.project2edits.items(), desc="transforming dataset")
  -         return TokenizedEditDataset(
  -             {repo: [f(e) for e in edits] for repo, edits in repos}
  -         )
  - 
===========changed ref 3===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
        def __repr__(self) -> str:
  -         n_projects = len(self.project2edits)
  -         n_edits = sum(len(edits) for edits in self.project2edits.values())
  +         n_edits = len(self.all_edits())
  +         return f"TokenizedEditDataset(n_edits={n_edits})"
  -         return f"TokenizedEditDataset(n_projects={n_projects}, n_edits={n_edits})"
    
===========changed ref 4===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
        def overall_stats(self) -> dict:
            all_edits = self.all_edits()
            n_added = sum(isinstance(e.change_type, Added) for e in all_edits)
            basic_stats = {
  -             "n_projects": len(self.project2edits),
                "n_edits": len(all_edits),
                "n_additions": n_added,
            }
            extra_stats = dict[str, list]()
            for e in all_edits:
                for k, v in e.stats().items():
                    if k in extra_stats:
                        extra_stats[k].append(v)
                    else:
                        extra_stats[k] = [v]
            return basic_stats | {k: scalar_stats(v) for k, v in extra_stats.items()}
    
===========changed ref 5===========
    # module: spot.utils
    def pmap(
        f: Callable[..., T1],
        *f_args: Any,
  +     desc: str | None = None,
  -     desc: str = "parallel map",
        key_args: Mapping[str, Any] | None = None,
        max_workers: int | None = None,
        chunksize: int | None = None,
        tqdm_args: Mapping[str, Any] | None = None,
    ) -> list[T1]:
        """
        Parallel map with progress display.
        """
        n = len(f_args[0])
        assert_eq(n, *(len(xs) for xs in f_args))
    
        tqdm_args = dict(tqdm_args) if tqdm_args else {}
        tqdm_args.setdefault("smoothing", 0.0)
  + 
  +     if desc is None:
  +         desc = "pmap: " + f.__name__
    
        if key_args is None:
            key_args = {}
    
        if max_workers is None:
            max_workers = DefaultWorkers
        if max_workers <= 1:
            outs = list[T1]()
            for i in tqdm(range(n), desc=desc, **tqdm_args):
                outs.append(f(*(a[i] for a in f_args), **key_args))
            return outs
    
        if chunksize is None:
            chunksize = max(1, n // (50 * max_workers))
    
        tag_f = _TaggedFunc(f, key_args)
        arg_tuples = zip(range(n), *f_args)
    
        with (
            multiprocessing.Pool(max_workers) as pool,
            tqdm(total=n, desc=desc, **tqdm_args) as pbar,
        ):
            results = dict[int, T1]()
            for i, r in pool.imap_unordered(tag_f, arg_tuples, chunksize=chunksize):
                results[i] = r
                pbar.update()
        return [results[i] for i in range(n)]
     | 
| 
	coeditor.dataset/TokenizedEditDataset.from_edits | 
	Modified | 
	temp-1 | 
	2e823493b40c36c3f449ba3571e2fa26454e3493 | 
	Update datasets creation pipeline. | 
	 <0>:<add>         return TokenizedEditDataset(list(edits))
 | 
	      # module: coeditor.dataset
      @dataclass
      class TokenizedEditDataset(Generic[TEdit]):
          @staticmethod
    -     def from_edits(
    -         edits: Iterable[TEdit], path=Path("all")
    -     ) -> "TokenizedEditDataset[TEdit]":
    +     def from_edits(edits: Iterable[TEdit]) -> "TokenizedEditDataset[TEdit]":
    -         return TokenizedEditDataset({path: list(edits)})
 <0>  
       | 
	===========unchanged ref 0===========
    at: dataclasses
        dataclass(_cls: Type[_T]) -> Type[_T]
        dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
        dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
        def all_edits(self) -> list[TEdit]:
  +         return self._edits
  -         return join_list(self.project2edits.values())
    
===========changed ref 1===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
  -     def subset(self, repos: Iterable[Path]) -> "TokenizedEditDataset":
  -         return TokenizedEditDataset({repo: self.project2edits[repo] for repo in repos})
  - 
===========changed ref 2===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
  +     _edits: list[TEdit]
  -     project2edits: dict[Path, list[TEdit]]
    
===========changed ref 3===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
  -     def map(self, f: Callable[[TEdit], TEdit]) -> "TokenizedEditDataset[TEdit]":
  -         repos = tqdm(self.project2edits.items(), desc="transforming dataset")
  -         return TokenizedEditDataset(
  -             {repo: [f(e) for e in edits] for repo, edits in repos}
  -         )
  - 
===========changed ref 4===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
        def __repr__(self) -> str:
  -         n_projects = len(self.project2edits)
  -         n_edits = sum(len(edits) for edits in self.project2edits.values())
  +         n_edits = len(self.all_edits())
  +         return f"TokenizedEditDataset(n_edits={n_edits})"
  -         return f"TokenizedEditDataset(n_projects={n_projects}, n_edits={n_edits})"
    
===========changed ref 5===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
        def overall_stats(self) -> dict:
            all_edits = self.all_edits()
            n_added = sum(isinstance(e.change_type, Added) for e in all_edits)
            basic_stats = {
  -             "n_projects": len(self.project2edits),
                "n_edits": len(all_edits),
                "n_additions": n_added,
            }
            extra_stats = dict[str, list]()
            for e in all_edits:
                for k, v in e.stats().items():
                    if k in extra_stats:
                        extra_stats[k].append(v)
                    else:
                        extra_stats[k] = [v]
            return basic_stats | {k: scalar_stats(v) for k, v in extra_stats.items()}
    
===========changed ref 6===========
    # module: spot.utils
    def pmap(
        f: Callable[..., T1],
        *f_args: Any,
  +     desc: str | None = None,
  -     desc: str = "parallel map",
        key_args: Mapping[str, Any] | None = None,
        max_workers: int | None = None,
        chunksize: int | None = None,
        tqdm_args: Mapping[str, Any] | None = None,
    ) -> list[T1]:
        """
        Parallel map with progress display.
        """
        n = len(f_args[0])
        assert_eq(n, *(len(xs) for xs in f_args))
    
        tqdm_args = dict(tqdm_args) if tqdm_args else {}
        tqdm_args.setdefault("smoothing", 0.0)
  + 
  +     if desc is None:
  +         desc = "pmap: " + f.__name__
    
        if key_args is None:
            key_args = {}
    
        if max_workers is None:
            max_workers = DefaultWorkers
        if max_workers <= 1:
            outs = list[T1]()
            for i in tqdm(range(n), desc=desc, **tqdm_args):
                outs.append(f(*(a[i] for a in f_args), **key_args))
            return outs
    
        if chunksize is None:
            chunksize = max(1, n // (50 * max_workers))
    
        tag_f = _TaggedFunc(f, key_args)
        arg_tuples = zip(range(n), *f_args)
    
        with (
            multiprocessing.Pool(max_workers) as pool,
            tqdm(total=n, desc=desc, **tqdm_args) as pbar,
        ):
            results = dict[int, T1]()
            for i, r in pool.imap_unordered(tag_f, arg_tuples, chunksize=chunksize):
                results[i] = r
                pbar.update()
        return [results[i] for i in range(n)]
     | 
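With the `path` argument removed, `from_edits` and `all_edits` form a direct round-trip; a small sketch using string stand-ins for real `TEdit` values:

    edits = ["edit_a", "edit_b"]  # stand-ins for TkC3Problem-like edits
    ds = TokenizedEditDataset.from_edits(edits)
    assert ds.all_edits() == ["edit_a", "edit_b"]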
| 
	coeditor.dataset/_process_commits | 
	Modified | 
	temp-1 | 
	2e823493b40c36c3f449ba3571e2fa26454e3493 | 
	Update datasets creation pipeline. | 
	 <0>:<add>     change_processor.append_stats(stats)
 | 
	      # module: coeditor.dataset
      def _process_commits(
          root: Path,
          workdir: Path,
          commits: Sequence[CommitInfo],
          is_training: bool,
    +     change_processor: ProjectChangeProcessor[C3Problem],
    -     encoder: C3EditEncoder,
      ) -> _ProcessingResult:
          # use process-specific parso cache
          _fix_jedi_cache(workdir)
          coeditor.code_change._tlogger.clear()
    +     change_processor.clear_stats()
    -     encoder.change_processor.clear_stats()
    +     change_processor.set_training(is_training)
    -     encoder.change_processor.set_training(is_training)
          try:
              # cannot return here since subprocess will be killed after returning
              edits = edits_from_commit_history(
                  root,
                  commits,
                  tempdir=workdir / "code",
    +             change_processor=change_processor,
    -             change_processor=encoder.change_processor,
    -             edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                  silent=True,
                  time_limit=time_limit_per_commit * (len(commits) + 10),
              )
          except Exception as e:
              if isinstance(e, KeyboardInterrupt):
                  raise
              warnings.warn(f"Failed to process project: {root}\nError: {e}")
              traceback.print_exception(e, limit=-6)
              edits = []
          stats = dict()
    -     encoder.change_processor.append_stats(stats)
 <0>      rec_add_dict_to(stats, {"tlogger": coeditor.code_change._tlogger.times})
          return _ProcessingResult(edits, stats)
      
       | 
	===========unchanged ref 0===========
    at: _warnings
        warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
        warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
    
    at: coeditor._utils
        DefaultWorkers: int = multiprocessing.cpu_count() // 2
        global DefaultWorkers
    
    at: coeditor.common
        rec_add_dict_to(target: dict[str, Any], value: Mapping[str, Any], value_merger: Callable[[Any, Any], Any]=lambda x, y: x + y)
    
    at: coeditor.dataset
        _ProcessingResult(edits: Sequence[C3Problem], stats: dict[str, dict | Any])
    
        time_limit_per_commit = 10.0
    
    at: pathlib
        Path()
    
    at: traceback
        print_exception(etype: Optional[Type[BaseException]], value: Optional[BaseException], tb: Optional[TracebackType], limit: Optional[int]=..., file: Optional[IO[str]]=..., chain: bool=...) -> None
    
    at: typing
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: coeditor.dataset
    @dataclass
    class _ProcessingResult:
  +     edits: Sequence[C3Problem]
  -     edits: Sequence[TkC3Problem]
        stats: dict[str, dict | Any]
    
===========changed ref 1===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
        @staticmethod
  -     def from_edits(
  -         edits: Iterable[TEdit], path=Path("all")
  -     ) -> "TokenizedEditDataset[TEdit]":
  +     def from_edits(edits: Iterable[TEdit]) -> "TokenizedEditDataset[TEdit]":
  +         return TokenizedEditDataset(list(edits))
  -         return TokenizedEditDataset({path: list(edits)})
    
===========changed ref 2===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
        def all_edits(self) -> list[TEdit]:
  +         return self._edits
  -         return join_list(self.project2edits.values())
    
===========changed ref 3===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
  -     def subset(self, repos: Iterable[Path]) -> "TokenizedEditDataset":
  -         return TokenizedEditDataset({repo: self.project2edits[repo] for repo in repos})
  - 
===========changed ref 4===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
  +     _edits: list[TEdit]
  -     project2edits: dict[Path, list[TEdit]]
    
===========changed ref 5===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
  -     def map(self, f: Callable[[TEdit], TEdit]) -> "TokenizedEditDataset[TEdit]":
  -         repos = tqdm(self.project2edits.items(), desc="transforming dataset")
  -         return TokenizedEditDataset(
  -             {repo: [f(e) for e in edits] for repo, edits in repos}
  -         )
  - 
===========changed ref 6===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
        def __repr__(self) -> str:
  -         n_projects = len(self.project2edits)
  -         n_edits = sum(len(edits) for edits in self.project2edits.values())
  +         n_edits = len(self.all_edits())
  +         return f"TokenizedEditDataset(n_edits={n_edits})"
  -         return f"TokenizedEditDataset(n_projects={n_projects}, n_edits={n_edits})"
    
===========changed ref 7===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
        def overall_stats(self) -> dict:
            all_edits = self.all_edits()
            n_added = sum(isinstance(e.change_type, Added) for e in all_edits)
            basic_stats = {
  -             "n_projects": len(self.project2edits),
                "n_edits": len(all_edits),
                "n_additions": n_added,
            }
            extra_stats = dict[str, list]()
            for e in all_edits:
                for k, v in e.stats().items():
                    if k in extra_stats:
                        extra_stats[k].append(v)
                    else:
                        extra_stats[k] = [v]
            return basic_stats | {k: scalar_stats(v) for k, v in extra_stats.items()}
    
===========changed ref 8===========
    # module: spot.utils
    def pmap(
        f: Callable[..., T1],
        *f_args: Any,
  +     desc: str | None = None,
  -     desc: str = "parallel map",
        key_args: Mapping[str, Any] | None = None,
        max_workers: int | None = None,
        chunksize: int | None = None,
        tqdm_args: Mapping[str, Any] | None = None,
    ) -> list[T1]:
        """
        Parallel map with progress display.
        """
        n = len(f_args[0])
        assert_eq(n, *(len(xs) for xs in f_args))
    
        tqdm_args = dict(tqdm_args) if tqdm_args else {}
        tqdm_args.setdefault("smoothing", 0.0)
  + 
  +     if desc is None:
  +         desc = "pmap: " + f.__name__
    
        if key_args is None:
            key_args = {}
    
        if max_workers is None:
            max_workers = DefaultWorkers
        if max_workers <= 1:
            outs = list[T1]()
            for i in tqdm(range(n), desc=desc, **tqdm_args):
                outs.append(f(*(a[i] for a in f_args), **key_args))
            return outs
    
        if chunksize is None:
            chunksize = max(1, n // (50 * max_workers))
    
        tag_f = _TaggedFunc(f, key_args)
        arg_tuples = zip(range(n), *f_args)
    
        with (
            multiprocessing.Pool(max_workers) as pool,
            tqdm(total=n, desc=desc, **tqdm_args) as pbar,
        ):
            results = dict[int, T1]()
            for i, r in pool.imap_unordered(tag_f, arg_tuples, chunksize=chunksize):
                results[i] = r
                pbar.update()
        return [results[i] for i in range(n)]
     | 
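The time budget `_process_commits` passes to `edits_from_commit_history` scales with the chunk size plus a fixed slack of ten commits; a worked sketch of that arithmetic:

    time_limit_per_commit = 10.0  # module-level constant in coeditor.dataset
    n_commits = 50                # a typical history chunk
    budget = time_limit_per_commit * (n_commits + 10)
    assert budget == 600.0        # ten minutes for a 50-commit chunk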
| 
	coeditor.dataset/dataset_from_projects | 
	Modified | 
	temp-1 | 
	2e823493b40c36c3f449ba3571e2fa26454e3493 | 
	Update datasets creation pipeline. | 
	 <0>:<add>     return project2edits
 | 
	      # module: coeditor.dataset
      def dataset_from_projects(
          project_roots: Sequence[Path],
    +     change_processor: ProjectChangeProcessor[C3Problem],
    -     encoder: C3EditEncoder,
          repo_training: Sequence[bool],
          max_history_per_repo: int = 1000,
          workers: int = DefaultWorkers,
    + ) -> "Mapping[Path, Sequence[C3Problem]]":
    - ) -> "TokenizedEditDataset[TkC3Problem]":
      <s>,
                  tqdm_args={"unit": "chunk"},
              )
          finally:
              if workdir.exists():
                  shutil.rmtree(workdir)
                  print("Workdir removed:", workdir)
      
    +     project2edits = dict[Path, list[C3Problem]]()
    -     project2edits = dict[Path, list[TkC3Problem]]()
      
          try:
              stats = dict[str, Any]()
              for root, pr in zip(roots, presults):
                  project2edits.setdefault(root, []).extend(pr.edits)
                  rec_add_dict_to(stats, pr.stats)
      
              if "tlogger" in stats:
                  df = TimeLogger.times_to_dataframe(stats.pop("tlogger"))
                  print("Time stats:")
                  display(df)
              if "analyzer_errors" in list(stats.keys()):
                  errors: dict = stats.pop("analyzer_errors")
                  for k in list(errors.keys()):
                      if JediUsageAnalyzer.is_known_error(k):
                          errors.pop(k)
                  if errors:
                      print("Analyzer errors:")
                      for k in sorted(errors.keys(), key=lambda k: errors[k], reverse=True):
                          print(f"{k}:\t{errors[k]}")
              if stats:
                  print("Other Stats:")
                  pretty_print_dict(stats)
          except Exception as e:
              if not isinstance(e, KeyboardInterrupt):
                  print("Error while printing stats:", e)
      
    -     return TokenizedEditDataset(project2edits)
 <0>  
       | 
	===========above chunk 0===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
  +     change_processor: ProjectChangeProcessor[C3Problem],
  -     encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
  + ) -> "Mapping[Path, Sequence[C3Problem]]":
  - ) -> "TokenizedEditDataset[TkC3Problem]":
    # offset: -1
    <s>        get_commit_history,
            project_roots,
            max_workers=workers,
            desc="Getting commit histories",
            tqdm_args={"unit": "repo"},
        )
        # keep the oldest portion of the history
        histories = [commits[-max_history_per_repo:] for commits in histories]
        # break long commit sequences into chunks for parallelization
        roots = list[Path]()
        chunk_training = list[bool]()
        chunked_histories = list[list[CommitInfo]]()
        for root, h, train in zip(project_roots, histories, repo_training):
            history_chunk_size = max(50, math.ceil(len(h) / 10))
            for i in range(0, len(h), history_chunk_size):
                roots.append(root)
                chunk_training.append(train)
                # note that we need 1 extra overlapping commit to get all diffs
                chunked_histories.append(h[i : i + history_chunk_size + 1])
        workdirs = [workdir / f"chunk-{i}" for i in range(len(roots))]
        try:
            presults = pmap(
                _process_commits,
                roots,
                workdirs,
                chunked_histories,
                chunk_training,
  -             key_args={"encoder": encoder},
  -             desc="Create tokenized edits",
  +             key_args={"change_processor": change_processor},
                max_workers=workers,
                tqdm_args={"unit": "chunk"},
            )
        finally:
            if workdir.exists():
               </s>
===========above chunk 1===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
  +     change_processor: ProjectChangeProcessor[C3Problem],
  -     encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
  + ) -> "Mapping[Path, Sequence[C3Problem]]":
  - ) -> "TokenizedEditDataset[TkC3Problem]":
    # offset: -2
        """
        Create a dataset of C3Problems from a list of project roots, using the given change processor.
        Args:
            - max_history_per_repo (int, optional): When the repo history is longer than
            this value, only the oldest portion is going to be used. Defaults to 1000.
        """
        workdir = Path(tempfile.gettempdir()) / "dataset_from_projects"
        histories = pmap(
            get_commit_history,
            project_roots,
            max_workers=workers,
            desc="Getting commit histories",</s>
===========unchanged ref 0===========
    at: IPython.core.display_functions
        display(*, include=None, exclude=None, metadata=None, transient=None, display_id=None, raw=False, clear=False, source=_sentinel, **kwargs)
    
    at: _warnings
        warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
        warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
    
    at: coeditor._utils
        DefaultWorkers: int = multiprocessing.cpu_count() // 2
        global DefaultWorkers
    
        pmap(f: Callable[..., T1], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], /, *iterables: Iterable[Any], desc: str | None=None, key_args: Mapping[str, Any] | None=None, max_workers: int | None=None, chunksize: int | None=None, tqdm_args: Mapping[str, Any] | None=None) -> list[T1]
    
        TimeLogger(times: dict[str, list[float]]=field(default_factory=dict))
    
    at: coeditor._utils.TimeLogger
        times: dict[str, list[float]] = field(default_factory=dict)
    
        times_to_dataframe(times: dict[str, list[float]])
    
    at: coeditor.common
        rec_add_dict_to(target: dict[str, Any], value: Mapping[str, Any], value_merger: Callable[[Any, Any], Any]=lambda x, y: x + y)
    
    at: coeditor.dataset
        _process_commits(root: Path, workdir: Path, commits: Sequence[CommitInfo], is_training: bool, change_processor: ProjectChangeProcessor[C3Problem]) -> _ProcessingResult
    
    at: coeditor.dataset._ProcessingResult
        edits: Sequence[C3Problem]
    
        stats: dict[str, dict | Any]
    
    
===========unchanged ref 1===========
    at: coeditor.dataset.dataset_from_projects
        workdir = Path(tempfile.gettempdir()) / "dataset_from_projects"
    
    at: math
        ceil(x: SupportsFloat, /) -> int
    
    at: pathlib
        Path()
    
    at: pathlib.Path
        __slots__ = ()
    
        iterdir() -> Generator[_P, None, None]
    
        exists() -> bool
    
    at: shutil
        rmtree(path: Union[bytes, StrPath], ignore_errors: bool=..., onerror: Optional[Callable[[Any, Any, Any], Any]]=...) -> None
    
    at: spot.utils
        pretty_print_dict(d: dict, level: int=0, max_show_level: int=1000, float_precision: int=5)
    
    at: typing
        Mapping = _alias(collections.abc.Mapping, 2)
    
        Sequence = _alias(collections.abc.Sequence, 1)
    
    at: typing.MutableMapping
        pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
        pop(key: _KT) -> _VT
    
    
===========changed ref 0===========
    # module: coeditor.dataset
    def _process_commits(
        root: Path,
        workdir: Path,
        commits: Sequence[CommitInfo],
        is_training: bool,
  +     change_processor: ProjectChangeProcessor[C3Problem],
  -     encoder: C3EditEncoder,
    ) -> _ProcessingResult:
        # use process-specific parso cache
        _fix_jedi_cache(workdir)
        coeditor.code_change._tlogger.clear()
  +     change_processor.clear_stats()
  -     encoder.change_processor.clear_stats()
  +     change_processor.set_training(is_training)
  -     encoder.change_processor.set_training(is_training)
        try:
            # cannot return here since subprocess will be killed after returning
            edits = edits_from_commit_history(
                root,
                commits,
                tempdir=workdir / "code",
  +             change_processor=change_processor,
  -             change_processor=encoder.change_processor,
  -             edit_encoder=encoder.edit_tokenizer.tokenize_problem,
                silent=True,
                time_limit=time_limit_per_commit * (len(commits) + 10),
            )
        except Exception as e:
            if isinstance(e, KeyboardInterrupt):
                raise
            warnings.warn(f"Failed to process project: {root}\nError: {e}")
            traceback.print_exception(e, limit=-6)
            edits = []
        stats = dict()
  +     change_processor.append_stats(stats)
  -     encoder.change_processor.append_stats(stats)
        rec_add_dict_to(stats, {"tlogger": coeditor.code_change._tlogger.times})
        return _ProcessingResult(edits, stats)
     | 
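A sketch of the history chunking done by `dataset_from_projects` above: each chunk spans at least 50 commits (or a tenth of the history, whichever is larger) plus one extra overlapping commit so no diff is lost at chunk borders:

    import math

    h = list(range(120))  # stand-in for 120 CommitInfo objects
    chunk_size = max(50, math.ceil(len(h) / 10))  # -> 50
    chunks = [h[i : i + chunk_size + 1] for i in range(0, len(h), chunk_size)]
    assert [len(c) for c in chunks] == [51, 51, 20]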
| 
	coeditor.dataset/datasets_from_repos | 
	Modified | 
	temp-1 | 
	2e823493b40c36c3f449ba3571e2fa26454e3493 | 
	Update datasets creation pipeline. | 
	 <0>:<add>     return {k: join_list(dataset[r] for r in repos) for k, repos in projects.items()}
 | 
	      # module: coeditor.dataset
      def datasets_from_repos(
          repos_root: Path,
    +     change_processor: ProjectChangeProcessor[C3Problem],
    -     encoder: C3EditEncoder,
          max_history_per_repo: int = 1000,
          workers: int = DefaultWorkers,
    + ) -> Mapping[str, Sequence[C3Problem]]:
    - ) -> dict[str, TokenizedEditDataset[TkC3Problem]]:
          splits = ["test", "valid", "train"]
          projects = dict[str, list[Path]]()
          split_is_training = dict[str, list[bool]]()
          for split in splits:
              if not (repos_root / split).exists():
                  warnings.warn(f"Split {split} not found at {repos_root / split}.")
                  continue
              ps = [p for p in (repos_root / split).iterdir() if p.is_dir]
              projects[split] = ps
              training = split == "train"
              split_is_training[split] = [training] * len(ps)
              if not ps:
                  warnings.warn(f"No projects found in {split} split")
      
          dataset = dataset_from_projects(
              join_list(projects.values()),
    +         change_processor=change_processor,
    -         encoder=encoder,
              repo_training=join_list(split_is_training.values()),
              max_history_per_repo=max_history_per_repo,
              workers=workers,
          )
    -     return {k: dataset.subset(v) for k, v in projects.items()}
 <0>  
       | 
	===========unchanged ref 0===========
    at: _warnings
        warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
        warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
    
    at: coeditor._utils
        DefaultWorkers: int = multiprocessing.cpu_count() // 2
        global DefaultWorkers
    
        repr_modified_args(instance, flatten: bool=False) -> str
    
    at: coeditor.common
        get_dataset_dir(dataname: str) -> Path
    
        join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
    
    at: coeditor.dataset
        dataset_from_projects(project_roots: Sequence[Path], change_processor: ProjectChangeProcessor[C3Problem], repo_training: Sequence[bool], max_history_per_repo: int=1000, workers: int=DefaultWorkers) -> "Mapping[Path, Sequence[C3Problem]]"
    
    at: coeditor.dataset.datasets_from_repos
        projects = dict[str, list[Path]]()
        projects[split] = ps
    
        ps = [p for p in (repos_root / split).iterdir() if p.is_dir]
    
        training = split == "train"
    
    at: pathlib.Path
        exists() -> bool
    
    at: typing
        Mapping = _alias(collections.abc.Mapping, 2)
    
        Sequence = _alias(collections.abc.Sequence, 1)
    
    
===========changed ref 0===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
  +     change_processor: ProjectChangeProcessor[C3Problem],
  -     encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
  + ) -> "Mapping[Path, Sequence[C3Problem]]":
  - ) -> "TokenizedEditDataset[TkC3Problem]":
        """
        Create a dataset of C3Problems from a list of project roots, using the given change processor.
        Args:
            - max_history_per_repo (int, optional): When the repo history is longer than
            this value, only the oldest portion is going to be used. Defaults to 1000.
        """
        workdir = Path(tempfile.gettempdir()) / "dataset_from_projects"
        histories = pmap(
            get_commit_history,
            project_roots,
            max_workers=workers,
            desc="Getting commit histories",
            tqdm_args={"unit": "repo"},
        )
        # keep the oldest portion of the history
        histories = [commits[-max_history_per_repo:] for commits in histories]
        # break long commit sequences into chunks for parallelization
        roots = list[Path]()
        chunk_training = list[bool]()
        chunked_histories = list[list[CommitInfo]]()
        for root, h, train in zip(project_roots, histories, repo_training):
            history_chunk_size = max(50, math.ceil(len(h) / 10))
            for i in range(0, len(h), history_chunk_size):
                roots.append(root)
                chunk_training.append(train)
                # note that we need 1 extra overlapping commit to get all diffs
                chunked_histories.append(h[i : i + history_chunk_size + 1])
        workdirs = [workdir / f"chunk-{i}" for i in range(len(roots))]
        try:
            presults = pmap(
                _process_commits,
                roots,</s>
===========changed ref 1===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
  +     change_processor: ProjectChangeProcessor[C3Problem],
  -     encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
  + ) -> "Mapping[Path, Sequence[C3Problem]]":
  - ) -> "TokenizedEditDataset[TkC3Problem]":
    # offset: 1
    <s> range(len(roots))]
        try:
            presults = pmap(
                _process_commits,
                roots,
                workdirs,
                chunked_histories,
                chunk_training,
  -             key_args={"encoder": encoder},
  -             desc="Create tokenized edits",
  +             key_args={"change_processor": change_processor},
                max_workers=workers,
                tqdm_args={"unit": "chunk"},
            )
        finally:
            if workdir.exists():
                shutil.rmtree(workdir)
                print("Workdir removed:", workdir)
    
  +     project2edits = dict[Path, list[C3Problem]]()
  -     project2edits = dict[Path, list[TkC3Problem]]()
    
        try:
            stats = dict[str, Any]()
            for root, pr in zip(roots, presults):
                project2edits.setdefault(root, []).extend(pr.edits)
                rec_add_dict_to(stats, pr.stats)
    
            if "tlogger" in stats:
                df = TimeLogger.times_to_dataframe(stats.pop("tlogger"))
                print("Time stats:")
                display(df)
            if "analyzer_errors" in list(stats.keys()):
                errors: dict = stats.pop("analyzer_errors")
                for k in list(errors.keys()):
                    if JediUsageAnalyzer.is_known_error(k):
    </s>
===========changed ref 2===========
    # module: coeditor.dataset
    def dataset_from_projects(
        project_roots: Sequence[Path],
  +     change_processor: ProjectChangeProcessor[C3Problem],
  -     encoder: C3EditEncoder,
        repo_training: Sequence[bool],
        max_history_per_repo: int = 1000,
        workers: int = DefaultWorkers,
  + ) -> "Mapping[Path, Sequence[C3Problem]]":
  - ) -> "TokenizedEditDataset[TkC3Problem]":
    # offset: 2
    <s>.pop(k)
                if errors:
                    print("Analyzer errors:")
                    for k in sorted(errors.keys(), key=lambda k: errors[k], reverse=True):
                        print(f"{k}:\t{errors[k]}")
            if stats:
                print("Other Stats:")
                pretty_print_dict(stats)
        except Exception as e:
            if not isinstance(e, KeyboardInterrupt):
                print("Error while printing stats:", e)
    
  +     return project2edits
  -     return TokenizedEditDataset(project2edits)
    
===========changed ref 3===========
    # module: coeditor.dataset
    @dataclass
    class _ProcessingResult:
  +     edits: Sequence[C3Problem]
  -     edits: Sequence[TkC3Problem]
        stats: dict[str, dict | Any]
    
===========changed ref 4===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
        @staticmethod
  -     def from_edits(
  -         edits: Iterable[TEdit], path=Path("all")
  -     ) -> "TokenizedEditDataset[TEdit]":
  +     def from_edits(edits: Iterable[TEdit]) -> "TokenizedEditDataset[TEdit]":
  +         return TokenizedEditDataset(list(edits))
  -         return TokenizedEditDataset({path: list(edits)})
    
===========changed ref 5===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
        def all_edits(self) -> list[TEdit]:
  +         return self._edits
  -         return join_list(self.project2edits.values())
    
===========changed ref 6===========
    # module: coeditor.dataset
    @dataclass
    class TokenizedEditDataset(Generic[TEdit]):
  -     def subset(self, repos: Iterable[Path]) -> "TokenizedEditDataset":
  -         return TokenizedEditDataset({repo: self.project2edits[repo] for repo in repos})
  -  | 
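`datasets_from_repos` expects the repos root to contain `test`, `valid`, and `train` subdirectories, each holding one directory per repository. A hypothetical invocation (the dataset name is made up; `get_dataset_dir` and `C3ProblemGenerator` appear elsewhere in these records):

    # repos_root/
    #   train/  repo_a/  repo_b/ ...
    #   valid/  repo_c/ ...
    #   test/   repo_d/ ...
    datasets = datasets_from_repos(
        get_dataset_dir("example") / "repos",  # hypothetical dataset name
        C3ProblemGenerator(),
        workers=8,
    )
    for split, problems in datasets.items():
        print(split, len(problems))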
| 
	coeditor.ctx_change_encoder/C3ProblemTokenizer.tokenize_problem | 
	Modified | 
	temp-1 | 
	2e823493b40c36c3f449ba3571e2fa26454e3493 | 
	Update datasets creation pipeline. | 
	 <0>:<add>                 named_references=all_refs,
 | 
	      # module: coeditor.ctx_change_encoder
      @dataclass
      class C3ProblemTokenizer:
          def tokenize_problem(
              self,
              problem: C3Problem,
          ) -> Sequence[TkC3Problem]:
      <s>) for i, chunk in enumerate(above_chunks)
                  ]
                  below_chunks = [
                      (f"below chunk {i}", chunk) for i, chunk in enumerate(below_chunks)
                  ]
                  all_refs = above_chunks + below_chunks + named_references
    -             size_sum = 0
    -             kept_refs = list[tuple[str, TokenSeq]]()
    -             for (name, chunk) in all_refs:
    -                 if size_sum + len(chunk) <= self.max_total_ref_tks:
    -                     size_sum += len(chunk)
    -                     kept_refs.append((name, chunk))
      
                  return TkC3Problem(
                      scope_tks + chunk_input,
                      chunk_output,
                      path=span.headers[-1].path,
                      change_type=span.change.map(lambda _: None),
    -                 named_references=kept_refs,
 <0>                  src_info=problem.src_info,
                  )
      
              problems = list[TkC3Problem]()
              for l in range(len(tk_delta.deltas) + 1):
                  finished = l == len(tk_delta.deltas)
                  input_growth = len(origin_lines[l]) + 2 if l < len(origin_lines) else 1
                  if (
                      finished
                      or chunk_lines >= self.max_lines_to_edit
                      or len(chunk_input) + input_growth > input_limit
                  ):
                      if has_change(chunk_output):
                          problems.append(get_problem(chunk_input, chunk_output))
                          if len(problems) >= self.max_chunks_per_elem:
                              break
      
                      if finished:
                          break
      
                      chunk_main_input = join_list(origin_lines[chunk_start_l:l], Newline_id)
                      chunk_main_delta</s> | 
	===========above chunk 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
        def tokenize_problem(
            self,
            problem: C3Problem,
        ) -> Sequence[TkC3Problem]:
    # offset: -1
    <s>
            chunk_output = TokenSeq()
            prev_change_tks = TokenSeq()
    
            def get_problem(chunk_input, chunk_output):
                # try move some prev_change_tks into the input
                above_tks = prev_change_tks
                below_tks = join_list(origin_lines[l:], Newline_id)
                chunk_input, above_tks, below_tks = self._inline_some_context(
                    chunk_input, above_tks, below_tks, input_limit
                )
    
                # limit the input size if it's too long (can happen for later chunks)
                chunk_input = truncate_section(chunk_input, TruncateAt.Right, input_limit)
                chunk_output = truncate_output_tks(chunk_input, chunk_output)
                chunk_output = truncate_section(
                    chunk_output, TruncateAt.Right, self.max_output_tks, add_bos=False
                )
    
                above_chunks = break_into_chunks(
                    above_tks,
                    lambda i: self._encode_headers(span.headers, -1 - i),
                    chunk_size=self.max_ref_tks,
                    overlap=self.ref_chunk_overlap,
                    right_to_left=True,
                )
                if finished:
                    below_chunks = []
                else:
                    below_chunks = break_into_chunks(
                        below_tks,
                        lambda i: self._encode_headers(span.headers, i + 1),
                        chunk_size=self.max_ref_tks,
                        overlap=self.ref_chunk_overlap,
                    )
                above_chunks = [
                    (f"above chunk {i}", chunk) for i, chunk in enumerate(above_chunks)
                ]
                below_chunks = [
                    (f"below chunk {</s>
===========above chunk 1===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
        def tokenize_problem(
            self,
            problem: C3Problem,
        ) -> Sequence[TkC3Problem]:
    # offset: -2
            span = problem.span
            named_references = list[tuple[str, TokenSeq]]()
            # compute the references that are relevant to this span
            relevant_chunks = self._group_encode_changed_refs(problem.relevant_changes)
            for i, chunk in enumerate(relevant_chunks):
                named_references.append((f"changed ref {i}", chunk))
            relevant_chunks = self._group_encode_unchanged_refs(problem.relevant_unchanged)
            for i, chunk in enumerate(relevant_chunks):
                named_references.append((f"unchanged ref {i}", chunk))
    
            diffs = change_to_line_diffs(span.change)
            original, delta = line_diffs_to_original_delta(diffs)
            origin_lines = split_list(encode_basic(original), Newline_id)
            tk_delta = delta.to_tk_delta()
            chunk_id = 0
            chunk_start_l = 0
            scope_tks = self._encode_headers(span.headers, 0)
            chunk_input = TokenSeq()
            input_limit = self.max_query_tks - len(scope_tks)
            chunk_lines = 0
            chunk_output = TokenSeq()
            prev_change_tks = TokenSeq()
    
            def get_problem(chunk</s>
===========below chunk 0===========
    # module: coeditor.ctx_change_encoder
    @dataclass
    class C3ProblemTokenizer:
        def tokenize_problem(
            self,
            problem: C3Problem,
        ) -> Sequence[TkC3Problem]:
    # offset: 1
    <s>input = join_list(origin_lines[chunk_start_l:l], Newline_id)
                    chunk_main_delta = tk_delta.for_input_range((chunk_start_l, l))
                    chunk_main_change = chunk_main_delta.to_change_tks(chunk_main_input)
                    prev_change_tks.extend(chunk_main_change)
                    prev_change_tks.append(Newline_id)
                    chunk_id += 1
                    chunk_input = TokenSeq()
                    chunk_lines = 0
                    chunk_output = TokenSeq()
                    chunk_start_l = l
    
                chunk_input.append(get_extra_id(chunk_lines))
                if l < len(origin_lines):
                    chunk_input.extend(origin_lines[l])
                    chunk_input.append(Newline_id)
                line_change = join_list(tk_delta.deltas[l], Newline_id)
                chunk_output.append(get_extra_id(chunk_lines))
                chunk_output.extend(line_change)
                if line_change and line_change[-1] != Del_id:
                    chunk_output.append(Newline_id)
                chunk_lines += 1
            return problems
    
    
===========unchanged ref 0===========
    at: coeditor.common
        TokenSeq = list[Token]
    
        split_list(lst: list[T1], sep: T1) -> list[list[T1]]
    
        join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
    
    at: coeditor.ctx_change_encoder
        C3Problem(span: ChangedCodeSpan, relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], src_info: dict[str, Any])
    
        TkC3Problem(input_tks: TokenSeq, output_tks: TokenSeq, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TokenSeq]], src_info: dict[str, Any])
    
    at: coeditor.ctx_change_encoder.C3Problem
        span: ChangedCodeSpan
    
        relevant_changes: Sequence[ChangedCodeSpan]
    
        relevant_unchanged: Sequence[ChangedCodeSpan]
    
        src_info: dict[str, Any]
    
    at: coeditor.ctx_change_encoder.C3ProblemTokenizer
        VERSION = "1.0"
    
        max_ref_tks: int = 512
    
        max_query_tks: int = 512
    
        max_output_tks: int = 256
    
        max_scope_tks: int = 128
    
        max_lines_to_edit: int = 20
    
        ref_chunk_overlap: int = 32
    
        max_total_ref_tks: int = 512 * 64  # a very large threshold
    
        max_chunks_per_elem: int = 4
    
        skip_unchanged_problems: bool = True
    
        _encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq
    
        _inline_some_context(input: TokenSeq, above_ctx: TokenSeq, below_ctx: TokenSeq, size_limit: int) -> tuple[TokenSeq, TokenSeq, TokenSeq]
    
        _group_encode_unchanged_refs(elems: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq]
    
     | 
| 
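The tokenizer row above windows long above/below context through `break_into_chunks`, whose implementation is not included in this record. The following is a minimal, hypothetical sketch of overlap-based chunking, under the assumptions that each chunk is prefixed by a per-index header and that consecutive chunks share `overlap` tokens; `break_into_chunks_sketch` and its exact slicing are illustrative, not the project's real code.

    from typing import Callable

    def break_into_chunks_sketch(
        tks: list[int],
        add_header: Callable[[int], list[int]],
        chunk_size: int,
        overlap: int,
        right_to_left: bool = False,
    ) -> list[list[int]]:
        # Split `tks` into windows of at most `chunk_size` tokens; consecutive
        # windows share `overlap` tokens, and chunk i is prefixed by add_header(i).
        assert chunk_size > overlap >= 0
        step = chunk_size - overlap
        chunks: list[list[int]] = []
        i, n = 0, len(tks)
        while i < n:
            if right_to_left:
                # consume from the right end first, as done for the "above" context
                hi = n - i
                lo = max(0, hi - chunk_size)
                piece = tks[lo:hi]
            else:
                lo = i
                piece = tks[lo : lo + chunk_size]
            chunks.append(add_header(len(chunks)) + piece)
            covered_all = lo == 0 if right_to_left else lo + chunk_size >= n
            if covered_all:
                break
            i += step
        return chunks

    # usage mirroring the call shape above (headers here are fake single tokens)
    chunks = break_into_chunks_sketch(
        list(range(50)), lambda i: [-(i + 1)], chunk_size=20, overlap=4, right_to_left=True
    )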
	coeditor.code_change/ChangeScope.from_tree | 
	Modified | 
	temp-1 | 
	f106fd266a6e09194f46b8cad8930f3f0978a011 | 
	Fix missing spans. | 
	 <0>:<add>         if isinstance(container, BaseNode):
 | 
	      # module: coeditor.code_change
      @dataclass
      class ChangeScope:
          @staticmethod
          def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
              spans = []
              subscopes = dict()
              scope = ChangeScope(path, tree, spans, subscopes, None)
              assert isinstance(tree, ScopeTree)
              is_func = isinstance(tree, ptree.Function)
      
              def mk_span(stmts):
                  # remove leading newlines
                  n_leading_newlines = 0
                  for s in stmts:
                      if s.type == ptree.Newline.type:
                          n_leading_newlines += 1
                      else:
                          break
                  if n_leading_newlines:
                      stmts = stmts[n_leading_newlines:]
                  if stmts:
                      yield StatementSpan(len(spans), stmts, scope)
      
              current_stmts = []
              container = tree if isinstance(tree, ptree.Module) else tree.get_suite()
    -         if isinstance(container, Node):
 <0>              content = container.children
              else:
                  content = []
              for s in content:
                  # we don't create inner scopes for function contents
                  if is_func or _is_scope_statement(as_any(s)):
                      current_stmts.append(s)
                  else:
                      if current_stmts:
                          spans.extend(mk_span(current_stmts))
                          current_stmts = []
              if current_stmts:
                  spans.extend(mk_span(current_stmts))
      
              if is_func:
                  # we don't create inner scopes for function contents
                  if not spans:
                      raise ValueError(f"Function with no spans: {path=}, {tree.get_code()=}")
                  return scope
              for stree in tree._search_in_scope(ptree.Function.type, ptree.Class.type):
                  stree: ptree.Function | ptree.Class
                  name = cast(ptree.Name, stree.name).value
                  spath = path.append(name)
                  subscope = ChangeScope.from_tree(spath, stree)
                  subscope.</s> | 
	===========below chunk 0===========
    # module: coeditor.code_change
    @dataclass
    class ChangeScope:
        @staticmethod
        def from_tree(path: ProjectPath, tree: ScopeTree) -> "ChangeScope":
    # offset: 1
    <s> = path.append(name)
                subscope = ChangeScope.from_tree(spath, stree)
                subscope.parent_scope = scope
                subscopes[name] = subscope
            return scope
    
    
===========unchanged ref 0===========
    at: coeditor._utils
        as_any(x) -> Any
    
    at: coeditor.code_change
        ScopeTree = ptree.Function | ptree.Class | ptree.Module
    
        ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None")
    
        _is_scope_statement(stmt: PyNode) -> bool
    
        StatementSpan(nth_in_parent: int, statements: Sequence[PyNode], scope: ChangeScope)
    
    at: coeditor.code_change.ChangeScope
        path: ProjectPath
    
        tree: ScopeTree
    
        spans: Sequence["StatementSpan"]
    
        subscopes: Mapping[str, Self]
    
        parent_scope: "ChangeScope | None"
    
    at: parso.python.tree
        Newline()
    
        Name()
    
        Module(children)
    
        Class(children)
    
        Function(children)
    
    at: parso.python.tree.Class
        type = 'classdef'
    
        __slots__ = ()
    
    at: parso.python.tree.ClassOrFunc
        __slots__ = ()
    
    at: parso.python.tree.Function
        type = 'funcdef'
    
    at: parso.python.tree.Newline
        __slots__ = ()
    
        type = 'newline'
    
    at: parso.python.tree.Scope
        __slots__ = ()
    
        _search_in_scope(*names)
    
        get_suite()
    
    at: parso.tree
        BaseNode(children: List[NodeOrLeaf])
    
    at: parso.tree.BaseNode
        __slots__ = ('children',)
    
        get_code(include_prefix=True)
    
    at: parso.tree.BaseNode.__init__
        self.children = children
    
    at: parso.tree.Leaf.__init__
        self.value = value
    
    at: parso.tree.NodeOrLeaf
        __slots__ = ('parent',)
    
        type: str
    
        parent: 'Optional[BaseNode]'
    
    
===========unchanged ref 1===========
    at: spot.static_analysis
        ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
    
    at: spot.static_analysis.ProjectPath
        module: ModuleName
    
        path: ElemPath
    
        append(path: ElemPath) -> "ProjectPath"
    
    at: typing
        cast(typ: Type[_T], val: Any) -> _T
        cast(typ: str, val: Any) -> Any
        cast(typ: object, val: Any) -> Any
    
    
===========changed ref 0===========
    # module: tests.coeditor.test_edits
  + def test_extra_ids():
  +     all_extra_ids = _Tokenizer.additional_special_tokens_ids
  + 
  +     for x in all_extra_ids:
  +         assert is_extra_id(x)
  +         n = extra_id_to_number(x)
  +         assert get_extra_id(n) == x
  + 
===========changed ref 1===========
    # module: tests.coeditor.test_code_change
    def test_change_scope():
        code1 = dedent(
            """\
            import os
            
            x = 1
            y = x + 1
            
            def f1():
                global x
                x *= 5
                return x
                
            if __name__ == "__main__":
                print(f1() + x)
                    
            @annotated
            def f2():
                return 1
                
            @dataclass
            class A:
                attr1: int
                
                @staticmethod
                def method1():
                    return 1
                    
                class B:
                    inner_attr1: int
            """
        )
        mod_tree = code_to_module(code1)
        scope = ChangeScope.from_tree(ProjectPath("code1", ""), mod_tree)
        global_spans = [
            dedent(
                """\
                x = 1
                y = x + 1
                """
            ),
            dedent(
                """\
                if __name__ == "__main__":
                    print(f1() + x)
                """
            ),
        ]
  +     try:
  +         for i, code in enumerate(global_spans):
  -     for i, code in enumerate(global_spans):
  +             assert_str_equal(scope.spans[i].code, code)
  -         assert_str_equal(scope.spans[i].code, code)
  +     except Exception:
  +         print_err(f"{scope.spans=}")
  +         raise
    
        f1_expect = dedent(
            """\
            global x
            x *= 5
            return x
            """
        )
        f1_code = scope.subscopes["f1"].spans_code
        assert_str_equal(f1_code, indent(f1_expect, " " * 4))
    
        f2_expect = dedent(
            """\
            @annotated
            def f2():
                return 1
            """
        )
        f2_code = scope.subscopes["f2"].all_code
        assert_str_</s>
===========changed ref 2===========
    # module: tests.coeditor.test_code_change
    def test_change_scope():
    # offset: 1
    <s>        """
        )
        f2_code = scope.subscopes["f2"].all_code
        assert_str_equal(f2_code, f2_expect)
    
        attr1_expect = dedent(
            """\
            attr1: int
            """
        )
        attr1_code = scope.subscopes["A"].spans_code
        assert_str_equal(attr1_code, indent(attr1_expect, " " * 4))
    
        method1_expect = dedent(
            """\
            @staticmethod
            def method1():
                return 1
            """
        )
        method1_code = scope.subscopes["A"].subscopes["method1"].all_code
        assert_str_equal(method1_code, indent(method1_expect, " " * 4))
    
        inner_attr1_expect = dedent(
            """\
            class B:
                inner_attr1: int
            """
        )
        inner_class_code = scope.subscopes["A"].subscopes["B"].all_code
        assert_str_equal(inner_class_code, indent(inner_attr1_expect, " " * 4))
     | 
| 
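`ChangeScope.from_tree` above walks a parso tree (Module, Class, or Function) and recurses into named subscopes. For orientation, here is a self-contained sketch of the same kind of traversal using only public parso APIs; the printing is illustrative and not part of the project's code.

    import parso
    from parso.python import tree as ptree

    code = (
        "def f1():\n"
        "    return 1\n"
        "\n"
        "class A:\n"
        "    attr1: int\n"
    )
    module = parso.parse(code)
    assert isinstance(module, ptree.Module)

    for node in list(module.iter_funcdefs()) + list(module.iter_classdefs()):
        # `node.name` is a Name leaf; `.value` holds the identifier string,
        # analogous to `cast(ptree.Name, stree.name).value` above
        print(node.type, node.name.value)
        # a Function/Class body lives in its suite, mirroring `tree.get_suite()`
        print(node.get_suite().get_code())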
	coeditor.encoding/get_extra_id | 
	Modified | 
	temp-1 | 
	0a557eb3eef82b76ad61c98704f8bfdf960a3ca1 | 
	Improve encoder performance. | 
	 <0>:<add>     return _min_extra_id + (N_Extra_Ids - 1 - i)
 | 
	      # module: coeditor.encoding
      def get_extra_id(i: int) -> int:
          assert 0 <= i < N_Extra_Ids
    -     return _Tokenizer.additional_special_tokens_ids[N_Extra_Ids - 1 - i]
 <0>  
       | 
	===========unchanged ref 0===========
    at: coeditor.encoding
        N_Extra_Ids = 100
    
        _min_extra_id = _Tokenizer.additional_special_tokens_ids[0]
    
     | 
| 
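The fix above replaces a tokenizer list lookup with plain arithmetic: extra id `i` maps to `_min_extra_id + (N_Extra_Ids - 1 - i)`, so extra id 0 gets the largest token id. A standalone sketch of the mapping and its inverse, matching the round-trip asserted in `test_extra_ids`; `_min_extra_id = 32000` below is a placeholder, not the real tokenizer constant.

    N_Extra_Ids = 100
    _min_extra_id = 32000  # assumption for illustration only

    def get_extra_id(i: int) -> int:
        assert 0 <= i < N_Extra_Ids
        # ids are laid out so that extra id 0 is the *largest* token id
        return _min_extra_id + (N_Extra_Ids - 1 - i)

    def extra_id_to_number(tk: int) -> int:
        # inverse of get_extra_id
        return N_Extra_Ids - 1 - (tk - _min_extra_id)

    for i in (0, 1, 99):
        assert extra_id_to_number(get_extra_id(i)) == i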
	coeditor.encoding/change_to_tokens | 
	Modified | 
	temp-1 | 
	0a557eb3eef82b76ad61c98704f8bfdf960a3ca1 | 
	Improve encoder performance. | 
	 <0>:<add>             raise AssertionError(f"Not a change type: {change}")
 | 
	      # module: coeditor.encoding
      def change_to_tokens(change: Change[str]) -> TokenSeq:
    +     match change:
    +         case Modified(before=before, after=after, unchanged=unchanged):
    +             if unchanged or before == after:
    +                 return encode_basic(before)
    +             else:
    +                 diffs = change_to_line_diffs(change)
    -     diffs = change_to_line_diffs(change)
    +                 return encode_diffs(diffs)
    -     return encode_diffs(diffs)
    +         case Added() | Deleted():
    +             lines = split_list(encode_basic(change.earlier()), Newline_id)
    +             tk = Add_id if isinstance(change, Added) else Del_id
    +             return join_list([tk] + line for line in lines)
    +         case _:
 <0>  
       | 
	===========unchanged ref 0===========
    at: coeditor.common
        TokenSeq = list[Token]
    
    
===========changed ref 0===========
    # module: coeditor.encoding
    def get_extra_id(i: int) -> int:
        assert 0 <= i < N_Extra_Ids
  +     return _min_extra_id + (N_Extra_Ids - 1 - i)
  -     return _Tokenizer.additional_special_tokens_ids[N_Extra_Ids - 1 - i]
     | 
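In the `Added() | Deleted()` branch above, every encoded line is prefixed with the add or delete token before the per-line segments are concatenated. A minimal sketch of that branch with placeholder token ids and a character-level stand-in for `encode_basic` (the real tokenizer and ids differ):

    Add_id, Del_id, Newline_id = 1, 2, 3  # placeholder ids, not the real ones

    def encode_basic(text: str) -> list[int]:
        # stand-in: one fake token per character, newlines mapped to Newline_id
        return [Newline_id if c == "\n" else 100 + ord(c) for c in text]

    def split_list(lst: list[int], sep: int) -> list[list[int]]:
        out, cur = [], []
        for x in lst:
            if x == sep:
                out.append(cur)
                cur = []
            else:
                cur.append(x)
        out.append(cur)
        return out

    def join_list(segs, sep=None) -> list[int]:
        out: list[int] = []
        for i, seg in enumerate(segs):
            if i and sep is not None:
                out.append(sep)
            out.extend(seg)
        return out

    def added_change_to_tokens(text: str) -> list[int]:
        lines = split_list(encode_basic(text), Newline_id)
        # as in the source, segments are joined without an explicit separator
        return join_list([Add_id] + line for line in lines)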