12Parker committed (verified)
Commit ef53dcb · 1 Parent(s): c01572f

Upload 2 files

Files changed (2)
  1. test-dataset.json +6 -3
  2. train-dataset.json +0 -0
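
For orientation, the diff below adds a "version" field (and the trailing comma on "created_at" that it requires) to each record in test-dataset.json. A minimal sketch of how the updated records can be read, assuming the file is a plain JSON array of task records as the diff suggests (this snippet is illustrative only and not part of the commit):

```python
# Minimal sketch (hypothetical usage): load the updated test-dataset.json
# and confirm each record now carries the new fields.
import json

with open("test-dataset.json") as f:
    records = json.load(f)

for rec in records:
    # "version" is the field added by this commit; "created_at" already existed.
    print(rec.get("repo"), rec.get("created_at"), rec.get("version"))
```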
test-dataset.json CHANGED
@@ -11,7 +11,8 @@
  "test_patch": "diff --git a/celery/tests/worker/test_request.py b/celery/tests/worker/test_request.py\n--- a/celery/tests/worker/test_request.py\n+++ b/celery/tests/worker/test_request.py\n@@ -325,6 +325,20 @@ def test_on_failure_Reject_rejects_with_requeue(self):\n req_logger, req.connection_errors, True,\n )\n \n+ def test_on_failure_WrokerLostError_rejects_with_requeue(self):\n+ einfo = None\n+ try:\n+ raise WorkerLostError()\n+ except:\n+ einfo = ExceptionInfo(internal=True)\n+ req = self.get_request(self.add.s(2, 2))\n+ req.task.acks_late = True\n+ req.task.reject_on_worker_lost = True\n+ req.delivery_info['redelivered'] = False\n+ req.on_failure(einfo)\n+ req.on_reject.assert_called_with(req_logger,\n+ req.connection_errors, True)\n+\n def test_tzlocal_is_cached(self):\n req = self.get_request(self.add.s(2, 2))\n req._tzlocal = 'foo'\n",
  "problem_statement": "Message being acknowledged on WorkerLostError when CELERY_ACKS_LATE=True\nWhen using celery v3.0.24, with `CELERY_ACKS_LATE = True` , if the OOM killer kills the celery worker, then the worker acknowledges the message.\nAs per [this](https://github.com/celery/celery/commit/e810420c) commit. The `exc_info.internal` comes in as `false`, which means it is not a internal error, due to which the message is acknowledged.\nThe desirable behaviour, in such a case would be to not acknowledge the message (and be able to know, whether its a OOM error), so that some other worker can pick it up. \nAs a workaround, I've commented out the [code](https://github.com/siddharth96/celery/commit/427695d1b23034dadda85fd7a48f7367831be4fa), where celery acknowledges the message, because in such a case, message will be lost.\n\n",
  "hints_text": "This is deliberate as if a task is killed it may mean that the next invocation will also cause the same to happen. If the task is redelivered it may cause a loop where the same conditions occur again and again. Also, sadly you cannot distinguish processes killed by OOM from processes killed by other means, and if an administrator kills -9 a task going amok, you usually don't want that task to be called again.\n\nThere could be a configuration option for not acking terminated tasks, but I'm not sure how useful that would be.\nA better solution could be to use `basic_reject(requeue=False)` instead of `basic_ack`, that way you can configure\na dead letter queue so that the killed tasks will be sent to a queue for manual inspection.\n\nI must say, regardless of the status of this feature request, the documentation is misleading. Specifically, [this FAQ makes it seem that process failures would NOT acknowledge messages](http://celery.readthedocs.org/en/latest/faq.html#faq-acks-late-vs-retry). And [this FAQ boldface states](http://celery.readthedocs.org/en/latest/faq.html#id54) that in the event of a kill signal (9), that acks_late will allow the task to re-run (which again, is patently wrong based on this poorly documented behavior). Nowhere in the docs have I found that if the process _dies_, the message will be acknowledged, regardless of acks_late or not. (for instance, I have a set of 10k+ tasks, and some 1% of tasks wind up acknowledged but incomplete when a WorkerLostError is thrown in connection with the worker, although there are no other errors of any kind in any of my logs related to that task).\n\nTL;DR at the least, appropriately document the current state when describing the functionality and limitations of acks_late. A work-around would be helpful -- I'm not sure I understand the solution of using `basic_reject`, although I'll keep looking into it.\n\nThe docs are referring to killing the worker process with KILL, not the child processes. The term worker will always refer to the worker instance, not the pool processes. The section within about acks_late is probably not very helpful and should be removed\n",
- "created_at": "2015-10-06T05:34:34Z"
+ "created_at": "2015-10-06T05:34:34Z",
+ "version": "1.0"
  },
  {
  "repo": "NVIDIA/NeMo",
@@ -86,7 +87,8 @@
  "test_patch": "diff --git a/example/current_app/test_module/__init__.py b/example/current_app/test_module/__init__.py\nnew file mode 100644\ndiff --git a/example/current_app/test_module/slack_app.py b/example/current_app/test_module/slack_app.py\nnew file mode 100644\n--- /dev/null\n+++ b/example/current_app/test_module/slack_app.py\n@@ -0,0 +1,16 @@\n+# ------------------\n+# Only for running this script here\n+import logging\n+import sys\n+from os.path import dirname\n+\n+sys.path.insert(1, f\"{dirname(__file__)}/../../..\")\n+logging.basicConfig(level=logging.DEBUG)\n+# ------------------\n+\n+from flask import current_app as app\n+from slackeventsapi import SlackEventAdapter\n+import os\n+\n+slack_signing_secret = os.environ[\"SLACK_SIGNING_SECRET\"]\n+slack_events_adapter = SlackEventAdapter(slack_signing_secret, \"/slack/events\", app)\ndiff --git a/tests/test_server.py b/tests/test_server.py\n--- a/tests/test_server.py\n+++ b/tests/test_server.py\n@@ -18,7 +18,7 @@ def test_server_not_flask():\n with pytest.raises(TypeError) as e:\n invalid_flask = \"I am not a Flask\"\n SlackEventAdapter(\"SIGNING_SECRET\", \"/slack/events\", invalid_flask)\n- assert e.value.args[0] == 'Server must be an instance of Flask or Blueprint'\n+ assert e.value.args[0] == 'Server must be an instance of Flask, Blueprint, or LocalProxy'\n \n \n def test_blueprint_server():\n",
  "problem_statement": "Passing Flask app proxy as server\nHi Guys,\r\n\r\nI have an app factory on my setup and the app object usually it is invoked as :\r\n`from flask import current_app as app`\r\n\r\nHowever, the slackeventsapi complains about the app object : \r\n`TypeError(\"Server must be an instance of Flask\")`\r\n\r\nI have fixed adding the following to server.py : \r\n\r\n`from werkzeug.local import LocalProxy # Importing the localproxy class`\r\n\r\nLine 25 \r\n Changed from : \r\n ` if isinstance(server, Flask):`\r\n to :\r\n `if isinstance(server, Flask) or isinstance(server, LocalProxy):`\r\n\r\nBasically, if a Flask app proxy is passed the api will carry on without complaining since it has the same methods as the Flask app object.\r\n\r\nI hope this help other people and it is considered as a solution if more information is needed I am help to provide. \r\n\r\nThanks for the good work with the API.\r\n\r\n\r\n\r\n### What type of issue is this? (place an `x` in one of the `[ ]`)\r\n- [X] bug ?\r\n- [X] enhancement (feature request)\r\n- [ ] question\r\n- [ ] documentation related\r\n- [ ] testing related\r\n- [ ] discussion\r\n\r\n### Requirements\r\n* [X] I've read and understood the [Contributing guidelines](https://github.com/slackapi/python-slack-events-api/blob/master/.github/contributing.md) and have done my best effort to follow them.\r\n* [X] I've read and agree to the [Code of Conduct](https://slackhq.github.io/code-of-conduct).\r\n* [X] I've searched for any related issues and avoided creating a duplicate issue.\r\n\r\n#### Reproducible in:\r\nslackeventsapi version: slackeventsapi==2.1.0\r\npython version: Python 3.7.3\r\nOS version(s): \r\n\r\n\r\n\nPassing Flask app proxy as server\nHi Guys,\r\n\r\nI have an app factory on my setup and the app object usually it is invoked as :\r\n`from flask import current_app as app`\r\n\r\nHowever, the slackeventsapi complains about the app object : \r\n`TypeError(\"Server must be an instance of Flask\")`\r\n\r\nI have fixed adding the following to server.py : \r\n\r\n`from werkzeug.local import LocalProxy # Importing the localproxy class`\r\n\r\nLine 25 \r\n Changed from : \r\n ` if isinstance(server, Flask):`\r\n to :\r\n `if isinstance(server, Flask) or isinstance(server, LocalProxy):`\r\n\r\nBasically, if a Flask app proxy is passed the api will carry on without complaining since it has the same methods as the Flask app object.\r\n\r\nI hope this help other people and it is considered as a solution if more information is needed I am help to provide. \r\n\r\nThanks for the good work with the API.\r\n\r\n\r\n\r\n### What type of issue is this? (place an `x` in one of the `[ ]`)\r\n- [X] bug ?\r\n- [X] enhancement (feature request)\r\n- [ ] question\r\n- [ ] documentation related\r\n- [ ] testing related\r\n- [ ] discussion\r\n\r\n### Requirements\r\n* [X] I've read and understood the [Contributing guidelines](https://github.com/slackapi/python-slack-events-api/blob/master/.github/contributing.md) and have done my best effort to follow them.\r\n* [X] I've read and agree to the [Code of Conduct](https://slackhq.github.io/code-of-conduct).\r\n* [X] I've searched for any related issues and avoided creating a duplicate issue.\r\n\r\n#### Reproducible in:\r\nslackeventsapi version: slackeventsapi==2.1.0\r\npython version: Python 3.7.3\r\nOS version(s): \r\n\r\n\r\n\n",
  "hints_text": "\n",
- "created_at": "2020-06-12T06:58:10Z"
+ "created_at": "2020-06-12T06:58:10Z",
+ "version": "1.0"
  },
  {
  "repo": "celery/celery",
@@ -100,6 +102,7 @@
  "test_patch": "diff --git a/celery/tests/backends/test_amqp.py b/celery/tests/backends/test_amqp.py\n--- a/celery/tests/backends/test_amqp.py\n+++ b/celery/tests/backends/test_amqp.py\n@@ -13,6 +13,7 @@\n from celery.backends.amqp import AMQPBackend\n from celery.exceptions import TimeoutError\n from celery.five import Empty, Queue, range\n+from celery.result import AsyncResult\n from celery.utils import uuid\n \n from celery.tests.case import (\n@@ -246,10 +247,20 @@ def test_wait_for(self):\n with self.assertRaises(TimeoutError):\n b.wait_for(tid, timeout=0.01, cache=False)\n \n- def test_drain_events_remaining_timeouts(self):\n+ def test_drain_events_decodes_exceptions_in_meta(self):\n+ tid = uuid()\n+ b = self.create_backend(serializer=\"json\")\n+ b.store_result(tid, RuntimeError(\"aap\"), states.FAILURE)\n+ result = AsyncResult(tid, backend=b)\n \n- class Connection(object):\n+ with self.assertRaises(Exception) as cm:\n+ result.get()\n \n+ self.assertEqual(cm.exception.__class__.__name__, \"RuntimeError\")\n+ self.assertEqual(str(cm.exception), \"aap\")\n+\n+ def test_drain_events_remaining_timeouts(self):\n+ class Connection(object):\n def drain_events(self, timeout=None):\n pass\n \n",
  "problem_statement": "CELERY_RESULT_SERIALIZER = 'json' breaks Exception marshaling\nSetting `CELERY_RESULT_SERIALIZER = json` and raising an exception in the worker leads to this:\n\n```\n/path/to/lib/python2.7/site-packages/celery/result.py in get(self, timeout, propagate, interval, no_ack, follow_parents, EXCEPTION_STATES, PROPAGATE_STATES)\n 173 status = meta['status']\n 174 if status in PROPAGATE_STATES and propagate:\n--> 175 raise meta['result']\n 176 return meta['result']\n 177 wait = get # deprecated alias to :meth:`get`.\n\nTypeError: exceptions must be old-style classes or derived from BaseException, not dict\n```\n\nwhere the contents of `meta['result']` are (in my case):\n\n```\n{u'exc_message': u'unknown keys: nam', u'exc_type': u'ValueError'}\n```\n\nso it _looks_ like celery could convert the dict to a real exception before raising, but it does not currently. Changing back to `pickle` works as expected.\n\nbug can be reproduced with the following:\n\n``` python\n# jsonresults.py\nfrom celery.app.base import Celery\n\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_RESULT_BACKEND = 'amqp'\n\napp = Celery(config_source=__name__)\n\[email protected]\ndef hello():\n raise ValueError('go away')\n```\n\nworker:\n\n```\n# python -m celery --app=jsonresults:app worker\n```\n\ncaller:\n\n``` python\nimport jsonresults\njsonresults.hello.delay().get()\n```\n\n",
  "hints_text": "This is biting me as well. Any news?\n",
- "created_at": "2015-04-29T14:52:17Z"
+ "created_at": "2015-04-29T14:52:17Z",
+ "version": "1.0"
  }
  ]
 
train-dataset.json CHANGED
The diff for this file is too large to render. See raw diff