Posted over 4 years ago by Parijat Srivastava
My application works fine without gunicorn and eventlet in development mode. With the command python3 application.py, everything runs just fine.
When I run the app with gunicorn --worker-class eventlet -w 1 application:application, some functions don't work at all. The app itself still runs, but I can't call AWS services, send email with the smtplib library, use the requests library, etc. These problems occur only with eventlet/gunicorn, not with socketio.run(app).
When I run my app with gunicorn --worker-class eventlet -w 1 application:application, I get:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/gunicorn/arbiter.py", line 583, in spawn_worker
worker.init_process()
File "/usr/local/lib/python3.6/site-packages/gunicorn/workers/geventlet.py", line 99, in init_process
super().init_process()
File "/usr/local/lib/python3.6/site-packages/gunicorn/workers/base.py", line 119, in init_process
self.load_wsgi()
File "/usr/local/lib/python3.6/site-packages/gunicorn/workers/base.py", line 144, in load_wsgi
self.wsgi = self.app.wsgi()
File "/usr/local/lib/python3.6/site-packages/gunicorn/app/base.py", line 67, in wsgi
self.callable = self.load()
File "/usr/local/lib/python3.6/site-packages/gunicorn/app/wsgiapp.py", line 49, in load
return self.load_wsgiapp()
File "/usr/local/lib/python3.6/site-packages/gunicorn/app/wsgiapp.py", line 39, in load_wsgiapp
return util.import_app(self.app_uri)
File "/usr/local/lib/python3.6/site-packages/gunicorn/util.py", line 358, in import_app
mod = importlib.import_module(module)
File "/usr/lib64/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "", line 994, in _gcd_import
File "", line 971, in _find_and_load
File "", line 955, in _find_and_load_unlocked
File "", line 665, in _load_unlocked
File "", line 678, in exec_module
File "", line 219, in _call_with_frames_removed
File "/home/ec2-user/environment/cognito/application.py", line 15, in
JWKS = requests.get(JWKS_URL).json()["keys"]
File "/usr/local/lib/python3.6/site-packages/requests/api.py", line 72, in get
return request('get', url, params=params, **kwargs)
File "/usr/local/lib/python3.6/site-packages/requests/api.py", line 58, in request
return session.request(method=method, url=url, **kwargs)
File "/usr/local/lib/python3.6/site-packages/requests/sessions.py", line 508, in request
resp = self.send(prep, **send_kwargs)
File "/usr/local/lib/python3.6/site-packages/requests/sessions.py", line 618, in send
r = adapter.send(request, **kwargs)
File "/usr/local/lib/python3.6/site-packages/requests/adapters.py", line 440, in send
timeout=timeout
File "/usr/local/lib/python3.6/site-packages/urllib3/connectionpool.py", line 601, in urlopen
chunked=chunked)
File "/usr/local/lib/python3.6/site-packages/urllib3/connectionpool.py", line 346, in _make_request
self._validate_conn(conn)
File "/usr/local/lib/python3.6/site-packages/urllib3/connectionpool.py", line 850, in _validate_conn
conn.connect()
File "/usr/local/lib/python3.6/site-packages/urllib3/connection.py", line 314, in connect
cert_reqs=resolve_cert_reqs(self.cert_reqs),
File "/usr/local/lib/python3.6/site-packages/urllib3/util/ssl_.py", line 269, in create_urllib3_context
context.options |= options
File "/usr/lib64/python3.6/ssl.py", line 465, in options
super(SSLContext, SSLContext).options.__set__(self, value)
File "/usr/lib64/python3.6/ssl.py", line 465, in options
super(SSLContext, SSLContext).options.__set__(self, value)
File "/usr/lib64/python3.6/ssl.py", line 465, in options
super(SSLContext, SSLContext).options.__set__(self, value)
[Previous line repeated 317 more times]
RecursionError: maximum recursion depth exceeded while calling a Python object
In the above example I am using the requests library.
When I try to send mail using the smtplib library, I get:
File "/home/vasudeva/Codes/sanskrit/application.py", line 2115, in recover
if util.sendemail(user.email, code):
File "/home/vasudeva/Codes/sanskrit/util.py", line 112, in sendemail
server = smtplib.SMTP(HOST, PORT)
File "/usr/lib/python3.7/smtplib.py", line 251, in __init__
(code, msg) = self.connect(host, port)
File "/usr/lib/python3.7/smtplib.py", line 336, in connect
self.sock = self._get_socket(host, port, self.timeout)
File "/usr/lib/python3.7/smtplib.py", line 307, in _get_socket
self.source_address)
File "/home/vasudeva/Codes/sanskrit/env/lib/python3.7/site-packages/eventlet/green/socket.py", line 44, in create_connection
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
File "/home/vasudeva/Codes/sanskrit/env/lib/python3.7/site-packages/eventlet/support/greendns.py", line 517, in getaddrinfo
qname, addrs = _getaddrinfo_lookup(host, family, flags)
File "/home/vasudeva/Codes/sanskrit/env/lib/python3.7/site-packages/eventlet/support/greendns.py", line 490, in _getaddrinfo_lookup
raise err
File "/home/vasudeva/Codes/sanskrit/env/lib/python3.7/site-packages/eventlet/support/greendns.py", line 479, in _getaddrinfo_lookup
answer = resolve(host, qfamily, False, use_network=use_network)
File "/home/vasudeva/Codes/sanskrit/env/lib/python3.7/site-packages/eventlet/support/greendns.py", line 432, in resolve
raise EAI_EAGAIN_ERROR
File "/home/vasudeva/Codes/sanskrit/env/lib/python3.7/site-packages/eventlet/support/greendns.py", line 479, in _getaddrinfo_lookup
answer = resolve(host, qfamily, False, use_network=use_network)
File "/home/vasudeva/Codes/sanskrit/env/lib/python3.7/site-packages/eventlet/support/greendns.py", line 432, in resolve
raise EAI_EAGAIN_ERROR
socket.gaierror: [Errno -3] Lookup timed out
Everything works fine without gunicorn/eventlet, so I created a simple test app without websockets. The problem is still there: the app works fine with the flask run command, but the moment I run it with the gunicorn command, some features stop working.
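For reference, a diagnostic sketch that is not part of the original post: the gunicorn eventlet worker monkey-patches the standard library before it loads the application module, so the same failure can often be reproduced without gunicorn by patching first and then exercising requests. The URL below is illustrative, not the poster's JWKS_URL.

# repro_eventlet_ssl.py -- diagnostic sketch, not a fix: patch the way the
# gunicorn eventlet worker does, then make an HTTPS call with requests.
import eventlet
eventlet.monkey_patch()

import requests  # imported only after patching, as it is under the worker

# illustrative endpoint; the real JWKS_URL is not shown in the post
print(requests.get("https://www.google.com").status_code)

If this script hits the same RecursionError, the problem tracks eventlet's patching of the ssl module rather than gunicorn itself. The second traceback fails inside eventlet's greendns resolver; eventlet also honors an EVENTLET_NO_GREENDNS environment variable (it must be set before eventlet is imported, e.g. in the gunicorn environment), which falls back to the standard blocking resolver and can help rule greendns in or out.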
|
Posted over 4 years ago by edA-qa mort-ora-y
I want the access log for Flask, using eventlet, to be written to stdout. (This output is handled by journalctl or another wrapper.)
I can't figure out how to enable logging, though. With debug on, it writes the access log to the console; with debug disabled, it doesn't write it anywhere.
How do I configure the access log?
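For reference, a minimal sketch that is not from the original post, assuming the app is served through eventlet.wsgi directly: the server's log argument accepts a logging.Logger or a file-like object, so pointing it at sys.stdout sends the access log there. Host and port below are illustrative.

import sys
import eventlet
from eventlet import wsgi
from flask import Flask

app = Flask(__name__)

@app.route('/')
def index():
    return 'hello'

# a file-like object is accepted for `log`; sys.stdout lets journalctl
# (or another wrapper) capture the access log lines
wsgi.server(eventlet.listen(('0.0.0.0', 8000)), app, log=sys.stdout)

If the app is started through Flask-SocketIO instead, socketio.run(app, log_output=True) enables the same access logging outside debug mode (written to stderr by default).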
|
Posted over 4 years ago by Abhishek Soni
I am creating an application with Socket.IO and am having trouble with clients on slow internet connections.
I already handle lost-message delivery while a client is connected, but I need the connection to stay stable even when the internet is slow.
On the backend I am using Flask-SocketIO, configured with eventlet websocket support and Redis as the message_queue. RabbitMQ is said to be a more reliable option, so which one should I use for better stability?
Current Socket.IO configuration:
socketio = SocketIO(async_mode='eventlet', logger=True, engineio_logger=True, ping_timeout=300, ping_interval=3)
socketio.init_app(app, message_queue='redis://127.0.0.1:6379/')
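For reference, a sketch that is not part of the original post: the message_queue backend only coordinates events between server processes, so it is unlikely to change client disconnect behavior by itself. Switching to RabbitMQ is mostly a matter of the connection URL, since Flask-SocketIO accepts an AMQP URL here when the kombu package is installed. Credentials and host below are illustrative.

from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)

# same options as above, but the queue points at RabbitMQ instead of Redis;
# the URL is illustrative
socketio = SocketIO(async_mode='eventlet', logger=True, engineio_logger=True,
                    ping_timeout=300, ping_interval=3)
socketio.init_app(app, message_queue='amqp://guest:guest@127.0.0.1:5672//')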
We have a ping timeout of 5 minutes and a ping interval of 5 seconds; still, sometimes when the logs below appear, the client gets disconnected automatically:
> received event "ping" from 8339444f4dd84677b455fa025356757b [/chat]
> 8339444f4dd84677b455fa025356757b: Received packet CLOSE data None
I notice the client timeout has not elapsed, yet the client is still disconnected from the server.
Is it possible for a client to randomly disconnect on a shorter ping interval? (I have a 5-second ping interval.)
Or do we need to do some configuration on the client side?
I have been stuck on this issue for the last 2 months; any help/hints/findings would be appreciated!
Thank you
|
Posted almost 5 years ago by Dan N
We have very heavy I/O-bound tasks (disk writes), which led us to use a large set of workers (forked and monkey-patched) with a very low pool size to distribute the requests among them. The incoming requests are being pushed towards a particular worker instead of being distributed round-robin. Is there a way to accomplish round-robin distribution of the incoming requests?
|
Posted almost 5 years ago by lemonlin
I'm building a Flask app that displays real-time data visualizations of tweets based on user input. Right now the client is still hosted locally, and I'm just trying to get to the point where a JSON of a single tweet gets emitted from the server to the client. This is also my first time building a Flask app on my own, so I've been learning HTML/CSS/Node.js/JS along the way (be gentle with me).
My problem is with the backend/server. Here is the relevant Flask code:
from flask import Flask, request, render_template, session
from flask_socketio import SocketIO, emit
from time import sleep
import threading
from threading import Thread, Event, Lock
import eventlet
import json
import TweePoll_API_nostream

eventlet.monkey_patch(socket=True)

app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socket = SocketIO(app, async_mode=None, logger=True, engineio_logger=True)
streamer2 = TweePoll_API_nostream.getTweet()

thread = None
thread_lock = Lock()

def background_thread():
    count = 0
    while True:
        socket.sleep(10)
        x = streamer2.main(v)
        count += 1
        socket.emit('my_response',
                    {'data': x, 'count': count},
                    namespace='/output')

@app.route('/', methods = ['GET','POST'])
def index():
    return(render_template("index.html", async_mode = socket.async_mode))

@socket.on('my_event', namespace='/output')
def test_message(message):
    global v
    v = message['data']
    print(v)
    global thread
    with thread_lock:
        if thread is None:
            thread = socket.start_background_task(background_thread)
    emit('my_response', {'data': v, 'count': 0})

if __name__ == '__main__':
    socket.run(app, port = 5000, debug = True)
The error occurs on x = streamer2.main(v) in the background_thread() function. Here is the relevant code for the tweepy streamer:
import tweepy
import config
import TweePoll_NLP_single
import json

MLObj = TweePoll_NLP_single.ReadyForSQL()

class getTweet(object):
    def __init__(self):
        self.auth = tweepy.auth.OAuthHandler(config.ckey, config.csecret)
        self.auth.set_access_token(config.atoken, config.asecret)
        self.api = tweepy.API(self.auth)

    def main(self, keywrd):
        tweet = str()
        user = str()
        time = str()
        status = self.api.search(q = [keywrd + " @realDonaldTrump", keywrd + " @JoeBiden"], count = 1, result_type = "recent", lang = 'en')[0]
        if hasattr(status, "retweeted_status"):  # Check if Retweet
            try:
                tweet = status.retweeted_status.extended_tweet["full_text"]
            except AttributeError:
                tweet = status.retweeted_status.text
        else:
            try:
                tweet = status.extended_tweet["full_text"]
            except AttributeError:
                tweet = status.text
        user = status.author.screen_name
        time = str(status.created_at)
        prez = MLObj.whichPrez(tweet)
        clean = MLObj.kleenex(tweet)
        NRCemo = MLObj.NRCEmo(clean)
        printDF = json.dumps({"tweet":tweet, "clean":clean,
                              "prez":prez, "NRCEmo":NRCemo, "time":time, "username":user})
        return(printDF)
This code isn't reproducible, because it connects to another file for the NLP/sentiment tagging, plus the frontend isn't included. Let me know if I need to send a link to my github or something to fix this.
Here is the error output:
Exception in thread Thread-6:
Traceback (most recent call last):
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\tweepy\binder.py", line 183, in execute
resp = self.session.request(self.method,
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\sessions.py", line 530, in request
resp = self.send(prep, **send_kwargs)
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\sessions.py", line 643, in send
r = adapter.send(request, **kwargs)
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\adapters.py", line 439, in send
resp = conn.urlopen(
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 670, in urlopen
httplib_response = self._make_request(
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 381, in _make_request
self._validate_conn(conn)
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 976, in _validate_conn
conn.connect()
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connection.py", line 342, in connect
self.ssl_context = create_urllib3_context(
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\util\ssl_.py", line 276, in create_urllib3_context
context.options |= options
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
[Previous line repeated 481 more times]
RecursionError: maximum recursion depth exceeded
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\threading.py", line 932, in _bootstrap_inner
self.run()
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\swagj\Documents\GitHub\TweePoll\TweePoll_Server_V2.py", line 28, in background_thread
x = streamer2.main(v)
File "C:\Users\swagj\Documents\GitHub\TweePoll\TweePoll_API_nostream.py", line 21, in main
status = self.api.search(q = [keywrd + " @realDonaldTrump", keywrd + " @JoeBiden"], count = 1, result_type = "recent", lang = 'en')[0]
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\tweepy\binder.py", line 250, in _call
return method.execute()
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\tweepy\binder.py", line 191, in execute
six.reraise(TweepError, TweepError('Failed to send request: %s' % e), sys.exc_info()[2])
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\six.py", line 702, in reraise
raise value.with_traceback(tb)
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\tweepy\binder.py", line 183, in execute
resp = self.session.request(self.method,
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\sessions.py", line 530, in request
resp = self.send(prep, **send_kwargs)
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\sessions.py", line 643, in send
r = adapter.send(request, **kwargs)
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\adapters.py", line 439, in send
resp = conn.urlopen(
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 670, in urlopen
httplib_response = self._make_request(
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 381, in _make_request
self._validate_conn(conn)
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 976, in _validate_conn
conn.connect()
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connection.py", line 342, in connect
self.ssl_context = create_urllib3_context(
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\util\ssl_.py", line 276, in create_urllib3_context
context.options |= options
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
File "C:\Users\swagj\AppData\Local\Programs\Python\Python38\lib\ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
[Previous line repeated 481 more times]
tweepy.error.TweepError: Failed to send request: maximum recursion depth exceeded
Final dump, here is my info:
Windows 10
python = 3.8.3
eventlet = 0.25.2
tweepy = 3.8.0
websocket-client = 0.57.0
websockets = 8.1
Flask = 1.1.2
Flask-SocketIO = 4.3.0
I have no idea why this doesn't work, but I have a feeling it has to do with eventlet. Any clues? Happy to give more info if needed :)
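One frequently cited cause of this kind of RecursionError, offered here as an assumption rather than a confirmed diagnosis for this exact setup, is that eventlet.monkey_patch() runs after modules that pull in requests/ssl have already been imported; above, TweePoll_API_nostream imports tweepy (and therefore requests) before the patch is applied. A sketch of the patch-first import order:

# patch-first layout (sketch, not a confirmed fix): eventlet patches the
# standard library before anything that imports requests/ssl is loaded
import eventlet
eventlet.monkey_patch()

from flask import Flask
from flask_socketio import SocketIO
import TweePoll_API_nostream  # tweepy/requests are imported only after patching

app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socket = SocketIO(app, async_mode=None, logger=True, engineio_logger=True)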
|
Posted almost 5 years ago by Daniel Greenberg
I'm basing my app on the Flask-MQTT example, and it's generally working fine, except that when I run the program it runs twice and creates two instances of the MQTT client class.
Of course I set use_reloader to False. The background process (and the rest of the code after if __name__ == '__main__':) only runs once. It seems like the only thing being doubled is the MQTT client, which proceeds to connect twice from two separate instances. Then it subscribes twice and gets every message twice. No bueno.
Is there a good way to make sure the program is only being run once (I assume the mqtt = Mqtt(app) line is being run twice), or at least prevent two clients from being created?
Image of Heroku logs showing double client connection (and two workers?)
import eventlet
from flask import Flask, render_template, current_app
from flask_mqtt import Mqtt, MQTT_LOG_ERR
from flask_socketio import SocketIO
from flask_bootstrap import Bootstrap
eventlet.monkey_patch()
import json
from flask_cors import CORS
from threading import Lock
import os
import time
from dotenv import load_dotenv
import message_handler
import ui_data
import background_processes

load_dotenv(verbose=True)

app = Flask(__name__, template_folder='./templates')
app.config['SECRET'] = 'my secret key'
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.config['MQTT_BROKER_URL'] = os.environ.get('CLOUDMQTT_URL')
app.config['MQTT_USERNAME'] = os.environ.get('CLOUDMQTT_USERNAME')
app.config['MQTT_PASSWORD'] = os.environ.get('CLOUDMQTT_PW')
app.config['MQTT_CLIENT_ID'] = ''
app.config['MQTT_KEEPALIVE'] = 20
app.config['MQTT_CLEAN_SESSION'] = True
app.config['MQTT_TLS_ENABLED'] = True
app.config['MQTT_BROKER_PORT'] = int(os.environ.get('CLOUDMQTT_SSL_PORT'))
app.config['MQTT_TLS_CA_CERTS'] = 'ca.crt'

mqtt = Mqtt(app, mqtt_logging=False)
CORS(app)
socketio = SocketIO(app, cors_allowed_origins='*', engineio_logger=False, logger=False)
bootstrap = Bootstrap(app)
app.config['CORS_HEADERS'] = 'Content-Type'

@app.route('/')
def index():
    return render_template('index.html')

socket_background_thread = None
thread_lock = Lock()

def update_ui(app):
    last_update_data = None
    with app.app_context():
        while True:
            socketio.sleep(10)
            update_data = ui_data.get_update_data()
            if last_update_data is not None and update_data != last_update_data:
                data = dict(
                    payload=update_data
                )
                socketio.emit('mqtt_message', data=data)
                socketio.sleep(0)
            last_update_data = update_data

@socketio.on('connect')
def start_notifications_thread():
    global socket_background_thread
    with thread_lock:
        if socket_background_thread is None:
            socket_background_thread = socketio.start_background_task(update_ui, current_app._get_current_object())

@mqtt.on_connect()
def handle_connect(client, userdata, flags, rc):
    topic = 'test'
    mqtt.subscribe(topic)

@mqtt.on_disconnect()
def handle_disconnect():
    print(f"MQTT - disconnecting ")
    pass

def process_message(message):
    message_text = message.payload.decode()
    message_time = time.time()
    message_handler.parse_and_handle_message(message_text, message_time)
    socketio.sleep()

@mqtt.on_message()
def handle_mqtt_message(client, userdata, message):
    message_text = message.payload.decode()
    print(f'received message: {message_text}')
    background_thread = socketio.start_background_task(process_message, message)

if __name__ == '__main__':
    background_process = socketio.start_background_task(background_processes.start_background_threads)
    port = int(os.environ.get('PORT', 33507))
    host = '0.0.0.0'
    socketio.run(app, host=host, port=port, use_reloader=False, debug=False)
|
Posted almost 5 years ago by notacorn
I've written a Flask app that runs perfectly fine, exactly as I want it to, when using Flask's development server via flask run. It's a long web-scraping process using a lot of Google Cloud libraries.
After deploying to Google App Engine, I figured out that I had to wrap my Flask API with gunicorn. Okay, no problem: I installed it locally and ran it the same way I did before. But suddenly I am getting a completely new error which I have no idea how to debug. Here is the stack trace:
[2020-07-20 05:26:45 -0400] [7354] [INFO] Starting gunicorn 20.0.4
[2020-07-20 05:26:45 -0400] [7354] [INFO] Listening at: http://127.0.0.1:8000 (7354)
[2020-07-20 05:26:45 -0400] [7354] [INFO] Using worker: eventlet
[2020-07-20 05:26:45 -0400] [7356] [INFO] Booting worker with pid: 7356
WARNING:root:course-collect manually triggered
ERROR:grpc._plugin_wrapping:AuthMetadataPluginCallback "" raised exception!
Traceback (most recent call last):
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/grpc/_plugin_wrapping.py", line 77, in __call__
self._metadata_plugin(
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/google/auth/transport/grpc.py", line 84, in __call__
callback(self._get_authorization_headers(context), None)
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/google/auth/transport/grpc.py", line 70, in _get_authorization_headers
self._credentials.before_request(
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/google/auth/credentials.py", line 133, in before_request
self.refresh(request)
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/google/oauth2/service_account.py", line 359, in refresh
access_token, expiry, _ = _client.jwt_grant(request, self._token_uri, assertion)
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/google/oauth2/_client.py", line 153, in jwt_grant
response_data = _token_endpoint_request(request, token_uri, body)
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/google/oauth2/_client.py", line 105, in _token_endpoint_request
response = request(method="POST", url=token_uri, headers=headers, body=body)
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/google/auth/transport/requests.py", line 180, in __call__
response = self.session.request(
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/requests/sessions.py", line 530, in request
resp = self.send(prep, **send_kwargs)
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/requests/sessions.py", line 643, in send
r = adapter.send(request, **kwargs)
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/requests/adapters.py", line 439, in send
resp = conn.urlopen(
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 670, in urlopen
httplib_response = self._make_request(
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 381, in _make_request
self._validate_conn(conn)
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 976, in _validate_conn
conn.connect()
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/urllib3/connection.py", line 342, in connect
self.ssl_context = create_urllib3_context(
File "/mnt/c/Users/*******/Projects/course_collect/venv/lib/python3.8/site-packages/urllib3/util/ssl_.py", line 276, in create_urllib3_context
context.options |= options
File "/usr/lib/python3.8/ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
File "/usr/lib/python3.8/ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
File "/usr/lib/python3.8/ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
[Previous line repeated 476 more times]
RecursionError: maximum recursion depth exceeded while calling a Python object
malloc(): mismatching next->prev_size (unsorted)
[2020-07-20 05:27:29 -0400] [7361] [INFO] Booting worker with pid: 7361
I'm currently running my app with the command gunicorn --worker-class eventlet app:app (with app.py and app = Flask(__name__)).
When I switch to just gunicorn app:app, gunicorn runs the same as Flask.
But the problem is, given that this API's endpoints take an "undefined amount of time", make "blocking calls", and otherwise request a lot of webpages, my app seems like the cookie-cutter case for using async (eventlet/gevent) workers.
Choosing a Worker Type
The default synchronous workers assume that your application is resource-bound in terms of CPU and network bandwidth. Generally this means that your application shouldn’t do anything that takes an undefined amount of time. An example of something that takes an undefined amount of time is a request to the internet. At some point the external network will fail in such a way that clients will pile up on your servers. So, in this sense, any web application which makes outgoing requests to APIs will benefit from an asynchronous worker.
This resource bound assumption is why we require a buffering proxy in front of a default configuration Gunicorn. If you exposed synchronous workers to the internet, a DOS attack would be trivial by creating a load that trickles data to the servers. For the curious, Hey is an example of this type of load.
Some examples of behavior requiring asynchronous workers:
Applications making long blocking calls (Ie, external web services)
Serving requests directly to the internet
Streaming requests and responses
Long polling
Web sockets
Comet
Can someone point out the reason (if it's somewhat obvious) why async workers break my application (pasted below)?
app.py
from flask import Flask
import logging
from firebase_admin import firestore, _apps, initialize_app, credentials
from google.cloud.storage import Client
from google.cloud.scheduler_v1 import CloudSchedulerClient
from google.api_core.exceptions import NotFound, GoogleAPICallError, PermissionDenied

app = Flask(__name__)

@app.route('/init')
def start_process():
    start_time = time()
    storage_client = Client()
    scheduler_client = CloudSchedulerClient()
    scheduler_path = scheduler_client.location_path(config.PROJECT_ID, config.REGION_ID)
    cred = credentials.ApplicationDefault()
    try:
        scheduler_client.delete_job(f"{scheduler_path}/jobs/{config.CRON_NAME}")
    except GoogleAPICallError or PermissionDenied:
        logging.warning("course-collect manually triggered")
    # I had more code here but even all commented out, this error still happened
    return "200 OK"
|
Posted almost 5 years ago by Prutheus
I spawn an eventlet greenthread. In the spawned function, normal threads are spawned. When I kill the eventlet thread, I want all threads of that execution to be stopped/killed. How is that possible?
evt = eventlet.spawn(funcX)
# [...]
# inside funcX
threading.Thread(....)
# .. later ...
evt.kill()
# but: the Thread spawned in funcX is still running ... :(
How do I solve this? Any ideas?
If you do not understand, please add a comment so I know what I must explain in more detail. It would be nice if someone could help me. What I need to do is: kill an eventlet thread and all its child threads ... :(
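A sketch of one common workaround, not from the original post and offered under the assumption that cooperative shutdown is acceptable: OS threads in Python cannot be force-killed, so the usual pattern is to hand the child threads a stop flag and set it when the greenthread is killed (kill() raises GreenletExit inside the greenthread, which runs its finally block). All names below are illustrative.

import threading
import eventlet

def worker_loop(stop):
    # hypothetical worker: do a bounded unit of work, then re-check the flag
    while not stop.is_set():
        stop.wait(0.1)

def funcX():
    stop = threading.Event()
    workers = [threading.Thread(target=worker_loop, args=(stop,)) for _ in range(2)]
    for t in workers:
        t.start()
    try:
        while True:
            eventlet.sleep(1)  # the greenthread's own work would go here
    finally:
        # evt.kill() raises GreenletExit here, so this block runs:
        # signal the workers and wait for them to finish
        stop.set()
        for t in workers:
            t.join()

evt = eventlet.spawn(funcX)
eventlet.sleep(2)
evt.kill()  # child threads exit shortly after the flag is set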
|
Posted almost 5 years ago by mm360x
I am using Python 3.8.3.
My program is a chat application using python-socketio with a Redis queue, served by the eventlet server in a Docker container and load balanced with nginx.
The program works fine on my local computer.
But when I try to run it in an AWS EC2 Linux instance, the pyfcm library does not work when trying to send a push notification, and I get an SSL error.
I am using eventlet.monkey_patch(socket=True) at the start of the program.
This is the error:
Exception in thread Thread-8:
container | Traceback (most recent call last):
container | File "/usr/local/lib/python3.8/threading.py", line 932, in _bootstrap_inner
container | self.run()
container | File "/usr/local/lib/python3.8/threading.py", line 870, in run
container | self._target(*self._args, **self._kwargs)
container | File "/usr/local/lib/python3.8/site-packages/socketio/server.py", line 679, in _handle_event_internal
container | r = server._trigger_event(data[0], namespace, sid, *data[1:])
container | File "/usr/local/lib/python3.8/site-packages/socketio/server.py", line 708, in _trigger_event
container | return self.handlers[namespace][event](*args)
container | File "server.py", line 37, in send_message_event
container | result = push_service.notify_single_device(registration_id=mid['apns_result']['registration_id'], message_title=mid['apns_result']['message_title'], message_body=mid['apns_result']['message_body'])
container | File "/usr/local/lib/python3.8/site-packages/pyfcm/fcm.py", line 116, in notify_single_device
container | self.send_request([payload], timeout)
container | File "/usr/local/lib/python3.8/site-packages/pyfcm/baseapi.py", line 312, in send_request
container | response = self.do_request(payload, timeout)
container | File "/usr/local/lib/python3.8/site-packages/pyfcm/baseapi.py", line 302, in do_request
container | response = self.requests_session.post(self.FCM_END_POINT, data=payload, timeout=timeout)
container | File "/usr/local/lib/python3.8/site-packages/requests/sessions.py", line 578, in post
container | return self.request('POST', url, data=data, json=json, **kwargs)
container | File "/usr/local/lib/python3.8/site-packages/requests/sessions.py", line 530, in request
container | resp = self.send(prep, **send_kwargs)
container | File "/usr/local/lib/python3.8/site-packages/requests/sessions.py", line 643, in send
container | r = adapter.send(request, **kwargs)
container | File "/usr/local/lib/python3.8/site-packages/requests/adapters.py", line 439, in send
container | resp = conn.urlopen(
container | File "/usr/local/lib/python3.8/site-packages/urllib3/connectionpool.py", line 670, in urlopen
container | httplib_response = self._make_request(
container | File "/usr/local/lib/python3.8/site-packages/urllib3/connectionpool.py", line 381, in _make_request
container | self._validate_conn(conn)
container | File "/usr/local/lib/python3.8/site-packages/urllib3/connectionpool.py", line 976, in _validate_conn
container | conn.connect()
container | File "/usr/local/lib/python3.8/site-packages/urllib3/connection.py", line 342, in connect
container | self.ssl_context = create_urllib3_context(
container | File "/usr/local/lib/python3.8/site-packages/urllib3/util/ssl_.py", line 276, in create_urllib3_context
container | context.options |= options
container | File "/usr/local/lib/python3.8/ssl.py", line 602, in options
container | super(SSLContext, SSLContext).options.__set__(self, value)
container | File "/usr/local/lib/python3.8/ssl.py", line 602, in options
container | super(SSLContext, SSLContext).options.__set__(self, value)
container | File "/usr/local/lib/python3.8/ssl.py", line 602, in options
container | super(SSLContext, SSLContext).options.__set__(self, value)
container | [Previous line repeated 481 more times]
container | RecursionError: maximum recursion depth exceeded
My program: I have tried monkey patching before any other imports, but then my websocket connections do not work and my database connection times out. I have also tried the pyfcm import both before and after the monkey patch, but nothing seems to work.
import eventlet, socketio, save_history, pyfcm
from pyfcm import FCMNotification

eventlet.monkey_patch(socket= True)

#sio = socketio.Server()
mgr = socketio.RedisManager('redis://redis:PORT/0')
sio = socketio.Server(client_manager=mgr, async_mode='eventlet', logger=True)
app = socketio.WSGIApp(sio)

# Join Room when client comes online
@sio.on('subscribe')
def join_room_event(sid,data):
    user = data['user_id']
    channel = data['channel']
    if save_history.check_user_channel(channel, user):
        sio.enter_room(sid,channel)
        sio.logger.log (level=20,msg="User " + str(user) + " in channel: " + str(channel) + " Joined and Verified with SID " + str(sid))
        return True
    else:
        sio.logger.log (level=20,msg="User " + str(user) + " NOT in channel: " + str(channel) + " with SID " + str(sid))
        return False

@sio.on('send_message')
def send_message_event(sid, data):
    channel_id = data['channel']
    user = data['user_id']
    message = data['message']
    if user and channel_id and message:
        mid = save_history.save_message(channel_id, int(user), message)
        data['message_id'] = mid['message_id']
        data['time_sent'] = mid['time_sent']
        sio.logger.log (level=20,msg='{Message: ' + message + ' with MID: ' +str(mid)+ ' sent by ' + str(user) + ' in channel ' + str(channel_id) + '}')
        if mid['apns_result'] != 'no_apns':
            push_service = FCMNotification(api_key='')
            result = push_service.notify_single_device(registration_id=mid['apns_result']['registration_id'], message_title=mid['apns_result']['message_title'], message_body=mid['apns_result']['message_body'])
            sio.logger.log (level=20,msg=result)
        sio.emit('receive_message', data, room=channel_id)
        return data
    else:
        sio.emit('receive_message', { 'message' : "All params not present" } , room=channel_id)
        return { 'message' : "All params not present" }
Any solutions will be greatly appreciated! Thank you
|