cassandra.cluster.NoHostAvailable unknown exception









Below is my Python code. It reads keys from a CSV file and deletes the corresponding rows in the database. It runs fine for a while, then throws the timeout error below. I don't see any GC issues, and the health of the node looks fine.



Traceback (most recent call last):
  File "/Users/XXX/Downloads/XXX/XXX", line 65, in <module>
    parse_file(datafile)
  File "/Users/XXX/Downloads/XXX/XXX", line 49, in parse_file
    session = cluster.connect('XXX')
  File "cassandra/cluster.py", line 1193, in cassandra.cluster.Cluster.connect (cassandra/cluster.c:17796)
  File "cassandra/cluster.py", line 1240, in cassandra.cluster.Cluster._new_session (cassandra/cluster.c:18952)
  File "cassandra/cluster.py", line 1980, in cassandra.cluster.Session.__init__ (cassandra/cluster.c:35191)
cassandra.cluster.NoHostAvailable: ("Unable to connect to any servers using keyspace 'qualys_ioc'", ['127.0.0.1'])


Python Code:



import argparse
import sys
import itertools
import codecs
import uuid
import os
import subprocess

try:
    import cassandra
    import cassandra.concurrent
except ImportError:
    sys.exit('Python Cassandra driver not installed. You might try "pip install cassandra-driver".')
from cassandra.cluster import Cluster, ResultSet, Session
from cassandra.policies import DCAwareRoundRobinPolicy
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import ConsistencyLevel
from cassandra import ReadTimeout

datafile = "/Users/XXX/adf.csv"

if os.path.exists(datafile):
    os.remove(datafile)


def dumptableascsv():
    os.system(
        "sh /Users/XXX/Documents/dse-5.0.14/bin/cqlsh 127.0.0.1 9042 -u cassandra -p cassandra "
        "-e \"COPY XXX.agent_delta_fragment(agent_id, delta_id, last_fragment_id, processed) "
        "TO '/Users/XXX/adf.csv' WITH HEADER = true;\"")
    # print datafile


def parse_file(datafile):
    global fields
    data = []
    with open(datafile, "rb") as f:
        header = f.readline().split(",")
        # Loop through remaining lines in file object f
        for line in f:
            fields = line.split(",")  # Split line into list
            # print fields[3]
            if fields[3]:
                print "connect"
                print fields[0], fields[1], fields[2], fields[3]
                auth_provider = PlainTextAuthProvider(username='cassandra', password='cassandra')
                cluster = Cluster(['127.0.0.1'],
                                  load_balancing_policy=DCAwareRoundRobinPolicy(local_dc='Cassandra'),
                                  port=9042, auth_provider=auth_provider, connect_timeout=10000)
                session = cluster.connect('XXX')
                # session = cluster.connect('XXX')
                # session.execute("select * from XXX.agent_delta_fragment LIMIT 1")
                # rows = session.execute('select agent_id from XXX.agent_delta_fragment LIMIT 1')
                # for row in rows:
                #     print row.agent_id
                # batch = BatchStatement("DELETE FROM XXX.agent_delta_fragment_detail_test WHERE agent_id=%s and delta_id=%s and fragment_id=%s", (uuid.UUID(fields[0]), uuid.UUID(fields[1]), int(fields[3])))
                session.execute("DELETE FROM XXX.agent_delta_fragment_detail WHERE agent_id=%s and delta_id=%s and fragment_id=%s",
                                (uuid.UUID(fields[0]), uuid.UUID(fields[1]), int(fields[2])),
                                timeout=1000000)
                # session.execute(batch)
            else:
                print fields[3]
                print "connect-False"
                # print fields[3]


dumptableascsv()
parse_file(datafile)

python cassandra datastax-enterprise cassandra-3.0 datastax-python-driver

edited Nov 10 at 4:39 by lucascaro
asked Nov 10 at 1:01 by Bobbie

  • updated formatting. Please note that questions seeking debugging help ("why isn't this code working?") must include the desired behavior, a specific problem or error and the shortest code necessary to reproduce it in the question itself. Questions without a clear problem statement are not useful to other readers. See: Minimal, Complete, and Verifiable example help center
    – lucascaro
    Nov 10 at 4:39

  • You haven't provided enough information to diagnose the source of the timeout, but I'd guess it is related to tombstones as you are potentially doing a lot of deletions. You should read up on tombstones in Cassandra if you haven't already: thelastpickle.com/blog/2016/07/27/… Something else to consider: opening new sessions is very slow. You only really need one, so you can open it outside your loop rather than creating lots of new sessions.
    – Justin Cameron
    Nov 12 at 0:21
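
Following up on the comment above, here is a minimal sketch (not the original poster's code) of how the loop could reuse a single Cluster/Session and a prepared DELETE statement instead of reconnecting for every CSV row. It assumes the same keyspace ('XXX'), table, column order, and credentials as in the question; the helper name delete_fragments is just for illustration.

import csv
import uuid

from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.policies import DCAwareRoundRobinPolicy


def delete_fragments(datafile):
    # Hypothetical helper: connect once and reuse the session for every row.
    auth_provider = PlainTextAuthProvider(username='cassandra', password='cassandra')
    cluster = Cluster(['127.0.0.1'], port=9042,
                      load_balancing_policy=DCAwareRoundRobinPolicy(local_dc='Cassandra'),
                      auth_provider=auth_provider)
    session = cluster.connect('XXX')  # keyspace name as in the question

    # Prepare the DELETE once; only the bound values change per row.
    delete_stmt = session.prepare(
        "DELETE FROM XXX.agent_delta_fragment_detail "
        "WHERE agent_id=? AND delta_id=? AND fragment_id=?")

    try:
        with open(datafile) as f:
            reader = csv.reader(f)
            next(reader)  # skip the CSV header
            for row in reader:
                # Same column layout as the question: agent_id, delta_id, last_fragment_id, processed
                if not row or not row[3]:
                    continue
                session.execute(delete_stmt,
                                (uuid.UUID(row[0]), uuid.UUID(row[1]), int(row[2])))
    finally:
        cluster.shutdown()


delete_fragments("/Users/XXX/adf.csv")

If throughput becomes a concern, the already-imported cassandra.concurrent module provides execute_concurrent_with_args, which can run the same prepared statement over a list of parameter tuples in parallel; the single-session restructuring above is the main point of the comment, though.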















